Example No. 1
    def getMember(self, objectname):
        """Get member for this ObjectCollection.

        Checks if requested name is a subdir (see above)
        """

        if self.prefix and self.prefix not in objectname:
            objectname = self.prefix + objectname
        if self.is_subdir(objectname):
            return ObjectCollection(self.container, self.environ,
                                    prefix=objectname)
        if self.environ.get('REQUEST_METHOD') in ['PUT']:
            return ObjectResource(self.container, objectname,
                                  self.environ, self.objects)
        try:
            client.head_object(self.storage_url,
                               self.auth_token,
                               self.container,
                               objectname,
                               http_conn=self.http_connection)
            return ObjectResource(self.container, objectname,
                                  self.environ, self.objects)
        except client.ClientException:
            pass
        return None
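
Note: every example on this page is built around swiftclient's client.head_object, which sends a HEAD request for the object and returns the response headers as a dict with lowercased keys; a 404 (or any other error status) surfaces as client.ClientException. A minimal existence-check sketch, with placeholder names for the URL, token, container and object:

from swiftclient import client

def object_exists(storage_url, auth_token, container, name):
    # HEAD is cheap: no object body is transferred, only headers
    try:
        client.head_object(storage_url, auth_token, container, name)
        return True
    except client.ClientException:
        return False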
Example No. 2
    def handleCopy(self, destPath, depthInfinity):
        dst = '/'.join(destPath.split('/')[2:])

        # probe the destination first; a missing object just raises
        # ClientException, which is deliberately ignored here
        try:
            client.head_object(self.storage_url,
                               self.auth_token,
                               self.container,
                               dst,
                               http_conn=self.http_connection)
        except client.ClientException:
            pass

        headers = {'X-Copy-From': self.path}
        try:
            client.put_object(self.storage_url,
                              self.auth_token,
                              self.container,
                              dst,
                              headers=headers,
                              http_conn=self.http_connection)
            if self.environ.get("HTTP_OVERWRITE", '') != "T":
                raise DAVError(HTTP_CREATED)
            return True
        except client.ClientException:
            return False
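
The copy in handleCopy relies on Swift's server-side copy: a PUT with an X-Copy-From header and an empty body duplicates an existing object, metadata included. A stripped-down sketch of the same call, with hypothetical container and object names:

copy_headers = {'X-Copy-From': 'source-container/source-object'}
# Swift performs the copy server side; no object data passes
# through the client
client.put_object(storage_url, auth_token, 'dest-container', 'dest-object',
                  contents='', headers=copy_headers)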
Example No. 3
    def createCollection(self, name):
        """Create a pseudo-folder."""
        if self.path:
            tmp = self.path.split('/')
            name = '/'.join(tmp[2:]) + '/' + name
        name = name.strip('/')
        try:
            client.head_object(self.storage_url,
                               self.auth_token,
                               self.container,
                               name,
                               http_conn=self.http_connection)
            raise dav_error.DAVError(dav_error.HTTP_METHOD_NOT_ALLOWED)
        except client.ClientException:
            pass

        try:
            client.head_object(self.storage_url,
                               self.auth_token,
                               self.container,
                               name + '/',
                               http_conn=self.http_connection)
            raise dav_error.DAVError(dav_error.HTTP_METHOD_NOT_ALLOWED)
        except client.ClientException:
            pass

        client.put_object(self.storage_url,
                          self.auth_token,
                          self.container,
                          name + '/',
                          content_type='application/directory',
                          http_conn=self.http_connection)
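
Swift has no real directories; createCollection just writes a zero-byte marker object whose name ends in '/' and whose content type is 'application/directory'. One way such pseudo-folders could later be picked out of a delimiter listing (container name is a placeholder):

headers, listing = client.get_container(storage_url, auth_token,
                                        'mycontainer', delimiter='/')
for entry in listing:
    if 'subdir' in entry:
        # common prefix rolled up by the delimiter, e.g. 'photos/'
        print entry['subdir']
    elif entry.get('content_type') == 'application/directory':
        # explicit pseudo-folder marker object
        print entry['name']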
Example No. 4
    def createCollection(self, name):
        """Create a pseudo-folder."""
        if self.path:
            tmp = self.path.split('/')
            name = '/'.join(tmp[2:]) + '/' + name
        name = name.strip('/')
        try:
            client.head_object(self.storage_url,
                               self.auth_token,
                               self.container,
                               name,
                               http_conn=self.http_connection)
            raise dav_error.DAVError(dav_error.HTTP_METHOD_NOT_ALLOWED)
        except client.ClientException:
            pass

        try:
            client.head_object(self.storage_url,
                               self.auth_token,
                               self.container,
                               name + '/',
                               http_conn=self.http_connection)
            raise dav_error.DAVError(dav_error.HTTP_METHOD_NOT_ALLOWED)
        except client.ClientException:
            pass

        client.put_object(self.storage_url,
                          self.auth_token,
                          self.container,
                          sanitize(name).rstrip('/') + '/',
                          content_type='application/directory',
                          http_conn=self.http_connection)
Example No. 5
    def getMember(self, objectname):
        """Get member for this ObjectCollection.

        Checks if requested name is a subdir (see above)
        """

        if self.prefix and self.prefix not in objectname:
            objectname = self.prefix + objectname
        if self.is_subdir(objectname):
            return ObjectCollection(self.container,
                                    self.environ,
                                    prefix=objectname)
        if self.environ.get('REQUEST_METHOD') in ['PUT']:
            return ObjectResource(self.container, objectname, self.environ,
                                  self.objects)
        try:
            client.head_object(self.storage_url,
                               self.auth_token,
                               self.container,
                               objectname,
                               http_conn=self.http_connection)
            return ObjectResource(self.container, objectname, self.environ,
                                  self.objects)
        except client.ClientException:
            pass
        return None
Example No. 6
def get_status(request, container, object_name):
    """Download an object from Swift
    :param container: container of swift where object is stored
    :param object_name: ID of object
    :return: object bytestream
    """
    try:
        if not request.session.get('storage_url') or not request.session.get('auth_token'):
            return Response('Please contact your administrator',
                            status=status.HTTP_401_UNAUTHORIZED)
        storage_url, auth_token = _get_auth_data(request.session)
        obj = {object_name: {}}
        # TODO: Currently using swift as first option, switch to in-memory
        headers = client.head_object(storage_url, auth_token, container, object_name)
        vid_format = 'mp4'
        id_header = 'x-object-meta-' + vid_format + '-id'
        if id_header in headers:
            video_headers = client.head_object(storage_url, auth_token, container, headers[id_header])
            obj[object_name][vid_format + 'status'] = video_headers.get(
                'x-object-meta-status', 0)
        else:
            # no transcoded copy is registered for this object
            obj[object_name]['status'] = 'DONE'
            return Response(obj, status=status.HTTP_200_OK)
        return Response(obj)
    except swift_exception.ClientException as e:
        print e
        return Response('Please contact your administrator',
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except Exception as e:
        print e
        return Response(str(e), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
Example No. 7
    def exists(self, name):
        """
        Returns True if a file referenced by the given name already exists in
        the storage system, or False if the name is available for a new file.
        """
        try:
            (host, port, path, is_ssl) = self.connection.connection_args
            scheme = 'https' if is_ssl else 'http'
            storage_url = '%s://%s:%d/%s' % (scheme, host, port, path)
            head_object(storage_url, self.connection.token,
                        self.container_name, name)
            return True
        except ClientException:
            return False
Example No. 8
    def get_headers(self):
        """Execute HEAD object request.

        Since this info is used in different methods (see below),
        do it once and then use this info.
        """

        if self.headers is None:
            data = self.objects.get(self.objectname)
            if data:
                self.headers = {
                    'content-length': data.get('bytes'),
                    'etag': data.get('hash'),
                }
            else:
                try:
                    self.headers = client.head_object(
                        self.storage_url,
                        self.auth_token,
                        self.container,
                        self.objectname,
                        http_conn=self.http_connection)
                except client.ClientException:
                    self.headers = {}
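
get_headers memoizes the HEAD result in self.headers so the per-property accessors in the original class don't repeat the request. A hypothetical accessor built on top of it might look like this (getContentLength is an assumed name, not taken from the source):

    def getContentLength(self):
        # return the object size in bytes, or None when the HEAD failed
        self.get_headers()
        length = self.headers.get('content-length')
        return int(length) if length is not None else None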
Example No. 9
def _set_headers(storage_url, auth_token, container, object_name, deleted):
    header = client.head_object(storage_url, auth_token, container,
                                object_name)
    new_header = {
        'X-Object-Meta-Deleted': deleted,
        'X-Object-Meta-Format': header['x-object-meta-format'],
        'X-Object-Meta-Resolution': header['x-object-meta-resolution'],
        'X-Object-Meta-Name': header['x-object-meta-name'],
        'X-Object-Meta-Type': header['x-object-meta-type']
    }
    copy_of = None
    if header['x-object-meta-type'] == 'thumbnail':
        new_header['X-Object-Meta-Original'] = header['x-object-meta-original']
        copy_of = header['x-object-meta-original']
    if header['x-object-meta-type'] == 'original':
        new_header['X-Object-Meta-Thumb'] = header['x-object-meta-thumb']
        copy_of = header['x-object-meta-thumb']
    client.post_object(storage_url,
                       auth_token,
                       container,
                       object_name,
                       headers=new_header)
    return copy_of
Example No. 10
    def get_headers(self):
        """Execute HEAD object request.

        Since this info is used in different methods (see below),
        do it once and then use this info.
        """

        if self.headers is None:
            data = self.objects.get(self.objectname)
            if data:
                self.headers = {'content-length': data.get('bytes'),
                                'etag': data.get('hash'),
                                'last_modified': data.get('last_modified'),
                                }
            else:
                try:
                    self.headers = client.head_object(
                        self.storage_url,
                        self.auth_token,
                        self.container,
                        self.objectname,
                        http_conn=self.http_connection)
                except client.ClientException:
                    self.headers = {}
Example No. 11
    def test_async_updates_after_PUT_and_POST(self):
        # verify correct update values when PUT update and POST updates are
        # missed but then async updates are sent
        cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
        client.put_container(self.url,
                             self.token,
                             'c1',
                             headers={'X-Storage-Policy': self.policy.name})

        # PUT and POST to object while one container server is stopped so that
        # we force async updates to it
        kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        content = u'stuff'
        client.put_object(self.url,
                          self.token,
                          'c1',
                          'o1',
                          contents=content,
                          content_type='test/ctype')
        meta = client.head_object(self.url, self.token, 'c1', 'o1')

        # use internal client for POST so we can force fast-post mode
        int_client = self.make_internal_client(object_post_as_copy=False)
        int_client.set_object_metadata(self.account, 'c1', 'o1',
                                       {'X-Object-Meta-Fruit': 'Tomato'})
        self.assertEqual('Tomato',
                         int_client.get_object_metadata(
                             self.account, 'c1',
                             'o1')['x-object-meta-fruit'])  # sanity

        # re-start the container server and assert that it does not yet know
        # about the object
        start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        self.assertFalse(
            direct_client.direct_get_container(cnodes[0], cpart, self.account,
                                               'c1')[1])

        # Run the object-updaters to send the async pendings
        Manager(['object-updater']).once()

        # check the re-started container server got same update as others.
        # we cannot assert the actual etag value because it may be encrypted
        listing_etags = set()
        for cnode in cnodes:
            listing = direct_client.direct_get_container(
                cnode, cpart, self.account, 'c1')[1]
            self.assertEqual(1, len(listing))
            self.assertEqual(len(content), listing[0]['bytes'])
            self.assertEqual('test/ctype', listing[0]['content_type'])
            listing_etags.add(listing[0]['hash'])
        self.assertEqual(1, len(listing_etags))

        # check that listing meta returned to client is consistent with object
        # meta returned to client
        hdrs, listing = client.get_container(self.url, self.token, 'c1')
        self.assertEqual(1, len(listing))
        self.assertEqual('o1', listing[0]['name'])
        self.assertEqual(len(content), listing[0]['bytes'])
        self.assertEqual(meta['etag'], listing[0]['hash'])
        self.assertEqual('test/ctype', listing[0]['content_type'])
Example No. 12
    def _test_sync(self, object_post_as_copy):
        source_container, dest_container = self._setup_synced_containers()

        # upload to source
        object_name = 'object-%s' % uuid.uuid4()
        put_headers = {'X-Object-Meta-Test': 'put_value'}
        client.put_object(self.url, self.token, source_container, object_name,
                          'test-body', headers=put_headers)

        # cycle container-sync
        Manager(['container-sync']).once()

        resp_headers, body = client.get_object(self.url, self.token,
                                               dest_container, object_name)
        self.assertEqual(body, 'test-body')
        self.assertIn('x-object-meta-test', resp_headers)
        self.assertEqual('put_value', resp_headers['x-object-meta-test'])

        # update metadata with a POST, using an internal client so we can
        # vary the object_post_as_copy setting - first use post-as-copy
        post_headers = {'Content-Type': 'image/jpeg',
                        'X-Object-Meta-Test': 'post_value'}
        int_client = self.make_internal_client(
            object_post_as_copy=object_post_as_copy)
        int_client.set_object_metadata(self.account, source_container,
                                       object_name, post_headers)
        # sanity checks...
        resp_headers = client.head_object(
            self.url, self.token, source_container, object_name)
        self.assertIn('x-object-meta-test', resp_headers)
        self.assertEqual('post_value', resp_headers['x-object-meta-test'])
        self.assertEqual('image/jpeg', resp_headers['content-type'])

        # cycle container-sync
        Manager(['container-sync']).once()

        # verify that metadata changes were sync'd
        resp_headers, body = client.get_object(self.url, self.token,
                                               dest_container, object_name)
        self.assertEqual(body, 'test-body')
        self.assertIn('x-object-meta-test', resp_headers)
        self.assertEqual('post_value', resp_headers['x-object-meta-test'])
        self.assertEqual('image/jpeg', resp_headers['content-type'])

        # delete the object
        client.delete_object(
            self.url, self.token, source_container, object_name)
        with self.assertRaises(ClientException) as cm:
            client.get_object(
                self.url, self.token, source_container, object_name)
        self.assertEqual(404, cm.exception.http_status)  # sanity check

        # cycle container-sync
        Manager(['container-sync']).once()

        # verify delete has been sync'd
        with self.assertRaises(ClientException) as cm:
            client.get_object(
                self.url, self.token, dest_container, object_name)
        self.assertEqual(404, cm.exception.http_status)  # sanity check
Example No. 13
    def _check_node(self, node, part, etag, headers_post):
        # get fragment archive etag
        fragment_archive_etag = self.direct_get(node, part)

        # remove data from the selected node
        part_dir = self.storage_dir('object', node, part=part)
        shutil.rmtree(part_dir, True)

        # this node can't serve the data any more
        try:
            self.direct_get(node, part)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Node data on %r was not fully destroyed!' % (node, ))

        # make sure we can still GET the object and it's correct; the
        # proxy decodes the remaining fragments to rebuild the object
        self.assertEqual(etag, self.proxy_get())

        # fire up reconstructor
        self.reconstructor.once()

        # fragment is rebuilt exactly as it was before!
        self.assertEqual(fragment_archive_etag, self.direct_get(node, part))

        # check meta
        meta = client.head_object(self.url, self.token, self.container_name,
                                  self.object_name)
        for key in headers_post:
            self.assertIn(key, meta)
            self.assertEqual(meta[key], headers_post[key])
Example No. 14
    def test_copy_from(self):
        resp = dict()
        objname = self.storlet_file + '-copy'
        req_headers = {
            'X-Run-Storlet': self.storlet_name,
            'X-Copy-From': '%s/%s' % (self.container, self.storlet_file)
        }
        client.put_object(self.url,
                          self.token,
                          self.container,
                          objname,
                          self.content,
                          response_dict=resp,
                          headers=req_headers)

        self.assertEqual(201, resp['status'])
        resp_header = resp['headers']
        self.assertEqual('%s/%s' % (self.container, self.storlet_file),
                         resp_header['x-storlet-generated-from'])
        self.assertEqual(self.acct,
                         resp_header['x-storlet-generated-from-account'])
        self.assertIn('x-storlet-generated-from-last-modified', resp_header)

        headers = client.head_object(self.url, self.token, self.container,
                                     objname)
        self.assertEqual(str(len(self.content)), headers['content-length'])

        resp = dict()
        client.delete_object(self.url,
                             self.token,
                             self.container,
                             objname,
                             response_dict=resp)
        self.assertEqual(204, resp['status'])
Example No. 15
    def _check_node(self, node, part, etag, headers_post):
        # get fragment archive etag
        fragment_archive_etag = self.direct_get(node, part)

        # remove the .durable from the selected node
        part_dir = self.storage_dir('object', node, part=part)
        for dirs, subdirs, files in os.walk(part_dir):
            for fname in files:
                if fname.endswith('.durable'):
                    durable = os.path.join(dirs, fname)
                    os.remove(durable)
                    break
        try:
            os.remove(os.path.join(part_dir, 'hashes.pkl'))
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        # fire up reconstructor to propagate the .durable
        self.reconstructor.once()

        # fragment is still exactly as it was before!
        self.assertEqual(fragment_archive_etag,
                         self.direct_get(node, part))

        # check meta
        meta = client.head_object(self.url, self.token,
                                  self.container_name,
                                  self.object_name)
        for key in headers_post:
            self.assertIn(key, meta)
            self.assertEqual(meta[key], headers_post[key])
Example No. 16
def get_all(request, container):
    """ Returns List of all Objects present in specified container
    :param container: Name of Swift Container
    :return: meta data and id's of all the objects
    """
    try:
        response_dict = dict()
        storage_url, auth_token = _get_auth_data(request.session)
        data_container = client.get_container(storage_url, auth_token, container)
        for obj in data_container[1]:
            meta_object = client.head_object(storage_url, auth_token, container, obj['name'])
            if not meta_object['x-object-meta-deleted']:
                if meta_object['x-object-meta-type'] in ['thumbnail', 'original-thumbnail']:
                    form = meta_object['x-object-meta-format']
                    if form not in response_dict:
                        response_dict[form] = []
                    new_obj = {'thumbnail_id': obj['name'],
                               'name': meta_object['x-object-meta-name'],
                               'type': meta_object['x-object-meta-type'],
                               'resolution': meta_object['x-object-meta-resolution']}
                    if meta_object['x-object-meta-type'] == 'thumbnail':
                        new_obj['original_id'] = meta_object['x-object-meta-original']
                    response_dict[form].append(new_obj)
        return Response(response_dict)
    except swift_exception.ClientException as e:
        print e
        return Response('Please contact your administrator',
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except Exception as e:
        print e
        return Response('Please contact your administrator',
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
Example No. 17
    def test_copy_dest(self):
        # No COPY in swiftclient. Using urllib instead...
        url = os.path.join(self.url, self.container, self.storlet_file)
        objname = self.storlet_file + '-copy-ex'
        headers = {
            'X-Auth-Token': self.token,
            'X-Run-Storlet': self.storlet_name,
            'Destination': '%s/%s' % (self.container, objname)
        }
        headers.update(self.additional_headers)
        req = urllib2.Request(url, headers=headers)
        req.get_method = lambda: 'COPY'
        conn = urllib2.urlopen(req, timeout=10)

        self.assertEqual(201, conn.getcode())
        self.assertEqual('%s/%s' % (self.container, self.storlet_file),
                         conn.info()['x-storlet-generated-from'])
        self.assertEqual(self.acct,
                         conn.info()['x-storlet-generated-from-account'])
        self.assertIn('x-storlet-generated-from-last-modified', conn.info())

        headers = client.head_object(self.url, self.token, self.container,
                                     objname)
        self.assertEqual(str(len(self.content)), headers['content-length'])

        resp = dict()
        client.delete_object(self.url,
                             self.token,
                             self.container,
                             objname,
                             response_dict=resp)
        self.assertEqual(204, resp['status'])
Example No. 18
    def invoke_storlet_on_copy_from(self):
        headers = {'X-Run-Storlet': self.storlet_name,
                   'X-Object-Meta-Name': 'thumbnail',
                   'X-Copy-From': '%s/%s' %
                   (self.container, self.storlet_file)}
        headers.update(self.additional_headers)
        resp = dict()
        c.put_object(self.url, self.token,
                     self.container, 'gen_thumb_on_copy.jpg', '',
                     headers=headers,
                     response_dict=resp)

        status = resp.get('status')
        self.assertIn(status, [201, 202])
        rh = resp['headers']
        self.assertEqual(rh['x-storlet-generated-from'],
                         '%s/%s' %
                         (self.container, self.storlet_file))
        self.assertEqual(rh['x-storlet-generated-from-account'],
                         self.acct)
        self.assertIn('x-storlet-generated-from-last-modified', rh)

        headers = c.head_object(self.url, self.token,
                                self.container, 'gen_thumb_on_copy.jpg')
        self.assertEqual('49032', headers['content-length'])
        self.assertEqual('thumbnail', headers['x-object-meta-name'])
        self.assertNotIn('x-object-meta-x-timestamp', headers)
        self.assertIn('x-timestamp', headers)
Example No. 19
    def test_async_update_after_PUT(self):
        cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
        client.put_container(self.url, self.token, 'c1',
                             headers={'X-Storage-Policy':
                                      self.policy.name})

        # put an object while one container server is stopped so that we force
        # an async update to it
        kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        content = u'stuff'
        client.put_object(self.url, self.token, 'c1', 'o1', contents=content)
        meta = client.head_object(self.url, self.token, 'c1', 'o1')

        # re-start the container server and assert that it does not yet know
        # about the object
        start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        self.assertFalse(direct_client.direct_get_container(
            cnodes[0], cpart, self.account, 'c1')[1])

        # Run the object-updaters to be sure updates are done
        Manager(['object-updater']).once()

        # check the re-started container server has update with override values
        obj = direct_client.direct_get_container(
            cnodes[0], cpart, self.account, 'c1')[1][0]
        self.assertEqual(meta['etag'], obj['hash'])
        self.assertEqual(len(content), obj['bytes'])
Example No. 20
    def test_async_update_after_PUT(self):
        cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
        client.put_container(self.url,
                             self.token,
                             'c1',
                             headers={'X-Storage-Policy': self.policy.name})

        # put an object while one container server is stopped so that we force
        # an async update to it
        kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        content = u'stuff'
        client.put_object(self.url, self.token, 'c1', 'o1', contents=content)
        meta = client.head_object(self.url, self.token, 'c1', 'o1')

        # re-start the container server and assert that it does not yet know
        # about the object
        start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        self.assertFalse(
            direct_client.direct_get_container(cnodes[0], cpart, self.account,
                                               'c1')[1])

        # Run the object-updaters to be sure updates are done
        Manager(['object-updater']).once()

        # check the re-started container server has update with override values
        obj = direct_client.direct_get_container(cnodes[0], cpart,
                                                 self.account, 'c1')[1][0]
        self.assertEqual(meta['etag'], obj['hash'])
        self.assertEqual(len(content), obj['bytes'])
Example No. 21
    def _check_node(self, node, part, etag, headers_post):
        # get fragment archive etag
        fragment_archive_etag = self.direct_get(node, part)

        # remove the .durable from the selected node
        part_dir = self.storage_dir('object', node, part=part)
        for dirs, subdirs, files in os.walk(part_dir):
            for fname in files:
                if fname.endswith('.durable'):
                    durable = os.path.join(dirs, fname)
                    os.remove(durable)
                    break
        try:
            os.remove(os.path.join(part_dir, 'hashes.pkl'))
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        # fire up reconstructor to propagate the .durable
        self.reconstructor.once()

        # fragment is still exactly as it was before!
        self.assertEqual(fragment_archive_etag, self.direct_get(node, part))

        # check meta
        meta = client.head_object(self.url, self.token, self.container_name,
                                  self.object_name)
        for key in headers_post:
            self.assertIn(key, meta)
            self.assertEqual(meta[key], headers_post[key])
Example No. 22
    def _check_node(self, node, part, etag, headers_post):
        # get fragment archive etag
        fragment_archive_etag = self.direct_get(node, part)

        # remove data from the selected node
        part_dir = self.storage_dir('object', node, part=part)
        shutil.rmtree(part_dir, True)

        # this node can't serve the data any more
        try:
            self.direct_get(node, part)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (node,))

        # make sure we can still GET the object and it's correct; the
        # proxy decodes the remaining fragments to rebuild the object
        self.assertEqual(etag, self.proxy_get())

        # fire up reconstructor
        self.reconstructor.once()

        # fragment is rebuilt exactly as it was before!
        self.assertEqual(fragment_archive_etag,
                         self.direct_get(node, part))

        # check meta
        meta = client.head_object(self.url, self.token,
                                  self.container_name,
                                  self.object_name)
        for key in headers_post:
            self.assertIn(key, meta)
            self.assertEqual(meta[key], headers_post[key])
Example No. 23
def read_temp_url(url, token, container, object_name):
    head = swift_client.head_object(url=url, token=token, container=container,
                                    name=object_name)
    print head
    try:
        print head['x-object-meta-tempurl']
        return head['x-object-meta-tempurl'], head['x-object-meta-tempurlexp']
    except KeyError:
        return False, False
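
read_temp_url assumes the temp URL and its expiry were stored earlier as custom object metadata. A sketch of the writing side under that assumption (store_temp_url is hypothetical; the metadata keys mirror what the reader looks up, and a POST replaces all existing user metadata on the object):

def store_temp_url(url, token, container, object_name, temp_url, expires):
    swift_client.post_object(url, token, container, object_name,
                             headers={'X-Object-Meta-Tempurl': temp_url,
                                      'X-Object-Meta-Tempurlexp': str(expires)})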
Example No. 24
    def test_put_ctype_replicated_when_subsequent_post(self):
        # primary half                     handoff half
        # ------------                     ------------
        # t0.data: ctype = foo
        #                                  t1.data: ctype = bar
        # t2.meta:
        #
        #              ...run replicator and expect...
        #
        #               t1.data: ctype = bar
        #               t2.meta:
        self.brain.put_container()

        # incomplete write
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._put_object(headers={'Content-Type': 'foo'})
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()

        # handoff write
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        self._put_object(headers={'Content-Type': 'bar'})
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()

        # metadata update with newest data unavailable
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._post_object(headers={'X-Object-Meta-Color': 'Blue'})
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()

        self.get_to_final_state()

        # check object metadata
        metadata = client.head_object(self.url, self.token,
                                      self.container_name,
                                      self.object_name)

        # check container listing metadata
        container_metadata, objs = client.get_container(self.url, self.token,
                                                        self.container_name)

        for obj in objs:
            if obj['name'] == self.object_name:
                break
        else:
            self.fail('obj not found in container listing')
        expected = 'bar'
        self.assertEqual(obj['content_type'], expected)
        self.assertEqual(metadata['x-object-meta-color'], 'Blue')
        self._assert_object_metadata_matches_listing(obj, metadata)
        self._assert_consistent_container_dbs()
        self._assert_consistent_object_metadata()
        self._assert_consistent_suffix_hashes()
Example No. 25
    def test_put_ctype_replicated_when_subsequent_post(self):
        # primary half                     handoff half
        # ------------                     ------------
        # t0.data: ctype = foo
        #                                  t1.data: ctype = bar
        # t2.meta:
        #
        #              ...run replicator and expect...
        #
        #               t1.data: ctype = bar
        #               t2.meta:
        self.brain.put_container(policy_index=0)

        # incomplete write
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._put_object(headers={'Content-Type': 'foo'})
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()

        # handoff write
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        self._put_object(headers={'Content-Type': 'bar'})
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()

        # metadata update with newest data unavailable
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._post_object(headers={'X-Object-Meta-Color': 'Blue'})
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()

        self.get_to_final_state()

        # check object metadata
        metadata = client.head_object(self.url, self.token,
                                      self.container_name,
                                      self.object_name)

        # check container listing metadata
        container_metadata, objs = client.get_container(self.url, self.token,
                                                        self.container_name)

        for obj in objs:
            if obj['name'] == self.object_name:
                break
        else:
            self.fail('obj not found in container listing')
        expected = 'bar'
        self.assertEqual(obj['content_type'], expected)
        self.assertEqual(metadata['x-object-meta-color'], 'Blue')
        self._assert_object_metadata_matches_listing(obj, metadata)
        self._assert_consistent_container_dbs()
        self._assert_consistent_object_metadata()
        self._assert_consistent_suffix_hashes()
Example No. 26
    def test_update_during_POST_only(self):
        # verify correct update values when PUT update is missed but then a
        # POST update succeeds *before* the PUT async pending update is sent
        cpart, cnodes = self.container_ring.get_nodes(self.account, "c1")
        client.put_container(self.url, self.token, "c1", headers={"X-Storage-Policy": self.policy.name})

        # put an object while one container server is stopped so that we force
        # an async update to it
        kill_server((cnodes[0]["ip"], cnodes[0]["port"]), self.ipport2server)
        content = u"stuff"
        client.put_object(self.url, self.token, "c1", "o1", contents=content, content_type="test/ctype")
        meta = client.head_object(self.url, self.token, "c1", "o1")

        # re-start the container server and assert that it does not yet know
        # about the object
        start_server((cnodes[0]["ip"], cnodes[0]["port"]), self.ipport2server)
        self.assertFalse(direct_client.direct_get_container(cnodes[0], cpart, self.account, "c1")[1])

        # use internal client for POST so we can force fast-post mode
        int_client = self.make_internal_client(object_post_as_copy=False)
        int_client.set_object_metadata(self.account, "c1", "o1", {"X-Object-Meta-Fruit": "Tomato"})
        self.assertEqual(
            "Tomato", int_client.get_object_metadata(self.account, "c1", "o1")["x-object-meta-fruit"]
        )  # sanity

        # check the re-started container server got same update as others.
        # we cannot assert the actual etag value because it may be encrypted
        listing_etags = set()
        for cnode in cnodes:
            listing = direct_client.direct_get_container(cnode, cpart, self.account, "c1")[1]
            self.assertEqual(1, len(listing))
            self.assertEqual(len(content), listing[0]["bytes"])
            self.assertEqual("test/ctype", listing[0]["content_type"])
            listing_etags.add(listing[0]["hash"])
        self.assertEqual(1, len(listing_etags))

        # check that listing meta returned to client is consistent with object
        # meta returned to client
        hdrs, listing = client.get_container(self.url, self.token, "c1")
        self.assertEqual(1, len(listing))
        self.assertEqual("o1", listing[0]["name"])
        self.assertEqual(len(content), listing[0]["bytes"])
        self.assertEqual(meta["etag"], listing[0]["hash"])
        self.assertEqual("test/ctype", listing[0]["content_type"])

        # Run the object-updaters to send the async pending from the PUT
        Manager(["object-updater"]).once()

        # check container listing metadata is still correct
        for cnode in cnodes:
            listing = direct_client.direct_get_container(cnode, cpart, self.account, "c1")[1]
            self.assertEqual(1, len(listing))
            self.assertEqual(len(content), listing[0]["bytes"])
            self.assertEqual("test/ctype", listing[0]["content_type"])
            listing_etags.add(listing[0]["hash"])
        self.assertEqual(1, len(listing_etags))
Example No. 27
def _set_headers(storage_url, auth_token, container, object_name, deleted,
                 request):
    try:
        header = client.head_object(storage_url, auth_token, container,
                                    object_name)
    except swift_exception.ClientException:
        storage_url, auth_token = _reauthorize(request)
        header = client.head_object(storage_url, auth_token, container,
                                    object_name)

    new_header = {'X-Object-Meta-Deleted': deleted,
                  'X-Object-Meta-Format': header['x-object-meta-format'],
                  'X-Object-Meta-Resolution': header['x-object-meta-resolution'],
                  'X-Object-Meta-Name': header['x-object-meta-name'],
                  'X-Object-Meta-Type': header['x-object-meta-type']
                  }
    copy_of = None
    if (header['x-object-meta-format'] in video_types) and \
            (header['x-object-meta-type'] == 'original'):
        new_header['X-Object-Meta-Mp4-Id'] = header['x-object-meta-mp4-id']

    if 'x-object-meta-status' in header:
        new_header['X-Object-Meta-Status'] = header['x-object-meta-status']

    if header['x-object-meta-type'] == 'thumbnail':
        new_header['X-Object-Meta-Original'] = header['x-object-meta-original']
        copy_of = header['x-object-meta-original']
    if header['x-object-meta-type'] == 'original':
        new_header['X-Object-Meta-Thumb'] = header['x-object-meta-thumb']
        if container == 'Video':
            copy_of = [header['x-object-meta-thumb'],
                       header['x-object-meta-mp4-id']]
        else:
            copy_of = header['x-object-meta-thumb']
        if 'x-object-meta-preview-id' in header:
            new_header['X-Object-Meta-Preview-Id'] = \
                header['x-object-meta-preview-id']
            copy_of = [header['x-object-meta-thumb'],
                       header['x-object-meta-preview-id']]
    try:
        client.post_object(storage_url, auth_token, container, object_name,
                           headers=new_header)
    except swift_exception.ClientException:
        storage_url, auth_token = _reauthorize(request)
        client.post_object(storage_url, auth_token, container, object_name,
                           headers=new_header)
    return copy_of
Example No. 28
    def test_async_updates_after_PUT_and_POST(self):
        # verify correct update values when PUT update and POST updates are
        # missed but then async updates are sent
        cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
        client.put_container(self.url, self.token, 'c1',
                             headers={'X-Storage-Policy':
                                      self.policy.name})

        # PUT and POST to object while one container server is stopped so that
        # we force async updates to it
        kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        content = u'stuff'
        client.put_object(self.url, self.token, 'c1', 'o1', contents=content,
                          content_type='test/ctype')
        meta = client.head_object(self.url, self.token, 'c1', 'o1')

        # use internal client for POST so we can force fast-post mode
        int_client = self.make_internal_client(object_post_as_copy=False)
        int_client.set_object_metadata(
            self.account, 'c1', 'o1', {'X-Object-Meta-Fruit': 'Tomato'})
        self.assertEqual(
            'Tomato',
            int_client.get_object_metadata(self.account, 'c1', 'o1')
            ['x-object-meta-fruit'])  # sanity

        # re-start the container server and assert that it does not yet know
        # about the object
        start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        self.assertFalse(direct_client.direct_get_container(
            cnodes[0], cpart, self.account, 'c1')[1])

        # Run the object-updaters to send the async pendings
        Manager(['object-updater']).once()

        # check the re-started container server got same update as others.
        # we cannot assert the actual etag value because it may be encrypted
        listing_etags = set()
        for cnode in cnodes:
            listing = direct_client.direct_get_container(
                cnode, cpart, self.account, 'c1')[1]
            self.assertEqual(1, len(listing))
            self.assertEqual(len(content), listing[0]['bytes'])
            self.assertEqual('test/ctype', listing[0]['content_type'])
            listing_etags.add(listing[0]['hash'])
        self.assertEqual(1, len(listing_etags))

        # check that listing meta returned to client is consistent with object
        # meta returned to client
        hdrs, listing = client.get_container(self.url, self.token, 'c1')
        self.assertEqual(1, len(listing))
        self.assertEqual('o1', listing[0]['name'])
        self.assertEqual(len(content), listing[0]['bytes'])
        self.assertEqual(meta['etag'], listing[0]['hash'])
        self.assertEqual('test/ctype', listing[0]['content_type'])
Example No. 29
    def test_post_ctype_replicated_when_previous_incomplete_puts(self):
        # primary half                     handoff half
        # ------------                     ------------
        # t0.data: ctype = foo
        #                                  t1.data: ctype = bar
        # t2.meta: ctype = baz
        #
        #              ...run replicator and expect...
        #
        #               t1.data:
        #               t2.meta: ctype = baz
        self.brain.put_container()

        # incomplete write to primary half
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._put_object(headers={'Content-Type': 'foo'})
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()

        # handoff write
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        self._put_object(headers={'Content-Type': 'bar'})
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()

        # content-type update to primary half
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._post_object(headers={'Content-Type': 'baz'})
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()

        self.get_to_final_state()

        # check object metadata
        metadata = client.head_object(self.url, self.token,
                                      self.container_name,
                                      self.object_name)

        # check container listing metadata
        container_metadata, objs = client.get_container(self.url, self.token,
                                                        self.container_name)

        for obj in objs:
            if obj['name'] == self.object_name:
                break
        expected = 'baz'
        self.assertEqual(obj['content_type'], expected)
        self._assert_object_metadata_matches_listing(obj, metadata)
        self._assert_consistent_container_dbs()
        self._assert_consistent_object_metadata()
        self._assert_consistent_suffix_hashes()
Example No. 30
    def test_post_ctype_replicated_when_previous_incomplete_puts(self):
        # primary half                     handoff half
        # ------------                     ------------
        # t0.data: ctype = foo
        #                                  t1.data: ctype = bar
        # t2.meta: ctype = baz
        #
        #              ...run replicator and expect...
        #
        #               t1.data:
        #               t2.meta: ctype = baz
        self.brain.put_container(policy_index=0)

        # incomplete write to primary half
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._put_object(headers={'Content-Type': 'foo'})
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()

        # handoff write
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        self._put_object(headers={'Content-Type': 'bar'})
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()

        # content-type update to primary half
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._post_object(headers={'Content-Type': 'baz'})
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()

        self.get_to_final_state()

        # check object metadata
        metadata = client.head_object(self.url, self.token,
                                      self.container_name,
                                      self.object_name)

        # check container listing metadata
        container_metadata, objs = client.get_container(self.url, self.token,
                                                        self.container_name)

        for obj in objs:
            if obj['name'] == self.object_name:
                break
        expected = 'baz'
        self.assertEqual(obj['content_type'], expected)
        self._assert_object_metadata_matches_listing(obj, metadata)
        self._assert_consistent_container_dbs()
        self._assert_consistent_object_metadata()
        self._assert_consistent_suffix_hashes()
Example No. 31
    def test_async_update_after_PUT(self):
        cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
        client.put_container(self.url,
                             self.token,
                             'c1',
                             headers={'X-Storage-Policy': self.policy.name})

        # put an object while one container server is stopped so that we force
        # an async update to it
        kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        content = u'stuff'
        client.put_object(self.url,
                          self.token,
                          'c1',
                          'o1',
                          contents=content,
                          content_type='test/ctype')
        meta = client.head_object(self.url, self.token, 'c1', 'o1')

        # re-start the container server and assert that it does not yet know
        # about the object
        start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        self.assertFalse(
            direct_client.direct_get_container(cnodes[0], cpart, self.account,
                                               'c1')[1])

        # Run the object-updaters to be sure updates are done
        Manager(['object-updater']).once()

        # check the re-started container server got same update as others.
        # we cannot assert the actual etag value because it may be encrypted
        listing_etags = set()
        for cnode in cnodes:
            listing = direct_client.direct_get_container(
                cnode, cpart, self.account, 'c1')[1]
            self.assertEqual(1, len(listing))
            self.assertEqual(len(content), listing[0]['bytes'])
            self.assertEqual('test/ctype', listing[0]['content_type'])
            listing_etags.add(listing[0]['hash'])
        self.assertEqual(1, len(listing_etags))

        # check that listing meta returned to client is consistent with object
        # meta returned to client
        hdrs, listing = client.get_container(self.url, self.token, 'c1')
        self.assertEqual(1, len(listing))
        self.assertEqual('o1', listing[0]['name'])
        self.assertEqual(len(content), listing[0]['bytes'])
        self.assertEqual(meta['etag'], listing[0]['hash'])
        self.assertEqual('test/ctype', listing[0]['content_type'])
Example No. 32
    def _check_node(self, node, part, etag, headers_post):
        # get fragment archive etag
        headers, fragment_archive_etag = self.direct_get(node, part)
        self.assertIn('X-Backend-Durable-Timestamp', headers)  # sanity check
        durable_timestamp = headers['X-Backend-Durable-Timestamp']

        # make the data file non-durable on the selected node
        part_dir = self.storage_dir('object', node, part=part)
        for dirs, subdirs, files in os.walk(part_dir):
            for fname in files:
                if fname.endswith('.data'):
                    non_durable_fname = fname.replace('#d', '')
                    os.rename(os.path.join(dirs, fname),
                              os.path.join(dirs, non_durable_fname))
        try:
            os.remove(os.path.join(part_dir, 'hashes.pkl'))
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        # sanity check that fragment is no longer durable
        headers = direct_client.direct_head_object(
            node,
            part,
            self.account,
            self.container_name,
            self.object_name,
            headers={
                'X-Backend-Storage-Policy-Index': int(self.policy),
                'X-Backend-Fragment-Preferences': json.dumps([])
            })
        self.assertNotIn('X-Backend-Durable-Timestamp', headers)

        # fire up reconstructor to propagate durable state
        self.reconstructor.once()

        # fragment is still exactly as it was before!
        headers, fragment_archive_etag_2 = self.direct_get(node, part)
        self.assertEqual(fragment_archive_etag, fragment_archive_etag_2)
        self.assertIn('X-Backend-Durable-Timestamp', headers)
        self.assertEqual(durable_timestamp,
                         headers['X-Backend-Durable-Timestamp'])

        # check meta
        meta = client.head_object(self.url, self.token, self.container_name,
                                  self.object_name)
        for key in headers_post:
            self.assertIn(key, meta)
            self.assertEqual(meta[key], headers_post[key])
Example No. 33
    def invoke_storlet_on_copy_dest(self):
        # No COPY in swiftclient. Using urllib instead...
        url = '%s/%s/%s' % (self.url, 'myobjects', self.storlet_file)
        headers = {'X-Auth-Token': self.token,
                   'X-Run-Storlet': self.storlet_name,
                   'Destination': 'myobjects/gen_thumb_on_copy_.jpg'}
        req = urllib2.Request(url, headers=headers)
        req.get_method = lambda: 'COPY'
        conn = urllib2.urlopen(req, timeout=10)
        status = conn.getcode()
        self.assertIn(status, [201, 202])

        headers = c.head_object(self.url, self.token,
                                'myobjects', 'gen_thumb_on_copy_.jpg')
        self.assertEqual(headers['content-length'], '49032')
Example No. 34
    def invoke_storlet_on_put(self):
        headers = {'X-Run-Storlet': self.storlet_name}
        resp = dict()
        source_file = '%s/%s' % (self.path_to_bundle, self.storlet_file)
        with open(source_file, 'r') as f:
            c.put_object(self.url, self.token,
                         'myobjects', 'gen_thumb_on_put.jpg', f,
                         headers=headers,
                         response_dict=resp)

        status = resp.get('status')
        self.assertIn(status, [201, 202])

        headers = c.head_object(self.url, self.token,
                                'myobjects', 'gen_thumb_on_put.jpg')
        self.assertEqual(headers['content-length'], '49032')
Example No. 35
def _get_video_headers(storage_url, auth_token, container, object_name):
    header = client.head_object(storage_url, auth_token, container,
                                object_name)
    new_header = {'X-Object-Meta-Deleted': header['x-object-meta-deleted'],
                  'X-Object-Meta-Format': header['x-object-meta-format'],
                  'X-Object-Meta-Resolution': header['x-object-meta-resolution'],
                  'X-Object-Meta-Name': header['x-object-meta-name'],
                  'X-Object-Meta-Type': header['x-object-meta-type']
                  }
    # if 'x-delete-after' in header:
    #     new_header['X-Delete-After'] = header['x-delete-after']
    return new_header
Example No. 36
    def test_update_during_POST_only(self):
        # verify correct update values when PUT update is missed but then a
        # POST update succeeds *before* the PUT async pending update is sent
        cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
        client.put_container(self.url,
                             self.token,
                             'c1',
                             headers={'X-Storage-Policy': self.policy.name})

        # put an object while one container server is stopped so that we force
        # an async update to it
        kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        content = u'stuff'
        client.put_object(self.url, self.token, 'c1', 'o1', contents=content)
        meta = client.head_object(self.url, self.token, 'c1', 'o1')

        # re-start the container server and assert that it does not yet know
        # about the object
        start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        self.assertFalse(
            direct_client.direct_get_container(cnodes[0], cpart, self.account,
                                               'c1')[1])

        # use internal client for POST so we can force fast-post mode
        int_client = self.make_internal_client(object_post_as_copy=False)
        int_client.set_object_metadata(self.account, 'c1', 'o1',
                                       {'X-Object-Meta-Fruit': 'Tomato'})
        self.assertEqual('Tomato',
                         int_client.get_object_metadata(
                             self.account, 'c1',
                             'o1')['x-object-meta-fruit'])  # sanity

        # check the re-started container server has the update with override
        # values
        obj = direct_client.direct_get_container(cnodes[0], cpart,
                                                 self.account, 'c1')[1][0]
        self.assertEqual(meta['etag'], obj['hash'])
        self.assertEqual(len(content), obj['bytes'])

        # Run the object-updaters to send the async pending from the PUT
        Manager(['object-updater']).once()

        # check container listing metadata is still correct
        obj = direct_client.direct_get_container(cnodes[0], cpart,
                                                 self.account, 'c1')[1][0]
        self.assertEqual(meta['etag'], obj['hash'])
        self.assertEqual(len(content), obj['bytes'])
Exemplo n.º 37
0
    def test_update_during_POST_only(self):
        # verify correct update values when PUT update is missed but then a
        # POST update succeeds *before* the PUT async pending update is sent
        cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
        client.put_container(self.url, self.token, 'c1',
                             headers={'X-Storage-Policy':
                                      self.policy.name})

        # put an object while one container server is stopped so that we force
        # an async update to it
        kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        content = u'stuff'
        client.put_object(self.url, self.token, 'c1', 'o1', contents=content)
        meta = client.head_object(self.url, self.token, 'c1', 'o1')

        # re-start the container server and assert that it does not yet know
        # about the object
        start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        self.assertFalse(direct_client.direct_get_container(
            cnodes[0], cpart, self.account, 'c1')[1])

        # use internal client for POST so we can force fast-post mode
        int_client = self.make_internal_client(object_post_as_copy=False)
        int_client.set_object_metadata(
            self.account, 'c1', 'o1', {'X-Object-Meta-Fruit': 'Tomato'})
        self.assertEqual(
            'Tomato',
            int_client.get_object_metadata(self.account, 'c1', 'o1')
            ['x-object-meta-fruit'])  # sanity

        # check the re-started container server has the update with override
        # values
        obj = direct_client.direct_get_container(
            cnodes[0], cpart, self.account, 'c1')[1][0]
        self.assertEqual(meta['etag'], obj['hash'])
        self.assertEqual(len(content), obj['bytes'])

        # Run the object-updaters to send the async pending from the PUT
        Manager(['object-updater']).once()

        # check container listing metadata is still correct
        obj = direct_client.direct_get_container(
            cnodes[0], cpart, self.account, 'c1')[1][0]
        self.assertEqual(meta['etag'], obj['hash'])
        self.assertEqual(len(content), obj['bytes'])
Exemplo n.º 38
0
    def invoke_storlet_on_put(self):
        headers = {'X-Run-Storlet': self.storlet_name,
                   'x-object-meta-name': 'thumbnail'}
        headers.update(self.additional_headers)
        resp = dict()
        source_file = '%s/%s' % (self.path_to_bundle, self.storlet_file)
        with open(source_file, 'rb') as f:
            c.put_object(self.url, self.token,
                         self.container, 'gen_thumb_on_put.jpg', f,
                         headers=headers,
                         response_dict=resp)

        status = resp.get('status')
        self.assertIn(status, [201, 202])

        headers = c.head_object(self.url, self.token,
                                self.container, 'gen_thumb_on_put.jpg')
        self.assertEqual('49032', headers['content-length'])
        self.assertEqual('thumbnail', headers['x-object-meta-name'])
Exemplo n.º 39
0
    def test_async_update_after_PUT(self):
        cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
        client.put_container(self.url, self.token, 'c1',
                             headers={'X-Storage-Policy':
                                      self.policy.name})

        # put an object while one container server is stopped so that we force
        # an async update to it
        kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        content = u'stuff'
        client.put_object(self.url, self.token, 'c1', 'o1', contents=content,
                          content_type='test/ctype')
        meta = client.head_object(self.url, self.token, 'c1', 'o1')

        # re-start the container server and assert that it does not yet know
        # about the object
        start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        self.assertFalse(direct_client.direct_get_container(
            cnodes[0], cpart, self.account, 'c1')[1])

        # Run the object-updaters to be sure updates are done
        Manager(['object-updater']).once()

        # check the re-started container server got same update as others.
        # we cannot assert the actual etag value because it may be encrypted
        listing_etags = set()
        for cnode in cnodes:
            listing = direct_client.direct_get_container(
                cnode, cpart, self.account, 'c1')[1]
            self.assertEqual(1, len(listing))
            self.assertEqual(len(content), listing[0]['bytes'])
            self.assertEqual('test/ctype', listing[0]['content_type'])
            listing_etags.add(listing[0]['hash'])
        self.assertEqual(1, len(listing_etags))

        # check that listing meta returned to client is consistent with object
        # meta returned to client
        hdrs, listing = client.get_container(self.url, self.token, 'c1')
        self.assertEqual(1, len(listing))
        self.assertEqual('o1', listing[0]['name'])
        self.assertEqual(len(content), listing[0]['bytes'])
        self.assertEqual(meta['etag'], listing[0]['hash'])
        self.assertEqual('test/ctype', listing[0]['content_type'])
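A detail the tests above rely on: python-swiftclient returns response headers as a plain dict with lower-cased keys, so meta['etag'] and meta['content-length'] are the correct lookups even though Swift sends Etag/Content-Length on the wire. Illustrative only, reusing the same fixtures:

    meta = client.head_object(self.url, self.token, 'c1', 'o1')
    assert 'etag' in meta and 'content-length' in meta
    assert 'Etag' not in meta  # keys are normalized to lower case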
Exemplo n.º 40
0
def get_all(request, container):
    """ Returns List of all Objects present in specified container
    :param container: Name of Swift Container
    :return: meta data and id's of all the objects
    """
    try:
        response_dict = dict()
        storage_url, auth_token = _get_auth_data(request.session)
        data_container = client.get_container(storage_url, auth_token,
                                              container)
        for obj in data_container[1]:
            meta_object = client.head_object(storage_url, auth_token,
                                             container, obj['name'])
            if meta_object['x-object-meta-deleted']:
                continue
            if meta_object['x-object-meta-type'] in ['thumbnail',
                                                     'original-thumbnail']:
                form = meta_object['x-object-meta-format']
                if form not in response_dict:
                    response_dict[form] = []
                new_obj = {
                    'thumbnail_id': obj['name'],
                    'name': meta_object['x-object-meta-name'],
                    'type': meta_object['x-object-meta-type'],
                    'resolution': meta_object['x-object-meta-resolution']
                }
                if form == 'thumbnail':
                    new_obj['original_id'] = meta_object[
                        'x-object-meta-original']
                response_dict[form].append(new_obj)
        return Response(response_dict)
    except swift_exception.ClientException as e:
        print(e)
        return Response('Please contact your administrator',
                        status=status.HTTP_501_NOT_IMPLEMENTED)
    except Exception as e:
        print(e)
        return Response('Please contact your administrator',
                        status=status.HTTP_501_NOT_IMPLEMENTED)
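get_all issues one HEAD per listed object, an N+1 round-trip pattern; the container GET already returns bytes/hash/content_type, but the custom X-Object-Meta-* values used here do require a per-object HEAD. If latency matters, a hedged sketch of parallelizing the HEADs with a thread pool (hypothetical variant, same client calls assumed):

    from concurrent.futures import ThreadPoolExecutor

    def _head(name):
        return name, client.head_object(storage_url, auth_token,
                                        container, name)

    with ThreadPoolExecutor(max_workers=8) as pool:
        metas = dict(pool.map(_head,
                              (o['name'] for o in data_container[1])))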
Exemplo n.º 41
0
    def register(self, filepath, protocol, extra=None):
        '''Register a filepath and protocol with the ID service.  If an
        interface is specified, use that.  The interface parameter is useful
        when testing on the same host as the ID service and you want to use
        the VM IP Auth.'''

        if protocol == "swift":
            path_parts = filepath.split("/")
            object_name = path_parts.pop()
            url = "%s//%s/%s/%s" % ((path_parts[0], ) + tuple(path_parts[2:5]))
            container = "/".join(path_parts[5:])
            if self.swift_auth_url == self.os_auth_url:
                token = self.auth_token
            else:
                _, token = get_auth(
                    self.swift_auth_url,
                    "%s:%s" % (self.swift_tenant, self.swift_username),
                    self.swift_password)

            size = head_object(url, token, container,
                               object_name)["content-length"]

            swift = {
                "swift": {
                    "url": url,
                    "container": container,
                    "object": object_name,
                    "auth_url": self.swift_auth_url
                },
                "filesize": size
            }
            if extra:
                extra.update(swift)
            else:
                extra = swift

        record = {"filepath": filepath, "protocol": protocol}
        if extra:
            record.update(extra)

        return self.http_post("%s%s" % (self.id_service, self.ID_PATH),
                              json.dumps(record)).text
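The string splitting above assumes a well-formed object URL; a sketch of the same decomposition using urlparse instead (assumption: filepath looks like scheme://host/v1/AUTH_account/container[/subdirs]/object):

    from urllib.parse import urlparse  # urlparse.urlparse on Python 2

    parsed = urlparse(filepath)
    segments = parsed.path.strip('/').split('/')
    object_name = segments.pop()
    url = '%s://%s/%s/%s' % (parsed.scheme, parsed.netloc,
                             segments[0], segments[1])   # .../v1/AUTH_account
    container = '/'.join(segments[2:])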
Exemplo n.º 42
0
    def invoke_storlet_on_copy_dest(self):
        # No COPY verb in swiftclient. Using urllib2 instead...
        url = '%s/%s/%s' % (self.url, self.container, self.storlet_file)
        headers = {'X-Auth-Token': self.token,
                   'X-Run-Storlet': self.storlet_name,
                   'X-Object-Meta-Name': 'thumbnail',
                   'Destination': '%s/gen_thumb_on_copy_.jpg' % self.container}
        headers.update(self.additional_headers)
        req = urllib2.Request(url, headers=headers)
        req.get_method = lambda: 'COPY'
        conn = urllib2.urlopen(req, timeout=10)
        status = conn.getcode()
        self.assertIn(status, [201, 202])

        headers = c.head_object(self.url, self.token,
                                self.container, 'gen_thumb_on_copy_.jpg')
        self.assertEqual('49032', headers['content-length'])
        self.assertEqual('thumbnail', headers['x-object-meta-name'])
        self.assertNotIn('x-object-meta-x-timestamp', headers)
        self.assertIn('x-timestamp', headers)
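For comparison, the same COPY can be issued with the requests library, which accepts arbitrary HTTP verbs without the get_method override (hedged alternative, not from the source; assumes the requests package is available):

    import requests

    resp = requests.request(
        'COPY', url,
        headers={'X-Auth-Token': self.token,
                 'X-Run-Storlet': self.storlet_name,
                 'Destination': '%s/gen_thumb_on_copy_.jpg' % self.container},
        timeout=10)
    assert resp.status_code in (201, 202)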
Exemplo n.º 43
0
    def invoke_storlet_on_copy_from(self):
        headers = {'X-Run-Storlet': self.storlet_name,
                   'X-Copy-From': 'myobjects/%s' % self.storlet_file}
        resp = dict()
        c.put_object(self.url, self.token,
                     'myobjects', 'gen_thumb_on_copy.jpg', '',
                     headers=headers,
                     response_dict=resp)

        status = resp.get('status')
        self.assertIn(status, [201, 202])
        rh = resp['headers']
        self.assertEqual(rh['x-storlet-generated-from'],
                         'myobjects/%s' % self.storlet_file)
        self.assertEqual(rh['x-storlet-generated-from-account'],
                         self.acct)
        self.assertIn('x-storlet-generated-from-last-modified', rh)

        headers = c.head_object(self.url, self.token,
                                'myobjects', 'gen_thumb_on_copy.jpg')
        self.assertEqual(headers['content-length'], '49032')
Exemplo n.º 44
0
    def register(self, filepath, protocol, extra=None):
        '''Register a filepath and protocol with the ID service.  If an
        interface is specified, use that.  The interface parameter is useful
        when testing on the same host as the ID service and you want to use
        the VM IP Auth.'''

        if protocol == "swift":
            path_parts = filepath.split("/")
            object_name = path_parts.pop()
            url =  "%s//%s/%s/%s" % ((path_parts[0],) + tuple(path_parts[2:5]))
            container = "/".join(path_parts[5:])
            if self.swift_auth_url == self.os_auth_url:
                token = self.auth_token
            else:
                _, token = get_auth(self.swift_auth_url, "%s:%s" % (
                        self.swift_tenant, self.swift_username),
                        self.swift_password)

            size = head_object(url, token, container,
                               object_name)["content-length"]

            swift = {"swift": {
                    "url": url,
                    "container": container,
                    "object": object_name,
                    "auth_url": self.swift_auth_url
                    },
                    "filesize": size
                }
            if extra:
                extra.update(swift)
            else:
                extra = swift

        record = {"filepath": filepath, "protocol": protocol}
        if extra:
            record.update(extra)

        return self.http_post("%s%s" % (self.id_service, self.ID_PATH),
                json.dumps(record)).text
Exemplo n.º 45
0
def _set_headers(storage_url, auth_token, container, object_name, deleted):
    try:
        header = client.head_object(storage_url, auth_token, container,
                                    object_name)
        new_header = {'X-Object-Meta-Deleted': deleted,
                      'X-Object-Meta-Format': header['x-object-meta-format'],
                      'X-Object-Meta-Resolution':
                      header['x-object-meta-resolution'],
                      'X-Object-Meta-Name': header['x-object-meta-name'],
                      'X-Object-Meta-Type': header['x-object-meta-type']}
        copy_of = None
        if header['x-object-meta-type'] == 'thumbnail':
            new_header['X-Object-Meta-Original'] = \
                header['x-object-meta-original']
            copy_of = header['x-object-meta-original']
        if header['x-object-meta-type'] == 'original':
            new_header['X-Object-Meta-Thumb'] = header['x-object-meta-thumb']
            copy_of = header['x-object-meta-thumb']
        client.post_object(storage_url, auth_token, container, object_name,
                           headers=new_header)
        return copy_of
    except swift_exception.ClientException:
        # let Swift client errors (e.g. 404) propagate to the caller
        raise
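The reason _set_headers re-sends every existing X-Object-Meta-* value is that a Swift object POST replaces the full set of custom metadata rather than merging it. A minimal sketch of toggling a single flag while preserving the rest (hypothetical helper, same client module assumed):

    def toggle_deleted(storage_url, auth_token, container, object_name,
                       deleted):
        # POST replaces all custom metadata, so start from the current set
        current = client.head_object(storage_url, auth_token, container,
                                     object_name)
        meta = {k.title(): v for k, v in current.items()
                if k.startswith('x-object-meta-')}
        meta['X-Object-Meta-Deleted'] = deleted
        client.post_object(storage_url, auth_token, container, object_name,
                           headers=meta)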
Exemplo n.º 46
0
    def test_async_update_after_PUT(self):
        cpart, cnodes = self.container_ring.get_nodes(self.account, "c1")
        client.put_container(self.url, self.token, "c1", headers={"X-Storage-Policy": self.policy.name})

        # put an object while one container server is stopped so that we force
        # an async update to it
        kill_server((cnodes[0]["ip"], cnodes[0]["port"]), self.ipport2server)
        content = u"stuff"
        client.put_object(self.url, self.token, "c1", "o1", contents=content, content_type="test/ctype")
        meta = client.head_object(self.url, self.token, "c1", "o1")

        # re-start the container server and assert that it does not yet know
        # about the object
        start_server((cnodes[0]["ip"], cnodes[0]["port"]), self.ipport2server)
        self.assertFalse(direct_client.direct_get_container(cnodes[0], cpart, self.account, "c1")[1])

        # Run the object-updaters to be sure updates are done
        Manager(["object-updater"]).once()

        # check the re-started container server got same update as others.
        # we cannot assert the actual etag value because it may be encrypted
        listing_etags = set()
        for cnode in cnodes:
            listing = direct_client.direct_get_container(cnode, cpart, self.account, "c1")[1]
            self.assertEqual(1, len(listing))
            self.assertEqual(len(content), listing[0]["bytes"])
            self.assertEqual("test/ctype", listing[0]["content_type"])
            listing_etags.add(listing[0]["hash"])
        self.assertEqual(1, len(listing_etags))

        # check that listing meta returned to client is consistent with object
        # meta returned to client
        hdrs, listing = client.get_container(self.url, self.token, "c1")
        self.assertEqual(1, len(listing))
        self.assertEqual("o1", listing[0]["name"])
        self.assertEqual(len(content), listing[0]["bytes"])
        self.assertEqual(meta["etag"], listing[0]["hash"])
        self.assertEqual("test/ctype", listing[0]["content_type"])
Exemplo n.º 47
0
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))
        exc = None
        try:
            direct_client.direct_get_object(another_onode, opart, self.account,
                                            container, obj)
        except direct_client.ClientException as err:
            exc = err
        self.assertEqual(exc.http_status, 404)

        kill_server(onode['port'], self.port2server, self.pids)
        client.delete_object(self.url, self.token, container, obj)
        exc = None
        try:
            client.head_object(self.url, self.token, container, obj)
        except direct_client.ClientException as err:
            exc = err
        self.assertEqual(exc.http_status, 404)
        objs = [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ]
        if obj in objs:
            raise Exception('Container listing still knew about object')
        for cnode in cnodes:
            objs = [
                o['name'] for o in direct_client.direct_get_container(
                    cnode, cpart, self.account, container)[1]
            ]
            if obj in objs:
Exemplo n.º 48
0
    def test_main(self):
        # Create container
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy':
                                      self.policy.name})

        # Kill one container/obj primary server
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        onode = onodes[0]
        kill_server((onode['ip'], onode['port']), self.ipport2server)

        # Create container/obj (goes to two primary servers and one handoff)
        client.put_object(self.url, self.token, container, obj, b'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != b'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))

        # Stash the on disk data from a primary for future comparison with the
        # handoff - this may not equal 'VERIFY' if for example the proxy has
        # crypto enabled
        direct_get_data = direct_client.direct_get_object(
            onodes[1], opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]

        # Kill other two container/obj primary servers
        #   to ensure GET handoff works
        for node in onodes[1:]:
            kill_server((node['ip'], node['port']), self.ipport2server)

        # Indirectly through proxy assert we can get container/obj
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != b'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))

        # Restart those other two container/obj primary servers
        for node in onodes[1:]:
            start_server((node['ip'], node['port']), self.ipport2server)

        # We've indirectly verified the handoff node has the container/object,
        #   but let's directly verify it.
        another_onode = next(self.object_ring.get_more_nodes(opart))
        odata = direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        self.assertEqual(direct_get_data, odata)

        # drop a tempfile in the handoff's datadir, like it might have
        # had if there was an rsync failure while it was previously a
        # primary
        handoff_device_path = self.device_dir(another_onode)
        data_filename = None
        for root, dirs, files in os.walk(handoff_device_path):
            for filename in files:
                if filename.endswith('.data'):
                    data_filename = filename
                    temp_filename = '.%s.6MbL6r' % data_filename
                    temp_filepath = os.path.join(root, temp_filename)
        if not data_filename:
            self.fail('Did not find any data files on %r' %
                      handoff_device_path)
        open(temp_filepath, 'w')

        # Assert container listing (via proxy and directly) has container/obj
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj not in objs:
            raise Exception('Container listing did not know about object')
        for cnode in cnodes:
            objs = [o['name'] for o in
                    direct_client.direct_get_container(
                        cnode, cpart, self.account, container)[1]]
            if obj not in objs:
                raise Exception(
                    'Container server %s:%s did not know about object' %
                    (cnode['ip'], cnode['port']))

        # Bring the first container/obj primary server back up
        start_server((onode['ip'], onode['port']), self.ipport2server)

        # Assert that it doesn't have container/obj yet
        try:
            direct_client.direct_get_object(
                onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")

        # Run object replication, ensuring we run the handoff node last so it
        #   will remove its extra handoff partition
        for node in onodes:
            try:
                port_num = node['replication_port']
            except KeyError:
                port_num = node['port']
            node_id = (port_num - 6000) // 10
            Manager(['object-replicator']).once(number=node_id)
        try:
            another_port_num = another_onode['replication_port']
        except KeyError:
            another_port_num = another_onode['port']
        another_num = (another_port_num - 6000) // 10
        Manager(['object-replicator']).once(number=another_num)

        # Assert the first container/obj primary server now has container/obj
        odata = direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        self.assertEqual(direct_get_data, odata)

        # and that it does *not* have a temporary rsync dropping!
        found_data_filename = False
        primary_device_path = self.device_dir(onode)
        for root, dirs, files in os.walk(primary_device_path):
            for filename in files:
                if filename.endswith('.6MbL6r'):
                    self.fail('Found unexpected file %s' %
                              os.path.join(root, filename))
                if filename == data_filename:
                    found_data_filename = True
        self.assertTrue(found_data_filename,
                        'Did not find data file %r on %r' % (
                            data_filename, primary_device_path))

        # Assert the handoff server no longer has container/obj
        try:
            direct_client.direct_get_object(
                another_onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")

        # Kill the first container/obj primary server again (we have two
        #   primaries and the handoff up now)
        kill_server((onode['ip'], onode['port']), self.ipport2server)

        # Delete container/obj
        try:
            client.delete_object(self.url, self.token, container, obj)
        except client.ClientException as err:
            if self.object_ring.replica_count > 2:
                raise
            # Object DELETE returning 503 for (404, 204)
            # remove this with fix for
            # https://bugs.launchpad.net/swift/+bug/1318375
            self.assertEqual(503, err.http_status)

        # Assert we can't head container/obj
        try:
            client.head_object(self.url, self.token, container, obj)
        except client.ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")

        # Assert container/obj is not in the container listing, both indirectly
        #   and directly
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj in objs:
            raise Exception('Container listing still knew about object')
        for cnode in cnodes:
            objs = [o['name'] for o in
                    direct_client.direct_get_container(
                        cnode, cpart, self.account, container)[1]]
            if obj in objs:
                raise Exception(
                    'Container server %s:%s still knew about object' %
                    (cnode['ip'], cnode['port']))

        # Restart the first container/obj primary server again
        start_server((onode['ip'], onode['port']), self.ipport2server)

        # Assert it still has container/obj
        direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})

        # Run object replication, ensuring we run the handoff node last so it
        #   will remove its extra handoff partition
        for node in onodes:
            try:
                port_num = node['replication_port']
            except KeyError:
                port_num = node['port']
            node_id = (port_num - 6000) // 10
            Manager(['object-replicator']).once(number=node_id)
        another_node_id = (another_port_num - 6000) // 10
        Manager(['object-replicator']).once(number=another_node_id)

        # Assert the handoff node no longer has container/obj
        try:
            direct_client.direct_get_object(
                another_onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
Exemplo n.º 49
0
    def test_revert_object(self):
        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   self.container_name,
                                                   self.object_name)
        hnodes = self.object_ring.get_more_nodes(opart)

        # kill a parity count (two) of the primary nodes so we can force
        # data onto handoffs; we do that by renaming dev dirs to induce 507
        p_dev1 = self.device_dir(onodes[0])
        p_dev2 = self.device_dir(onodes[1])
        self.kill_drive(p_dev1)
        self.kill_drive(p_dev2)

        # PUT object
        contents = Body()
        headers = {'x-object-meta-foo': 'meta-foo'}
        headers_post = {'x-object-meta-bar': 'meta-bar'}
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=contents,
                          headers=headers)
        client.post_object(self.url,
                           self.token,
                           self.container_name,
                           self.object_name,
                           headers=headers_post)
        # (Some versions of?) swiftclient will mutate the headers dict on post
        headers_post.pop('X-Auth-Token', None)

        # these primaries can't serve the data any more, we expect 507
        # here and not 404 because we're using mount_check to kill nodes
        for onode in (onodes[0], onodes[1]):
            try:
                self.direct_get(onode, opart)
            except direct_client.DirectClientException as err:
                self.assertEqual(err.http_status, 507)
            else:
                self.fail('Node data on %r was not fully destroyed!' %
                          (onode, ))

        # now take out another primary
        p_dev3 = self.device_dir(onodes[2])
        self.kill_drive(p_dev3)

        # this node can't serve the data any more
        try:
            self.direct_get(onodes[2], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 507)
        else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (onodes[2], ))

        # make sure we can still GET the object and it's correct;
        # we're now pulling from handoffs and reconstructing
        etag = self.proxy_get()
        self.assertEqual(etag, contents.etag)

        # rename the dev dirs so they don't 507 anymore
        self.revive_drive(p_dev1)
        self.revive_drive(p_dev2)
        self.revive_drive(p_dev3)

        # fire up reconstructor on handoff nodes only
        for hnode in hnodes:
            hnode_id = (hnode['port'] - 6000) // 10
            self.reconstructor.once(number=hnode_id)

        # the first and third primaries have data again
        for onode in (onodes[0], onodes[2]):
            self.direct_get(onode, opart)

        # check meta
        meta = client.head_object(self.url, self.token, self.container_name,
                                  self.object_name)
        for key in headers_post:
            self.assertIn(key, meta)
            self.assertEqual(meta[key], headers_post[key])

        # handoffs are empty
        for hnode in hnodes:
            try:
                self.direct_get(hnode, opart)
            except direct_client.DirectClientException as err:
                self.assertEqual(err.http_status, 404)
            else:
                self.fail('Node data on %r was not fully destroyed!' %
                          (hnode, ))
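The `(port - 6000) // 10` arithmetic above (also used in Exemplo 48) maps an SAIO-style probe-environment port such as 6010/6020/6030 to the numbered server instance that Manager controls. A tiny hedged helper, assuming that standard port layout:

    def node_number(node):
        # object servers listen on 6010, 6020, ... so this yields 1, 2, 3, ...
        port = node.get('replication_port', node['port'])
        return (port - 6000) // 10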
Exemplo n.º 50
0
    def test_rebuild_quarantines_lonely_frag(self):
        # fail one device while the object is deleted so we are left with one
        # fragment and some tombstones
        failed_node = self.onodes[0]
        device_path = self.device_dir(failed_node)
        self.kill_drive(device_path)
        self.assert_direct_get_fails(failed_node, self.opart, 507)  # sanity

        # delete object
        client.delete_object(self.url, self.token, self.container_name,
                             self.object_name)

        # check we have tombstones
        for node in self.onodes[1:]:
            err = self.assert_direct_get_fails(node, self.opart, 404)
            self.assertIn('X-Backend-Timestamp', err.http_headers)

        # run the reconstructor with zero reclaim age to clean up tombstones
        for conf_index in self.configs['object-reconstructor'].keys():
            self.run_custom_daemon(ObjectReconstructor, 'object-reconstructor',
                                   conf_index, {'reclaim_age': '0'})

        # check we no longer have tombstones
        for node in self.onodes[1:]:
            err = self.assert_direct_get_fails(node, self.opart, 404)
            self.assertNotIn('X-Timestamp', err.http_headers)

        # revive the failed device and check it has a fragment
        self.revive_drive(device_path)
        self.assert_direct_get_succeeds(failed_node, self.opart)

        # restart proxy to clear error-limiting so that the revived drive
        # participates again
        Manager(['proxy-server']).restart()

        # client GET will fail with 503 ...
        with self.assertRaises(ClientException) as cm:
            client.get_object(self.url, self.token, self.container_name,
                              self.object_name)
        self.assertEqual(503, cm.exception.http_status)
        # ... but client HEAD succeeds
        headers = client.head_object(self.url, self.token, self.container_name,
                                     self.object_name)
        for key in self.headers_post:
            self.assertIn(key, headers)
            self.assertEqual(self.headers_post[key], headers[key])

        # run the reconstructor without quarantine_threshold set
        error_lines = []
        warning_lines = []
        for conf_index in self.configs['object-reconstructor'].keys():
            reconstructor = self.run_custom_daemon(ObjectReconstructor,
                                                   'object-reconstructor',
                                                   conf_index,
                                                   {'quarantine_age': '0'})
            logger = reconstructor.logger.logger
            error_lines.append(logger.get_lines_for_level('error'))
            warning_lines.append(logger.get_lines_for_level('warning'))

        # check logs for errors
        found_lines = False
        for lines in error_lines:
            if not lines:
                continue
            self.assertFalse(found_lines, error_lines)
            found_lines = True
            for line in itertools.islice(lines, 0, 6, 2):
                self.assertIn(
                    'Unable to get enough responses (1/4 from 1 ok '
                    'responses)', line, lines)
            for line in itertools.islice(lines, 1, 7, 2):
                self.assertIn(
                    'Unable to get enough responses (4 x 404 error '
                    'responses)', line, lines)
        self.assertTrue(found_lines, 'error lines not found')

        for lines in warning_lines:
            self.assertEqual([], lines)

        # check we still have a single fragment and no tombstones
        self.assert_direct_get_succeeds(failed_node, self.opart)
        for node in self.onodes[1:]:
            err = self.assert_direct_get_fails(node, self.opart, 404)
            self.assertNotIn('X-Timestamp', err.http_headers)

        # run the reconstructor to quarantine the lonely frag
        error_lines = []
        warning_lines = []
        for conf_index in self.configs['object-reconstructor'].keys():
            reconstructor = self.run_custom_daemon(
                ObjectReconstructor, 'object-reconstructor', conf_index, {
                    'quarantine_age': '0',
                    'quarantine_threshold': '1'
                })
            logger = reconstructor.logger.logger
            error_lines.append(logger.get_lines_for_level('error'))
            warning_lines.append(logger.get_lines_for_level('warning'))

        # check logs for errors
        found_lines = False
        for index, lines in enumerate(error_lines):
            if not lines:
                continue
            self.assertFalse(found_lines, error_lines)
            found_lines = True
            for line in itertools.islice(lines, 0, 6, 2):
                self.assertIn(
                    'Unable to get enough responses (1/4 from 1 ok '
                    'responses)', line, lines)
            for line in itertools.islice(lines, 1, 7, 2):
                self.assertIn(
                    'Unable to get enough responses (6 x 404 error '
                    'responses)', line, lines)
        self.assertTrue(found_lines, 'error lines not found')

        # check logs for quarantine warning
        found_lines = False
        for lines in warning_lines:
            if not lines:
                continue
            self.assertFalse(found_lines, warning_lines)
            found_lines = True
            self.assertEqual(1, len(lines), lines)
            self.assertIn('Quarantined object', lines[0])
        self.assertTrue(found_lines, 'warning lines not found')

        # check we have nothing
        for node in self.onodes:
            err = self.assert_direct_get_fails(node, self.opart, 404)
            self.assertNotIn('X-Backend-Timestamp', err.http_headers)
        # client HEAD and GET now both 404
        with self.assertRaises(ClientException) as cm:
            client.get_object(self.url, self.token, self.container_name,
                              self.object_name)
        self.assertEqual(404, cm.exception.http_status)
        with self.assertRaises(ClientException) as cm:
            client.head_object(self.url, self.token, self.container_name,
                               self.object_name)
        self.assertEqual(404, cm.exception.http_status)

        # run the reconstructor once more - should see no errors in logs!
        error_lines = []
        warning_lines = []
        for conf_index in self.configs['object-reconstructor'].keys():
            reconstructor = self.run_custom_daemon(
                ObjectReconstructor, 'object-reconstructor', conf_index, {
                    'quarantine_age': '0',
                    'quarantine_threshold': '1'
                })
            logger = reconstructor.logger.logger
            error_lines.append(logger.get_lines_for_level('error'))
            warning_lines.append(logger.get_lines_for_level('warning'))

        for lines in error_lines:
            self.assertEqual([], lines)
        for lines in warning_lines:
            self.assertEqual([], lines)
Exemplo n.º 51
0
    def head_object(self, container_name, object_name):
        return client.head_object(self.url, self.token, container_name,
                                  object_name)
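A usage sketch for the thin wrapper above (container and object names are illustrative):

    meta = self.head_object('my-container', 'my-object')
    print(meta.get('etag'), meta.get('content-length'))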
Exemplo n.º 52
0
    def test_reconcile_manifest(self):
        manifest_data = []

        def write_part(i):
            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
            part_name = 'manifest_part_%0.2d' % i
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            client.put_object(self.url,
                              self.token,
                              self.container_name,
                              part_name,
                              contents=body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        policy = random.choice(list(POLICIES))
        self.brain.put_container(policy.idx)
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)

        self.brain.stop_handoff_half()
        wrong_policy = random.choice([p for p in POLICIES if p is not policy])
        self.brain.put_container(wrong_policy.idx)
        # write some more parts
        for i in range(10, 20):
            write_part(i)

        # write manifest
        try:
            client.put_object(self.url,
                              self.token,
                              self.container_name,
                              self.object_name,
                              contents=utils.json.dumps(manifest_data),
                              query_string='multipart-manifest=put')
        except ClientException as err:
            # so as it works out, you can't really upload a multi-part
            # manifest for objects that are currently misplaced - you have to
            # wait until they're all available - which is about the same as
            # some other failure that causes data to be unavailable to the
            # proxy at the time of upload
            self.assertEqual(err.http_status, 400)

        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(self.account, self.container_name,
                                            direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }

        direct_manifest_data = list(map(translate_direct, manifest_data))
        headers = {
            'x-container-host':
            ','.join('%s:%s' % (n['ip'], n['port'])
                     for n in self.container_ring.devs),
            'x-container-device':
            ','.join(n['device'] for n in self.container_ring.devs),
            'x-container-partition':
            container_part,
            'X-Backend-Storage-Policy-Index':
            wrong_policy.idx,
            'X-Static-Large-Object':
            'True',
        }
        for node in nodes:
            direct_client.direct_put_object(
                node,
                part,
                self.account,
                self.container_name,
                direct_manifest_name,
                contents=utils.json.dumps(direct_manifest_data),
                headers=headers)
            break  # one should do it...

        self.brain.start_handoff_half()
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # let's see how that direct upload worked out...
        metadata, body = client.get_object(
            self.url,
            self.token,
            self.container_name,
            direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEqual(entry[key], direct_manifest_data[i][key])
        metadata, body = client.get_object(self.url, self.token,
                                           self.container_name,
                                           direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(
            body,
            ''.join('VERIFY%0.2d' % i + '\x00' * 1048576 for i in range(20)))

        # and regular upload should work now too
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=utils.json.dumps(manifest_data),
                          query_string='multipart-manifest=put')
        metadata = client.head_object(self.url, self.token,
                                      self.container_name, self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
Exemplo n.º 53
0
    def test_reconcile_manifest(self):
        info_url = "%s://%s/info" % (urlparse(
            self.url).scheme, urlparse(self.url).netloc)
        proxy_conn = client.http_connection(info_url)
        cluster_info = client.get_capabilities(proxy_conn)
        if 'slo' not in cluster_info:
            raise SkipTest("SLO not enabled in proxy; "
                           "can't test manifest reconciliation")

        # this test is not only testing a split brain scenario on
        # multiple policies with mis-placed objects - it even writes out
        # a static large object directly to the storage nodes while the
        # objects are unavailably mis-placed from *behind* the proxy and
        # doesn't know how to do that for EC_POLICY (clayg: why did you
        # guys let me write a test that does this!?) - so we force
        # wrong_policy (where the manifest gets written) to be one of
        # any of your configured REPL_POLICY (we know you have one
        # because this is a ReplProbeTest)
        wrong_policy = random.choice(POLICIES_BY_TYPE[REPL_POLICY])
        policy = random.choice(
            [p for p in ENABLED_POLICIES if p is not wrong_policy])
        manifest_data = []

        def write_part(i):
            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
            part_name = 'manifest_part_%0.2d' % i
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            client.put_object(self.url,
                              self.token,
                              self.container_name,
                              part_name,
                              contents=body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        self.brain.put_container(int(policy))
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)

        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # write some more parts
        for i in range(10, 20):
            write_part(i)

        # write manifest
        with self.assertRaises(ClientException) as catcher:
            client.put_object(self.url,
                              self.token,
                              self.container_name,
                              self.object_name,
                              contents=utils.json.dumps(manifest_data),
                              query_string='multipart-manifest=put')

        # so as it works out, you can't really upload a multi-part
        # manifest for objects that are currently misplaced - you have to
        # wait until they're all available - which is about the same as
        # some other failure that causes data to be unavailable to the
        # proxy at the time of upload
        self.assertEqual(catcher.exception.http_status, 400)

        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(self.account, self.container_name,
                                            direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }

        direct_manifest_data = list(map(translate_direct, manifest_data))
        headers = {
            'x-container-host':
            ','.join('%s:%s' % (n['ip'], n['port'])
                     for n in self.container_ring.devs),
            'x-container-device':
            ','.join(n['device'] for n in self.container_ring.devs),
            'x-container-partition':
            container_part,
            'X-Backend-Storage-Policy-Index':
            wrong_policy.idx,
            'X-Static-Large-Object':
            'True',
        }
        for node in nodes:
            direct_client.direct_put_object(
                node,
                part,
                self.account,
                self.container_name,
                direct_manifest_name,
                contents=utils.json.dumps(direct_manifest_data),
                headers=headers)
            break  # one should do it...

        self.brain.start_handoff_half()
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # let's see how that direct upload worked out...
        metadata, body = client.get_object(
            self.url,
            self.token,
            self.container_name,
            direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEqual(entry[key], direct_manifest_data[i][key])
        metadata, body = client.get_object(self.url, self.token,
                                           self.container_name,
                                           direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(
            body,
            ''.join('VERIFY%0.2d' % i + '\x00' * 1048576 for i in range(20)))

        # and regular upload should work now too
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=utils.json.dumps(manifest_data),
                          query_string='multipart-manifest=put')
        metadata = client.head_object(self.url, self.token,
                                      self.container_name, self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
Exemplo n.º 54
0
    def test_reconcile_manifest(self):
        info_url = "%s://%s/info" % (urlparse(self.url).scheme,
                                     urlparse(self.url).netloc)
        proxy_conn = client.http_connection(info_url)
        cluster_info = client.get_capabilities(proxy_conn)
        if 'slo' not in cluster_info:
            raise SkipTest("SLO not enabled in proxy; "
                           "can't test manifest reconciliation")

        # this test is not only testing a split brain scenario on
        # multiple policies with mis-placed objects - it even writes out
        # a static large object directly to the storage nodes while the
        # objects are unavailably mis-placed from *behind* the proxy and
        # doesn't know how to do that for EC_POLICY (clayg: why did you
        # guys let me write a test that does this!?) - so we force
        # wrong_policy (where the manifest gets written) to be one of
        # any of your configured REPL_POLICY (we know you have one
        # because this is a ReplProbeTest)
        wrong_policy = random.choice(POLICIES_BY_TYPE[REPL_POLICY])
        policy = random.choice([p for p in ENABLED_POLICIES
                                if p is not wrong_policy])
        manifest_data = []

        def write_part(i):
            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
            part_name = 'manifest_part_%0.2d' % i
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            client.put_object(self.url, self.token, self.container_name,
                              part_name, contents=body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        self.brain.put_container(int(policy))
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)

        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # write some more parts
        for i in range(10, 20):
            write_part(i)

        # write manifest
        with self.assertRaises(ClientException) as catcher:
            client.put_object(self.url, self.token, self.container_name,
                              self.object_name,
                              contents=utils.json.dumps(manifest_data),
                              query_string='multipart-manifest=put')

        # so as it works out, you can't really upload a multi-part
        # manifest for objects that are currently misplaced - you have to
        # wait until they're all available - which is about the same as
        # some other failure that causes data to be unavailable to the
        # proxy at the time of upload
        self.assertEqual(catcher.exception.http_status, 400)

        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(
            self.account, self.container_name, direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }
        direct_manifest_data = list(map(translate_direct, manifest_data))
        headers = {
            'x-container-host': ','.join('%s:%s' % (n['ip'], n['port']) for n
                                         in self.container_ring.devs),
            'x-container-device': ','.join(n['device'] for n in
                                           self.container_ring.devs),
            'x-container-partition': container_part,
            'X-Backend-Storage-Policy-Index': wrong_policy.idx,
            'X-Static-Large-Object': 'True',
        }
        for node in nodes:
            direct_client.direct_put_object(
                node, part, self.account, self.container_name,
                direct_manifest_name,
                contents=utils.json.dumps(direct_manifest_data),
                headers=headers)
            break  # one should do it...

        self.brain.start_handoff_half()
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # let's see how that direct upload worked out...
        metadata, body = client.get_object(
            self.url, self.token, self.container_name, direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEqual(entry[key], direct_manifest_data[i][key])
        metadata, body = client.get_object(
            self.url, self.token, self.container_name, direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(body, ''.join('VERIFY%0.2d' % i + '\x00' * 1048576
                                       for i in range(20)))

        # and regular upload should work now too
        client.put_object(self.url, self.token, self.container_name,
                          self.object_name,
                          contents=utils.json.dumps(manifest_data),
                          query_string='multipart-manifest=put')
        metadata = client.head_object(self.url, self.token,
                                      self.container_name,
                                      self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
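
For reference, the client-facing SLO manifest sent with
multipart-manifest=put is a JSON list of segment descriptors keyed by
path/etag/size_bytes, which the cluster stores internally as
name/hash/bytes (hence the translate_direct helper above). A minimal
sketch of one entry - the container, object name, and etag below are
made up for illustration:

    # illustrative only - segment container, name, and etag are invented
    manifest_data = [
        {'path': '/segments/part-00',       # container/object of the segment
         'etag': '5f4dcc3b5aa765d61d8327deb882cf99',  # MD5 of that segment
         'size_bytes': 1048576},            # exact byte count of the segment
    ]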
Exemplo n.º 55
0
    def test_main(self):
        # Create container
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy':
                                      self.policy.name})

        # Kill one container/obj primary server
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        onode = onodes[0]
        kill_server((onode['ip'], onode['port']), self.ipport2server)

        # Create container/obj (goes to two primary servers and one handoff)
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))

        # Kill other two container/obj primary servers
        #   to ensure GET handoff works
        for node in onodes[1:]:
            kill_server((node['ip'], node['port']), self.ipport2server)

        # Indirectly through proxy assert we can get container/obj
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))

        # Restart those other two container/obj primary servers
        for node in onodes[1:]:
            start_server((node['ip'], node['port']), self.ipport2server)

        # We've indirectly verified the handoff node has the container/object,
        #   but let's directly verify it.
        another_onode = next(self.object_ring.get_more_nodes(opart))
        odata = direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))

        # drop a tempfile in the handoff's datadir, like it might have
        # had if there was an rsync failure while it was previously a
        # primary
        handoff_device_path = self.device_dir('object', another_onode)
        data_filename = None
        for root, dirs, files in os.walk(handoff_device_path):
            for filename in files:
                if filename.endswith('.data'):
                    data_filename = filename
                    temp_filename = '.%s.6MbL6r' % data_filename
                    temp_filepath = os.path.join(root, temp_filename)
        if not data_filename:
            self.fail('Did not find any data files on %r' %
                      handoff_device_path)
        open(temp_filepath, 'w').close()

        # Assert container listing (via proxy and directly) has container/obj
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj not in objs:
            raise Exception('Container listing did not know about object')
        for cnode in cnodes:
            objs = [o['name'] for o in
                    direct_client.direct_get_container(
                        cnode, cpart, self.account, container)[1]]
            if obj not in objs:
                raise Exception(
                    'Container server %s:%s did not know about object' %
                    (cnode['ip'], cnode['port']))

        # Bring the first container/obj primary server back up
        start_server((onode['ip'], onode['port']), self.ipport2server)

        # Assert that it doesn't have container/obj yet
        try:
            direct_client.direct_get_object(
                onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")

        # Run object replication, ensuring we run the handoff node last so it
        #   will remove its extra handoff partition
        for node in onodes:
            try:
                port_num = node['replication_port']
            except KeyError:
                port_num = node['port']
            # map the standard probe-test port layout (6010, 6020, ...) to a
            # server number
            node_id = (port_num - 6000) // 10
            Manager(['object-replicator']).once(number=node_id)
        try:
            another_port_num = another_onode['replication_port']
        except KeyError:
            another_port_num = another_onode['port']
        another_num = (another_port_num - 6000) // 10
        Manager(['object-replicator']).once(number=another_num)

        # Assert the first container/obj primary server now has container/obj
        odata = direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))

        # and that it does *not* have a temporary rsync dropping!
        found_data_filename = False
        primary_device_path = self.device_dir('object', onode)
        for root, dirs, files in os.walk(primary_device_path):
            for filename in files:
                if filename.endswith('.6MbL6r'):
                    self.fail('Found unexpected file %s' %
                              os.path.join(root, filename))
                if filename == data_filename:
                    found_data_filename = True
        self.assertTrue(found_data_filename,
                        'Did not find data file %r on %r' % (
                            data_filename, primary_device_path))

        # Assert the handoff server no longer has container/obj
        try:
            direct_client.direct_get_object(
                another_onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")

        # Kill the first container/obj primary server again (we have two
        #   primaries and the handoff up now)
        kill_server((onode['ip'], onode['port']), self.ipport2server)

        # Delete container/obj
        try:
            client.delete_object(self.url, self.token, container, obj)
        except client.ClientException as err:
            if self.object_ring.replica_count > 2:
                raise
            # Object DELETE returning 503 for (404, 204)
            # remove this with fix for
            # https://bugs.launchpad.net/swift/+bug/1318375
            self.assertEqual(503, err.http_status)

        # Assert we can't head container/obj
        try:
            client.head_object(self.url, self.token, container, obj)
        except client.ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")

        # Assert container/obj is not in the container listing, both indirectly
        #   and directly
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj in objs:
            raise Exception('Container listing still knew about object')
        for cnode in cnodes:
            objs = [o['name'] for o in
                    direct_client.direct_get_container(
                        cnode, cpart, self.account, container)[1]]
            if obj in objs:
                raise Exception(
                    'Container server %s:%s still knew about object' %
                    (cnode['ip'], cnode['port']))

        # Restart the first container/obj primary server again
        start_server((onode['ip'], onode['port']), self.ipport2server)

        # Assert it still has container/obj
        direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})

        # Run object replication, ensuring we run the handoff node last so it
        #   will remove its extra handoff partition
        for node in onodes:
            try:
                port_num = node['replication_port']
            except KeyError:
                port_num = node['port']
            node_id = (port_num - 6000) / 10
            Manager(['object-replicator']).once(number=node_id)
        another_node_id = (another_port_num - 6000) // 10
        Manager(['object-replicator']).once(number=another_node_id)

        # Assert the handoff node no longer has container/obj
        try:
            direct_client.direct_get_object(
                another_onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
Exemplo n.º 56
0
    def test_sync_with_stale_container_rows(self):
        source_container, dest_container = self._setup_synced_containers()
        brain = BrainSplitter(self.url, self.token, source_container,
                              None, 'container')

        # upload to source
        object_name = 'object-%s' % uuid.uuid4()
        client.put_object(self.url, self.token, source_container, object_name,
                          'test-body')

        # check source container listing
        _, listing = client.get_container(
            self.url, self.token, source_container)
        for expected_obj_dict in listing:
            if expected_obj_dict['name'] == object_name:
                break
        else:
            self.fail('Failed to find source object %r in container listing %r'
                      % (object_name, listing))

        # stop all container servers
        brain.stop_primary_half()
        brain.stop_handoff_half()

        # upload new object content to source - container updates will fail
        client.put_object(self.url, self.token, source_container, object_name,
                          'new-test-body')
        source_headers = client.head_object(
            self.url, self.token, source_container, object_name)

        # start all container servers
        brain.start_primary_half()
        brain.start_handoff_half()

        # sanity check: source container listing should not have changed
        _, listing = client.get_container(
            self.url, self.token, source_container)
        for actual_obj_dict in listing:
            if actual_obj_dict['name'] == object_name:
                self.assertDictEqual(expected_obj_dict, actual_obj_dict)
                break
        else:
            self.fail('Failed to find source object %r in container listing %r'
                      % (object_name, listing))

        # cycle container-sync - object should be correctly sync'd despite
        # stale info in container row
        Manager(['container-sync']).once()

        # verify sync'd object has same content and headers
        dest_headers, body = client.get_object(self.url, self.token,
                                               dest_container, object_name)
        self.assertEqual(body, b'new-test-body')
        mismatched_headers = []
        for k in ('etag', 'content-length', 'content-type', 'x-timestamp',
                  'last-modified'):
            if source_headers[k] == dest_headers[k]:
                continue
            mismatched_headers.append((k, source_headers[k], dest_headers[k]))
        if mismatched_headers:
            msg = '\n'.join([('Mismatched header %r, expected %r but got %r'
                              % item) for item in mismatched_headers])
            self.fail(msg)
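
The example above depends on a _setup_synced_containers helper that is not
shown here. A minimal sketch of what such a helper might look like, assuming
the test class exposes a sync realm and cluster name (self.realm and
self.cluster below are assumptions, as is the 'secret' sync key):

    import uuid

    from swiftclient import client

    def _setup_synced_containers(self, sync_key='secret'):
        # the source container syncs to the destination via a shared key and
        # a //realm/cluster/account/container sync-to path
        source = 'source-%s' % uuid.uuid4()
        dest = 'dest-%s' % uuid.uuid4()
        client.put_container(self.url, self.token, dest,
                             headers={'X-Container-Sync-Key': sync_key})
        sync_to = '//%s/%s/%s/%s' % (self.realm, self.cluster,
                                     self.account, dest)
        client.put_container(self.url, self.token, source,
                             headers={'X-Container-Sync-To': sync_to,
                                      'X-Container-Sync-Key': sync_key})
        return source, dest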
Exemplo n.º 57
0
    def test_main(self):
        # Create container
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)

        # Kill one container/obj primary server
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        onode = onodes[0]
        kill_server(onode['port'], self.port2server, self.pids)

        # Create container/obj (goes to two primary servers and one handoff)
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))

        # Kill other two container/obj primary servers
        #   to ensure GET handoff works
        for node in onodes[1:]:
            kill_server(node['port'], self.port2server, self.pids)

        # Indirectly through proxy assert we can get container/obj
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))

        # Restart those other two container/obj primary servers
        for node in onodes[1:]:
            start_server(node['port'], self.port2server, self.pids)

        # We've indirectly verified the handoff node has the container/object,
        #   but let's directly verify it.
        another_onode = next(self.object_ring.get_more_nodes(opart))
        odata = direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))

        # Assert container listing (via proxy and directly) has container/obj
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj not in objs:
            raise Exception('Container listing did not know about object')
        for cnode in cnodes:
            objs = [o['name'] for o in
                    direct_client.direct_get_container(
                        cnode, cpart, self.account, container)[1]]
            if obj not in objs:
                raise Exception(
                    'Container server %s:%s did not know about object' %
                    (cnode['ip'], cnode['port']))

        # Bring the first container/obj primary server back up
        start_server(onode['port'], self.port2server, self.pids)

        # Assert that it doesn't have container/obj yet
        try:
            direct_client.direct_get_object(
                onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")

        # Run object replication, ensuring we run the handoff node last so it
        #   will remove its extra handoff partition
        for node in onodes:
            try:
                port_num = node['replication_port']
            except KeyError:
                port_num = node['port']
            node_id = (port_num - 6000) // 10
            Manager(['object-replicator']).once(number=node_id)
        try:
            another_port_num = another_onode['replication_port']
        except KeyError:
            another_port_num = another_onode['port']
        another_num = (another_port_num - 6000) // 10
        Manager(['object-replicator']).once(number=another_num)

        # Assert the first container/obj primary server now has container/obj
        odata = direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))

        # Assert the handoff server no longer has container/obj
        try:
            direct_client.direct_get_object(
                another_onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")

        # Kill the first container/obj primary server again (we have two
        #   primaries and the handoff up now)
        kill_server(onode['port'], self.port2server, self.pids)

        # Delete container/obj
        try:
            client.delete_object(self.url, self.token, container, obj)
        except client.ClientException as err:
            if self.object_ring.replica_count > 2:
                raise
            # Object DELETE returning 503 for (404, 204)
            # remove this with fix for
            # https://bugs.launchpad.net/swift/+bug/1318375
            self.assertEqual(503, err.http_status)

        # Assert we can't head container/obj
        try:
            client.head_object(self.url, self.token, container, obj)
        except client.ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")

        # Assert container/obj is not in the container listing, both indirectly
        #   and directly
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj in objs:
            raise Exception('Container listing still knew about object')
        for cnode in cnodes:
            objs = [o['name'] for o in
                    direct_client.direct_get_container(
                        cnode, cpart, self.account, container)[1]]
            if obj in objs:
                raise Exception(
                    'Container server %s:%s still knew about object' %
                    (cnode['ip'], cnode['port']))

        # Restart the first container/obj primary server again
        start_server(onode['port'], self.port2server, self.pids)

        # Assert it still has container/obj
        direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})

        # Run object replication, ensuring we run the handoff node last so it
        #   will remove its extra handoff partition
        for node in onodes:
            try:
                port_num = node['replication_port']
            except KeyError:
                port_num = node['port']
            node_id = (port_num - 6000) // 10
            Manager(['object-replicator']).once(number=node_id)
        another_node_id = (another_port_num - 6000) // 10
        Manager(['object-replicator']).once(number=another_node_id)

        # Assert the handoff node no longer has container/obj
        try:
            direct_client.direct_get_object(
                another_onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
Exemplo n.º 58
0
    def test_main(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        apart, anodes = self.account_ring.get_nodes(self.account)

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        onode = onodes[0]
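        # kill one object primary so the upcoming PUT lands on a handoff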
        kill(self.pids[self.port2server[onode['port']]], SIGTERM)
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        # Kill all primaries to ensure GET handoff works
        for node in onodes[1:]:
            kill(self.pids[self.port2server[node['port']]], SIGTERM)
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        for node in onodes[1:]:
            self.pids[self.port2server[node['port']]] = Popen([
                'swift-object-server',
                '/etc/swift/object-server/%d.conf' %
                ((node['port'] - 6000) // 10)]).pid
        sleep(2)
        # We've indirectly verified the handoff node has the object, but let's
        # directly verify it.
        another_onode = next(self.object_ring.get_more_nodes(opart))
        odata = direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj not in objs:
            raise Exception('Container listing did not know about object')
        for cnode in cnodes:
            objs = [o['name'] for o in
                    direct_client.direct_get_container(
                        cnode, cpart, self.account, container)[1]]
            if obj not in objs:
                raise Exception(
                    'Container server %s:%s did not know about object' %
                    (cnode['ip'], cnode['port']))
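        # bring the first object primary server back up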
        self.pids[self.port2server[onode['port']]] = Popen([
            'swift-object-server',
            '/etc/swift/object-server/%d.conf' %
            ((onode['port'] - 6000) // 10)]).pid
        sleep(2)
        exc = False
        try:
            direct_client.direct_get_object(onode, opart, self.account,
                                            container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Previously downed object server had test object')
        # Run the extra server last so it'll remove its extra partition
        ps = []
        for n in onodes:
            ps.append(Popen(['swift-object-replicator',
                             '/etc/swift/object-server/%d.conf' %
                             ((n['port'] - 6000) // 10), 'once']))
        for p in ps:
            p.wait()
        call(['swift-object-replicator',
              '/etc/swift/object-server/%d.conf' %
              ((another_onode['port'] - 6000) // 10), 'once'])
        odata = direct_client.direct_get_object(onode, opart, self.account,
                                                container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))
        exc = False
        try:
            direct_client.direct_get_object(another_onode, opart, self.account,
                                            container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Handoff object server still had test object')

# Because POST has changed to a COPY by default, POSTs will succeed on all up
# nodes now if at least one up node has the object.
#       kill(self.pids[self.port2server[onode['port']]], SIGTERM)
#       client.post_object(self.url, self.token, container, obj,
#                          headers={'x-object-meta-probe': 'value'})
#       oheaders = client.head_object(self.url, self.token, container, obj)
#       if oheaders.get('x-object-meta-probe') != 'value':
#           raise Exception('Metadata incorrect, was %s' % repr(oheaders))
#       exc = False
#       try:
#           direct_client.direct_get_object(another_onode, opart, self.account,
#                                           container, obj)
#       except Exception:
#           exc = True
#       if not exc:
#           raise Exception('Handoff server claimed it had the object when '
#                           'it should not have it')
#       self.pids[self.port2server[onode['port']]] = Popen([
#           'swift-object-server',
#           '/etc/swift/object-server/%d.conf' %
#           ((onode['port'] - 6000) / 10)]).pid
#       sleep(2)
#       oheaders = direct_client.direct_get_object(onode, opart, self.account,
#                                                   container, obj)[0]
#       if oheaders.get('x-object-meta-probe') == 'value':
#           raise Exception('Previously downed object server had the new '
#                           'metadata when it should not have it')
#       # Run the extra server last so it'll remove its extra partition
#       ps = []
#       for n in onodes:
#           ps.append(Popen(['swift-object-replicator',
#                            '/etc/swift/object-server/%d.conf' %
#                            ((n['port'] - 6000) / 10), 'once']))
#       for p in ps:
#           p.wait()
#       call(['swift-object-replicator',
#             '/etc/swift/object-server/%d.conf' %
#             ((another_onode['port'] - 6000) / 10), 'once'])
#       oheaders = direct_client.direct_get_object(onode, opart, self.account,
#                                                   container, obj)[0]
#       if oheaders.get('x-object-meta-probe') != 'value':
#           raise Exception(
#               'Previously downed object server did not have the new metadata')

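        # kill the first primary again and delete the object while it's down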
        kill(self.pids[self.port2server[onode['port']]], SIGTERM)
        client.delete_object(self.url, self.token, container, obj)
        exc = False
        try:
            client.head_object(self.url, self.token, container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Regular object HEAD was still successful')
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj in objs:
            raise Exception('Container listing still knew about object')
        for cnode in cnodes:
            objs = [o['name'] for o in
                    direct_client.direct_get_container(
                        cnode, cpart, self.account, container)[1]]
            if obj in objs:
                raise Exception(
                    'Container server %s:%s still knew about object' %
                    (cnode['ip'], cnode['port']))
        self.pids[self.port2server[onode['port']]] = Popen([
            'swift-object-server',
            '/etc/swift/object-server/%d.conf' %
            ((onode['port'] - 6000) // 10)]).pid
        sleep(2)
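        # the restarted primary missed the DELETE, so it should still serve
        # the stale object until replication runs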
        direct_client.direct_get_object(onode, opart, self.account, container,
                                        obj)
        # Run the extra server last so it'll remove its extra partition
        ps = []
        for n in onodes:
            ps.append(Popen(['swift-object-replicator',
                             '/etc/swift/object-server/%d.conf' %
                             ((n['port'] - 6000) // 10), 'once']))
        for p in ps:
            p.wait()
        call(['swift-object-replicator',
              '/etc/swift/object-server/%d.conf' %
              ((another_onode['port'] - 6000) // 10), 'once'])
        exc = False
        try:
            direct_client.direct_get_object(another_onode, opart, self.account,
                                            container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Handoff object server still had the object')
Exemplo n.º 59
0
    def test_merge_storage_policy_index(self):
        # generic split brain
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.put_object()
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(self.account,
                                                self.container_name,
                                                self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node,
                        part,
                        self.account,
                        self.container_name,
                        self.object_name,
                        headers={
                            'X-Backend-Storage-Policy-Index': policy_index
                        })
                except direct_client.ClientException:
                    continue
                orig_policy_index = policy_index
                break
            if orig_policy_index is not None:
                break
        else:
            self.fail('Unable to find /%s/%s/%s in %r' %
                      (self.account, self.container_name, self.object_name,
                       found_policy_indexes))
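        # push the cluster to a consistent state, then let the reconciler
        # move the object to the winning policy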
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertEqual(
            len(found_policy_indexes), 1,
            'primary nodes disagree about policy index %r' % head_responses)

        expected_policy_index = found_policy_indexes.pop()
        self.assertNotEqual(orig_policy_index, expected_policy_index)
        # validate object placement
        orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
                                                    '/etc/swift')
        for node in orig_policy_ring.devs:
            try:
                direct_client.direct_head_object(
                    node,
                    part,
                    self.account,
                    self.container_name,
                    self.object_name,
                    headers={
                        'X-Backend-Storage-Policy-Index': orig_policy_index
                    })
            except direct_client.ClientException as err:
                if err.http_status == HTTP_NOT_FOUND:
                    continue
                raise
            else:
                self.fail('Found /%s/%s/%s in %s' %
                          (self.account, self.container_name, self.object_name,
                           orig_policy_index))
        # use proxy to access object (bad container info might be cached...)
        timeout = time.time() + TIMEOUT
        while time.time() < timeout:
            try:
                metadata = client.head_object(self.url, self.token,
                                              self.container_name,
                                              self.object_name)
            except ClientException as err:
                if err.http_status != HTTP_NOT_FOUND:
                    raise
                time.sleep(1)
            else:
                break
        else:
            self.fail('could not HEAD /%s/%s/%s from policy %s '
                      'after %s seconds.' %
                      (self.account, self.container_name, self.object_name,
                       expected_policy_index, TIMEOUT))