Example #1
    def test_main(self):
        # Create container
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)

        # Kill container servers excepting two of the primaries
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        kill_nonprimary_server(cnodes, self.ipport2server, self.pids)
        kill_server((cnode['ip'], cnode['port']),
                    self.ipport2server, self.pids)

        # Create container/obj
        obj = 'object-%s' % uuid4()
        client.put_object(self.url, self.token, container, obj, '')

        # Restart other primary server
        start_server((cnode['ip'], cnode['port']),
                     self.ipport2server, self.pids)

        # Assert it does not know about container/obj
        self.assertFalse(direct_client.direct_get_container(
            cnode, cpart, self.account, container)[1])

        # Run the object-updaters
        Manager(['object-updater']).once()

        # Assert the other primary server now knows about container/obj
        objs = [o['name'] for o in direct_client.direct_get_container(
            cnode, cpart, self.account, container)[1]]
        self.assertIn(obj, objs)
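For orientation, the module-level helper exercised throughout these examples can also be called standalone. A minimal sketch follows; the storage URL, token, and object names are placeholders, not values from the test above:

from swiftclient import client

# Upload a small object and capture the transfer details via response_dict.
resp = {}
client.put_object('http://127.0.0.1:8080/v1/AUTH_test',  # placeholder URL
                  'AUTH_tk_placeholder',                  # placeholder token
                  'my-container', 'my-object',
                  contents='hello world',
                  content_type='text/plain',
                  response_dict=resp)
assert resp.get('status') in (200, 201)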
Example #2
def put_storlet_object(url, token, storlet_name, storlet_path,
                       dependency, main_class):
    # Delete any previous storlet; a 404 just means there is nothing there.
    # resp = dict()
    # try:
    #     c.delete_object(url, token, 'storlet', storlet_name, None,
    #                     None, None, None, resp)
    # except Exception:
    #     if resp.get('status') == 404:
    #         print 'Nothing to delete'
    metadata = {'X-Object-Meta-Storlet-Language': 'Java',
                'X-Object-Meta-Storlet-Interface-Version': '1.0',
                'X-Object-Meta-Storlet-Dependency': dependency,
                'X-Object-Meta-Storlet-Object-Metadata': 'no',
                'X-Object-Meta-Storlet-Main': main_class}
    f = open('%s/%s' % (storlet_path, storlet_name), 'r')
    content_length = None
    response = dict()
    c.put_object(url, token, 'storlet', storlet_name, f,
                 content_length, None, None,
                 "application/octet-stream", metadata,
                 None, None, None, response)
    f.close()
    status = response.get('status')
    assert status in (200, 201)
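A hypothetical invocation of this helper; the JAR name, path, and main class below are illustrative, and url/token are assumed to come from an earlier authentication step:

put_storlet_object(url, token,
                   'teststorlet-1.0.jar',       # storlet_name (illustrative)
                   '/opt/storlets/bin',         # storlet_path (illustrative)
                   '',                          # no dependencies
                   'com.example.TestStorlet')   # main_class (illustrative)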
Example #3
 def createEmptyResource(self, name):
     client.put_object(self.storage_url,
                       self.auth_token,
                       self.container,
                       sanitize(name),
                       http_conn=self.http_connection)
     return ObjectResource(self.container, name, self.environ, self.objects)
Example #4
    def uploadNote(self, note, title):
        if len(title) == 0:
            title = self._generateObjectTitle(note.getTitle())
        metaManager = MetaManager(self._storage_url, self._token, title)
        currentCreateDate = metaManager.getCreateDate()
        content = note.getContent().encode('ascii', 'ignore')
        put_object(
            self._storage_url,
            self._token,
            Configuration.container_name,
            title,
            content)
        lastModifiedDate = MetaManager.dateNow()
        # currentCreateDate may be None because the note may be new
        if currentCreateDate is None:
            currentCreateDate = lastModifiedDate

        metaManager.setCreateDate(currentCreateDate)
        metaManager.setLastModifiedDate(lastModifiedDate)
        # passing None here intentionally keeps the previous tags unchanged
        metaManager.setTags(None)
        metaManager.commitMeta()
        print title
        return title
Example #5
    def test_async_update_after_PUT(self):
        cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
        client.put_container(self.url, self.token, 'c1',
                             headers={'X-Storage-Policy':
                                      self.policy.name})

        # put an object while one container server is stopped so that we force
        # an async update to it
        kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        content = u'stuff'
        client.put_object(self.url, self.token, 'c1', 'o1', contents=content)
        meta = client.head_object(self.url, self.token, 'c1', 'o1')

        # re-start the container server and assert that it does not yet know
        # about the object
        start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        self.assertFalse(direct_client.direct_get_container(
            cnodes[0], cpart, self.account, 'c1')[1])

        # Run the object-updaters to be sure updates are done
        Manager(['object-updater']).once()

        # check the re-started container server has update with override values
        obj = direct_client.direct_get_container(
            cnodes[0], cpart, self.account, 'c1')[1][0]
        self.assertEqual(meta['etag'], obj['hash'])
        self.assertEqual(len(content), obj['bytes'])
Example #6
    def handleCopy(self, destPath, depthInfinity):
        dst = '/'.join(destPath.split('/')[2:])

        try:
            client.head_object(self.storage_url,
                               self.auth_token,
                               self.container,
                               dst,
                               http_conn=self.http_connection)
        except client.ClientException:
            pass

        headers = {'X-Copy-From': self.path}
        try:
            client.put_object(self.storage_url,
                              self.auth_token,
                              self.container,
                              dst,
                              headers=headers,
                              http_conn=self.http_connection)
            if self.environ.get("HTTP_OVERWRITE", '') != "T":
                raise DAVError(HTTP_CREATED)
            return True
        except client.ClientException:
            return False
Example #7
def create_pseudofolder_from_prefix(storage_url, auth_token, container,
                                    prefix, prefixlist):
    """Recursively create pseudofolders from a given prefix, unless the
    prefix is already included in prefixlist."""
    subprefix = '/'.join(prefix.split('/')[0:-1])
    if subprefix == '' or prefix in prefixlist:
        return prefixlist

    prefixlist = create_pseudofolder_from_prefix(
        storage_url,
        auth_token,
        container,
        subprefix,
        prefixlist)

    content_type = 'application/directory'
    obj = None

    client.put_object(
        storage_url,
        auth_token,
        container,
        prefix + '/',
        obj,
        content_type=content_type)
    prefixlist.append(prefix)

    return prefixlist
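To see how the recursion unwinds, consider a nested prefix (storage_url, auth_token, and the container are assumed to exist). Note that a single-segment prefix returns early, so the helper never creates the top-level pseudofolder itself:

# Recurses to 'photos/2023', then to 'photos' (whose subprefix is empty,
# which stops the recursion), then creates the objects 'photos/2023/' and
# 'photos/2023/albums/' on the way back out.
prefixlist = create_pseudofolder_from_prefix(
    storage_url, auth_token, 'my-container', 'photos/2023/albums', [])
print(prefixlist)  # ['photos/2023', 'photos/2023/albums']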
Example #8
    def put_SLO(self):
        # Upload the segment files (assumed to already exist in /tmp)
        assembly = []
        for i in range(1, 10):
            oname = '/tmp/slo_chunk_%d' % i
            f = open(oname, 'r')
            content_length = None
            response = dict()
            c.put_object(self.url, self.token,
                         self.container, oname, f,
                         content_length, None, None,
                         "application/octet-stream",
                         None, None, None, None, response)
            f.close()
            status = response.get('status')
            self.assertTrue(200 <= status < 300)

            headers = response.get('headers')
            segment = dict()
            segment['path'] = 'myobjects/%s' % oname
            segment['size_bytes'] = 1048576
            segment['etag'] = headers['etag']
            assembly.append(segment)

        content_length = None
        response = dict()
        headers = {'x-object-meta-prop1': 'val1'}
        c.put_object(self.url, self.token, self.container,
                     'assembly', json.dumps(assembly),
                     content_length=None, etag=None, chunk_size=None,
                     headers=headers, query_string='multipart-manifest=put',
                     response_dict=response)
        status = response.get('status')
        self.assertTrue(200 <= status < 300)
Example #9
    def _test_sync(self, object_post_as_copy):
        source_container, dest_container = self._setup_synced_containers()

        # upload to source
        object_name = 'object-%s' % uuid.uuid4()
        put_headers = {'X-Object-Meta-Test': 'put_value'}
        client.put_object(self.url, self.token, source_container, object_name,
                          'test-body', headers=put_headers)

        # cycle container-sync
        Manager(['container-sync']).once()

        resp_headers, body = client.get_object(self.url, self.token,
                                               dest_container, object_name)
        self.assertEqual(body, 'test-body')
        self.assertIn('x-object-meta-test', resp_headers)
        self.assertEqual('put_value', resp_headers['x-object-meta-test'])

        # update metadata with a POST, using an internal client so we can
        # vary the object_post_as_copy setting - first use post-as-copy
        post_headers = {'Content-Type': 'image/jpeg',
                        'X-Object-Meta-Test': 'post_value'}
        int_client = self.make_internal_client(
            object_post_as_copy=object_post_as_copy)
        int_client.set_object_metadata(self.account, source_container,
                                       object_name, post_headers)
        # sanity checks...
        resp_headers = client.head_object(
            self.url, self.token, source_container, object_name)
        self.assertIn('x-object-meta-test', resp_headers)
        self.assertEqual('post_value', resp_headers['x-object-meta-test'])
        self.assertEqual('image/jpeg', resp_headers['content-type'])

        # cycle container-sync
        Manager(['container-sync']).once()

        # verify that metadata changes were sync'd
        resp_headers, body = client.get_object(self.url, self.token,
                                               dest_container, object_name)
        self.assertEqual(body, 'test-body')
        self.assertIn('x-object-meta-test', resp_headers)
        self.assertEqual('post_value', resp_headers['x-object-meta-test'])
        self.assertEqual('image/jpeg', resp_headers['content-type'])

        # delete the object
        client.delete_object(
            self.url, self.token, source_container, object_name)
        with self.assertRaises(ClientException) as cm:
            client.get_object(
                self.url, self.token, source_container, object_name)
        self.assertEqual(404, cm.exception.http_status)  # sanity check

        # cycle container-sync
        Manager(['container-sync']).once()

        # verify delete has been sync'd
        with self.assertRaises(ClientException) as cm:
            client.get_object(
                self.url, self.token, dest_container, object_name)
        self.assertEqual(404, cm.exception.http_status)  # sanity check
Example #10
    def setUp(self):
        super(TestAccountReaper, self).setUp()
        self.all_objects = []
        # upload some containers
        body = 'test-body'
        for policy in ENABLED_POLICIES:
            container = 'container-%s-%s' % (policy.name, uuid.uuid4())
            client.put_container(self.url, self.token, container,
                                 headers={'X-Storage-Policy': policy.name})
            obj = 'object-%s' % uuid.uuid4()
            client.put_object(self.url, self.token, container, obj, body)
            self.all_objects.append((policy, container, obj))
            policy.load_ring('/etc/swift')

        Manager(['container-updater']).once()

        headers = client.head_account(self.url, self.token)

        self.assertEqual(int(headers['x-account-container-count']),
                         len(ENABLED_POLICIES))
        self.assertEqual(int(headers['x-account-object-count']),
                         len(ENABLED_POLICIES))
        self.assertEqual(int(headers['x-account-bytes-used']),
                         len(ENABLED_POLICIES) * len(body))

        part, nodes = self.account_ring.get_nodes(self.account)

        for node in nodes:
            direct_delete_account(node, part, self.account)
Example #11
    def test_main(self):
        # Create container
        container = "container-%s" % uuid4()
        client.put_container(self.url, self.token, container)

        # Kill container servers excepting two of the primaries
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        kill_nonprimary_server(cnodes, self.ipport2server)
        kill_server((cnode["ip"], cnode["port"]), self.ipport2server)

        # Create container/obj
        obj = "object-%s" % uuid4()
        client.put_object(self.url, self.token, container, obj, "")

        # Restart other primary server
        start_server((cnode["ip"], cnode["port"]), self.ipport2server)

        # Assert it does not know about container/obj
        self.assertFalse(direct_client.direct_get_container(cnode, cpart, self.account, container)[1])

        # Run the object-updaters
        Manager(["object-updater"]).once()

        # Assert the other primary server now knows about container/obj
        objs = [o["name"] for o in direct_client.direct_get_container(cnode, cpart, self.account, container)[1]]
        self.assertIn(obj, objs)
Example #12
    def test_raw_upload(self):
        # Raw upload happens when content_length is passed to put_object
        conn = c.http_connection(u'http://www.test.com/')
        resp = MockHttpResponse(status=200)
        conn[1].getresponse = resp.fake_response
        conn[1]._request = resp._fake_request
        astring = 'asdf'
        astring_len = len(astring)
        mock_file = six.StringIO(astring)

        c.put_object(url='http://www.test.com', http_conn=conn,
                     contents=mock_file, content_length=astring_len)
        self.assertTrue(isinstance(resp.requests_params['data'],
                                   swiftclient.utils.LengthWrapper))
        self.assertEqual(astring_len,
                         len(resp.requests_params['data'].read()))

        mock_file = six.StringIO(astring)
        c.put_object(url='http://www.test.com', http_conn=conn,
                     headers={'Content-Length': str(astring_len)},
                     contents=mock_file)
        self.assertTrue(isinstance(resp.requests_params['data'],
                                   swiftclient.utils.LengthWrapper))
        self.assertEqual(astring_len,
                         len(resp.requests_params['data'].read()))
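Outside of tests, the same mechanism lets put_object stream from an open file of known size instead of buffering it. A sketch, with placeholder URL, token, and path:

import os
from swiftclient import client

url = 'http://127.0.0.1:8080/v1/AUTH_test'  # placeholder
token = 'AUTH_tk_placeholder'               # placeholder
path = '/tmp/payload.bin'                   # placeholder

# Passing content_length makes swiftclient wrap the file in a LengthWrapper
# and send exactly that many bytes.
with open(path, 'rb') as f:
    client.put_object(url, token, 'my-container', 'payload.bin',
                      contents=f, content_length=os.path.getsize(path))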
Example #13
def testTestStorlet(url, token):
    print "Deploying test storlet"
    put_storlet_object(url, 
                       token, 
                       TEST_STORLET_NAME,
                       "%s/TestStorlet/bin/" % PATH_TO_STORLETS,
                       '', 
                       'com.ibm.storlet.test.test1')

    print "uploading object to execute test upon"
    c.put_object(url,
                 token,
                 'myobjects',
                 'test_object',
                 'some content')
    print "Invoking test storlet to print"
    invokeTestStorlet(url, token, "print", False)
    print "Invoking test storlet to crash"
    invokeTestStorlet(url, token, "crash")
    print "Invoking test storlet to hold"
    invokeTestStorlet(url, token, "hold")
    print "Invoking test storlet to print"
    invokeTestStorlet(url, token, "print", False)
    print "Invoking test storlet in parallel to print"
    invokeTestStorletinParallel(url, token)
Example #14
 def test_main(self):
     # Create container
     # Kill container servers excepting two of the primaries
     # Create container/obj
     # Restart other primary server
     # Assert it does not know about container/obj
     # Run the object-updaters
     # Assert the other primary server now knows about container/obj
     container = 'container-%s' % uuid4()
     client.put_container(self.url, self.token, container)
     cpart, cnodes = self.container_ring.get_nodes(self.account, container)
     cnode = cnodes[0]
     kill_nonprimary_server(cnodes, self.port2server, self.pids)
     kill_server(cnode['port'], self.port2server, self.pids)
     obj = 'object-%s' % uuid4()
     client.put_object(self.url, self.token, container, obj, '')
     start_server(cnode['port'], self.port2server, self.pids)
      self.assertFalse(direct_client.direct_get_container(
          cnode, cpart, self.account, container)[1])
     processes = []
     for node in xrange(1, 5):
         processes.append(Popen(['swift-object-updater',
                                 '/etc/swift/object-server/%d.conf' % node,
                                 'once']))
     for process in processes:
         process.wait()
     objs = [o['name'] for o in direct_client.direct_get_container(
         cnode, cpart, self.account, container)[1]]
      self.assertIn(obj, objs)
Example #15
    def test_delete_propagate(self):
        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url, self.token, self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(
            self.account, self.container_name, self.object_name)
        hnodes = self.object_ring.get_more_nodes(opart)
        p_dev2 = self.device_dir('object', onodes[1])

        # PUT object
        contents = Body()
        client.put_object(self.url, self.token, self.container_name,
                          self.object_name, contents=contents)

        # now let's shut one down
        self.kill_drive(p_dev2)

        # delete on the ones that are left
        client.delete_object(self.url, self.token,
                             self.container_name,
                             self.object_name)

        # spot check a node
        try:
            self.direct_get(onodes[0], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (onodes[0],))

        # revive the drive we took down
        self.revive_drive(p_dev2)

        # propagate the delete...
        # fire up reconstructor on handoff nodes only
        for hnode in hnodes:
            hnode_id = (hnode['port'] - 6000) / 10
            self.reconstructor.once(number=hnode_id)

        # check the first node to make sure its gone
        try:
            self.direct_get(onodes[1], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (onodes[1],))

        # make sure proxy get can't find it
        try:
            self.proxy_get()
        except Exception as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Deleted object was still available via the proxy!')
Example #16
    def test_sync_lazy_dkey(self):
        # Create synced containers, but with no key at dest
        source_container, dest_container =\
            self._setup_synced_containers('secret', None)

        # upload to source
        object_name = 'object-%s' % uuid.uuid4()
        client.put_object(self.url, self.token, source_container, object_name,
                          'test-body')

        # cycle container-sync, nothing should happen
        Manager(['container-sync']).once()
        with self.assertRaises(ClientException) as err:
            _junk, body = client.get_object(self.url, self.token,
                                            dest_container, object_name)
        self.assertEqual(err.exception.http_status, HTTP_NOT_FOUND)

        # amend dest key
        dest_headers = {'X-Container-Sync-Key': 'secret'}
        client.put_container(self.url, self.token, dest_container,
                             headers=dest_headers)
        # cycle container-sync, should replicate
        Manager(['container-sync']).once()
        _junk, body = client.get_object(self.url, self.token,
                                        dest_container, object_name)
        self.assertEqual(body, 'test-body')
Example #17
def get_segment_number(file_name, request, container, prefix=None):
    '''Return the segment number a given file should create next. The
    file's segment pseudofolder is created first if it does not already
    exist.'''

    if prefix:
        foldername = prefix + '/' + file_name + '_segments'
    else:
        foldername = file_name + '_segments'

    storage_url = request.session.get('storage_url', '')
    auth_token = request.session.get('auth_token', '')

    foldername = os.path.normpath(foldername)
    foldername = foldername.strip('/')
    foldername += '/'

    content_type = 'application/directory'
    obj = None

    client.put_object(storage_url, auth_token, container, foldername, obj,
                      content_type=content_type)

    meta, objects = client.get_container(storage_url, auth_token, container,
                                         delimiter='/', prefix=foldername)

    pseudofolders, objs = pseudofolder_object_list(objects, prefix)

    return get_first_nonconsecutive(objs)
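A hedged usage sketch: given a Django-style request whose session holds the credentials, this returns the next free segment number for a file, creating its '<name>_segments/' pseudofolder on first use (the file, container, and prefix names are illustrative):

# Next segment number for 'video.mp4' uploads into container 'media'.
n = get_segment_number('video.mp4', request, 'media', prefix='movies')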
Example #18
 def test_one_node_fails(self):
     # Create container1
     # Kill container1 servers excepting two of the primaries
     # Delete container1
     # Restart other container1 primary server
      # Create container1/object1 (allowed because at least one server thinks
      #   the container exists)
     # Get to a final state
     # Assert all container1 servers indicate container1 is alive and
     #   well with object1
     # Assert account level also indicates container1 is alive and
     #   well with object1
     container1 = 'container-%s' % uuid4()
     cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
     client.put_container(self.url, self.token, container1)
     kill_nonprimary_server(cnodes, self.port2server, self.pids)
     kill_server(cnodes[0]['port'], self.port2server, self.pids)
     client.delete_container(self.url, self.token, container1)
     start_server(cnodes[0]['port'], self.port2server, self.pids)
     client.put_object(self.url, self.token, container1, 'object1', '123')
     get_to_final_state()
     for cnode in cnodes:
          self.assertEqual(
             [o['name'] for o in direct_client.direct_get_container(
                 cnode, cpart, self.account, container1)[1]],
             ['object1'])
     headers, containers = client.get_account(self.url, self.token)
      self.assertEqual(headers['x-account-container-count'], '1')
      self.assertEqual(headers['x-account-object-count'], '1')
      self.assertEqual(headers['x-account-bytes-used'], '3')
Example #19
def convert(vid_format, videos, local_file, container, storage_url, auth_token, this_id, id_original, c):
    temp_file = videos[vid_format]['temp_file']
    headers = _get_video_headers(storage_url, auth_token, container, this_id)
    conv = c.convert(local_file, temp_file, videos[vid_format]['options'])
    for ftimecode in conv:
        timecode = int(ftimecode * 100)
        if timecode == 100:
            break
        video_keys[id_original][vid_format + 'status'] = int(timecode)
        headers['X-Object-Meta-Status'] = timecode
        client.post_object(storage_url, auth_token, container, this_id, headers=headers)
        print "Converting %s (%f) ...%s\n" % (vid_format, timecode, local_file)
    if os.path.exists(temp_file):
        size = os.path.getsize(temp_file)
        print '\n\nSIZE: ', size, '\n\n'
    headers = _get_video_headers(storage_url, auth_token, container, this_id)
    headers['X-Object-Meta-Content-Type'] = 'video/' + vid_format
    headers['X-Object-Meta-Status'] = 100
    try:
        with open(temp_file, 'r+b') as f:
            client.put_object(storage_url, auth_token, container, this_id, f, headers=headers)
    except swift_exception.ClientException as e:
        storage_url, auth_token = _reauthorize(request)
        with open(temp_file, 'r+b') as f:
            client.put_object(storage_url, auth_token, container, this_id, f, headers=headers)
    video_keys[id_original][vid_format + 'status'] = 100
    if os.path.exists(temp_file):
        os.remove(temp_file)
Example #20
def put_file_as_storlet_input_object(url, token, local_path, local_file):
    resp = dict()
    f = open("%s/%s" % (local_path, local_file), "r")
    c.put_object(url, token, "myobjects", local_file, f, content_type="application/octet-stream", response_dict=resp)
    f.close()
    status = resp.get("status")
    assert status in (200, 201)
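A hypothetical call, assuming /tmp/input.txt exists locally and the 'myobjects' container has already been created:

put_file_as_storlet_input_object(url, token, '/tmp', 'input.txt')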
Example #21
def create_pseudofolder(request, container, prefix=None):
    """ Creates a pseudofolder (empty object of type application/directory) """
    storage_url = request.session.get('storage_url', '')
    auth_token = request.session.get('auth_token', '')

    form = PseudoFolderForm(request.POST)
    if form.is_valid():
        foldername = request.POST.get('foldername', None)
        if prefix:
            foldername = prefix + '/' + foldername
        foldername = os.path.normpath(foldername)
        foldername = foldername.strip('/')
        foldername += '/'

        content_type = 'application/directory'
        obj = None

        try:
            client.put_object(storage_url, auth_token,
                              container, foldername, obj,
                              content_type=content_type)
            messages.add_message(
                request,
                messages.SUCCESS,
                _(
                    "The folder " +
                    request.POST.get('foldername', None) + " was created.")
            )

        except client.ClientException:
            messages.add_message(request, messages.ERROR, _("Access denied."))

    return JsonResponse({})
Example #22
 def test_main(self):
     container = 'container-%s' % uuid4()
     client.put_container(self.url, self.token, container)
     apart, anodes = self.account_ring.get_nodes(self.account)
     anode = anodes[0]
     cpart, cnodes = self.container_ring.get_nodes(self.account, container)
     cnode = cnodes[0]
     kill(self.pids[self.port2server[cnode['port']]], SIGTERM)
     obj = 'object-%s' % uuid4()
     client.put_object(self.url, self.token, container, obj, '')
     self.pids[self.port2server[cnode['port']]] = \
         Popen(['swift-container-server',
                '/etc/swift/container-server/%d.conf' %
                 ((cnode['port'] - 6001) / 10)]).pid
     sleep(2)
      self.assertFalse(direct_client.direct_get_container(
          cnode, cpart, self.account, container)[1])
     ps = []
     for n in xrange(1, 5):
         ps.append(Popen(['swift-object-updater',
                          '/etc/swift/object-server/%d.conf' % n, 'once']))
     for p in ps:
         p.wait()
     objs = [o['name'] for o in direct_client.direct_get_container(cnode,
                                 cpart, self.account, container)[1]]
      self.assertIn(obj, objs)
Example #23
 def _work(self, op):
     running_ops = self.outstanding
     self.outstanding += 1
     t1 = time()
     try:
         # client = Connection(authurl='http://192.168.16.12:8080/auth/v1.0', user='******', key='testing')
         # client.put_object(self.container,'%s-%s' % (self.base, op), self.data)
         put_object(self.storage_url, token=self.token, container=self.container, name='%s-%s' % (self.base, op), contents=self.data)
         t2 = time()
         elapsed = (t2-t1) * 1000
         self.outstanding -= 1
         LOG.info("Operation #%d took %.2f ms (%.2f MB/s, %d ops outstanding on arrival)" % (op, elapsed, (self.data_size / (1024.*1024)) / (t2 - t1) , running_ops))
         entry = { "Operation": op,
                   "Arrival-time": t1,
                   "Completion-time": t2,
                   "Elapsed": elapsed,
                   "Outstanding-on-arrival": running_ops,
                   "Outstanding-on-completion": self.outstanding }
         print '%d, %f, %f, %.2f, %d, %d' % (op, t1, t2, elapsed, running_ops, self.outstanding)
         return entry
     except KeyboardInterrupt:
         self.outstanding -= 1
         self.running = False
         return None
     except Exception:
         self.errors += 1
         raise
Example #24
    def createCollection(self, name):
        """Create a pseudo-folder."""
        if self.path:
            tmp = self.path.split('/')
            name = '/'.join(tmp[2:]) + '/' + name
        name = name.strip('/')
        try:
            client.head_object(self.storage_url,
                               self.auth_token,
                               self.container,
                               name,
                               http_conn=self.http_connection)
            raise dav_error.DAVError(dav_error.HTTP_METHOD_NOT_ALLOWED)
        except client.ClientException:
            pass

        try:
            client.head_object(self.storage_url,
                               self.auth_token,
                               self.container,
                               name + '/',
                               http_conn=self.http_connection)
            raise dav_error.DAVError(dav_error.HTTP_METHOD_NOT_ALLOWED)
        except client.ClientException:
            pass

        client.put_object(self.storage_url,
                          self.auth_token,
                          self.container,
                          sanitize(name).rstrip('/') + '/',
                          content_type='application/directory',
                          http_conn=self.http_connection)
Example #25
    def copyMoveSingle(self, destPath, isMove):
        src = '/'.join(self.path.split('/')[2:])
        dst = '/'.join(destPath.split('/')[2:])
        src_cont = self.path.split('/')[1]
        dst_cont = destPath.split('/')[1]

        if self.is_subdir(src):
            client.put_object(self.storage_url,
                              self.auth_token,
                              dst_cont,
                              dst.rstrip('/') + '/',
                              content_type='application/directory',
                              http_conn=self.http_connection)
            return

        headers = {'X-Copy-From': self.path}
        try:
            client.put_object(self.storage_url,
                              self.auth_token,
                              dst_cont,
                              dst,
                              headers=headers,
                              http_conn=self.http_connection)

            if isMove:
                client.delete_object(self.storage_url,
                                     self.auth_token,
                                     src_cont,
                                     src,
                                     http_conn=self.http_connection)
        except client.ClientException:
            pass
Example #26
def create_pseudofolder(request, container, prefix=None):
    """ Creates a pseudofolder (empty object of type application/directory) """
    storage_url = request.session.get('storage_url', '')
    auth_token = request.session.get('auth_token', '')

    form = PseudoFolderForm(request.POST)
    if form.is_valid():
        foldername = request.POST.get('foldername', None)
        if prefix:
            foldername = prefix + '/' + foldername
        foldername = os.path.normpath(foldername)
        foldername = foldername.strip('/')
        foldername += '/'

        content_type = 'application/directory'
        obj = None

        try:
            client.put_object(storage_url, auth_token,
                              container, foldername, obj,
                              content_type=content_type)
            messages.add_message(request, messages.INFO,
                                 _("Pseudofolder created."))
        except client.ClientException:
            traceback.print_exc()
            messages.add_message(request, messages.ERROR, _("Access denied."))

        if prefix:
            return redirect(objectview, container=container, prefix=prefix)
        return redirect(objectview, container=container)

    return render_to_response('create_pseudofolder.html', {
                              'container': container,
                              'prefix': prefix,
                              }, context_instance=RequestContext(request))
Example #27
    def ingest(self):
        ''' Upload a file when creating an id, generally used with swift'''

        put_container(self.url, self.token, self.swift_info["container"])

        with open(self.path) as f:
            put_object(self.url, token=self.token,
                       container=self.swift_info["container"],
                       name=self.swift_info["object"], contents=f)
Example #28
def _small_file_to_swift(im, size, original_name, uid, storage_url, auth_token, response_dict):
    id_image = uuid.uuid1()
    im_type = im.format
    main_header = _make_header(False, im_type, original_name, size, 'original-thumbnail', None)
    with open('temp', 'r+') as f:
        client.put_object(storage_url, auth_token, image_container, str(id_image), f, headers=main_header)
    response_dict[uid] = {'original-thumb-key': id_image}
    return
Example #29
    def test_no_content_type(self):
        conn = c.http_connection(u'http://www.test.com/')
        resp = MockHttpResponse(status=200)
        conn[1].getresponse = resp.fake_response
        conn[1]._request = resp._fake_request

        c.put_object(url='http://www.test.com', http_conn=conn)
        request_header = resp.requests_params['headers']
        self.assertEqual(request_header['content-type'], b'')
Example #30
 def test_server_error(self):
     body = 'c' * 60
     c.http_connection = self.fake_http_connection(500, body=body)
     args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', 'asdf')
     self.assertRaises(c.ClientException, c.put_object, *args)
     try:
         c.put_object(*args)
     except c.ClientException as e:
         self.assertEqual(e.http_response_content, body)
Example #31
 def put_object(self, account_id, token, s_type, container, name, content):
     cnx = self.get_cnx(account_id, s_type)
     sclient.put_object("", token, container, name, content, http_conn=cnx)
Example #32
    def test_update_during_POST_only(self):
        # verify correct update values when PUT update is missed but then a
        # POST update succeeds *before* the PUT async pending update is sent
        cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
        client.put_container(self.url,
                             self.token,
                             'c1',
                             headers={'X-Storage-Policy': self.policy.name})

        # put an object while one container server is stopped so that we force
        # an async update to it
        kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        content = u'stuff'
        client.put_object(self.url,
                          self.token,
                          'c1',
                          'o1',
                          contents=content,
                          content_type='test/ctype')
        meta = client.head_object(self.url, self.token, 'c1', 'o1')

        # re-start the container server and assert that it does not yet know
        # about the object
        start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
        self.assertFalse(
            direct_client.direct_get_container(cnodes[0], cpart, self.account,
                                               'c1')[1])

        int_client = self.make_internal_client()
        int_client.set_object_metadata(self.account, 'c1', 'o1',
                                       {'X-Object-Meta-Fruit': 'Tomato'})
        self.assertEqual('Tomato',
                         int_client.get_object_metadata(
                             self.account, 'c1',
                             'o1')['x-object-meta-fruit'])  # sanity

        # check the re-started container server got same update as others.
        # we cannot assert the actual etag value because it may be encrypted
        listing_etags = set()
        for cnode in cnodes:
            listing = direct_client.direct_get_container(
                cnode, cpart, self.account, 'c1')[1]
            self.assertEqual(1, len(listing))
            self.assertEqual(len(content), listing[0]['bytes'])
            self.assertEqual('test/ctype', listing[0]['content_type'])
            listing_etags.add(listing[0]['hash'])
        self.assertEqual(1, len(listing_etags))

        # check that listing meta returned to client is consistent with object
        # meta returned to client
        hdrs, listing = client.get_container(self.url, self.token, 'c1')
        self.assertEqual(1, len(listing))
        self.assertEqual('o1', listing[0]['name'])
        self.assertEqual(len(content), listing[0]['bytes'])
        self.assertEqual(meta['etag'], listing[0]['hash'])
        self.assertEqual('test/ctype', listing[0]['content_type'])

        # Run the object-updaters to send the async pending from the PUT
        Manager(['object-updater']).once()

        # check container listing metadata is still correct
        for cnode in cnodes:
            listing = direct_client.direct_get_container(
                cnode, cpart, self.account, 'c1')[1]
            self.assertEqual(1, len(listing))
            self.assertEqual(len(content), listing[0]['bytes'])
            self.assertEqual('test/ctype', listing[0]['content_type'])
            listing_etags.add(listing[0]['hash'])
        self.assertEqual(1, len(listing_etags))
Example #33
    def test_sync_unexpired_object_metadata(self):
        # verify that metadata can be sync'd to a frag that has missed a POST
        # and consequently that frag appears to be expired, when in fact the
        # POST removed the x-delete-at header
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers={'x-storage-policy': self.policy.name})
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   self.container_name,
                                                   self.object_name)
        delete_at = int(time.time() + 3)
        contents = ('body-%s' % uuid.uuid4()).encode()
        headers = {'x-delete-at': delete_at}
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          headers=headers,
                          contents=contents)
        # fail a primary
        post_fail_node = random.choice(onodes)
        post_fail_path = self.device_dir(post_fail_node)
        self.kill_drive(post_fail_path)
        # post over w/o x-delete-at
        client.post_object(self.url, self.token, self.container_name,
                           self.object_name, {'content-type': 'something-new'})
        # revive failed primary
        self.revive_drive(post_fail_path)
        # wait for the delete_at to pass, and check that it thinks the object
        # is expired
        timeout = time.time() + 5
        err = None
        while time.time() < timeout:
            try:
                direct_client.direct_head_object(
                    post_fail_node,
                    opart,
                    self.account,
                    self.container_name,
                    self.object_name,
                    headers={
                        'X-Backend-Storage-Policy-Index': int(self.policy)
                    })
            except direct_client.ClientException as client_err:
                if client_err.http_status != 404:
                    raise
                err = client_err
                break
            else:
                time.sleep(0.1)
        else:
            self.fail('Failed to get a 404 from node with expired object')
        self.assertEqual(err.http_status, 404)
        self.assertIn('X-Backend-Timestamp', err.http_headers)

        # but from the proxy we've got the whole story
        headers, body = client.get_object(self.url, self.token,
                                          self.container_name,
                                          self.object_name)
        self.assertNotIn('X-Delete-At', headers)
        self.reconstructor.once()

        # ... and all the nodes have the final unexpired state
        for node in onodes:
            headers = direct_client.direct_head_object(
                node,
                opart,
                self.account,
                self.container_name,
                self.object_name,
                headers={'X-Backend-Storage-Policy-Index': int(self.policy)})
            self.assertNotIn('X-Delete-At', headers)
Example #34
    def test_main(self):
        # Create one account, container and object file.
        # Find the node holding the account, container and object replicas.
        # Delete all directories and files from this node (device).
        # Wait 60 seconds and check the replication results.
        # Delete directories and files in object storage without deleting
        # the "hashes.pkl" file, and check that the files were not
        # replicated.
        # Then delete "hashes.pkl" and check that all files were replicated.
        path_list = []
        data_dir = get_data_dir(self.policy)
        # Figure out where the devices are
        for node_id in range(1, 5):
            conf = readconf(self.configs['object-server'][node_id])
            device_path = conf['app:object-server']['devices']
            for dev in self.object_ring.devs:
                if dev['port'] == int(conf['app:object-server']['bind_port']):
                    device = dev['device']
            path_list.append(os.path.join(device_path, device))

        # Put data to storage nodes
        container = 'container-%s' % uuid4()
        client.put_container(self.url,
                             self.token,
                             container,
                             headers={'X-Storage-Policy': self.policy.name})

        obj = 'object-%s' % uuid4()
        client.put_object(self.url, self.token, container, obj, 'VERIFY')

        # Get all data file information
        (files_list, dir_list) = collect_info(path_list)
        num = find_max_occupancy_node(dir_list)
        test_node = path_list[num]
        test_node_files_list = []
        for files in files_list[num]:
            if not files.endswith('.pending'):
                test_node_files_list.append(files)
        test_node_dir_list = []
        for d in dir_list[num]:
            if not d.startswith('tmp'):
                test_node_dir_list.append(d)
        # Run all replicators
        try:
            self.replicators.start()

            # Delete some files
            for directory in os.listdir(test_node):
                shutil.rmtree(os.path.join(test_node, directory))

            self.assertFalse(os.listdir(test_node))

            # We will keep trying these tests until they pass for up to 60s
            begin = time.time()
            while True:
                (new_files_list, new_dir_list) = collect_info([test_node])

                try:
                    # Check replicate files and dir
                    for files in test_node_files_list:
                        self.assertIn(files, new_files_list[0])

                    for dir in test_node_dir_list:
                        self.assertIn(dir, new_dir_list[0])
                    break
                except Exception:
                    if time.time() - begin > 60:
                        raise
                    time.sleep(1)

            # Delete directories and files in object storage without
            # deleting the "hashes.pkl" file.
            # Check that the files were not replicated.
            for directory in os.listdir(os.path.join(test_node, data_dir)):
                for input_dir in os.listdir(
                        os.path.join(test_node, data_dir, directory)):
                    if os.path.isdir(
                            os.path.join(test_node, data_dir, directory,
                                         input_dir)):
                        shutil.rmtree(
                            os.path.join(test_node, data_dir, directory,
                                         input_dir))

            # We will keep trying these tests until they pass for up to 60s
            begin = time.time()
            while True:
                try:
                    for directory in os.listdir(
                            os.path.join(test_node, data_dir)):
                        for input_dir in os.listdir(
                                os.path.join(test_node, data_dir, directory)):
                            self.assertFalse(
                                os.path.isdir(
                                    os.path.join(test_node, data_dir,
                                                 directory, input_dir)))
                    break
                except Exception:
                    if time.time() - begin > 60:
                        raise
                    time.sleep(1)

            # Now delete the "hashes.pkl" file.
            # Check that all files were replicated.
            for directory in os.listdir(os.path.join(test_node, data_dir)):
                os.remove(
                    os.path.join(test_node, data_dir, directory, 'hashes.pkl'))

            # We will keep trying these tests until they pass for up to 60s
            begin = time.time()
            while True:
                try:
                    (new_files_list, new_dir_list) = collect_info([test_node])

                    # Check replicate files and dirs
                    for files in test_node_files_list:
                        self.assertIn(files, new_files_list[0])

                    for directory in test_node_dir_list:
                        self.assertIn(directory, new_dir_list[0])
                    break
                except Exception:
                    if time.time() - begin > 60:
                        raise
                    time.sleep(1)
        finally:
            self.replicators.stop()
Example #35
 def test_query_string(self):
     c.http_connection = self.fake_http_connection(200,
                                                   query_string="hello=20")
     c.put_object('http://www.test.com', 'asdf', 'asdf', 'asdf',
                  query_string="hello=20")
Example #36
 def test_ok(self):
     c.http_connection = self.fake_http_connection(200)
     args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', 'asdf')
     value = c.put_object(*args)
     self.assertTrue(isinstance(value, basestring))
Example #37
 def put_object(self, headers=None):
     """
     issue put for zero byte test object
     """
     client.put_object(self.url, self.token, self.container_name,
                       self.object_name, headers=headers)
Example #38
    def test_main(self):
        # Create container
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy':
                                      self.policy.name})

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        onode = onodes[0]

        # Kill one container/obj primary server
        kill_server(onode['port'], self.port2server, self.pids)

        # Delete the default data directory for objects on the primary server
        obj_dir = '%s/%s' % (self._get_objects_dir(onode),
                             get_data_dir(self.policy))
        shutil.rmtree(obj_dir, True)
        self.assertFalse(os.path.exists(obj_dir))

        # Create container/obj (goes to two primary servers and one handoff)
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))

        # Kill other two container/obj primary servers
        #  to ensure GET handoff works
        for node in onodes[1:]:
            kill_server(node['port'], self.port2server, self.pids)

        # Indirectly through proxy assert we can get container/obj
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        # Restart those other two container/obj primary servers
        for node in onodes[1:]:
            start_server(node['port'], self.port2server, self.pids)
            self.assertFalse(os.path.exists(obj_dir))
            # We've indirectly verified the handoff node has the object, but
            # let's directly verify it.

        # Directly to handoff server assert we can get container/obj
        another_onode = self.object_ring.get_more_nodes(opart).next()
        odata = direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj,
            headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))

        # Assert container listing (via proxy and directly) has container/obj
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj not in objs:
            raise Exception('Container listing did not know about object')
        timeout = time.time() + 5
        found_objs_on_cnode = []
        while time.time() < timeout:
            for cnode in [c for c in cnodes if c not in
                          found_objs_on_cnode]:
                objs = [o['name'] for o in
                        direct_client.direct_get_container(
                            cnode, cpart, self.account, container)[1]]
                if obj in objs:
                    found_objs_on_cnode.append(cnode)
            if len(found_objs_on_cnode) >= len(cnodes):
                break
            time.sleep(0.3)
        if len(found_objs_on_cnode) < len(cnodes):
            missing = ['%s:%s' % (cnode['ip'], cnode['port']) for cnode in
                       cnodes if cnode not in found_objs_on_cnode]
            raise Exception('Container servers %r did not know about object' %
                            missing)

        # Bring the first container/obj primary server back up
        start_server(onode['port'], self.port2server, self.pids)

        # Assert that it doesn't have container/obj yet
        self.assertFalse(os.path.exists(obj_dir))
        try:
            direct_client.direct_get_object(
                onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
            self.assertFalse(os.path.exists(obj_dir))
        else:
            self.fail("Expected ClientException but didn't get it")

        try:
            port_num = onode['replication_port']
        except KeyError:
            port_num = onode['port']
        try:
            another_port_num = another_onode['replication_port']
        except KeyError:
            another_port_num = another_onode['port']

        # Run object replication for first container/obj primary server
        num = (port_num - 6000) / 10
        Manager(['object-replicator']).once(number=num)

        # Run object replication for handoff node
        another_num = (another_port_num - 6000) / 10
        Manager(['object-replicator']).once(number=another_num)

        # Assert the first container/obj primary server now has container/obj
        odata = direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))

        # Assert the handoff server no longer has container/obj
        try:
            direct_client.direct_get_object(
                another_onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
Example #39
    def test_reconcile_manifest(self):
        manifest_data = []

        def write_part(i):
            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
            part_name = 'manifest_part_%0.2d' % i
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            client.put_object(self.url,
                              self.token,
                              self.container_name,
                              part_name,
                              contents=body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        policy = random.choice(list(POLICIES))
        self.brain.put_container(policy.idx)
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)

        self.brain.stop_handoff_half()
        wrong_policy = random.choice([p for p in POLICIES if p is not policy])
        self.brain.put_container(wrong_policy.idx)
        # write some more parts
        for i in range(10, 20):
            write_part(i)

        # write manifest
        try:
            client.put_object(self.url,
                              self.token,
                              self.container_name,
                              self.object_name,
                              contents=utils.json.dumps(manifest_data),
                              query_string='multipart-manifest=put')
        except ClientException as err:
            # so as it works out, you can't really upload a multi-part
            # manifest for objects that are currently misplaced - you have to
            # wait until they're all available - which is about the same as
            # some other failure that causes data to be unavailable to the
            # proxy at the time of upload
            self.assertEqual(err.http_status, 400)

        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(self.account, self.container_name,
                                            direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }

        direct_manifest_data = map(translate_direct, manifest_data)
        headers = {
            'x-container-host':
            ','.join('%s:%s' % (n['ip'], n['port'])
                     for n in self.container_ring.devs),
            'x-container-device':
            ','.join(n['device'] for n in self.container_ring.devs),
            'x-container-partition':
            container_part,
            'X-Backend-Storage-Policy-Index':
            wrong_policy.idx,
            'X-Static-Large-Object':
            'True',
        }
        for node in nodes:
            direct_client.direct_put_object(
                node,
                part,
                self.account,
                self.container_name,
                direct_manifest_name,
                contents=utils.json.dumps(direct_manifest_data),
                headers=headers)
            break  # one should do it...

        self.brain.start_handoff_half()
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # let's see how that direct upload worked out...
        metadata, body = client.get_object(
            self.url,
            self.token,
            self.container_name,
            direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEqual(entry[key], direct_manifest_data[i][key])
        metadata, body = client.get_object(self.url, self.token,
                                           self.container_name,
                                           direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(
            body,
            ''.join('VERIFY%0.2d' % i + '\x00' * 1048576 for i in range(20)))

        # and regular upload should work now too
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=utils.json.dumps(manifest_data),
                          query_string='multipart-manifest=put')
        metadata = client.head_object(self.url, self.token,
                                      self.container_name, self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
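
get_to_final_state() above is a probe-test helper that is not shown in this listing; a rough sketch of what it is assumed to do (run the consistency daemons until the cluster converges):

from swift.common.manager import Manager

def get_to_final_state():
    # replicate first so handoffs drain, flush pending account/container
    # updates, then replicate again to settle the databases
    replicators = Manager(['account-replicator', 'container-replicator',
                           'object-replicator'])
    updaters = Manager(['object-updater', 'container-updater'])
    replicators.once()
    updaters.once()
    replicators.once()
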
Example No. 40
    def test_reconstruct_from_reverted_fragment_archive(self):
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   self.container_name,
                                                   self.object_name)

        # find a primary server that only has one of its devices in the
        # primary node list
        group_nodes_by_config = defaultdict(list)
        for n in onodes:
            group_nodes_by_config[self.config_number(n)].append(n)
        for config_number, node_list in group_nodes_by_config.items():
            if len(node_list) == 1:
                break
        else:
            self.fail('ring balancing did not use all available nodes')
        primary_node = node_list[0]

        # ... and 507 its device
        primary_device = self.device_dir(primary_node)
        self.kill_drive(primary_device)

        # PUT object
        contents = Body()
        etag = client.put_object(self.url,
                                 self.token,
                                 self.container_name,
                                 self.object_name,
                                 contents=contents)
        self.assertEqual(contents.etag, etag)

        # fix the primary device and sanity GET
        self.revive_drive(primary_device)
        _headers, actual_etag = self.proxy_get()
        self.assertEqual(etag, actual_etag)

        # find a handoff holding the fragment
        for hnode in self.object_ring.get_more_nodes(opart):
            try:
                _hdrs, reverted_fragment_etag = self.direct_get(hnode, opart)
            except direct_client.DirectClientException as err:
                if err.http_status != 404:
                    raise
            else:
                break
        else:
            self.fail('Unable to find handoff fragment!')

        # we'll force the handoff device to revert instead of potentially
        # racing with rebuild by deleting any other fragments that may be on
        # the same server
        handoff_fragment_etag = None
        for node in onodes:
            if self.is_local_to(node, hnode):
                # we'll keep track of the etag of this fragment we're removing
                # in case we need it later (cue foreshadowing music)...
                try:
                    _hdrs, handoff_fragment_etag = self.direct_get(node, opart)
                except direct_client.DirectClientException as err:
                    if err.http_status != 404:
                        raise
                    # this just means our handoff device was on the same
                    # machine as the primary!
                    continue
                # use the primary node's device - not the hnode device
                part_dir = self.storage_dir(node, part=opart)
                shutil.rmtree(part_dir, True)

        # revert from handoff device with reconstructor
        self.reconstructor.once(number=self.config_number(hnode))

        # verify fragment reverted to primary server
        self.assertEqual(reverted_fragment_etag,
                         self.direct_get(primary_node, opart)[1])

        # now we'll remove some data on one of the primary node's partners
        partner = random.choice(
            reconstructor._get_partners(primary_node['index'], onodes))

        try:
            _hdrs, rebuilt_fragment_etag = self.direct_get(partner, opart)
        except direct_client.DirectClientException as err:
            if err.http_status != 404:
                raise
            # partner already had its fragment removed
            if (handoff_fragment_etag is not None
                    and self.is_local_to(hnode, partner)):
                # oh, well that makes sense then...
                rebuilt_fragment_etag = handoff_fragment_etag
            else:
                # I wonder what happened?
                self.fail('Partner inexplicably missing fragment!')
        part_dir = self.storage_dir(partner, part=opart)
        shutil.rmtree(part_dir, True)

        # sanity, it's gone
        try:
            self.direct_get(partner, opart)
        except direct_client.DirectClientException as err:
            if err.http_status != 404:
                raise
        else:
            self.fail('successful GET of removed partner fragment archive!?')

        # and force the primary node to do a rebuild
        self.reconstructor.once(number=self.config_number(primary_node))

        # and validate the partners rebuilt_fragment_etag
        try:
            self.assertEqual(rebuilt_fragment_etag,
                             self.direct_get(partner, opart)[1])
        except direct_client.DirectClientException as err:
            if err.http_status != 404:
                raise
            else:
                self.fail('Did not find rebuilt fragment on partner node')
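
A note on reconstructor._get_partners used above: it returns the primaries whose fragment indexes neighbour the given node's, i.e. the nodes it syncs fragments with. A rough sketch, assuming the classic left/right-neighbour definition (newer Swift releases may hand back additional nodes):

def get_partners(frag_index, part_nodes):
    # left and right neighbours in the EC fragment ordering; the modulo
    # wraps index 0 and the last index around the ring of primaries
    return [part_nodes[(frag_index - 1) % len(part_nodes)],
            part_nodes[(frag_index + 1) % len(part_nodes)]]
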
Example No. 41
def upload(request, container, prefix=None):
    """ Display upload form using swift formpost """

    storage_url = request.session.get('storage_url', '')
    auth_token = request.session.get('auth_token', '')

    #redirect_url = get_base_url(request)
    redirect_url = reverse('objectview', kwargs={
        'container': container,
    })

    swift_url = storage_url + '/' + container + '/'
    if prefix:
        swift_url += prefix
        redirect_url += prefix

    url_parts = urlparse.urlparse(swift_url)
    path = url_parts.path

    max_file_size = 5 * 1024 * 1024 * 1024
    max_file_count = 1
    expires = int(time.time() + 15 * 60)
    key = get_temp_key(storage_url, auth_token)
    if not key:
        messages.add_message(request, messages.ERROR, _("Access denied."))
        if prefix:
            return redirect(objectview, container=container, prefix=prefix)
        else:
            return redirect(objectview, container=container)

    hmac_body = '%s\n%s\n%s\n%s\n%s' % (path, redirect_url, max_file_size,
                                        max_file_count, expires)
    signature = hmac.new(str(key), str(hmac_body), sha1).hexdigest()

    prefixes = prefix_list(prefix)

    if request.method == 'POST':
        post_file = request.FILES.values().pop()
        if prefix:
            post_file_name = prefix + post_file.name
        else:
            post_file_name = post_file.name
        try:
            client.put_object(storage_url, auth_token, container,
                              post_file_name, post_file)
            if prefix:
                return redirect(objectview, container=container, prefix=prefix)
            else:
                return redirect(objectview, container=container)
        except client.ClientException:
            traceback.print_exc()
            messages.add_message(request, messages.ERROR, _("Access denied."))

    return render_to_response('upload_form.html', {
        'swift_url': swift_url,
        'redirect_url': redirect_url,
        'max_file_size': max_file_size,
        'max_file_count': max_file_count,
        'expires': expires,
        'signature': signature,
        'container': container,
        'prefix': prefix,
        'prefixes': prefixes,
    },
                              context_instance=RequestContext(request))
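
For context, a sketch of the form that upload_form.html would render from this context so the browser POSTs straight to Swift. The hidden field names follow Swift's formpost middleware; the template string itself is illustrative, not taken from the original project:

FORM_TEMPLATE = '''
<form action="%(swift_url)s" method="POST" enctype="multipart/form-data">
  <input type="hidden" name="redirect" value="%(redirect_url)s"/>
  <input type="hidden" name="max_file_size" value="%(max_file_size)s"/>
  <input type="hidden" name="max_file_count" value="%(max_file_count)s"/>
  <input type="hidden" name="expires" value="%(expires)s"/>
  <input type="hidden" name="signature" value="%(signature)s"/>
  <input type="file" name="file1"/>
  <input type="submit" value="Upload"/>
</form>
'''
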
Example No. 42
    def put_object(self, container, obj_name, content=None, content_type=None, *args):
        # content_type must go by keyword: positionally it would fill content_length
        return swift_client.put_object(self.storage_url, self.token, container,
                                       obj_name, content, *args, content_type=content_type)
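
A minimal usage sketch for the wrapper above; the SwiftHelper class and the endpoint values are illustrative placeholders, not part of the original snippet:

import swiftclient.client as swift_client

class SwiftHelper(object):
    def __init__(self, storage_url, token):
        self.storage_url = storage_url
        self.token = token

    def put_object(self, container, obj_name, content=None,
                   content_type=None, *args):
        return swift_client.put_object(self.storage_url, self.token,
                                       container, obj_name, content, *args,
                                       content_type=content_type)

helper = SwiftHelper('http://127.0.0.1:8080/v1/AUTH_test', '<auth token>')
helper.put_object('backups', 'notes.txt', content='hello',
                  content_type='text/plain')
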
Example No. 43
    def test_handoff_non_durable(self):
        # verify that reconstructor reverts non-durable frags from handoff to
        # primary (and also durable frag of same object on same handoff) and
        # cleans up non-durable data files on handoffs after revert
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   self.container_name,
                                                   self.object_name)
        pdevs = [self.device_dir(onode) for onode in onodes]
        hnodes = list(
            itertools.islice(self.object_ring.get_more_nodes(opart), 2))

        # kill a primary node so we can force data onto a handoff
        self.kill_drive(pdevs[0])

        # PUT object at t1
        contents = Body(total=3.5 * 2**20)
        headers = {'x-object-meta-foo': 'meta-foo'}
        headers_post = {'x-object-meta-bar': 'meta-bar'}
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=contents,
                          headers=headers)
        client.post_object(self.url,
                           self.token,
                           self.container_name,
                           self.object_name,
                           headers=headers_post)
        # (Some versions of?) swiftclient will mutate the headers dict on post
        headers_post.pop('X-Auth-Token', None)

        # this primary can't serve the data; we expect 507 here and not 404
        # because we're using mount_check to kill nodes
        self.assert_direct_get_fails(onodes[0], opart, 507)
        # these primaries and first handoff do have the data
        for onode in (onodes[1:]):
            self.assert_direct_get_succeeds(onode, opart)
        _hdrs, older_frag_etag = self.assert_direct_get_succeeds(
            hnodes[0], opart)
        self.assert_direct_get_fails(hnodes[1], opart, 404)

        # make sure we can GET the object; there's 5 primaries and 1 handoff
        headers, older_obj_etag = self.proxy_get()
        self.assertEqual(contents.etag, older_obj_etag)
        self.assertEqual('meta-bar', headers.get('x-object-meta-bar'))

        # PUT object at t2; make all frags non-durable so that the previous
        # durable frags at t1 remain on object server; use InternalClient so
        # that x-backend-no-commit is passed through
        internal_client = self.make_internal_client()
        contents2 = Body(total=2.5 * 2**20)  # different content
        self.assertNotEqual(contents2.etag, older_obj_etag)  # sanity check
        headers = {
            'x-backend-no-commit': 'True',
            'x-object-meta-bar': 'meta-bar-new'
        }
        internal_client.upload_object(contents2, self.account,
                                      self.container_name.decode('utf8'),
                                      self.object_name.decode('utf8'), headers)
        # GET should still return the older durable object
        headers, obj_etag = self.proxy_get()
        self.assertEqual(older_obj_etag, obj_etag)
        self.assertEqual('meta-bar', headers.get('x-object-meta-bar'))
        # on handoff we have older durable and newer non-durable
        _hdrs, frag_etag = self.assert_direct_get_succeeds(hnodes[0], opart)
        self.assertEqual(older_frag_etag, frag_etag)
        _hdrs, newer_frag_etag = self.assert_direct_get_succeeds(
            hnodes[0], opart, require_durable=False)
        self.assertNotEqual(older_frag_etag, newer_frag_etag)

        # now make all the newer frags durable only on the 5 primaries
        self.assertEqual(5, self.make_durable(onodes[1:], opart))
        # now GET will return the newer object
        headers, newer_obj_etag = self.proxy_get()
        self.assertEqual(contents2.etag, newer_obj_etag)
        self.assertNotEqual(older_obj_etag, newer_obj_etag)
        self.assertEqual('meta-bar-new', headers.get('x-object-meta-bar'))

        # fix the 507'ing primary
        self.revive_drive(pdevs[0])

        # fire up reconstructor on handoff node only; nondurable_purge_delay is
        # set to zero to ensure the nondurable handoff frag is purged
        hnode_id = (hnodes[0]['port'] % 100) // 10
        self.run_custom_daemon(ObjectReconstructor, 'object-reconstructor',
                               hnode_id, {'nondurable_purge_delay': '0'})

        # primary now has only the newer non-durable frag
        self.assert_direct_get_fails(onodes[0], opart, 404)
        _hdrs, frag_etag = self.assert_direct_get_succeeds(
            onodes[0], opart, require_durable=False)
        self.assertEqual(newer_frag_etag, frag_etag)

        # handoff has only the older durable
        _hdrs, frag_etag = self.assert_direct_get_succeeds(hnodes[0], opart)
        self.assertEqual(older_frag_etag, frag_etag)
        headers, frag_etag = self.assert_direct_get_succeeds(
            hnodes[0], opart, require_durable=False)
        self.assertEqual(older_frag_etag, frag_etag)
        self.assertEqual('meta-bar', headers.get('x-object-meta-bar'))

        # fire up reconstructor on handoff node only, again
        self.reconstructor.once(number=hnode_id)

        # primary now has the newer non-durable frag and the older durable frag
        headers, frag_etag = self.assert_direct_get_succeeds(onodes[0], opart)
        self.assertEqual(older_frag_etag, frag_etag)
        self.assertEqual('meta-bar', headers.get('x-object-meta-bar'))
        headers, frag_etag = self.assert_direct_get_succeeds(
            onodes[0], opart, require_durable=False)
        self.assertEqual(newer_frag_etag, frag_etag)
        self.assertEqual('meta-bar-new', headers.get('x-object-meta-bar'))

        # handoff has nothing
        self.assert_direct_get_fails(hnodes[0],
                                     opart,
                                     404,
                                     require_durable=False)

        # kill all but first two primaries
        for pdev in pdevs[2:]:
            self.kill_drive(pdev)
        # fire up reconstructor on the remaining primary[1]; without the
        # other primaries, primary[1] cannot rebuild the frag but it can let
        # primary[0] know that its non-durable frag can be made durable
        self.reconstructor.once(number=self.config_number(onodes[1]))

        # first primary now has a *durable* *newer* frag - it *was* useful to
        # sync the non-durable!
        headers, frag_etag = self.assert_direct_get_succeeds(onodes[0], opart)
        self.assertEqual(newer_frag_etag, frag_etag)
        self.assertEqual('meta-bar-new', headers.get('x-object-meta-bar'))

        # revive primaries (in case we want to debug)
        for pdev in pdevs[2:]:
            self.revive_drive(pdev)
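
The hnode_id arithmetic above leans on the SAIO convention that a server's port encodes which config file it runs under; a small sketch of that assumption:

def config_number_from_port(port):
    # SAIO servers listen on ports like 6010/6020/... (or 6210/6220/...);
    # the "tens" digit picks /etc/swift/object-server/<n>.conf
    return (port % 100) // 10

assert config_number_from_port(6010) == 1
assert config_number_from_port(6220) == 2
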
Example No. 44
    def test_delete_propagate(self):
        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   self.container_name,
                                                   self.object_name)
        hnodes = list(
            itertools.islice(self.object_ring.get_more_nodes(opart), 2))

        # PUT object
        contents = Body()
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=contents)

        # now let's shut down a couple of primaries
        failed_nodes = random.sample(onodes, 2)
        for node in failed_nodes:
            self.kill_drive(self.device_dir(node))

        # Write tombstones over the nodes that are still online
        client.delete_object(self.url, self.token, self.container_name,
                             self.object_name)

        # spot check the primary nodes that are still online
        delete_timestamp = None
        for node in onodes:
            if node in failed_nodes:
                continue
            try:
                self.direct_get(node, opart)
            except direct_client.DirectClientException as err:
                self.assertEqual(err.http_status, 404)
                delete_timestamp = err.http_headers['X-Backend-Timestamp']
            else:
                self.fail('Node data on %r was not fully destroyed!' %
                          (node, ))

        # run the reconstructor on the handoff node multiple times until
        # tombstone is pushed out - each handoff node syncs to a few
        # primaries each time
        iterations = 0
        while iterations < 52:
            self.reconstructor.once(number=self.config_number(hnodes[0]))
            iterations += 1
            # see if the tombstone is reverted
            try:
                self.direct_get(hnodes[0], opart)
            except direct_client.DirectClientException as err:
                self.assertEqual(err.http_status, 404)
                if 'X-Backend-Timestamp' not in err.http_headers:
                    # this means the tombstone is *gone* so it's reverted
                    break
        else:
            self.fail('Still found tombstone on %r after %s iterations' %
                      (hnodes[0], iterations))

        # tombstone is still on the *second* handoff
        try:
            self.direct_get(hnodes[1], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
            self.assertEqual(err.http_headers['X-Backend-Timestamp'],
                             delete_timestamp)
        else:
            self.fail('Found obj data on %r' % hnodes[1])

        # repair the primaries
        self.revive_drive(self.device_dir(failed_nodes[0]))
        self.revive_drive(self.device_dir(failed_nodes[1]))

        # run reconstructor on second handoff
        self.reconstructor.once(number=self.config_number(hnodes[1]))

        # verify tombstone is reverted on the first pass
        try:
            self.direct_get(hnodes[1], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
            self.assertNotIn('X-Backend-Timestamp', err.http_headers)
        else:
            self.fail('Found obj data on %r' % hnodes[1])

        # sanity make sure proxy get can't find it
        try:
            self.proxy_get()
        except Exception as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (onodes[0],))
Example No. 45
    def test_slo_async_delete(self):
        if not self.cluster_info.get('slo', {}).get('allow_async_delete'):
            raise unittest.SkipTest('allow_async_delete not enabled')

        segment_container = self.container_name + '_segments'
        client.put_container(self.url, self.token, self.container_name, {})
        client.put_container(self.url, self.token, segment_container, {})
        client.put_object(self.url, self.token, segment_container, 'segment_1',
                          b'1234')
        client.put_object(self.url, self.token, segment_container, 'segment_2',
                          b'5678')
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          'slo',
                          json.dumps([
                              {
                                  'path': segment_container + '/segment_1'
                              },
                              {
                                  'data': 'Cg=='
                              },
                              {
                                  'path': segment_container + '/segment_2'
                              },
                          ]),
                          query_string='multipart-manifest=put')
        _, body = client.get_object(self.url, self.token, self.container_name,
                                    'slo')
        self.assertEqual(body, b'1234\n5678')

        client.delete_object(
            self.url,
            self.token,
            self.container_name,
            'slo',
            query_string='multipart-manifest=delete&async=true')

        # Object's deleted
        _, objects = client.get_container(self.url, self.token,
                                          self.container_name)
        self.assertEqual(objects, [])
        with self.assertRaises(client.ClientException) as caught:
            client.get_object(self.url, self.token, self.container_name, 'slo')
        self.assertEqual(404, caught.exception.http_status)

        # But segments are still around and accessible
        _, objects = client.get_container(self.url, self.token,
                                          segment_container)
        self.assertEqual([o['name'] for o in objects],
                         ['segment_1', 'segment_2'])
        _, body = client.get_object(self.url, self.token, segment_container,
                                    'segment_1')
        self.assertEqual(body, b'1234')
        _, body = client.get_object(self.url, self.token, segment_container,
                                    'segment_2')
        self.assertEqual(body, b'5678')

        # make sure auto-created expirer-queue containers get in the account
        # listing so the expirer can find them
        Manager(['container-updater']).once()
        self.expirer.once()

        # Now the expirer has cleaned up the segments
        _, objects = client.get_container(self.url, self.token,
                                          segment_container)
        self.assertEqual(objects, [])
        with self.assertRaises(client.ClientException) as caught:
            client.get_object(self.url, self.token, segment_container,
                              'segment_1')
        self.assertEqual(404, caught.exception.http_status)
        with self.assertRaises(client.ClientException) as caught:
            client.get_object(self.url, self.token, segment_container,
                              'segment_2')
        self.assertEqual(404, caught.exception.http_status)
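
Why the assembled body above is b'1234\n5678': the middle manifest entry is an inline 'data' segment, carried base64-encoded, and 'Cg==' is exactly one newline byte:

import base64

assert base64.b64decode('Cg==') == b'\n'
assert base64.b64encode(b'\n') == b'Cg=='
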
Example No. 46
    def test_main(self):
        # Create container1 and container2
        # Assert account level sees them
        # Create container2/object1
        # Assert account level doesn't see it yet
        # Get to final state
        # Assert account level now sees the container2/object1
        # Kill account servers excepting two of the primaries
        # Delete container1
        # Assert account level knows container1 is gone but doesn't know about
        #   container2/object2 yet
        # Put container2/object2
        # Run container updaters
        # Assert account level now knows about container2/object2
        # Restart other primary account server
        # Assert that server doesn't know about container1's deletion or the
        #   new container2/object2 yet
        # Get to final state
        # Assert that server is now up to date

        container1 = 'container1'
        client.put_container(self.url, self.token, container1)
        container2 = 'container2'
        client.put_container(self.url, self.token, container2)
        headers, containers = client.get_account(self.url, self.token)
        self.assertEqual(headers['x-account-container-count'], '2')
        self.assertEqual(headers['x-account-object-count'], '0')
        self.assertEqual(headers['x-account-bytes-used'], '0')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEqual(container['count'], 0)
                self.assertEqual(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEqual(container['count'], 0)
                self.assertEqual(container['bytes'], 0)
        self.assertTrue(found1)
        self.assertTrue(found2)

        client.put_object(self.url, self.token, container2, 'object1', '1234')
        headers, containers = client.get_account(self.url, self.token)
        self.assertEqual(headers['x-account-container-count'], '2')
        self.assertEqual(headers['x-account-object-count'], '0')
        self.assertEqual(headers['x-account-bytes-used'], '0')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEqual(container['count'], 0)
                self.assertEqual(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEqual(container['count'], 0)
                self.assertEqual(container['bytes'], 0)
        self.assertTrue(found1)
        self.assertTrue(found2)

        get_to_final_state()
        headers, containers = client.get_account(self.url, self.token)
        self.assertEqual(headers['x-account-container-count'], '2')
        self.assertEqual(headers['x-account-object-count'], '1')
        self.assertEqual(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEqual(container['count'], 0)
                self.assertEqual(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEqual(container['count'], 1)
                self.assertEqual(container['bytes'], 4)
        self.assertTrue(found1)
        self.assertTrue(found2)

        apart, anodes = self.account_ring.get_nodes(self.account)
        kill_nonprimary_server(anodes, self.port2server, self.pids)
        kill_server(anodes[0]['port'], self.port2server, self.pids)

        client.delete_container(self.url, self.token, container1)
        client.put_object(self.url, self.token, container2, 'object2', '12345')
        headers, containers = client.get_account(self.url, self.token)
        self.assertEqual(headers['x-account-container-count'], '1')
        self.assertEqual(headers['x-account-object-count'], '1')
        self.assertEqual(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEqual(container['count'], 1)
                self.assertEqual(container['bytes'], 4)
        self.assertFalse(found1)
        self.assertTrue(found2)

        processes = []
        for node in xrange(1, 5):
            processes.append(
                Popen([
                    'swift-container-updater',
                    '/etc/swift/container-server/%d.conf' % node, 'once'
                ]))
        for process in processes:
            process.wait()
        headers, containers = client.get_account(self.url, self.token)
        self.assertEqual(headers['x-account-container-count'], '1')
        self.assertEqual(headers['x-account-object-count'], '2')
        self.assertEqual(headers['x-account-bytes-used'], '9')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEqual(container['count'], 2)
                self.assertEqual(container['bytes'], 9)
        self.assertFalse(found1)
        self.assertTrue(found2)

        start_server(anodes[0]['port'], self.port2server, self.pids)

        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEqual(headers['x-account-container-count'], '2')
        self.assertEqual(headers['x-account-object-count'], '1')
        self.assertEqual(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEqual(container['count'], 1)
                self.assertEqual(container['bytes'], 4)
        self.assertTrue(found1)
        self.assertTrue(found2)

        get_to_final_state()
        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEqual(headers['x-account-container-count'], '1')
        self.assertEqual(headers['x-account-object-count'], '2')
        self.assertEqual(headers['x-account-bytes-used'], '9')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEqual(container['count'], 2)
                self.assertEqual(container['bytes'], 9)
        self.assertFalse(found1)
        self.assertTrue(found2)
Example No. 47
    def test_revert_object(self):
        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url, self.token, self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(
            self.account, self.container_name, self.object_name)
        # materialize: get_more_nodes returns a generator, which the two
        # loops below would otherwise exhaust after the first pass
        hnodes = list(self.object_ring.get_more_nodes(opart))

        # kill 2 (a parity count) primary nodes so we can force data onto
        # handoffs; we do that by renaming dev dirs to induce 507s
        p_dev1 = self.device_dir('object', onodes[0])
        p_dev2 = self.device_dir('object', onodes[1])
        self.kill_drive(p_dev1)
        self.kill_drive(p_dev2)

        # PUT object
        contents = Body()
        headers = {'x-object-meta-foo': 'meta-foo'}
        headers_post = {'x-object-meta-bar': 'meta-bar'}
        client.put_object(self.url, self.token, self.container_name,
                          self.object_name, contents=contents,
                          headers=headers)
        client.post_object(self.url, self.token, self.container_name,
                           self.object_name, headers=headers_post)
        del headers_post['X-Auth-Token']  # swiftclient mutates the headers dict on post

        # these primaries can't serve the data any more; we expect 507
        # here and not 404 because we're using mount_check to kill nodes
        for onode in (onodes[0], onodes[1]):
            try:
                self.direct_get(onode, opart)
            except direct_client.DirectClientException as err:
                self.assertEqual(err.http_status, 507)
            else:
                self.fail('Node data on %r was not fully destroyed!' %
                          (onode,))

        # now take out another primary
        p_dev3 = self.device_dir('object', onodes[2])
        self.kill_drive(p_dev3)

        # this node can't serve the data any more
        try:
            self.direct_get(onodes[2], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 507)
        else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (onodes[2],))

        # make sure we can still GET the object and it's correct
        # we're now pulling from handoffs and reconstructing
        etag = self.proxy_get()
        self.assertEqual(etag, contents.etag)

        # rename the dev dirs so they don't 507 anymore
        self.revive_drive(p_dev1)
        self.revive_drive(p_dev2)
        self.revive_drive(p_dev3)

        # fire up reconstructor on handoff nodes only
        for hnode in hnodes:
            hnode_id = (hnode['port'] - 6000) // 10
            self.reconstructor.once(number=hnode_id)

        # first three primaries have data again
        for onode in (onodes[0], onodes[1], onodes[2]):
            self.direct_get(onode, opart)

        # check meta
        meta = client.head_object(self.url, self.token,
                                  self.container_name,
                                  self.object_name)
        for key in headers_post:
            self.assertTrue(key in meta)
            self.assertEqual(meta[key], headers_post[key])

        # handoffs are empty
        for hnode in hnodes:
            try:
                self.direct_get(hnode, opart)
            except direct_client.DirectClientException as err:
                self.assertEqual(err.http_status, 404)
            else:
                self.fail('Node data on %r was not fully destroyed!' %
                          (hnode,))

    def test_main(self):
        # Create container1 and container2
        container1 = 'container1'
        client.put_container(self.url, self.token, container1)
        container2 = 'container2'
        client.put_container(self.url, self.token, container2)

        # Assert account level sees them
        headers, containers = client.get_account(self.url, self.token)
        self.assertEqual(headers['x-account-container-count'], '2')
        self.assertEqual(headers['x-account-object-count'], '0')
        self.assertEqual(headers['x-account-bytes-used'], '0')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEqual(container['count'], 0)
                self.assertEqual(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEqual(container['count'], 0)
                self.assertEqual(container['bytes'], 0)
        self.assertTrue(found1)
        self.assertTrue(found2)

        # Create container2/object1
        client.put_object(self.url, self.token, container2, 'object1', '1234')

        # Assert account level doesn't see it yet
        headers, containers = client.get_account(self.url, self.token)
        self.assertEqual(headers['x-account-container-count'], '2')
        self.assertEqual(headers['x-account-object-count'], '0')
        self.assertEqual(headers['x-account-bytes-used'], '0')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEqual(container['count'], 0)
                self.assertEqual(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEqual(container['count'], 0)
                self.assertEqual(container['bytes'], 0)
        self.assertTrue(found1)
        self.assertTrue(found2)

        # Get to final state
        self.get_to_final_state()

        # Assert account level now sees the container2/object1
        headers, containers = client.get_account(self.url, self.token)
        self.assertEqual(headers['x-account-container-count'], '2')
        self.assertEqual(headers['x-account-object-count'], '1')
        self.assertEqual(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEqual(container['count'], 0)
                self.assertEqual(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEqual(container['count'], 1)
                self.assertEqual(container['bytes'], 4)
        self.assertTrue(found1)
        self.assertTrue(found2)

        apart, anodes = self.account_ring.get_nodes(self.account)
        kill_nonprimary_server(anodes, self.port2server, self.pids)
        kill_server(anodes[0]['port'], self.port2server, self.pids)
        # Kill account servers excepting two of the primaries

        # Delete container1
        client.delete_container(self.url, self.token, container1)

        # Put container2/object2
        client.put_object(self.url, self.token, container2, 'object2', '12345')

        # Assert account level knows container1 is gone but doesn't know about
        #   container2/object2 yet
        headers, containers = client.get_account(self.url, self.token)
        self.assertEqual(headers['x-account-container-count'], '1')
        self.assertEqual(headers['x-account-object-count'], '1')
        self.assertEqual(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEqual(container['count'], 1)
                self.assertEqual(container['bytes'], 4)
        self.assertFalse(found1)
        self.assertTrue(found2)

        # Run container updaters
        Manager(['container-updater']).once()

        # Assert account level now knows about container2/object2
        headers, containers = client.get_account(self.url, self.token)
        self.assertEqual(headers['x-account-container-count'], '1')
        self.assertEqual(headers['x-account-object-count'], '2')
        self.assertEqual(headers['x-account-bytes-used'], '9')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEqual(container['count'], 2)
                self.assertEqual(container['bytes'], 9)
        self.assertFalse(found1)
        self.assertTrue(found2)

        # Restart other primary account server
        start_server(anodes[0]['port'], self.port2server, self.pids)

        # Assert that server doesn't know about container1's deletion or the
        #   new container2/object2 yet
        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEqual(headers['x-account-container-count'], '2')
        self.assertEqual(headers['x-account-object-count'], '1')
        self.assertEqual(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEqual(container['count'], 1)
                self.assertEqual(container['bytes'], 4)
        self.assertTrue(found1)
        self.assertTrue(found2)

        # Get to final state
        self.get_to_final_state()

        # Assert that server is now up to date
        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEqual(headers['x-account-container-count'], '1')
        self.assertEqual(headers['x-account-object-count'], '2')
        self.assertEqual(headers['x-account-bytes-used'], '9')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEqual(container['count'], 2)
                self.assertEqual(container['bytes'], 9)
        self.assertFalse(found1)
        self.assertTrue(found2)
Example No. 49
    def test_missing_container(self):
        # In this test we need to put the container on handoff devices, so
        # we need more container devices than the replica count
        if len(self.container_ring.devs) <= self.container_ring.replica_count:
            raise SkipTest("Need more devices than the replica count")

        container = 'container-%s' % uuid4()
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)

        # Kill all primary container servers
        for cnode in cnodes:
            kill_server((cnode['ip'], cnode['port']), self.ipport2server)

        # Create the container; all of its replicas land on handoff
        # devices
        try:
            client.put_container(self.url, self.token, container)
        except ClientException as err:
            # if the cluster doesn't have enough devices, swift may return an
            # error (e.g. when we only have 4 devices in a 3-replica cluster).
            self.assertEqual(err.http_status, 503)

        # Assert handoff device has a container replica
        another_cnode = next(self.container_ring.get_more_nodes(cpart))
        direct_client.direct_get_container(another_cnode, cpart, self.account,
                                           container)

        # Restart all primary container servers
        for cnode in cnodes:
            start_server((cnode['ip'], cnode['port']), self.ipport2server)

        # Create container/obj
        obj = 'object-%s' % uuid4()
        client.put_object(self.url, self.token, container, obj, '')

        # Run the object-updater
        Manager(['object-updater']).once()

        # Run the container-replicator; now the container replicas at the
        # handoff device get moved to the primary servers
        Manager(['container-replicator']).once()

        # Assert the container replicas just moved to the primary servers
        # by the replicator don't know about the object
        for cnode in cnodes:
            self.assertFalse(
                direct_client.direct_get_container(cnode, cpart, self.account,
                                                   container)[1])

        # since the container is empty - we can delete it!
        client.delete_container(self.url, self.token, container)

        # Re-run the object-updater; now the container replicas on the
        # primary container servers should get updated
        Manager(['object-updater']).once()

        # Assert all primary container servers know about container/obj
        for cnode in cnodes:
            objs = [
                o['name'] for o in direct_client.direct_get_container(
                    cnode, cpart, self.account, container)[1]
            ]
            self.assertIn(obj, objs)
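
Why running the object-updater pushes the listing updates through: when the object server cannot reach the container servers at PUT time, it stashes a pickled "async pending" update on disk and the updater replays it later. A rough sketch of reading those files (directory layout assumed from Swift's on-disk conventions):

import os
import pickle

def list_async_pendings(device_root):
    # e.g. device_root = '/srv/node/sdb1'; policy 0 uses 'async_pending',
    # other policies 'async_pending-<policy index>'
    async_dir = os.path.join(device_root, 'async_pending')
    for root, _dirs, files in os.walk(async_dir):
        for name in files:
            with open(os.path.join(root, name), 'rb') as fp:
                # each file is a pickled dict: op, account, container, obj,
                # plus the container-update headers to replay
                yield pickle.load(fp)
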
Example No. 50
    def test_main(self):
        # Create container
        container = 'container-%s' % uuid4()
        client.put_container(self.url,
                             self.token,
                             container,
                             headers={'X-Storage-Policy': self.policy.name})

        # Kill one container/obj primary server
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
        opart, onodes = self.object_ring.get_nodes(self.account, container,
                                                   obj)
        onode = onodes[0]
        kill_server((onode['ip'], onode['port']), self.ipport2server,
                    self.pids)

        # Create container/obj (goes to two primary servers and one handoff)
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))

        # Kill other two container/obj primary servers
        #   to ensure GET handoff works
        for node in onodes[1:]:
            kill_server((node['ip'], node['port']), self.ipport2server,
                        self.pids)

        # Indirectly through proxy assert we can get container/obj
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))

        # Restart those other two container/obj primary servers
        for node in onodes[1:]:
            start_server((node['ip'], node['port']), self.ipport2server,
                         self.pids)

        # We've indirectly verified the handoff node has the container/object,
        #   but let's directly verify it.
        another_onode = next(self.object_ring.get_more_nodes(opart))
        odata = direct_client.direct_get_object(
            another_onode,
            opart,
            self.account,
            container,
            obj,
            headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))

        # Assert container listing (via proxy and directly) has container/obj
        objs = [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ]
        if obj not in objs:
            raise Exception('Container listing did not know about object')
        for cnode in cnodes:
            objs = [
                o['name'] for o in direct_client.direct_get_container(
                    cnode, cpart, self.account, container)[1]
            ]
            if obj not in objs:
                raise Exception(
                    'Container server %s:%s did not know about object' %
                    (cnode['ip'], cnode['port']))

        # Bring the first container/obj primary server back up
        start_server((onode['ip'], onode['port']), self.ipport2server,
                     self.pids)

        # Assert that it doesn't have container/obj yet
        try:
            direct_client.direct_get_object(
                onode,
                opart,
                self.account,
                container,
                obj,
                headers={'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")

        # Run object replication, ensuring we run the handoff node last so it
        #   will remove its extra handoff partition
        for node in onodes:
            try:
                port_num = node['replication_port']
            except KeyError:
                port_num = node['port']
            node_id = (port_num - 6000) // 10
            Manager(['object-replicator']).once(number=node_id)
        try:
            another_port_num = another_onode['replication_port']
        except KeyError:
            another_port_num = another_onode['port']
        another_num = (another_port_num - 6000) // 10
        Manager(['object-replicator']).once(number=another_num)

        # Assert the first container/obj primary server now has container/obj
        odata = direct_client.direct_get_object(
            onode,
            opart,
            self.account,
            container,
            obj,
            headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))

        # Assert the handoff server no longer has container/obj
        try:
            direct_client.direct_get_object(
                another_onode,
                opart,
                self.account,
                container,
                obj,
                headers={'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")

        # Kill the first container/obj primary server again (we have two
        #   primaries and the handoff up now)
        kill_server((onode['ip'], onode['port']), self.ipport2server,
                    self.pids)

        # Delete container/obj
        try:
            client.delete_object(self.url, self.token, container, obj)
        except client.ClientException as err:
            if self.object_ring.replica_count > 2:
                raise
            # Object DELETE returning 503 for (404, 204)
            # remove this with fix for
            # https://bugs.launchpad.net/swift/+bug/1318375
            self.assertEqual(503, err.http_status)

        # Assert we can't head container/obj
        try:
            client.head_object(self.url, self.token, container, obj)
        except client.ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")

        # Assert container/obj is not in the container listing, both indirectly
        #   and directly
        objs = [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ]
        if obj in objs:
            raise Exception('Container listing still knew about object')
        for cnode in cnodes:
            objs = [
                o['name'] for o in direct_client.direct_get_container(
                    cnode, cpart, self.account, container)[1]
            ]
            if obj in objs:
                raise Exception(
                    'Container server %s:%s still knew about object' %
                    (cnode['ip'], cnode['port']))

        # Restart the first container/obj primary server again
        start_server((onode['ip'], onode['port']), self.ipport2server,
                     self.pids)

        # Assert it still has container/obj
        direct_client.direct_get_object(
            onode,
            opart,
            self.account,
            container,
            obj,
            headers={'X-Backend-Storage-Policy-Index': self.policy.idx})

        # Run object replication, ensuring we run the handoff node last so it
        #   will remove its extra handoff partition
        for node in onodes:
            try:
                port_num = node['replication_port']
            except KeyError:
                port_num = node['port']
            node_id = (port_num - 6000) // 10
            Manager(['object-replicator']).once(number=node_id)
        another_node_id = (another_port_num - 6000) // 10
        Manager(['object-replicator']).once(number=another_node_id)

        # Assert the handoff server no longer has container/obj
        try:
            direct_client.direct_get_object(
                another_onode,
                opart,
                self.account,
                container,
                obj,
                headers={'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
Example No. 51
    def _test_main(self, cancel=False):
        container = 'container-%s' % uuid4()
        obj = 'object-%s' % uuid4()
        obj2 = 'object-%s' % uuid4()

        # Create container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url, self.token, container, headers=headers)

        # Create a new object
        client.put_object(self.url, self.token, container, obj, self.data)
        client.head_object(self.url, self.token, container, obj)

        # Prepare partition power increase
        builder = RingBuilder.load(self.builder_file)
        builder.prepare_increase_partition_power()
        builder.save(self.builder_file)
        ring_data = builder.get_ring()
        ring_data.save(self.ring_file)

        # Ensure the proxy uses the changed ring
        Manager(['proxy']).restart()

        # Ensure object is still accessible
        client.head_object(self.url, self.token, container, obj)

        # Relink existing objects
        for device in self.devices:
            self.assertEqual(0, relink(skip_mount_check=True, devices=device))

        # Create second object after relinking and ensure it is accessible
        client.put_object(self.url, self.token, container, obj2, self.data)
        client.head_object(self.url, self.token, container, obj2)

        # Remember the original object locations
        org_locations = self._find_objs_ondisk(container, obj)
        org_locations += self._find_objs_ondisk(container, obj2)

        # Remember the new object locations
        new_locations = []
        for loc in org_locations:
            new_locations.append(
                replace_partition_in_path(str(loc),
                                          self.object_ring.part_power + 1))

        # Overwrite existing object - to ensure that older timestamp files
        # will be cleaned up properly later
        client.put_object(self.url, self.token, container, obj, self.data)

        # Ensure objects are still accessible
        client.head_object(self.url, self.token, container, obj)
        client.head_object(self.url, self.token, container, obj2)

        # Increase partition power
        builder = RingBuilder.load(self.builder_file)
        if not cancel:
            builder.increase_partition_power()
        else:
            builder.cancel_increase_partition_power()
        builder.save(self.builder_file)
        ring_data = builder.get_ring()
        ring_data.save(self.ring_file)

        # Ensure the proxy uses the changed ring
        Manager(['proxy']).restart()

        # Ensure objects are still accessible
        client.head_object(self.url, self.token, container, obj)
        client.head_object(self.url, self.token, container, obj2)

        # Overwrite existing object - to ensure that older timestamp files
        # will be cleaned up properly later
        client.put_object(self.url, self.token, container, obj, self.data)

        # Cleanup old objects in the wrong location
        for device in self.devices:
            self.assertEqual(0, cleanup(skip_mount_check=True, devices=device))

        # Ensure objects are still accessible
        client.head_object(self.url, self.token, container, obj)
        client.head_object(self.url, self.token, container, obj2)

        # Ensure data in old or relinked object locations is removed
        if not cancel:
            for fn in org_locations:
                self.assertFalse(os.path.exists(fn))
        else:
            for fn in new_locations:
                self.assertFalse(os.path.exists(fn))
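Aside: a partition power increase doubles the partition space: each old partition P becomes 2P or 2P+1, with the extra bit taken from the object's path hash, and only the partition component of the on-disk path changes. A rough sketch of the path rewrite that replace_partition_in_path performs (simplified; the real helper recomputes the partition from the hash, and non-default policies use an 'objects-<N>' directory):

def replace_partition(path, new_part):
    # .../objects/<part>/<suffix>/<hash>/<file> -> swap <part> only
    parts = path.split('/')
    idx = parts.index('objects') + 1
    parts[idx] = str(new_part)
    return '/'.join(parts)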
Example No. 52
    def test_ec_handoff_overwrite(self):
        container_name = 'container-%s' % uuid4()
        object_name = 'object-%s' % uuid4()

        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url,
                             self.token,
                             container_name,
                             headers=headers)

        # PUT object
        old_contents = Body()
        client.put_object(self.url,
                          self.token,
                          container_name,
                          object_name,
                          contents=old_contents)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   container_name, object_name)

        # shutdown one of the primary data nodes
        failed_primary = random.choice(onodes)
        failed_primary_device_path = self.device_dir('object', failed_primary)
        self.kill_drive(failed_primary_device_path)

        # overwrite our object with some new data
        new_contents = Body()
        client.put_object(self.url,
                          self.token,
                          container_name,
                          object_name,
                          contents=new_contents)
        self.assertNotEqual(new_contents.etag, old_contents.etag)

        # restore failed primary device
        self.revive_drive(failed_primary_device_path)

        # sanity - failed node has old contents
        req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
        headers = direct_client.direct_head_object(failed_primary,
                                                   opart,
                                                   self.account,
                                                   container_name,
                                                   object_name,
                                                   headers=req_headers)
        self.assertEqual(headers['X-Object-Sysmeta-EC-Etag'],
                         old_contents.etag)

        # we have 1 primary with the wrong (old) etag, and we should have 5
        # with the new etag plus a handoff with the new etag, so killing 2
        # other primaries forces the proxy to try to GET from all of the
        # remaining primaries plus the handoff.
        other_nodes = [n for n in onodes if n != failed_primary]
        random.shuffle(other_nodes)
        for node in other_nodes[:2]:
            self.kill_drive(self.device_dir('object', node))

        # sanity, after taking out two primaries we should be down to
        # only four primaries, one of which has the old etag - but we
        # also have a handoff with the new etag out there
        found_frags = defaultdict(int)
        req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
        for node in onodes + list(self.object_ring.get_more_nodes(opart)):
            try:
                headers = direct_client.direct_head_object(node,
                                                           opart,
                                                           self.account,
                                                           container_name,
                                                           object_name,
                                                           headers=req_headers)
            except Exception:
                continue
            found_frags[headers['X-Object-Sysmeta-EC-Etag']] += 1
        self.assertEqual(
            found_frags,
            {
                new_contents.etag: 4,  # this should be enough to rebuild!
                old_contents.etag: 1,
            })

        # clear node error limiting
        Manager(['proxy']).restart()

        resp_etag = self.get_object(container_name, object_name)
        self.assertEqual(resp_etag, new_contents.etag)
Example No. 53
    def test_reconcile_symlink(self):
        if 'symlink' not in self.cluster_info:
            raise unittest.SkipTest("Symlink not enabled in proxy; can't test "
                                    "symlink reconciliation")
        wrong_policy = random.choice(ENABLED_POLICIES)
        policy = random.choice(
            [p for p in ENABLED_POLICIES if p is not wrong_policy])
        # get an old container stashed
        self.brain.stop_primary_half()
        self.brain.put_container(int(policy))
        self.brain.start_primary_half()
        # write some target data
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          'target',
                          contents=b'this is the target data')

        # write the symlink
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          'symlink',
                          headers={
                              'X-Symlink-Target':
                              '%s/target' % self.container_name,
                              'Content-Type': 'application/symlink',
                          })

        # at this point we have a broken symlink (the container_info has the
        # proxy looking for the target in the wrong policy)
        with self.assertRaises(ClientException) as ctx:
            client.get_object(self.url, self.token, self.container_name,
                              'symlink')
        self.assertEqual(ctx.exception.http_status, 404)

        # of course the symlink itself is fine
        metadata, body = client.get_object(self.url,
                                           self.token,
                                           self.container_name,
                                           'symlink',
                                           query_string='symlink=get')
        self.assertEqual(metadata['x-symlink-target'],
                         '%s/target' % self.container_name)
        self.assertEqual(metadata['content-type'], 'application/symlink')
        self.assertEqual(body, b'')
        # ... although in the wrong policy
        object_ring = POLICIES.get_object_ring(int(wrong_policy), '/etc/swift')
        part, nodes = object_ring.get_nodes(self.account, self.container_name,
                                            'symlink')
        for node in nodes:
            metadata = direct_client.direct_head_object(
                node,
                part,
                self.account,
                self.container_name,
                'symlink',
                headers={'X-Backend-Storage-Policy-Index': int(wrong_policy)})
            self.assertEqual(metadata['X-Object-Sysmeta-Symlink-Target'],
                             '%s/target' % self.container_name)

        # let the reconciler run
        self.brain.start_handoff_half()
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # now the symlink works
        metadata, body = client.get_object(self.url, self.token,
                                           self.container_name, 'symlink')
        self.assertEqual(body, b'this is the target data')
        # and it's in the correct policy
        object_ring = POLICIES.get_object_ring(int(policy), '/etc/swift')
        part, nodes = object_ring.get_nodes(self.account, self.container_name,
                                            'symlink')
        for node in nodes:
            metadata = direct_client.direct_head_object(
                node,
                part,
                self.account,
                self.container_name,
                'symlink',
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            self.assertEqual(metadata['X-Object-Sysmeta-Symlink-Target'],
                             '%s/target' % self.container_name)
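Aside: query_string='symlink=get' is how a client addresses the symlink object itself instead of following it; condensed from the calls above (url, token and container names are illustrative):

# Following the symlink returns the target's data (or 404s while the
# container_info is stale, as demonstrated above):
_, body = client.get_object(url, token, container, 'symlink')

# Fetching the symlink itself returns an empty body plus the target:
meta, body = client.get_object(url, token, container, 'symlink',
                               query_string='symlink=get')
assert body == b'' and 'x-symlink-target' in meta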
Example No. 54
    def test_sync_with_stale_container_rows(self):
        source_container, dest_container = self._setup_synced_containers()
        brain = BrainSplitter(self.url, self.token, source_container,
                              None, 'container')

        # upload to source
        object_name = 'object-%s' % uuid.uuid4()
        client.put_object(self.url, self.token, source_container, object_name,
                          'test-body')

        # check source container listing
        _, listing = client.get_container(
            self.url, self.token, source_container)
        for expected_obj_dict in listing:
            if expected_obj_dict['name'] == object_name:
                break
        else:
            self.fail('Failed to find source object %r in container listing %r'
                      % (object_name, listing))

        # stop all container servers
        brain.stop_primary_half()
        brain.stop_handoff_half()

        # upload new object content to source - container updates will fail
        client.put_object(self.url, self.token, source_container, object_name,
                          'new-test-body')
        source_headers = client.head_object(
            self.url, self.token, source_container, object_name)

        # start all container servers
        brain.start_primary_half()
        brain.start_handoff_half()

        # sanity check: source container listing should not have changed
        _, listing = client.get_container(
            self.url, self.token, source_container)
        for actual_obj_dict in listing:
            if actual_obj_dict['name'] == object_name:
                self.assertDictEqual(expected_obj_dict, actual_obj_dict)
                break
        else:
            self.fail('Failed to find source object %r in container listing %r'
                      % (object_name, listing))

        # cycle container-sync - object should be correctly sync'd despite
        # stale info in container row
        Manager(['container-sync']).once()

        # verify sync'd object has same content and headers
        dest_headers, body = client.get_object(self.url, self.token,
                                               dest_container, object_name)
        self.assertEqual(body, b'new-test-body')
        mismatched_headers = []
        for k in ('etag', 'content-length', 'content-type', 'x-timestamp',
                  'last-modified'):
            if source_headers[k] == dest_headers[k]:
                continue
            mismatched_headers.append((k, source_headers[k], dest_headers[k]))
        if mismatched_headers:
            msg = '\n'.join([('Mismatched header %r, expected %r but got %r'
                              % item) for item in mismatched_headers])
            self.fail(msg)
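Aside: the _setup_synced_containers helper used above presumably wires the pair with Swift's container-sync headers; a minimal hand-rolled sketch (realm-style X-Container-Sync-To shown; realm, cluster, names and key are illustrative):

sync_key = 'secret'
client.put_container(url, token, 'source', headers={
    'X-Container-Sync-To': '//realm/cluster/%s/dest' % account,
    'X-Container-Sync-Key': sync_key})
client.put_container(url, token, 'dest', headers={
    'X-Container-Sync-Key': sync_key})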
Example No. 55
    def invoke_storlet(self,
                       op,
                       params=None,
                       global_params=None,
                       header_parameters=False):
        headers = {'X-Run-Storlet': self.storlet_name}
        headers.update(self.additional_headers)
        if params is not None:
            if header_parameters:
                querystring = None
                count = 1
                for key in params:
                    headers['X-Storlet-Parameter-' + str(count)] = \
                        key + ':' + params[key]
                    count = count + 1
            else:
                querystring = '&'.join(
                    ['%s=%s' % (k, v) for k, v in params.items()])
        else:
            querystring = None

        if op == 'GET':
            # Get original object
            original_h, original_c = c.get_object(self.url,
                                                  self.token,
                                                  self.container,
                                                  self.storlet_file,
                                                  response_dict=dict())
            file_length = int(original_h['content-length'])
            processed_h, returned_c = c.get_object(self.url,
                                                   self.token,
                                                   self.container,
                                                   self.storlet_file,
                                                   query_string=querystring,
                                                   response_dict=dict(),
                                                   headers=headers,
                                                   resp_chunk_size=file_length)
            processed_c = b''
            for chunk in returned_c:
                if chunk:
                    processed_c += chunk

            if params is not None and params.get('execute', None) is not None:
                mdv = processed_h['X-Object-Meta-Execution-Result'.lower()]
                self.assertEqual('42', mdv)
            if params is not None and params.get('double', None) == 'true':
                self.assertEqual(original_c, processed_c[:file_length])
                self.assertEqual(original_c, processed_c[file_length:])
            else:
                self.assertEqual(original_c, processed_c)
            self.assertEqual(original_h['X-Object-Meta-Testkey'.lower()],
                             processed_h['X-Object-Meta-Testkey'.lower()])

        if op == 'PUT':
            # PUT a random file
            response = dict()
            uploaded_c = ''.join(
                random.choice(string.ascii_uppercase + string.digits)
                for _ in range(1024)).encode('ascii')
            random_md = ''.join(
                random.choice(string.ascii_uppercase + string.digits)
                for _ in range(32))
            content_length = None
            headers['X-Object-Meta-Testkey'] = random_md
            c.put_object(self.url, self.token, self.container,
                         'identity_random_source', uploaded_c, content_length,
                         None, None, "application/octet-stream", headers, None,
                         None, querystring, response)
            resp_headers, saved_c = c.get_object(self.url,
                                                 self.token,
                                                 self.container,
                                                 'identity_random_source',
                                                 response_dict=dict())

            if params is not None and params.get('double', None) == 'true':
                assert (uploaded_c == saved_c[:1024])
                assert (uploaded_c == saved_c[1024:])
            else:
                assert (uploaded_c == saved_c)

            if params is not None and params.get('execute', None) is not None:
                mdv = resp_headers['X-Object-Meta-Execution-Result'.lower()]
                self.assertEqual('42', mdv)

            self.assertEqual(random_md,
                             resp_headers['X-Object-Meta-Testkey'.lower()])
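Aside: the two parameter-passing styles exercised above are equivalent from the storlet's point of view; condensed (storlet name and parameter are illustrative):

# Query-string style:
c.get_object(url, token, container, obj, query_string='double=true',
             headers={'X-Run-Storlet': 'identitystorlet-1.0.jar'})

# Numbered-header style, as built in the loop above:
c.get_object(url, token, container, obj,
             headers={'X-Run-Storlet': 'identitystorlet-1.0.jar',
                      'X-Storlet-Parameter-1': 'double:true'})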
Example No. 56
    def test_sync_slo_manifest(self):
        # Verify that SLO manifests are sync'd even if their segments cannot
        # be found in the destination account at the time of sync'ing.
        # Create source and dest containers for the manifest in separate
        # accounts.
        dest_account = self.account_2
        source_container, dest_container = self._setup_synced_containers(
            dest_overrides=dest_account
        )

        # Create source and dest containers for segments in separate accounts.
        # These containers must have the same name for the destination SLO
        # manifest to be able to resolve segments. Initially the destination
        # has no sync key, so segments will not sync.
        segs_container = 'segments-%s' % uuid.uuid4()
        dest_segs_info = dict(dest_account)
        dest_segs_info.update({'name': segs_container, 'sync_key': None})
        self._setup_synced_containers(
            source_overrides={'name': segs_container, 'sync_key': 'segs_key'},
            dest_overrides=dest_segs_info)

        # upload a segment to source
        segment_name = 'segment-%s' % uuid.uuid4()
        segment_data = 'segment body'  # it's ok for first segment to be small
        segment_etag = client.put_object(
            self.url, self.token, segs_container, segment_name,
            segment_data)

        manifest = [{'etag': segment_etag,
                     'size_bytes': len(segment_data),
                     'path': '/%s/%s' % (segs_container, segment_name)}]
        manifest_name = 'manifest-%s' % uuid.uuid4()
        put_headers = {'X-Object-Meta-Test': 'put_value'}
        client.put_object(
            self.url, self.token, source_container, manifest_name,
            json.dumps(manifest), headers=put_headers,
            query_string='multipart-manifest=put')

        resp_headers, manifest_body = client.get_object(
            self.url, self.token, source_container, manifest_name,
            query_string='multipart-manifest=get')
        int_manifest = json.loads(manifest_body)

        # cycle container-sync
        Manager(['container-sync']).once()

        # verify manifest was sync'd
        resp_headers, dest_listing = client.get_container(
            dest_account['url'], dest_account['token'], dest_container)
        self.assertFalse(dest_listing[1:])
        self.assertEqual(manifest_name, dest_listing[0]['name'])

        # verify manifest body
        resp_headers, body = client.get_object(
            dest_account['url'], dest_account['token'], dest_container,
            manifest_name, query_string='multipart-manifest=get')
        self.assertEqual(int_manifest, json.loads(body))
        self.assertIn('x-object-meta-test', resp_headers)
        self.assertEqual('put_value', resp_headers['x-object-meta-test'])

        # an attempt to GET the SLO fails because the segment wasn't sync'd
        with self.assertRaises(ClientException) as cm:
            client.get_object(dest_account['url'], dest_account['token'],
                              dest_container, manifest_name)
        self.assertEqual(409, cm.exception.http_status)

        # now set sync key on destination segments container
        client.put_container(
            dest_account['url'], dest_account['token'], segs_container,
            headers={'X-Container-Sync-Key': 'segs_key'})

        # cycle container-sync
        Manager(['container-sync']).once()

        # sanity check - verify manifest body
        resp_headers, body = client.get_object(
            dest_account['url'], dest_account['token'], dest_container,
            manifest_name, query_string='multipart-manifest=get')
        self.assertEqual(int_manifest, json.loads(body))
        self.assertIn('x-object-meta-test', resp_headers)
        self.assertEqual('put_value', resp_headers['x-object-meta-test'])

        # verify GET of SLO manifest now succeeds
        resp_headers, body = client.get_object(
            dest_account['url'], dest_account['token'], dest_container,
            manifest_name)
        self.assertEqual(segment_data, body)
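Aside: with query_string='multipart-manifest=get' the GET returns the stored manifest JSON rather than the concatenated segments, and the entries come back in Swift's internal form ('name'/'hash'/'bytes') rather than the 'path'/'etag'/'size_bytes' form used on PUT. Condensed (names illustrative):

import json

_, body = client.get_object(url, token, container, 'manifest',
                            query_string='multipart-manifest=get')
for seg in json.loads(body):
    print(seg['name'], seg['hash'], seg['bytes'])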
Example No. 57
    def test_delete_propagate(self):
        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   self.container_name,
                                                   self.object_name)
        hnodes = self.object_ring.get_more_nodes(opart)
        p_dev2 = self.device_dir('object', onodes[1])

        # PUT object
        contents = Body()
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=contents)

        # now let's shut one down
        self.kill_drive(p_dev2)

        # delete on the ones that are left
        client.delete_object(self.url, self.token, self.container_name,
                             self.object_name)

        # spot check a node
        try:
            self.direct_get(onodes[0], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (onodes[0], ))

        # revive the failed drive
        self.revive_drive(p_dev2)

        # propagate the delete...
        # fire up reconstructor on handoff nodes only
        for hnode in hnodes:
            hnode_id = (hnode['port'] - 6000) // 10
            self.reconstructor.once(number=hnode_id)

        # check the revived node to make sure the delete propagated
        try:
            self.direct_get(onodes[1], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (onodes[1],))

        # make sure the proxy GET can't find it either
        try:
            self.proxy_get()
        except Exception as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Proxy GET unexpectedly succeeded after delete of %r' %
                      (self.object_name,))
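Aside: a Swift DELETE doesn't erase fragments in place - it writes a tombstone, a zero-byte <timestamp>.ts file, and the reconstructor reverts tombstones from handoffs back to primaries just like data. That is why reviving the drive and cycling the reconstructor on the handoffs propagates the 404s above. A toy check (our helper, not Swift's; it ignores .meta subtleties):

import os

def hash_dir_is_deleted(hash_dir):
    # the newest file in an object's hash dir wins; a trailing .ts
    # means the most recent operation was a delete
    files = sorted(os.listdir(hash_dir), reverse=True)
    return bool(files) and files[0].endswith('.ts')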
Example No. 58
    def test_metadata_sync(self):
        # Create container
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy': self.policy.name,
                                      'X-Container-Meta-A': '1',
                                      'X-Container-Meta-B': '1',
                                      'X-Container-Meta-C': '1'})

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes.pop()
        # 2 of 3 container servers are temporarily down
        for node in cnodes:
            kill_server((node['ip'], node['port']),
                        self.ipport2server)

        # Put some meta on the lone server, to make sure it's merged properly
        # This will 503 (since we don't have a quorum), but we don't care (!)
        try:
            client.post_container(self.url, self.token, container,
                                  headers={'X-Container-Meta-A': '2',
                                           'X-Container-Meta-B': '2',
                                           'X-Container-Meta-D': '2'})
        except ClientException:
            pass

        # object updates come to only one container server
        for _ in range(self.object_puts):
            obj = 'object-%s' % uuid4()
            client.put_object(self.url, self.token, container, obj, 'VERIFY')

        # 2 container servers come back up
        for node in cnodes:
            start_server((node['ip'], node['port']),
                         self.ipport2server)
        # But the container server that got the object updates is down
        kill_server((cnode['ip'], cnode['port']),
                    self.ipport2server)

        # Metadata update will be applied to 2 container servers
        # (equal to quorum)
        client.post_container(self.url, self.token, container,
                              headers={'X-Container-Meta-B': '3',
                                       'X-Container-Meta-E': '3'})
        # the container server that got the object updates comes back up
        start_server((cnode['ip'], cnode['port']),
                     self.ipport2server)

        # other nodes have no objects
        for node in cnodes:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            self.assertIn(resp_headers.get('x-container-object-count'),
                          (None, '0', 0))

        # If the container-replicator on the node that got the object updates
        # runs first, the db file may be replicated by rsync to the other
        # container servers. In that case, the replicated db file would carry
        # no information about the metadata, so metadata should be synced
        # before replication
        Manager(['container-replicator']).once(
            number=self.config_number(cnode))

        expected_meta = {
            'x-container-meta-a': '2',
            'x-container-meta-b': '3',
            'x-container-meta-c': '1',
            'x-container-meta-d': '2',
            'x-container-meta-e': '3',
        }

        # node that got the object updates now has the meta
        resp_headers = direct_client.direct_head_container(
            cnode, cpart, self.account, container)
        for header, value in expected_meta.items():
            self.assertIn(header, resp_headers)
            self.assertEqual(value, resp_headers[header])
        self.assertNotIn(resp_headers.get('x-container-object-count'),
                         (None, '0', 0))

        # other nodes still have the meta, as well as objects
        for node in cnodes:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            for header, value in expected_meta.items():
                self.assertIn(header, resp_headers)
                self.assertEqual(value, resp_headers[header])
            self.assertNotIn(resp_headers.get('x-container-object-count'),
                             (None, '0', 0))

        # and after full pass on remaining nodes
        for node in cnodes:
            Manager(['container-replicator']).once(
                number=self.config_number(node))

        # ... all is right
        for node in cnodes + [cnode]:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            for header, value in expected_meta.items():
                self.assertIn(header, resp_headers)
                self.assertEqual(value, resp_headers[header])
            self.assertNotIn(resp_headers.get('x-container-object-count'),
                             (None, '0', 0))
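Aside: container metadata reconciles per key with newest-timestamp-wins semantics, which is why X-Container-Meta-A stays at '2' while B ends up at '3' above. A toy model of the merge (not Swift's code):

def merge_meta(*snapshots):
    # each snapshot maps key -> (value, timestamp)
    merged = {}
    for snap in snapshots:
        for key, (value, ts) in snap.items():
            if key not in merged or ts > merged[key][1]:
                merged[key] = (value, ts)
    return merged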
Example No. 59
    def test_reconcile_manifest(self):
        info_url = "%s://%s/info" % (urlparse(
            self.url).scheme, urlparse(self.url).netloc)
        proxy_conn = client.http_connection(info_url)
        cluster_info = client.get_capabilities(proxy_conn)
        if 'slo' not in cluster_info:
            raise SkipTest("SLO not enabled in proxy; "
                           "can't test manifest reconciliation")

        # this test is not only testing a split-brain scenario on
        # multiple policies with mis-placed objects - it even writes out
        # a static large object directly to the storage nodes while the
        # objects are mis-placed and unavailable from *behind* the proxy -
        # and it doesn't know how to do that for EC_POLICY (clayg: why did
        # you guys let me write a test that does this!?) - so we force
        # wrong_policy (where the manifest gets written) to be any one of
        # your configured REPL_POLICY (we know you have one because this
        # is a ReplProbeTest)
        wrong_policy = random.choice(POLICIES_BY_TYPE[REPL_POLICY])
        policy = random.choice(
            [p for p in ENABLED_POLICIES if p is not wrong_policy])
        manifest_data = []

        def write_part(i):
            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
            part_name = 'manifest_part_%0.2d' % i
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            client.put_object(self.url,
                              self.token,
                              self.container_name,
                              part_name,
                              contents=body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        self.brain.put_container(int(policy))
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)

        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # write some more parts
        for i in range(10, 20):
            write_part(i)

        # write manifest
        with self.assertRaises(ClientException) as catcher:
            client.put_object(self.url,
                              self.token,
                              self.container_name,
                              self.object_name,
                              contents=utils.json.dumps(manifest_data),
                              query_string='multipart-manifest=put')

        # so as it works out, you can't really upload a multi-part
        # manifest for objects that are currently misplaced - you have to
        # wait until they're all available - which is about the same as
        # some other failure that causes data to be unavailable to the
        # proxy at the time of upload
        self.assertEqual(catcher.exception.http_status, 400)

        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(self.account, self.container_name,
                                            direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }

        direct_manifest_data = list(map(translate_direct, manifest_data))
        headers = {
            'x-container-host':
            ','.join('%s:%s' % (n['ip'], n['port'])
                     for n in self.container_ring.devs),
            'x-container-device':
            ','.join(n['device'] for n in self.container_ring.devs),
            'x-container-partition':
            container_part,
            'X-Backend-Storage-Policy-Index':
            wrong_policy.idx,
            'X-Static-Large-Object':
            'True',
        }
        for node in nodes:
            direct_client.direct_put_object(
                node,
                part,
                self.account,
                self.container_name,
                direct_manifest_name,
                contents=utils.json.dumps(direct_manifest_data),
                headers=headers)
            break  # one should do it...

        self.brain.start_handoff_half()
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # let's see how that direct upload worked out...
        metadata, body = client.get_object(
            self.url,
            self.token,
            self.container_name,
            direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEqual(entry[key], direct_manifest_data[i][key])
        metadata, body = client.get_object(self.url, self.token,
                                           self.container_name,
                                           direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(
            body,
            ''.join('VERIFY%0.2d' % i + '\x00' * 1048576 for i in range(20)))

        # and regular upload should work now too
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=utils.json.dumps(manifest_data),
                          query_string='multipart-manifest=put')
        metadata = client.head_object(self.url, self.token,
                                      self.container_name, self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
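Aside: the x-container-host / x-container-device / x-container-partition headers passed to direct_put_object above are what the proxy normally computes for every object PUT; they tell the object server where to send its container listing update. A sketch of deriving them for a single container (assuming a loaded container ring):

cpart, cnodes = container_ring.get_nodes(account, container)
update_headers = {
    'x-container-partition': cpart,
    'x-container-host': ','.join(
        '%(ip)s:%(port)s' % n for n in cnodes),
    'x-container-device': ','.join(n['device'] for n in cnodes),
}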
Example No. 60
    def test_revert_object(self):
        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   self.container_name,
                                                   self.object_name)
        hnodes = self.object_ring.get_more_nodes(opart)

        # kill a parity count (2) of the primary nodes so we can force
        # data onto handoffs; we do that by renaming dev dirs to induce
        # 507s
        p_dev1 = self.device_dir(onodes[0])
        p_dev2 = self.device_dir(onodes[1])
        self.kill_drive(p_dev1)
        self.kill_drive(p_dev2)

        # PUT object
        contents = Body()
        headers = {'x-object-meta-foo': 'meta-foo'}
        headers_post = {'x-object-meta-bar': 'meta-bar'}
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=contents,
                          headers=headers)
        client.post_object(self.url,
                           self.token,
                           self.container_name,
                           self.object_name,
                           headers=headers_post)
        # (Some versions of?) swiftclient will mutate the headers dict on post
        headers_post.pop('X-Auth-Token', None)

        # these primaries can't serve the data any more, we expect 507
        # here and not 404 because we're using mount_check to kill nodes
        for onode in (onodes[0], onodes[1]):
            self.assert_direct_get_fails(onode, opart, 507)

        # now take out another primary
        p_dev3 = self.device_dir(onodes[2])
        self.kill_drive(p_dev3)

        # this node can't serve the data any more
        self.assert_direct_get_fails(onodes[2], opart, 507)

        # make sure we can still GET the object and it's correct;
        # we're now pulling from handoffs and reconstructing
        _headers, etag = self.proxy_get()
        self.assertEqual(etag, contents.etag)

        # rename the dev dirs so they don't 507 anymore
        self.revive_drive(p_dev1)
        self.revive_drive(p_dev2)
        self.revive_drive(p_dev3)

        # fire up reconstructor on handoff nodes only
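        # ((port % 100) // 10 maps an SAIO port such as 6030 to config
        # number 3; equivalent to the (port - 6000) // 10 arithmetic in
        # the earlier examples)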
        for hnode in hnodes:
            hnode_id = (hnode['port'] % 100) // 10
            self.reconstructor.once(number=hnode_id)

        # the first and third primaries have data again
        for onode in (onodes[0], onodes[2]):
            self.assert_direct_get_succeeds(onode, opart)

        # check meta
        meta = client.head_object(self.url, self.token, self.container_name,
                                  self.object_name)
        for key in headers_post:
            self.assertIn(key, meta)
            self.assertEqual(meta[key], headers_post[key])

        # handoffs are empty
        for hnode in hnodes:
            self.assert_direct_get_fails(hnode, opart, 404)