def test_main(self):
    """Verify the object-updater delivers a container update missed
    while a primary container server was down."""
    # Make a fresh container.
    container = "container-%s" % uuid4()
    client.put_container(self.url, self.token, container)

    # Take down a non-primary server plus one primary, leaving two
    # primaries running.
    cpart, cnodes = self.container_ring.get_nodes(self.account, container)
    cnode = cnodes[0]
    kill_nonprimary_server(cnodes, self.ipport2server)
    kill_server((cnode["ip"], cnode["port"]), self.ipport2server)

    # PUT an object; the downed primary misses its container update.
    obj = "object-%s" % uuid4()
    client.put_object(self.url, self.token, container, obj, "")

    # Bring the primary back; it must not yet list the object.
    start_server((cnode["ip"], cnode["port"]), self.ipport2server)
    listing = direct_client.direct_get_container(
        cnode, cpart, self.account, container)[1]
    self.assertFalse(listing)

    # Deliver the queued async pending update.
    Manager(["object-updater"]).once()

    # The restarted primary now knows about the object.
    names = [entry["name"] for entry in direct_client.direct_get_container(
        cnode, cpart, self.account, container)[1]]
    self.assertIn(obj, names)
def test_update_during_PUT(self):
    """A container update sent during a PUT must carry the
    X-Object-Sysmeta override values, while the object itself keeps
    its real metadata."""
    int_client = self.make_internal_client()
    override_headers = {
        "Content-Type": "text/plain",
        "X-Object-Sysmeta-Container-Update-Override-Etag": "override-etag",
        "X-Object-Sysmeta-Container-Update-Override-Content-Type":
            "override-type",
        "X-Object-Sysmeta-Container-Update-Override-Size": "1999",
    }
    client.put_container(self.url, self.token, "c1",
                         headers={"X-Storage-Policy": self.policy.name})
    int_client.upload_object(StringIO(u"stuff"), self.account,
                             "c1", "o1", override_headers)

    # Flush any async pendings so the listing is current.
    Manager(["object-updater"]).once()

    # The object HEAD reports the true values...
    meta = int_client.get_object_metadata(self.account, "c1", "o1")
    self.assertEqual("text/plain", meta["content-type"])
    self.assertEqual("c13d88cb4cb02003daedb8a84e5d272a", meta["etag"])
    self.assertEqual("5", meta["content-length"])

    # ...while the container listing reflects the overrides.
    for entry in int_client.iter_objects(self.account, "c1"):
        if entry["name"] == "o1":
            self.assertEqual("override-etag", entry["hash"])
            self.assertEqual("override-type", entry["content_type"])
            self.assertEqual(1999, entry["bytes"])
            break
    else:
        self.fail("Failed to find object o1 in listing")
def test_main(self):
    """Older-style variant: the object-updater delivers a missed update.

    Flow: create a container; kill a non-primary plus one primary
    container server; PUT an object (the downed primary's update is
    queued as an async pending); restart the primary and assert it is
    unaware of the object; run swift-object-updater on every object
    server; assert the primary now lists the object.

    Fix: deprecated ``self.assert_()`` replaced with ``assertFalse`` /
    ``assertIn``.
    """
    container = 'container-%s' % uuid4()
    client.put_container(self.url, self.token, container)
    cpart, cnodes = self.container_ring.get_nodes(self.account, container)
    cnode = cnodes[0]
    kill_nonprimary_server(cnodes, self.port2server, self.pids)
    kill_server(cnode['port'], self.port2server, self.pids)
    obj = 'object-%s' % uuid4()
    client.put_object(self.url, self.token, container, obj, '')
    start_server(cnode['port'], self.port2server, self.pids)
    # The restarted primary must not yet list the object.
    self.assertFalse(direct_client.direct_get_container(
        cnode, cpart, self.account, container)[1])
    # Run the updater once on each of the four object servers.
    processes = []
    for node in xrange(1, 5):
        processes.append(Popen(['swift-object-updater',
                                '/etc/swift/object-server/%d.conf' % node,
                                'once']))
    for process in processes:
        process.wait()
    objs = [o['name'] for o in direct_client.direct_get_container(
        cnode, cpart, self.account, container)[1]]
    self.assertIn(obj, objs)
def setUp(self):
    """Populate the account with one container+object per enabled
    policy, push container stats up to the account layer, then delete
    the account DBs directly so the reaper has work to do."""
    super(TestAccountReaper, self).setUp()
    self.all_objects = []
    body = 'test-body'

    # One container (with one object) per storage policy.
    for policy in ENABLED_POLICIES:
        container = 'container-%s-%s' % (policy.name, uuid.uuid4())
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy': policy.name})
        obj = 'object-%s' % uuid.uuid4()
        client.put_object(self.url, self.token, container, obj, body)
        self.all_objects.append((policy, container, obj))
        policy.load_ring('/etc/swift')

    # Propagate container stats to the account layer.
    Manager(['container-updater']).once()

    # Sanity-check the account-level counters.
    headers = client.head_account(self.url, self.token)
    expected = len(ENABLED_POLICIES)
    self.assertEqual(int(headers['x-account-container-count']), expected)
    self.assertEqual(int(headers['x-account-object-count']), expected)
    self.assertEqual(int(headers['x-account-bytes-used']),
                     expected * len(body))

    # Delete the account directly on every replica.
    part, nodes = self.account_ring.get_nodes(self.account)
    for node in nodes:
        direct_delete_account(node, part, self.account)
def test_async_update_after_PUT(self):
    """An async pending created by a PUT is delivered by the updater."""
    cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
    client.put_container(self.url, self.token, 'c1',
                         headers={'X-Storage-Policy': self.policy.name})

    # Stop one container primary so the PUT's update is queued as an
    # async pending on the object server.
    kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
    content = u'stuff'
    client.put_object(self.url, self.token, 'c1', 'o1', contents=content)
    meta = client.head_object(self.url, self.token, 'c1', 'o1')

    # After restart the server must still be unaware of the object.
    start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
    listing = direct_client.direct_get_container(
        cnodes[0], cpart, self.account, 'c1')[1]
    self.assertFalse(listing)

    # Deliver the queued update.
    Manager(['object-updater']).once()

    # The restarted server now lists the object with matching metadata.
    entry = direct_client.direct_get_container(
        cnodes[0], cpart, self.account, 'c1')[1][0]
    self.assertEqual(meta['etag'], entry['hash'])
    self.assertEqual(len(content), entry['bytes'])
def create_container(request):
    """
    Creates a container (empty object of type application/directory)
    """
    storage_url = get_endpoint(request, 'adminURL')
    auth_token = get_token_id(request)
    conn = client.http_connection(storage_url,
                                  insecure=settings.SWIFT_INSECURE)

    container_form = CreateContainerForm(request.POST or None)
    if container_form.is_valid():
        container = container_form.cleaned_data['containername']
        try:
            # Create the container and record the action on success.
            client.put_container(storage_url, auth_token, container,
                                 http_conn=conn)
            messages.add_message(request, messages.SUCCESS,
                                 _("Container created."))
            actionlog.log(request.user.username, "create", container)
        except client.ClientException as err:
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'),
                                        err))
            messages.add_message(request, messages.ERROR,
                                 _('Access denied.'))
        return redirect(containerview)

    context = utils.update_default_context(request, {
        'form': container_form,
    })
    return render_to_response('create_container.html', context,
                              context_instance=RequestContext(request))
def test_one_node_fails(self):
    """A container delete missed by one node is undone by a later PUT.

    Fix: deprecated ``assertEquals`` replaced with ``assertEqual``.
    """
    # Create container1
    container1 = 'container-%s' % uuid4()
    cpart, cnodes = self.container_ring.get_nodes(self.account,
                                                  container1)
    client.put_container(self.url, self.token, container1)
    # Kill container1 servers excepting two of the primaries
    kill_nonprimary_server(cnodes, self.port2server, self.pids)
    kill_server(cnodes[0]['port'], self.port2server, self.pids)
    # Delete container1
    client.delete_container(self.url, self.token, container1)
    # Restart other container1 primary server
    start_server(cnodes[0]['port'], self.port2server, self.pids)
    # Create container1/object1 (allowed because at least server thinks
    # the container exists)
    client.put_object(self.url, self.token, container1, 'object1', '123')
    # Get to a final state
    get_to_final_state()
    # Assert all container1 servers indicate container1 is alive and
    # well with object1
    for cnode in cnodes:
        self.assertEqual(
            [o['name'] for o in direct_client.direct_get_container(
                cnode, cpart, self.account, container1)[1]],
            ['object1'])
    # Assert account level also indicates container1 is alive and
    # well with object1
    headers, containers = client.get_account(self.url, self.token)
    self.assertEqual(headers['x-account-container-count'], '1')
    self.assertEqual(headers['x-account-object-count'], '1')
    self.assertEqual(headers['x-account-bytes-used'], '3')
def test_main(self):
    """Oldest-style updater test using raw process management.

    Kills one container primary with SIGTERM, PUTs an object, restarts
    the server via Popen, then runs swift-object-updater on each object
    server and verifies the update arrived.

    Fixes: deprecated ``assert_()`` replaced with ``assertFalse`` /
    ``assertIn``; ``//`` keeps the conf-number an int on py3.
    """
    container = 'container-%s' % uuid4()
    client.put_container(self.url, self.token, container)
    apart, anodes = self.account_ring.get_nodes(self.account)
    anode = anodes[0]
    cpart, cnodes = self.container_ring.get_nodes(self.account, container)
    cnode = cnodes[0]
    kill(self.pids[self.port2server[cnode['port']]], SIGTERM)
    obj = 'object-%s' % uuid4()
    client.put_object(self.url, self.token, container, obj, '')
    # Restart the killed container server; // for integer semantics.
    self.pids[self.port2server[cnode['port']]] = \
        Popen(['swift-container-server',
               '/etc/swift/container-server/%d.conf' %
               ((cnode['port'] - 6001) // 10)]).pid
    sleep(2)
    # The restarted server must not yet list the object.
    self.assertFalse(direct_client.direct_get_container(
        cnode, cpart, self.account, container)[1])
    ps = []
    for n in xrange(1, 5):
        ps.append(Popen(['swift-object-updater',
                         '/etc/swift/object-server/%d.conf' % n,
                         'once']))
    for p in ps:
        p.wait()
    objs = [o['name'] for o in direct_client.direct_get_container(
        cnode, cpart, self.account, container)[1]]
    self.assertIn(obj, objs)
def test_two_nodes_fail(self):
    """A delete seen by one primary wins after replication (old API).

    Fixes: deprecated ``assertEquals`` -> ``assertEqual``; the
    ``exc = None`` pattern would raise AttributeError on ``None`` if no
    exception occurred — replaced with an explicit ``else: self.fail``.
    """
    # Create container1
    container1 = 'container-%s' % uuid4()
    cpart, cnodes = self.container_ring.get_nodes(self.account,
                                                  container1)
    client.put_container(self.url, self.token, container1)
    # Kill container1 servers excepting one of the primaries
    cnp_port = kill_nonprimary_server(cnodes, self.port2server,
                                      self.pids)
    kill_server(cnodes[0]['port'], self.port2server, self.pids)
    kill_server(cnodes[1]['port'], self.port2server, self.pids)
    # Delete container1 directly to the one primary still up
    direct_client.direct_delete_container(cnodes[2], cpart, self.account,
                                          container1)
    # Restart other container1 servers
    start_server(cnodes[0]['port'], self.port2server, self.pids)
    start_server(cnodes[1]['port'], self.port2server, self.pids)
    start_server(cnp_port, self.port2server, self.pids)
    # Get to a final state
    get_to_final_state()
    # Assert all container1 servers indicate container1 is gone (happens
    # because the one node that knew about the delete replicated to the
    # others.)
    for cnode in cnodes:
        try:
            direct_client.direct_get_container(cnode, cpart,
                                               self.account, container1)
        except client.ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
    # Assert account level also indicates container1 is gone
    headers, containers = client.get_account(self.url, self.token)
    self.assertEqual(headers['x-account-container-count'], '0')
    self.assertEqual(headers['x-account-object-count'], '0')
    self.assertEqual(headers['x-account-bytes-used'], '0')
def test_sync_lazy_dkey(self):
    """Sync is blocked until the destination container gets a key."""
    # Destination starts without an X-Container-Sync-Key.
    source_container, dest_container = \
        self._setup_synced_containers('secret', None)

    # Upload an object on the source side.
    object_name = 'object-%s' % uuid.uuid4()
    client.put_object(self.url, self.token, source_container,
                      object_name, 'test-body')

    # With no destination key a sync pass must replicate nothing.
    Manager(['container-sync']).once()
    with self.assertRaises(ClientException) as err:
        _junk, body = client.get_object(self.url, self.token,
                                        dest_container, object_name)
    self.assertEqual(err.exception.http_status, HTTP_NOT_FOUND)

    # Add the key on the destination container.
    client.put_container(self.url, self.token, dest_container,
                         headers={'X-Container-Sync-Key': 'secret'})

    # A second sync pass now replicates the object.
    Manager(['container-sync']).once()
    _junk, body = client.get_object(self.url, self.token,
                                    dest_container, object_name)
    self.assertEqual(body, 'test-body')
def _setup_synced_containers(self, skey='secret', dkey='secret'):
    """Create a (source, dest) container pair wired for container-sync.

    ``skey``/``dkey`` are the X-Container-Sync-Key values; pass None to
    omit the key on that side. When several policies are enabled the
    two containers are placed on different storage policies.
    """
    # Destination container first so the sync-to URL can reference it.
    dest_container = 'dest-container-%s' % uuid.uuid4()
    dest_headers = {}
    dest_policy = None
    if len(ENABLED_POLICIES) > 1:
        dest_policy = random.choice(ENABLED_POLICIES)
        dest_headers['X-Storage-Policy'] = dest_policy.name
    if dkey is not None:
        dest_headers['X-Container-Sync-Key'] = dkey
    client.put_container(self.url, self.token, dest_container,
                         headers=dest_headers)

    # Source container points at the destination.
    source_container = 'source-container-%s' % uuid.uuid4()
    source_headers = {
        'X-Container-Sync-To': '//%s/%s/%s/%s' % (
            self.realm, self.cluster, self.account, dest_container),
    }
    if skey is not None:
        source_headers['X-Container-Sync-Key'] = skey
    if dest_policy:
        # Pick any policy other than the destination's.
        others = [p for p in ENABLED_POLICIES if p is not dest_policy]
        source_headers['X-Storage-Policy'] = random.choice(others).name
    client.put_container(self.url, self.token, source_container,
                         headers=source_headers)

    return source_container, dest_container
def create_container(self):
    """Create ``self.container`` and verify a 2xx response status.

    Bug fix: the original asserted ``status >= 200 or status < 300``,
    which is true for *every* integer status; the check now requires
    the status to actually be in the 2xx success range.
    """
    response = dict()
    swiftclient.put_container(self.url, self.token, self.container,
                              headers=None, response_dict=response)
    status = response.get('status')
    assert status is not None and 200 <= status < 300
def setUp(self):
    """Create an EC object with metadata and stash per-frag state.

    Sanity-checks that the policy under test is an EC policy, uploads
    an object, POSTs extra metadata, and records every fragment's
    headers and etags for later comparison by the tests.

    Fix: the sanity loop iterated ``.items()`` but never used the node
    index — iterate the values directly.
    """
    super(TestReconstructorRebuild, self).setUp()
    self.container_name = 'container-%s' % uuid.uuid4()
    self.object_name = 'object-%s' % uuid.uuid4()
    # sanity: these tests only make sense against an EC policy
    self.assertEqual(self.policy.policy_type, EC_POLICY)
    self.reconstructor = Manager(["object-reconstructor"])
    # create EC container
    headers = {'X-Storage-Policy': self.policy.name}
    client.put_container(self.url, self.token, self.container_name,
                         headers=headers)
    # PUT object and POST some metadata
    contents = Body()
    headers = {'x-object-meta-foo': 'meta-foo'}
    self.headers_post = {'x-object-meta-bar': 'meta-bar'}
    self.etag = client.put_object(self.url, self.token,
                                  self.container_name, self.object_name,
                                  contents=contents, headers=headers)
    client.post_object(self.url, self.token, self.container_name,
                       self.object_name, headers=dict(self.headers_post))
    self.opart, self.onodes = self.object_ring.get_nodes(
        self.account, self.container_name, self.object_name)
    # stash frag etags and metadata for later comparison
    self.frag_headers, self.frag_etags = \
        self._assert_all_nodes_have_frag()
    for hdrs in self.frag_headers.values():
        # sanity check: every frag must carry a durable timestamp
        self.assertIn(
            'X-Backend-Durable-Timestamp', hdrs,
            'Missing durable timestamp in %r' % self.frag_headers)
def test_update_during_PUT(self):
    """A container update sent during PUT carries the X-Backend
    override values, while the object itself keeps its real metadata."""
    int_client = self.make_internal_client()
    put_headers = {
        'Content-Type': 'text/plain',
        'X-Backend-Container-Update-Override-Etag': 'override-etag',
        'X-Backend-Container-Update-Override-Content-Type':
            'override-type',
        'X-Backend-Container-Update-Override-Size': '1999',
    }
    client.put_container(self.url, self.token, 'c1',
                         headers={'X-Storage-Policy': self.policy.name})
    int_client.upload_object(StringIO(u'stuff'), self.account,
                             'c1', 'o1', put_headers)

    # Flush any async pendings so the listing is current.
    Manager(['object-updater']).once()

    # Object HEAD reflects the real values...
    meta = int_client.get_object_metadata(self.account, 'c1', 'o1')
    self.assertEqual('text/plain', meta['content-type'])
    self.assertEqual('c13d88cb4cb02003daedb8a84e5d272a', meta['etag'])
    self.assertEqual('5', meta['content-length'])

    # ...while the container listing shows the overrides.
    for entry in int_client.iter_objects(self.account, 'c1'):
        if entry['name'] == 'o1':
            self.assertEqual('override-etag', entry['hash'])
            self.assertEqual('override-type', entry['content_type'])
            self.assertEqual(1999, entry['bytes'])
            break
    else:
        self.fail('Failed to find object o1 in listing')
def test(self):
    """Container update for an internal-client PUT uses the override
    etag/content-type while the object keeps its true metadata."""
    override_headers = {
        'Content-Type': 'text/plain',
        'X-Backend-Container-Update-Override-Etag': 'override-etag',
        'X-Backend-Container-Update-Override-Content-Type':
            'override-type',
    }
    client.put_container(self.url, self.token, 'c1',
                         headers={'X-Storage-Policy': self.policy.name})
    self.int_client.upload_object(StringIO(u'stuff'), self.account,
                                  'c1', 'o1', override_headers)

    # Flush async pendings so the listing is current.
    Manager(['object-updater']).once()

    # Object metadata keeps the true values...
    meta = self.int_client.get_object_metadata(self.account, 'c1', 'o1')
    self.assertEqual('text/plain', meta['content-type'])
    self.assertEqual('c13d88cb4cb02003daedb8a84e5d272a', meta['etag'])

    # ...while the listing entry shows the overrides.
    for entry in self.int_client.iter_objects(self.account, 'c1'):
        if entry['name'] == 'o1':
            self.assertEqual('override-etag', entry['hash'])
            self.assertEqual('override-type', entry['content_type'])
            break
    else:
        self.fail('Failed to find object o1 in listing')
def test_main(self):
    """Object-updater delivers a missed container update (pids-era API).

    Fix: deprecated ``self.assert_()`` replaced with ``assertFalse`` /
    ``assertIn``.
    """
    # Create container
    container = 'container-%s' % uuid4()
    client.put_container(self.url, self.token, container)
    # Kill container servers excepting two of the primaries
    cpart, cnodes = self.container_ring.get_nodes(self.account, container)
    cnode = cnodes[0]
    kill_nonprimary_server(cnodes, self.ipport2server, self.pids)
    kill_server((cnode['ip'], cnode['port']), self.ipport2server,
                self.pids)
    # Create container/obj
    obj = 'object-%s' % uuid4()
    client.put_object(self.url, self.token, container, obj, '')
    # Restart other primary server
    start_server((cnode['ip'], cnode['port']), self.ipport2server,
                 self.pids)
    # Assert it does not know about container/obj
    self.assertFalse(direct_client.direct_get_container(
        cnode, cpart, self.account, container)[1])
    # Run the object-updaters
    Manager(['object-updater']).once()
    # Assert the other primary server now knows about container/obj
    objs = [o['name'] for o in direct_client.direct_get_container(
        cnode, cpart, self.account, container)[1]]
    self.assertIn(obj, objs)
def test_delete_propogate(self):
    """Verify a DELETE reaches a node that was down at delete time, via
    the reconstructor running on handoffs.

    Fixes: typos in failure messages ('destoryed' -> 'destroyed'); the
    second failure message now names the node actually checked
    (onodes[1], not onodes[0]); ``//`` keeps the handoff id an int on
    py3.
    """
    # create EC container
    headers = {'X-Storage-Policy': self.policy.name}
    client.put_container(self.url, self.token, self.container_name,
                         headers=headers)

    # get our node lists
    opart, onodes = self.object_ring.get_nodes(
        self.account, self.container_name, self.object_name)
    hnodes = self.object_ring.get_more_nodes(opart)
    p_dev2 = self.device_dir('object', onodes[1])

    # PUT object
    contents = Body()
    client.put_object(self.url, self.token, self.container_name,
                      self.object_name, contents=contents)

    # now lets shut one down
    self.kill_drive(p_dev2)

    # delete on the ones that are left
    client.delete_object(self.url, self.token, self.container_name,
                         self.object_name)

    # spot check a node
    try:
        self.direct_get(onodes[0], opart)
    except direct_client.DirectClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail('Node data on %r was not fully destroyed!' %
                  (onodes[0],))

    # enable the first node again
    self.revive_drive(p_dev2)

    # propagate the delete: fire up reconstructor on handoff nodes only
    for hnode in hnodes:
        hnode_id = (hnode['port'] - 6000) // 10
        self.reconstructor.once(number=hnode_id)

    # check the revived node to make sure its data is gone
    try:
        self.direct_get(onodes[1], opart)
    except direct_client.DirectClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail('Node data on %r was not fully destroyed!' %
                  (onodes[1],))

    # make sure proxy get can't find it
    try:
        self.proxy_get()
    except Exception as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail('Node data on %r was not fully destroyed!' %
                  (onodes[0],))
def put_container(self, container_name):
    """Create ``container_name`` if it does not already exist.

    Returns True when the container was created, False when a container
    of that name is already present.

    Fixes: ``== None`` -> ``is None``; dead commented-out CORS header
    code removed; PEP 8 spacing.
    """
    # Only create the container when the listing lookup finds nothing.
    if self.list_container(container_name) is None:
        client.put_container(self.storage_url, self.token,
                             container_name, http_conn=self.http_conn)
        return True
    return False
def create_container(request):
    """
    Creates a container (empty object of type application/directory)

    Fix: the existence check used a bare ``except:`` which also
    swallowed KeyboardInterrupt/SystemExit and hid programming errors;
    it now catches only ``client.ClientException`` (what swiftclient
    raises for a missing container).
    """
    storage_url = request.session.get("storage_url", "")
    auth_token = request.session.get("auth_token", "")
    headers = {
        "X-Container-Meta-Access-Control-Expose-Headers":
            "Access-Control-Allow-Origin",
        "X-Container-Meta-Access-Control-Allow-Origin": settings.BASE_URL,
    }

    form = CreateContainerForm(request.POST or None)
    if form.is_valid():
        container = form.cleaned_data["containername"]
        # Check container does not already exist: a successful GET means
        # it is there; a ClientException means we may create it.
        try:
            client.get_container(storage_url, auth_token, container)
            messages.add_message(
                request, messages.ERROR,
                _("Container {0} already exists.".format(container)))
        except client.ClientException:
            try:
                client.put_container(storage_url, auth_token, container,
                                     headers)
                messages.add_message(request, messages.INFO,
                                     _("Container created."))
            except client.ClientException:
                messages.add_message(request, messages.ERROR,
                                     _("Access denied."))
        return redirect(containerview)

    return render_to_response(
        "create_container.html",
        {"session": request.session},
        context_instance=RequestContext(request))
def create_container(self, keystone_username=None, swift_url=None,
                     swift_container=None):
    """creates the container in swift with read and write permissions"""
    # Grant read and write to <tenant>:<user>.
    acl = settings.KEYSTONE_TENANT + ':' + keystone_username
    acl_headers = {'x-container-read': acl, 'x-container-write': acl}

    conn = swift.HTTPConnection(url=swift_url, insecure=True)
    swift.put_container(swift_url, self.keystone.get_token('id'),
                        swift_container, headers=acl_headers,
                        http_conn=(conn.parsed_url, conn))
def test_server_error(self):
    """A 500 from the server raises ClientException carrying the body."""
    body = "c" * 60
    c.http_connection = self.fake_http_connection(500, body=body)
    # put_container must raise on a server error...
    self.assertRaises(c.ClientException, c.put_container,
                      "http://www.test.com", "asdf", "asdf")
    # ...and the raised exception must expose the response body.
    try:
        c.put_container("http://www.test.com", "asdf", "asdf")
    except c.ClientException as exc:
        self.assertEqual(exc.http_response_content, body)
def ingest(self):
    '''Upload a file when creating an id, generally used with swift

    Fix: the file handle returned by open() was never closed; a context
    manager now guarantees it is released even if the upload raises.
    '''
    put_container(self.url, self.token, self.swift_info["container"])
    with open(self.path) as source:
        put_object(self.url, token=self.token,
                   container=self.swift_info["container"],
                   name=self.swift_info["object"],
                   contents=source)
def create_container(self, name, headers=None):
    """Create container ``name``, connecting lazily if needed."""
    # Establish the connection on first use.
    if not self.http_conn:
        self.connect()
    swift.put_container(url=self.swift_url, token=self.token,
                        http_conn=self.http_conn, container=name,
                        headers=headers)
    if self.debug:
        print("Container {0} created".format(name))
def test_missing_container(self):
    """Container replicas created on handoffs must, once replicated to
    the primaries, eventually learn about objects via the updater.

    Fixes: py2-only generator ``.next()`` replaced with the builtin
    ``next()``; typo in the SkipTest message ('more that' -> 'more
    than').
    """
    # In this test, we need to put container at handoff devices, so we
    # need container devices more than replica count
    if len(self.container_ring.devs) <= self.container_ring.replica_count:
        raise SkipTest("Need devices more than replica count")

    container = "container-%s" % uuid4()
    cpart, cnodes = self.container_ring.get_nodes(self.account, container)

    # Kill all primary container servers
    for cnode in cnodes:
        kill_server((cnode["ip"], cnode["port"]), self.ipport2server)

    # Create container, and all of its replicas are placed at handoff
    # device
    try:
        client.put_container(self.url, self.token, container)
    except ClientException as err:
        # if the cluster doesn't have enough devices, swift may return
        # error (ex. When we only have 4 devices in 3-replica cluster).
        self.assertEqual(err.http_status, 503)

    # Assert handoff device has a container replica
    another_cnode = next(self.container_ring.get_more_nodes(cpart))
    direct_client.direct_get_container(another_cnode, cpart,
                                       self.account, container)

    # Restart all primary container servers
    for cnode in cnodes:
        start_server((cnode["ip"], cnode["port"]), self.ipport2server)

    # Create container/obj
    obj = "object-%s" % uuid4()
    client.put_object(self.url, self.token, container, obj, "")

    # Run the object-updater
    Manager(["object-updater"]).once()

    # Run the container-replicator, and now, container replicas
    # at handoff device get moved to primary servers
    Manager(["container-replicator"]).once()

    # Assert container replicas in primary servers, just moved by
    # replicator don't know about the object
    for cnode in cnodes:
        self.assertFalse(direct_client.direct_get_container(
            cnode, cpart, self.account, container)[1])

    # since the container is empty - we can delete it!
    client.delete_container(self.url, self.token, container)

    # Re-run the object-updaters and now container replicas in primary
    # container servers should get updated
    Manager(["object-updater"]).once()

    # Assert all primary container servers know about container/obj
    for cnode in cnodes:
        objs = [o["name"] for o in direct_client.direct_get_container(
            cnode, cpart, self.account, container)[1]]
        self.assertIn(obj, objs)
def test_update_during_POST_only(self):
    """Listing metadata stays correct when a POST update lands before
    the PUT's async pending is sent."""
    # verify correct update values when PUT update is missed but then a
    # POST update succeeds *before* the PUT async pending update is sent
    cpart, cnodes = self.container_ring.get_nodes(self.account, "c1")
    client.put_container(self.url, self.token, "c1",
                         headers={"X-Storage-Policy": self.policy.name})
    # put an object while one container server is stopped so that we force
    # an async update to it
    kill_server((cnodes[0]["ip"], cnodes[0]["port"]), self.ipport2server)
    content = u"stuff"
    client.put_object(self.url, self.token, "c1", "o1", contents=content,
                      content_type="test/ctype")
    meta = client.head_object(self.url, self.token, "c1", "o1")
    # re-start the container server and assert that it does not yet know
    # about the object
    start_server((cnodes[0]["ip"], cnodes[0]["port"]), self.ipport2server)
    self.assertFalse(direct_client.direct_get_container(
        cnodes[0], cpart, self.account, "c1")[1])
    # use internal client for POST so we can force fast-post mode
    int_client = self.make_internal_client(object_post_as_copy=False)
    int_client.set_object_metadata(
        self.account, "c1", "o1", {"X-Object-Meta-Fruit": "Tomato"})
    self.assertEqual(
        "Tomato",
        int_client.get_object_metadata(
            self.account, "c1", "o1")["x-object-meta-fruit"])  # sanity
    # check the re-started container server got same update as others.
    # we cannot assert the actual etag value because it may be encrypted
    listing_etags = set()
    for cnode in cnodes:
        listing = direct_client.direct_get_container(
            cnode, cpart, self.account, "c1")[1]
        self.assertEqual(1, len(listing))
        self.assertEqual(len(content), listing[0]["bytes"])
        self.assertEqual("test/ctype", listing[0]["content_type"])
        listing_etags.add(listing[0]["hash"])
    # all replicas must agree on a single listing etag
    self.assertEqual(1, len(listing_etags))
    # check that listing meta returned to client is consistent with object
    # meta returned to client
    hdrs, listing = client.get_container(self.url, self.token, "c1")
    self.assertEqual(1, len(listing))
    self.assertEqual("o1", listing[0]["name"])
    self.assertEqual(len(content), listing[0]["bytes"])
    self.assertEqual(meta["etag"], listing[0]["hash"])
    self.assertEqual("test/ctype", listing[0]["content_type"])
    # Run the object-updaters to send the async pending from the PUT
    Manager(["object-updater"]).once()
    # check container listing metadata is still correct: the stale PUT
    # update must not clobber the newer POST values
    for cnode in cnodes:
        listing = direct_client.direct_get_container(
            cnode, cpart, self.account, "c1")[1]
        self.assertEqual(1, len(listing))
        self.assertEqual(len(content), listing[0]["bytes"])
        self.assertEqual("test/ctype", listing[0]["content_type"])
        listing_etags.add(listing[0]["hash"])
    # still a single etag value across all replicas and both phases
    self.assertEqual(1, len(listing_etags))
def test_sync_unexpired_object_metadata(self):
    """Reconstructor must sync metadata onto a frag that missed a POST
    which removed x-delete-at, so no node wrongly expires the object."""
    # verify that metadata can be sync'd to a frag that has missed a POST
    # and consequently that frag appears to be expired, when in fact the
    # POST removed the x-delete-at header
    client.put_container(self.url, self.token, self.container_name,
                         headers={'x-storage-policy': self.policy.name})
    opart, onodes = self.object_ring.get_nodes(
        self.account, self.container_name, self.object_name)
    # short expiry so the stale frag appears expired within the test run
    delete_at = int(time.time() + 3)
    contents = 'body-%s' % uuid.uuid4()
    headers = {'x-delete-at': delete_at}
    client.put_object(self.url, self.token, self.container_name,
                      self.object_name, headers=headers,
                      contents=contents)
    # fail a primary
    post_fail_node = random.choice(onodes)
    post_fail_path = self.device_dir('object', post_fail_node)
    self.kill_drive(post_fail_path)
    # post over w/o x-delete-at
    client.post_object(self.url, self.token, self.container_name,
                       self.object_name,
                       {'content-type': 'something-new'})
    # revive failed primary
    self.revive_drive(post_fail_path)
    # wait for the delete_at to pass, and check that it thinks the object
    # is expired
    timeout = time.time() + 5
    while time.time() < timeout:
        try:
            direct_client.direct_head_object(
                post_fail_node, opart, self.account, self.container_name,
                self.object_name, headers={
                    'X-Backend-Storage-Policy-Index': int(self.policy)})
        except direct_client.ClientException as err:
            if err.http_status != 404:
                raise
            break
        else:
            time.sleep(0.1)
    else:
        self.fail('Failed to get a 404 from node with expired object')
    # NOTE(review): reading ``err`` here relies on the except target
    # staying bound after the except block — true on py2; under py3 the
    # name is unbound after the handler. Confirm the target interpreter.
    self.assertEqual(err.http_status, 404)
    self.assertIn('X-Backend-Timestamp', err.http_headers)
    # but from the proxy we've got the whole story
    headers, body = client.get_object(self.url, self.token,
                                      self.container_name,
                                      self.object_name)
    self.assertNotIn('X-Delete-At', headers)
    self.reconstructor.once()
    # ... and all the nodes have the final unexpired state
    for node in onodes:
        headers = direct_client.direct_head_object(
            node, opart, self.account, self.container_name,
            self.object_name, headers={
                'X-Backend-Storage-Policy-Index': int(self.policy)})
        self.assertNotIn('X-Delete-At', headers)
def test_server_error(self):
    """A 500 response raises ClientException carrying the body.

    Fix: deprecated ``assertEquals`` replaced with ``assertEqual``.
    """
    body = 'c' * 60
    c.http_connection = self.fake_http_connection(500, body=body)
    # put_container must raise on a server error...
    self.assertRaises(c.ClientException, c.put_container,
                      'http://www.test.com', 'asdf', 'asdf')
    # ...and the exception must carry the response body.
    try:
        c.put_container('http://www.test.com', 'asdf', 'asdf')
    except c.ClientException as e:
        self.assertEqual(e.http_response_content, body)
def moveRecursive(self, newname):
    """ Move/rename a container. This is only working for empty
    containers and objects with a size of 0. Required by some clients
    because they create a folder "New folder" first. For all other
    requests this will simply return HTTP_FORBIDDEN. """
    oldname = self.path.lstrip('/')
    newname = newname.lstrip('/')
    if '/' not in oldname:
        # Renaming a container: delete it and recreate under the new
        # name. The new container starts empty, which is fine because
        # the delete only succeeds on an empty container anyway.
        try:
            # Container deletion will fail if not empty
            client.delete_container(self.storage_url, self.auth_token,
                                    oldname)
            client.put_container(self.storage_url, self.auth_token,
                                 newname)
        except client.ClientException:
            raise dav_error.DAVError(dav_error.HTTP_FORBIDDEN)
    else:
        # Renaming an object/pseudofolder inside a container.
        old_container, _, old_object = oldname.partition('/')
        new_container, _, new_object = newname.partition('/')
        # Cross-container moves are not supported.
        if old_container != new_container:
            raise dav_error.DAVError(dav_error.HTTP_FORBIDDEN)
        # If it is a pseudofolder, check that it is empty
        _, objects = client.get_container(
            self.storage_url, self.auth_token, container=self.container,
            delimiter='/',
            prefix=sanitize(old_object).rstrip('/') + '/',
            http_conn=self.http_connection)
        if len(objects) != 1:
            # first object is the pseudofolder entry
            raise dav_error.DAVError(dav_error.HTTP_FORBIDDEN)
        # also check if the object is empty
        if objects[0].get('bytes') != 0:
            raise dav_error.DAVError(dav_error.HTTP_FORBIDDEN)
        # Do a COPY to preserve existing metadata and content-type,
        # then remove the old entry.
        client.put_object(self.storage_url, self.auth_token,
                          self.container, sanitize(new_object),
                          headers={'X-Copy-From': '/' + oldname + '/'},
                          http_conn=self.http_connection)
        client.delete_object(self.storage_url, self.auth_token,
                             self.container, old_object + '/',
                             http_conn=self.http_connection)
def test_sync(self):
    """End-to-end account reaper test across all enabled policies.

    Fixes: ``body`` hoisted out of the loop (it was referenced after
    the loop that defined it); the un-reaped container failure message
    now reports the container node that was checked (it previously used
    the ``node`` variable leaked from the earlier account-nodes loop);
    deprecated ``assertEquals`` -> ``assertEqual``.
    """
    all_objects = []
    body = 'test-body'
    # upload some containers, one per enabled policy
    for policy in ENABLED_POLICIES:
        container = 'container-%s-%s' % (policy.name, uuid.uuid4())
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy': policy.name})
        obj = 'object-%s' % uuid.uuid4()
        client.put_object(self.url, self.token, container, obj, body)
        all_objects.append((policy, container, obj))

    # push container stats to the account layer and sanity-check them
    Manager(['container-updater']).once()
    headers = client.head_account(self.url, self.token)
    self.assertEqual(int(headers['x-account-container-count']),
                     len(ENABLED_POLICIES))
    self.assertEqual(int(headers['x-account-object-count']),
                     len(ENABLED_POLICIES))
    self.assertEqual(int(headers['x-account-bytes-used']),
                     len(ENABLED_POLICIES) * len(body))

    # delete the account DBs directly on every replica
    part, nodes = self.account_ring.get_nodes(self.account)
    for node in nodes:
        direct_delete_account(node, part, self.account)

    Manager(['account-reaper']).once()
    get_to_final_state()

    # everything must be gone from both container and object layers
    for policy, container, obj in all_objects:
        cpart, cnodes = self.container_ring.get_nodes(
            self.account, container)
        for cnode in cnodes:
            try:
                direct_head_container(cnode, cpart, self.account,
                                      container)
            except ClientException as err:
                self.assertEqual(err.http_status, 404)
            else:
                self.fail('Found un-reaped /%s/%s on %r' %
                          (self.account, container, cnode))
        object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
        part, nodes = object_ring.get_nodes(self.account, container, obj)
        for node in nodes:
            try:
                direct_get_object(node, part, self.account, container,
                                  obj)
            except ClientException as err:
                self.assertEqual(err.http_status, 404)
            else:
                self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                          (self.account, container, obj, node, policy))
def copyMoveSingle(self, destPath, isMove):
    """Copy (or move, when ``isMove``) an object/pseudofolder subtree
    from ``self.path`` to ``destPath``.

    Fix: the container-creation guard caught a bare ``except:`` which
    also swallowed KeyboardInterrupt/SystemExit; it now catches only
    ``client.ClientException``.
    """
    src = '/'.join(self.path.split('/')[2:])
    dst = '/'.join(destPath.split('/')[2:])
    src_cont = self.path.split('/')[1]
    dst_cont = destPath.split('/')[1]

    # Make sure target container exists; ignore client-level failures
    # (e.g. it already exists) only.
    try:
        client.put_container(self.storage_url, self.auth_token,
                             dst_cont, http_conn=self.http_connection)
    except client.ClientException:
        pass

    _, objects = client.get_container(self.storage_url, self.auth_token,
                                      container=src_cont, delimiter='/',
                                      prefix=src,
                                      http_conn=self.http_connection)
    for obj in objects:
        objname = obj.get('name', obj.get('subdir'))
        headers = {'X-Copy-From': '%s/%s' % (self.container, objname)}
        newname = objname.replace(src, dst)
        if newname[-1] == '/':
            # collapse any run of trailing slashes down to exactly one
            newname = newname.rstrip('/') + '/'
        try:
            # Server-side COPY preserves metadata and content-type.
            client.put_object(self.storage_url, self.auth_token,
                              dst_cont, newname, headers=headers,
                              http_conn=self.http_connection)
            if isMove:
                client.delete_object(self.storage_url, self.auth_token,
                                     src_cont, objname,
                                     http_conn=self.http_connection)
        except client.ClientException:
            pass

    # will only succeed if container is empty
    if isMove:
        try:
            client.delete_container(self.storage_url, self.auth_token,
                                    self.container,
                                    http_conn=self.http_connection)
        except client.ClientException:
            pass
def setUp(self):
    """Create fixtures for account-reaper probe tests.

    For every enabled policy, create one public container/object pair
    and one reserved-name pair (only reachable via the internal
    client), verify the account-level stats, then delete the account
    DB directly on every account node so the reaper has work to do.
    """
    super(TestAccountReaper, self).setUp()
    self.all_objects = []
    int_client = self.make_internal_client()
    # upload some containers
    body = b'test-body'
    for policy in ENABLED_POLICIES:
        container = 'container-%s-%s' % (policy.name, uuid.uuid4())
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy': policy.name})
        obj = 'object-%s' % uuid.uuid4()
        client.put_object(self.url, self.token, container, obj, body)
        self.all_objects.append((policy, container, obj))

        # Also create some reserved names; these cannot be created via
        # the public API, hence the internal client.
        container = get_reserved_name(
            'reserved', policy.name, str(uuid.uuid4()))
        int_client.create_container(
            self.account, container,
            headers={'X-Storage-Policy': policy.name})
        obj = get_reserved_name('object', str(uuid.uuid4()))
        int_client.upload_object(
            BytesIO(body), self.account, container, obj)
        self.all_objects.append((policy, container, obj))
        # make sure this policy's object ring is loaded for later lookups
        policy.load_ring('/etc/swift')

    # push per-container object counts up to the account layer so the
    # account HEAD below sees the real totals
    Manager(['container-updater']).once()

    headers = client.head_account(self.url, self.token)
    self.assertEqual(int(headers['x-account-container-count']),
                     len(self.all_objects))
    self.assertEqual(int(headers['x-account-object-count']),
                     len(self.all_objects))
    self.assertEqual(int(headers['x-account-bytes-used']),
                     len(self.all_objects) * len(body))

    part, nodes = self.account_ring.get_nodes(self.account)
    for node in nodes:
        # delete the account out from under its containers/objects
        direct_delete_account(node, part, self.account)
def test_two_nodes_fail(self):
    """With two container primaries down, delete the container through
    the single remaining primary, restart everything, and verify
    replication spreads the deletion to all nodes and the account."""
    # Create container1
    container1 = 'container-%s' % uuid4()
    cpart, cnodes = self.container_ring.get_nodes(self.account,
                                                  container1)
    client.put_container(self.url, self.token, container1)

    # Kill container1 servers excepting one of the primaries
    cnp_ipport = kill_nonprimary_server(cnodes, self.ipport2server,
                                        self.pids)
    kill_server((cnodes[0]['ip'], cnodes[0]['port']),
                self.ipport2server, self.pids)
    kill_server((cnodes[1]['ip'], cnodes[1]['port']),
                self.ipport2server, self.pids)

    # Delete container1 directly to the one primary still up
    direct_client.direct_delete_container(cnodes[2], cpart, self.account,
                                          container1)

    # Restart other container1 servers
    start_server((cnodes[0]['ip'], cnodes[0]['port']),
                 self.ipport2server, self.pids)
    start_server((cnodes[1]['ip'], cnodes[1]['port']),
                 self.ipport2server, self.pids)
    start_server(cnp_ipport, self.ipport2server, self.pids)

    # Get to a final state
    self.get_to_final_state()

    # Assert all container1 servers indicate container1 is gone (happens
    # because the one node that knew about the delete replicated to the
    # others.)
    for cnode in cnodes:
        try:
            direct_client.direct_get_container(cnode, cpart, self.account,
                                               container1)
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")

    # Assert account level also indicates container1 is gone
    headers, containers = client.get_account(self.url, self.token)
    self.assertEqual(headers['x-account-container-count'], '0')
    self.assertEqual(headers['x-account-object-count'], '0')
    self.assertEqual(headers['x-account-bytes-used'], '0')
def _setup_data_file(self, container, obj, data):
    """Upload *data* as container/obj, verify the round trip, and return
    (primary node, partition, path to the on-disk data file)."""
    client.put_container(self.url, self.token, container)
    client.put_object(self.url, self.token, container, obj, data)
    odata = client.get_object(self.url, self.token, container, obj)[-1]
    # assertEquals is a deprecated alias of assertEqual
    self.assertEqual(odata, data)
    opart, onodes = self.object_ring.get_nodes(self.account, container,
                                               obj)
    onode = onodes[0]
    # BUG FIX: use floor division so node_id stays an int under
    # Python 3 ('/' would produce e.g. 1.0 and break the conf filename)
    node_id = (onode['port'] - 6000) // 10
    device = onode['device']
    hash_str = hash_path(self.account, container, obj)
    obj_server_conf = readconf('/etc/swift/object-server/%s.conf' %
                               node_id)
    devices = obj_server_conf['app:object-server']['devices']
    obj_dir = '%s/%s/objects/%s/%s/%s/' % (devices, device, opart,
                                           hash_str[-3:], hash_str)
    data_file = get_data_file_path(obj_dir)
    return onode, opart, data_file
def run_test(num_locks, catch_503):
    """Create a container, hold exclusive SQLite locks on the first
    *num_locks* replica DBs, then attempt a DELETE; with *catch_503*
    the delete is expected to fail with 503."""
    container = 'container-%s' % uuid4()
    client.put_container(self.url, self.token, container)
    db_files = self._get_container_db_files(container)

    # keep the connections referenced so the exclusive locks stay held
    held_conns = []
    for db_file in db_files[:num_locks]:
        conn = connect(db_file)
        conn.execute('begin exclusive transaction')
        held_conns.append(conn)

    if not catch_503:
        client.delete_container(self.url, self.token, container)
        return
    try:
        client.delete_container(self.url, self.token, container)
    except client.ClientException as err:
        self.assertEqual(err.http_status, 503)
    else:
        self.fail("Expected ClientException but didn't get it")
def handle_uploaded_file(request): f = request.FILES['file'] # with open('E:/sample.txt', 'wb+') as destination: # for chunk in f.chunks(): # destination.write(chunk) token = request.session.get('token') tenant_id = request.session.get('tenant_id') print 'token:%s***tenant:%s' % (token, tenant_id) url = 'http://192.168.0.55:8888/v1/AUTH_' + str(tenant_id) container = 'Publish' containers = c.get_account(url, token)[1] print containers if get_container(containers, container): pass else: print 'put container %s' % (container) c.put_container(url, token, container) c.put_object(url, token, container, name=f.name, contents=f)
def create_container(self, keystone_username=None, swift_url=None,
                     swift_container=None):
    """creates the container in swift with read and write permissions"""
    # grant both read and write to <tenant>:<user>
    acl = settings.KEYSTONE_TENANT + ':' + keystone_username
    acl_headers = {
        'x-container-read': acl,
        'x-container-write': acl,
    }
    conn = swift.HTTPConnection(url=swift_url, insecure=True)
    swift.put_container(
        swift_url,
        self.keystone.get_token('id'),
        swift_container,
        headers=acl_headers,
        http_conn=(conn.parsed_url, conn))
def create_container(request):
    """ Creates a container (empty object of type application/directory) """
    storage_url = request.session.get('storage_url', '')
    auth_token = request.session.get('auth_token', '')
    form = CreateContainerForm(request.POST or None)
    if not form.is_valid():
        # unbound or invalid form: just (re)display the page
        return render(request, 'create_container.html', {})
    container = form.cleaned_data['containername']
    try:
        client.put_container(storage_url, auth_token, container)
    except client.ClientException:
        messages.add_message(request, messages.ERROR, _("Access denied."))
    else:
        messages.add_message(request, messages.INFO,
                             _("Container created."))
    return redirect(containerview)
def delete_swift_account(storage_url, auth_token):
    """Delete a whole swift account.

    Returns a ``(success, message)`` tuple on every exit path.
    """
    insecure = settings.SWIFT_INSECURE
    try:
        # Create and delete an empty container to make sure the account
        # actually exists in swift before attempting to delete it
        http_conn = client.http_connection(storage_url, insecure=insecure)
        client.put_container(storage_url, auth_token, 'dummy_container',
                             http_conn=http_conn)
        client.delete_container(storage_url, auth_token, 'dummy_container',
                                http_conn=http_conn)
    except client.ClientException as err:
        log.exception(
            'Fail to create container "dummy_container": {0}'.format(err))
        # BUG FIX: used to return a bare False while every other exit
        # returns a (success, message) tuple
        return False, str(err)
    try:
        # Delete the account itself
        url = urlparse(storage_url)
        domain = '{}://{}'.format(url.scheme, url.netloc)
        path = url.path
        http_conn = client.HTTPConnection(domain, insecure=insecure)
        headers = {'X-Auth-Token': auth_token}
        resp = http_conn.request('DELETE', path, headers=headers)
        if resp.status_code != 204:
            error_msg = resp.json().get('error')
            log.exception(
                'Fail to delete account {}: {}, status code {}'.format(
                    storage_url, error_msg, resp.status_code))
            return False, error_msg
    except client.ClientException as err:
        log.exception(f'Exception: {err}')
        return False, str(err)
    return True, 'success'
def put_container(self, policy_index=None):
    """
    put container with next storage policy
    """
    if policy_index is not None:
        policy = POLICIES.get_by_index(int(policy_index))
        if not policy:
            # BUG FIX: the message used to interpolate `policy` (which is
            # falsy/None here); report the index that failed to resolve
            raise ValueError('Unknown policy with index %s' % policy_index)
    elif not self.policy:
        # no fixed policy configured: rotate through the policy iterator
        policy = next(self.policies)
    else:
        policy = self.policy
    headers = {'X-Storage-Policy': policy.name}
    client.put_container(self.url, self.token, self.container_name,
                         headers=headers)
def test_async_update_after_PUT(self):
    """Verify an async container update, sent after a PUT that missed
    one container server, produces listings consistent across all
    container replicas and with the object's own metadata."""
    cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
    client.put_container(self.url, self.token, 'c1',
                         headers={'X-Storage-Policy': self.policy.name})

    # put an object while one container server is stopped so that we force
    # an async update to it
    kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
    content = u'stuff'
    client.put_object(self.url, self.token, 'c1', 'o1', contents=content,
                      content_type='test/ctype')
    meta = client.head_object(self.url, self.token, 'c1', 'o1')

    # re-start the container server and assert that it does not yet know
    # about the object
    start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
    self.assertFalse(direct_client.direct_get_container(
        cnodes[0], cpart, self.account, 'c1')[1])

    # Run the object-updaters to be sure updates are done
    Manager(['object-updater']).once()

    # check the re-started container server got same update as others.
    # we cannot assert the actual etag value because it may be encrypted
    listing_etags = set()
    for cnode in cnodes:
        listing = direct_client.direct_get_container(
            cnode, cpart, self.account, 'c1')[1]
        self.assertEqual(1, len(listing))
        self.assertEqual(len(content), listing[0]['bytes'])
        self.assertEqual('test/ctype', listing[0]['content_type'])
        listing_etags.add(listing[0]['hash'])
    self.assertEqual(1, len(listing_etags))

    # check that listing meta returned to client is consistent with object
    # meta returned to client
    hdrs, listing = client.get_container(self.url, self.token, 'c1')
    self.assertEqual(1, len(listing))
    self.assertEqual('o1', listing[0]['name'])
    self.assertEqual(len(content), listing[0]['bytes'])
    self.assertEqual(meta['etag'], listing[0]['hash'])
    self.assertEqual('test/ctype', listing[0]['content_type'])
def get_tempurl_key():
    """Return (storage_url, temp-url key) for the configured container,
    creating the container and/or the key when either is missing."""
    storage_url, auth_token = client.get_auth(
        settings.SWIFT_AUTH_URL, settings.SWIFT_USER,
        settings.SWIFT_PASSWORD)
    try:
        meta = client.head_container(storage_url, auth_token,
                                     settings.SWIFT_CONTAINER)
    except client.ClientException:
        # container missing entirely: create it, key will be set below
        client.put_container(storage_url, auth_token,
                             settings.SWIFT_CONTAINER)
        key = None
    else:
        key = meta.get('x-container-meta-temp-url-key')
    if not key:
        key = random_key()
        client.post_container(
            storage_url, auth_token, settings.SWIFT_CONTAINER,
            {'x-container-meta-temp-url-key': key})
    return storage_url, key
def test_sync(self): base_headers = {'X-Container-Sync-Key': 'secret'} # setup dest container dest_container = 'dest-container-%s' % uuid.uuid4() dest_headers = base_headers.copy() dest_policy = None if len(ENABLED_POLICIES) > 1: dest_policy = random.choice(ENABLED_POLICIES) dest_headers['X-Storage-Policy'] = dest_policy.name client.put_container(self.url, self.token, dest_container, headers=dest_headers) # setup source container source_container = 'source-container-%s' % uuid.uuid4() source_headers = base_headers.copy() sync_to = '//%s/%s/%s/%s' % (self.realm, self.cluster, self.account, dest_container) source_headers['X-Container-Sync-To'] = sync_to if dest_policy: source_policy = random.choice( [p for p in ENABLED_POLICIES if p is not dest_policy]) source_headers['X-Storage-Policy'] = source_policy.name client.put_container(self.url, self.token, source_container, headers=source_headers) # upload to source object_name = 'object-%s' % uuid.uuid4() client.put_object(self.url, self.token, source_container, object_name, 'test-body') # cycle container-sync Manager(['container-sync']).once() # retrieve from sync'd container headers, body = client.get_object(self.url, self.token, dest_container, object_name) self.assertEqual(body, 'test-body')
def download_cloudstorage(request): ''' download data to cloud storage ''' token = request.session.get('token') tenant_id = request.session.get('tenant_id') print 'token:%s***tenant:%s' % (token, tenant_id) #url='http://192.168.0.55:8888/v1/AUTH_93e0ff98ddfb4bb28d936c049a89714b' url = 'http://192.168.0.55:8888/v1/AUTH_' + str(tenant_id) container = 'Subscriptions' containers = c.get_account(url, token)[1] print containers if get_container(containers, container): pass else: print 'put container %s' % (container) c.put_container(url, token, container) #TODO c.put_object(url, token, container, name='test222', contents='test') return HttpResponse('ok')
def setUp(self):
    """Deploy the face-recognition storlet and stage its input movie
    and model file into a freshly created container."""
    self.container_name = str(uuid.uuid4())
    conf = parse_config('access.cfg')
    self.url, self.token = get_auth(conf)
    self.repo_dir = conf['repo_dir']
    deploy_storlet(
        conf,
        'e2emlstorlets/video_recognize_face/video_recognize_face.py',
        'video_recognize_face.MovieRecognizeFace')
    client.put_container(self.url, self.token, self.container_name)
    data_dir = os.path.join(conf['repo_dir'], 'test/data')
    put_local_file(self.url, self.token, self.container_name,
                   data_dir, 'eran_swapped_mov.avi')
    # the model object carries the name->id mapping as object metadata
    put_local_file(self.url, self.token, self.container_name,
                   data_dir, 'model',
                   headers={'X-Object-Meta-name-to-id':
                            json.dumps({'eran': 1})})
def _setup_data_file(self, container, obj, data):
    """Upload *data* as container/obj under self.policy, verify the
    round trip, and return (primary node, partition, data file path)."""
    client.put_container(self.url, self.token, container,
                         headers={'X-Storage-Policy': self.policy.name})
    client.put_object(self.url, self.token, container, obj, data)
    odata = client.get_object(self.url, self.token, container, obj)[-1]
    self.assertEqual(odata, data)
    opart, onodes = self.object_ring.get_nodes(
        self.account, container, obj)
    onode = onodes[0]
    # BUG FIX: use floor division so node_id stays an int under
    # Python 3 ('/' would yield a float, breaking the list index below)
    node_id = (onode['port'] % 100) // 10
    device = onode['device']
    hash_str = hash_path(self.account, container, obj)
    obj_server_conf = readconf(self.configs['object-server'][node_id])
    devices = obj_server_conf['app:object-server']['devices']
    obj_dir = '%s/%s/%s/%s/%s/%s/' % (devices, device,
                                      get_data_dir(self.policy),
                                      opart, hash_str[-3:], hash_str)
    data_file = get_data_file_path(obj_dir)
    return onode, opart, data_file
def enable_versioning(request, container):
    """ Enable/Disable versioning in container. """
    storage_url = get_endpoint(request, 'adminURL')
    auth_token = get_token_id(request)
    http_conn = client.http_connection(storage_url,
                                       insecure=settings.SWIFT_INSECURE)
    version_location = '{0}{1}'.format(settings.SWIFT_VERSION_PREFIX,
                                       container)

    # step 1: make sure the versions container exists
    try:
        client.put_container(storage_url, auth_token, version_location,
                             http_conn=http_conn)
        actionlog.log(request.user.username, "create", version_location)
    except client.ClientException as err:
        log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), err))
        messages.add_message(request, messages.ERROR, _('Access denied.'))
        return False

    # step 2: point the target container at the versions container
    try:
        client.post_container(storage_url, auth_token, container,
                              headers={'x-versions-location':
                                       version_location},
                              http_conn=http_conn)
        actionlog.log(request.user.username, "update", version_location)
    except client.ClientException as err:
        log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), err))
        messages.add_message(request, messages.ERROR, _('Access denied.'))
        return False

    messages.add_message(request, messages.SUCCESS,
                         _('Versioning enabled.'))
    return True
def _setup_synced_containers(
        self, source_overrides=None, dest_overrides=None):
    """Create a destination and a source container wired together via
    X-Container-Sync-To/-Key, optionally with per-side overrides, and
    return (source_name, dest_name)."""
    # these defaults are used to create both source and dest containers
    # unless overridden by source_overrides and/or dest_overrides
    default_params = {'url': self.url,
                      'token': self.token,
                      'account': self.account,
                      'sync_key': 'secret'}

    # setup dest container
    dest = dict(default_params)
    dest['name'] = 'dest-container-%s' % uuid.uuid4()
    dest.update(dest_overrides or {})
    dest_headers = {}
    dest_policy = None
    if len(ENABLED_POLICIES) > 1:
        # exercise cross-policy sync when more than one policy exists
        dest_policy = random.choice(ENABLED_POLICIES)
        dest_headers['X-Storage-Policy'] = dest_policy.name
    if dest['sync_key'] is not None:
        dest_headers['X-Container-Sync-Key'] = dest['sync_key']
    client.put_container(dest['url'], dest['token'], dest['name'],
                         headers=dest_headers)

    # setup source container
    source = dict(default_params)
    source['name'] = 'source-container-%s' % uuid.uuid4()
    source.update(source_overrides or {})
    source_headers = {}
    sync_to = '//%s/%s/%s/%s' % (self.realm, self.cluster,
                                 dest['account'], dest['name'])
    source_headers['X-Container-Sync-To'] = sync_to
    if source['sync_key'] is not None:
        source_headers['X-Container-Sync-Key'] = source['sync_key']
    if dest_policy:
        # pick a policy distinct from the destination's
        source_policy = random.choice([p for p in ENABLED_POLICIES
                                       if p is not dest_policy])
        source_headers['X-Storage-Policy'] = source_policy.name
    client.put_container(source['url'], source['token'], source['name'],
                         headers=source_headers)
    return source['name'], dest['name']
def update_container(request, project_id, container_name):
    """Change a container's storage policy.

    Swift policies are immutable per container, so this drains every
    object to local disk, recreates the container with the new policy,
    and re-uploads everything. Returns a JSONResponse.
    """
    if request.method == 'PUT':
        sp = JSONParser().parse(request)
        token = get_token_connection(request)
        url = settings.SWIFT_URL + "/AUTH_" + project_id

        headers, obj_list = swift_client.get_container(url, token,
                                                       container_name)
        headers['X-Storage-Policy'] = sp

        # stage every object on local disk, then delete it from swift
        path_container = os.path.join(settings.SWIFT_CFG_TMP_DIR,
                                      container_name)
        os.mkdir(path_container)
        for obj in obj_list:
            obj_headers, obj_body = swift_client.get_object(
                url, token, container_name, obj["name"])
            # FIX: use `with` so file handles are always closed
            with open(os.path.join(path_container, obj["name"]),
                      "w") as fle:
                fle.write(obj_body)
            obj["headers"] = obj_headers
            swift_client.delete_object(url, token, container_name,
                                       obj["name"])

        # recreate the container with the new policy
        swift_client.delete_container(url, token, container_name)
        swift_client.put_container(url, token, container_name, headers)

        # re-upload the staged objects and clean up the staging dir
        for obj in obj_list:
            obj_path = os.path.join(path_container, obj["name"])
            content_length = os.stat(obj_path).st_size
            swift_response = {}
            with open(obj_path, "r") as obj_body:
                swift_client.put_object(url, token, container_name,
                                        obj["name"], obj_body,
                                        content_length, None, None,
                                        obj['content_type'],
                                        obj["headers"], None, None, None,
                                        swift_response)
            os.remove(obj_path)
        os.rmdir(path_container)

        return JSONResponse("Container Policy updated correctly",
                            status=status.HTTP_201_CREATED)

    return JSONResponse('Method ' + str(request.method) + ' not allowed.',
                        status=status.HTTP_405_METHOD_NOT_ALLOWED)
def main(): init_path_dependant_params() print 'Getting token' os_options = {'tenant_name': ACCOUNT} url, token = c.get_auth("http://" + AUTH_IP + ":" + AUTH_PORT \ + "/v2.0", ACCOUNT + ":" + USER_NAME, PASSWORD, os_options = os_options, auth_version="2.0") print 'Creating myobjects container' c.put_container(url, token, 'myobjects') print 'Invoking test storlet' testTestStorlet(url, token) os.system('python execdep_test.py') os.system('python identity_storlet_test.py') os.system('python half_storlet_test.py') os.system('python metadata_storlet_test.py') os.system('python SLO_test.py')
def cache_set(self, context, plan_name, key, contents):
    """Stores an object

    Allows the storage of jsonable objects except for None
    Storing None equals to a cache delete.
    """
    swift_client = self.get_object_client(context)

    if contents is None:
        # storing None is defined as invalidating the cache entry
        self.cache_delete(context, plan_name, key)
        return

    # lazily create the cache container on first use
    try:
        swift_client.head_container(constants.TRIPLEO_CACHE_CONTAINER)
    except swiftexceptions.ClientException:
        swift_client.put_container(constants.TRIPLEO_CACHE_CONTAINER)

    payload = zlib.compress(json.dumps(contents).encode())
    swift_client.put_object(constants.TRIPLEO_CACHE_CONTAINER,
                            self._cache_key(plan_name, key),
                            payload)
def create_container(request):
    """Show (GET) or process (POST) the create-container form."""
    auth_token = request.session['auth_token']
    storage_url = request.session['storage_url']
    if request.method != 'POST':
        return render(request, 'create_container.html',
                      {'form': CreateContainerForm()})
    form = CreateContainerForm(request.POST)
    if form.is_valid():
        container = form.cleaned_data['container']
        try:
            http_conn = (urlparse(storage_url),
                         client.HTTPConnection(
                             storage_url,
                             insecure=settings.SWIFT_SSL_INSECURE))
            client.put_container(storage_url, auth_token, container,
                                 http_conn=http_conn)
            messages.add_message(request, messages.INFO,
                                 "Container created.")
        except client.ClientException:
            messages.add_message(request, messages.ERROR,
                                 "Access denied.")
        return redirect(containers)
    # invalid POST: re-render with the bound form so errors display
    return render(request, 'create_container.html', {'form': form})
def test_async_updates_after_PUT_and_POST(self):
    # verify correct update values when PUT update and POST updates are
    # missed but then async updates are sent
    cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
    client.put_container(self.url, self.token, 'c1',
                         headers={'X-Storage-Policy': self.policy.name})

    # PUT and POST to object while one container server is stopped so that
    # we force async updates to it
    kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
    content = u'stuff'
    client.put_object(self.url, self.token, 'c1', 'o1', contents=content)
    meta = client.head_object(self.url, self.token, 'c1', 'o1')

    # use internal client for POST so we can force fast-post mode
    int_client = self.make_internal_client(object_post_as_copy=False)
    int_client.set_object_metadata(self.account, 'c1', 'o1',
                                   {'X-Object-Meta-Fruit': 'Tomato'})
    self.assertEqual('Tomato', int_client.get_object_metadata(
        self.account, 'c1', 'o1')['x-object-meta-fruit'])  # sanity

    # re-start the container server and assert that it does not yet know
    # about the object
    start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
    self.assertFalse(
        direct_client.direct_get_container(cnodes[0], cpart, self.account,
                                           'c1')[1])

    # Run the object-updaters to send the async pendings
    Manager(['object-updater']).once()

    # check container listing metadata is still correct
    obj = direct_client.direct_get_container(cnodes[0], cpart,
                                             self.account, 'c1')[1][0]
    self.assertEqual(meta['etag'], obj['hash'])
    self.assertEqual(len(content), obj['bytes'])
def test_one_node_fails(self):
    """Delete a container while one primary is down, then recreate an
    object in it; after replication every node and the account must
    agree the container is alive with exactly that object."""
    # Create container1
    container1 = 'container-%s' % uuid4()
    cpart, cnodes = self.container_ring.get_nodes(self.account,
                                                  container1)
    client.put_container(self.url, self.token, container1)

    # Kill container1 servers excepting two of the primaries
    kill_nonprimary_server(cnodes, self.ipport2server, self.pids)
    kill_server((cnodes[0]['ip'], cnodes[0]['port']),
                self.ipport2server, self.pids)

    # Delete container1
    client.delete_container(self.url, self.token, container1)

    # Restart other container1 primary server
    start_server((cnodes[0]['ip'], cnodes[0]['port']),
                 self.ipport2server, self.pids)

    # Create container1/object1 (allowed because at least server thinks
    # the container exists)
    client.put_object(self.url, self.token, container1, 'object1', '123')

    # Get to a final state
    self.get_to_final_state()

    # Assert all container1 servers indicate container1 is alive and
    # well with object1
    for cnode in cnodes:
        self.assertEqual([
            o['name'] for o in direct_client.direct_get_container(
                cnode, cpart, self.account, container1)[1]
        ], ['object1'])

    # Assert account level also indicates container1 is alive and
    # well with object1
    headers, containers = client.get_account(self.url, self.token)
    self.assertEqual(headers['x-account-container-count'], '1')
    self.assertEqual(headers['x-account-object-count'], '1')
    self.assertEqual(headers['x-account-bytes-used'], '3')
def test_main(self):
    """PUT+POST an object in an EC container, then for several patterns
    of killed nodes verify each surviving fragment node still checks
    out against the stored etag and post-metadata."""
    # create EC container
    headers = {'X-Storage-Policy': self.policy.name}
    client.put_container(self.url, self.token, self.container_name,
                         headers=headers)

    # PUT object
    contents = Body()
    headers = {'x-object-meta-foo': 'meta-foo'}
    headers_post = {'x-object-meta-bar': 'meta-bar'}

    etag = client.put_object(self.url, self.token, self.container_name,
                             self.object_name, contents=contents,
                             headers=headers)
    client.post_object(self.url, self.token, self.container_name,
                       self.object_name, headers=headers_post)
    # the POST call left an X-Auth-Token key in headers_post --
    # presumably swiftclient mutates the caller's dict (TODO confirm);
    # drop it so headers_post only holds the user metadata we compare
    del headers_post['X-Auth-Token']

    # built up a list of node lists to kill data from,
    # first try a single node
    # then adjacent nodes and then nodes >1 node apart
    opart, onodes = self.object_ring.get_nodes(
        self.account, self.container_name, self.object_name)
    single_node = [random.choice(onodes)]
    adj_nodes = [onodes[0], onodes[-1]]
    far_nodes = [onodes[0], onodes[-2]]

    test_list = [single_node, adj_nodes, far_nodes]
    for node_list in test_list:
        for onode in node_list:
            try:
                self._check_node(onode, opart, etag, headers_post)
            except AssertionError as e:
                self.fail(
                    str(e) + '\n... for node %r of scenario %r' % (
                        self._format_node(onode),
                        [self._format_node(n) for n in node_list]))
def test_enable_syncing_while_versioned(self):
    """A container with object versioning enabled may be a container-sync
    destination but must refuse to become a sync source."""
    source_container, dest_container = self._setup_synced_containers()

    container_name = 'versioned-%s' % uuid.uuid4()
    version_hdr = {'X-Versions-Enabled': 'true'}

    client.put_container(self.url, self.token, container_name,
                         headers=version_hdr)

    # fails to configure as a container-sync source
    sync_headers = {'X-Container-Sync-Key': 'secret'}
    sync_to = '//%s/%s/%s/%s' % (self.realm, self.cluster, self.account,
                                 dest_container)
    sync_headers['X-Container-Sync-To'] = sync_to
    with self.assertRaises(ClientException) as cm:
        client.post_container(self.url, self.token, container_name,
                              headers=sync_headers)
    self.assertEqual(400, cm.exception.http_status)  # sanity check

    # but works if it's just a container-sync destination
    sync_headers = {'X-Container-Sync-Key': 'secret'}
    client.post_container(self.url, self.token, container_name,
                          headers=sync_headers)
    headers = client.head_container(self.url, self.token, container_name)
    self.assertEqual('True', headers.get('x-versions-enabled'))
    self.assertEqual('secret', headers.get('x-container-sync-key'))

    # update source header to sync to versioned container
    source_headers = {'X-Container-Sync-Key': 'secret'}
    sync_to = '//%s/%s/%s/%s' % (self.realm, self.cluster, self.account,
                                 container_name)
    source_headers['X-Container-Sync-To'] = sync_to
    client.post_container(self.url, self.token, source_container,
                          headers=source_headers)

    self._test_syncing(source_container, container_name)
def test_server_error(self):
    """A 500 from the server must raise ClientException and preserve
    the response body on the exception."""
    body = 'c' * 60
    c.http_connection = self.fake_http_connection(500, body=body)
    self.assertRaises(
        c.ClientException,
        c.put_container,
        'http://www.test.com', 'asdf', 'asdf',
    )
    try:
        c.put_container('http://www.test.com', 'asdf', 'asdf')
    except c.ClientException as e:
        self.assertEqual(e.http_response_content, body)
    else:
        # BUG FIX: previously a non-raising call made this second check
        # pass silently (the return value was assigned and ignored)
        self.fail('ClientException not raised')
def copyMoveSingle(self, destPath, isMove):
    """Copy (or, when *isMove*, move) the single object at self.path
    to destPath via a server-side X-Copy-From copy."""
    src = '/'.join(self.path.split('/')[2:])
    dst = '/'.join(destPath.split('/')[2:])
    src_cont = self.path.split('/')[1]
    dst_cont = destPath.split('/')[1]
    # object-copy header, only meaningful on the object PUT below
    headers = {'X-Copy-From': self.path}

    # make sure the destination container exists
    # BUG FIX: the X-Copy-From header was also sent on the container
    # HEAD/PUT, where it has no meaning; container calls now send none
    try:
        client.head_container(self.storage_url, self.auth_token, dst_cont,
                              http_conn=self.http_connection)
    except client.ClientException:
        client.put_container(self.storage_url, self.auth_token, dst_cont,
                             http_conn=self.http_connection)

    try:
        client.put_object(self.storage_url, self.auth_token, dst_cont,
                          dst, headers=headers,
                          http_conn=self.http_connection)
        if isMove:
            client.delete_object(self.storage_url, self.auth_token,
                                 src_cont, src,
                                 http_conn=self.http_connection)
    except client.ClientException:
        pass
def setUp(self):
    """Create an EC container and object, POST some metadata, and stash
    each node's fragment etags/headers for later comparison by the
    reconstructor-rebuild tests."""
    super(TestReconstructorRebuild, self).setUp()
    self.container_name = self._make_name('container-')
    self.object_name = self._make_name('object-')
    # sanity
    self.assertEqual(self.policy.policy_type, EC_POLICY)
    self.reconstructor = Manager(["object-reconstructor"])

    # create EC container
    headers = {'X-Storage-Policy': self.policy.name}
    client.put_container(self.url, self.token, self.container_name,
                         headers=headers)

    # PUT object and POST some metadata
    self.proxy_put()
    # _make_name returns bytes here; decode for use as header names
    self.headers_post = {
        self._make_name('x-object-meta-').decode('utf8'):
            self._make_name('meta-bar-').decode('utf8')}
    client.post_object(self.url, self.token, self.container_name,
                       self.object_name, headers=dict(self.headers_post))

    self.opart, self.onodes = self.object_ring.get_nodes(
        self.account, self.container_name, self.object_name)

    # stash frag etags and metadata for later comparison
    self.frag_headers, self.frag_etags = self._assert_all_nodes_have_frag()
    for node_index, hdrs in self.frag_headers.items():
        # sanity check
        self.assertIn(
            'X-Backend-Durable-Timestamp', hdrs,
            'Missing durable timestamp in %r' % self.frag_headers)
def test_update_during_PUT(self):
    # verify that update sent during a PUT has override values
    int_client = self.make_internal_client()
    put_headers = {
        'Content-Type': 'text/plain',
        'X-Backend-Container-Update-Override-Etag': 'override-etag',
        'X-Backend-Container-Update-Override-Content-Type':
            'override-type',
        'X-Backend-Container-Update-Override-Size': '1999'}
    client.put_container(self.url, self.token, 'c1',
                         headers={'X-Storage-Policy': self.policy.name})

    int_client.upload_object(
        StringIO(u'stuff'), self.account, 'c1', 'o1', put_headers)

    # Run the object-updaters to be sure updates are done
    Manager(['object-updater']).once()

    # the object's own metadata reflects the real upload...
    meta = int_client.get_object_metadata(self.account, 'c1', 'o1')
    self.assertEqual('text/plain', meta['content-type'])
    self.assertEqual('c13d88cb4cb02003daedb8a84e5d272a', meta['etag'])
    self.assertEqual('5', meta['content-length'])

    # ...while the container listing carries the override values
    for obj in int_client.iter_objects(self.account, 'c1'):
        if obj['name'] == 'o1':
            self.assertEqual('override-etag', obj['hash'])
            self.assertEqual('override-type', obj['content_type'])
            self.assertEqual(1999, obj['bytes'])
            break
    else:
        self.fail('Failed to find object o1 in listing')
def put_storlet_containers(url, token):
    """Create the three containers the storlet middleware requires and
    assert each PUT returned a 2xx status."""
    for container in ('storlet', 'dependency', 'storletlog'):
        response = dict()
        c.put_container(url, token, container, None, None, response)
        status = response.get('status')
        # BUG FIX: 'status >= 200 or status < 300' is always true for any
        # int; require the status to actually be in the 2xx success range
        assert 200 <= status < 300