def get_object_metadata(obj_path):
    """
    Return metadata of object.
    """
    metadata = {}
    if os.path.exists(obj_path):
        if not os.path.isdir(obj_path):
            metadata = {
                X_TIMESTAMP: normalize_timestamp(os.path.getctime(obj_path)),
                X_CONTENT_TYPE: FILE_TYPE,
                X_ETAG: get_etag(obj_path),
                X_CONTENT_LENGTH: os.path.getsize(obj_path),
                X_TYPE: OBJECT,
                X_OBJECT_TYPE: FILE,
            }
        else:
            metadata = {
                X_TIMESTAMP: normalize_timestamp(os.path.getctime(obj_path)),
                X_CONTENT_TYPE: DIR_TYPE,
                X_ETAG: get_etag(obj_path),
                X_CONTENT_LENGTH: 0,
                X_TYPE: OBJECT,
                X_OBJECT_TYPE: DIR,
            }
    return metadata
def test_delete_db(self):
    def init_stub(conn, put_timestamp):
        conn.execute('CREATE TABLE test (one TEXT)')
        conn.execute('CREATE TABLE test_stat (id TEXT)')
        # uuid4 must be called; passing the function itself would store
        # its repr instead of a UUID
        conn.execute('INSERT INTO test_stat (id) VALUES (?)',
                     (str(uuid4()),))
        conn.execute('INSERT INTO test (one) VALUES ("1")')
        conn.commit()
    stub_called = [False]

    def delete_stub(*a, **kw):
        stub_called[0] = True

    broker = DatabaseBroker(':memory:')
    broker.db_type = 'test'
    broker._initialize = init_stub
    # Initializes a good broker for us
    broker.initialize(normalize_timestamp('1'))
    self.assert_(broker.conn is not None)
    broker._delete_db = delete_stub
    stub_called[0] = False
    broker.delete_db('2')
    self.assert_(stub_called[0])

    broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
    broker.db_type = 'test'
    broker._initialize = init_stub
    broker.initialize(normalize_timestamp('1'))
    broker._delete_db = delete_stub
    stub_called[0] = False
    broker.delete_db('2')
    self.assert_(stub_called[0])
    # ensure that metadata was cleared
    m2 = broker.metadata
    self.assert_(not any(v[0] for v in m2.itervalues()))
    self.assert_(all(v[1] == normalize_timestamp('2')
                     for v in m2.itervalues()))
def test_quarantine_same_file(self):
    df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
                           FakeLogger())
    mkdirs(df.datadir)
    f = open(os.path.join(df.datadir,
                          normalize_timestamp(time()) + '.data'), 'wb')
    setxattr(f.fileno(), diskfile.METADATA_KEY,
             pickle.dumps({}, diskfile.PICKLE_PROTOCOL))
    df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
                           FakeLogger())
    new_dir = df.quarantine()
    quar_dir = os.path.join(self.testdir, 'sda1', 'quarantined',
                            'objects', os.path.basename(os.path.dirname(
                                df.data_file)))
    self.assert_(os.path.isdir(quar_dir))
    self.assertEquals(quar_dir, new_dir)
    # have to remake the datadir and file
    mkdirs(df.datadir)
    f = open(os.path.join(df.datadir,
                          normalize_timestamp(time()) + '.data'), 'wb')
    setxattr(f.fileno(), diskfile.METADATA_KEY,
             pickle.dumps({}, diskfile.PICKLE_PROTOCOL))

    df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
                           FakeLogger(), keep_data_fp=True)
    double_uuid_path = df.quarantine()
    self.assert_(os.path.isdir(double_uuid_path))
    self.assert_('-' in os.path.basename(double_uuid_path))
def test_POST_update_meta(self):
    """Test swift.object_server.ObjectController.POST."""
    timestamp = normalize_timestamp(time())
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'PUT'},
                        headers={'X-Timestamp': timestamp,
                                 'Content-Type': 'application/x-test',
                                 'X-Object-Meta-1': 'One',
                                 'X-Object-Meta-Two': 'Two'})
    req.body = 'VERIFY'
    resp = self.object_controller.PUT(req)
    self.assertEquals(resp.status_int, 201)

    timestamp = normalize_timestamp(time())
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'POST'},
                        headers={'X-Timestamp': timestamp,
                                 'X-Object-Meta-3': 'Three',
                                 'X-Object-Meta-4': 'Four',
                                 'Content-Type': 'application/x-test'})
    resp = self.object_controller.POST(req)
    self.assertEquals(resp.status_int, 202)

    req = Request.blank('/sda1/p/a/c/o')
    resp = self.object_controller.GET(req)
    self.assert_("X-Object-Meta-1" not in resp.headers and
                 "X-Object-Meta-3" in resp.headers)
    self.assertEquals(resp.headers['Content-Type'], 'application/x-test')
def test_PUT_overwrite(self):
    req = Request.blank(
        '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
        headers={'X-Timestamp': normalize_timestamp(time()),
                 'Content-Length': '6',
                 'Content-Type': 'application/octet-stream'})
    req.body = 'VERIFY'
    resp = self.object_controller.PUT(req)
    self.assertEquals(resp.status_int, 201)

    sleep(.00001)
    timestamp = normalize_timestamp(time())
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'PUT'},
                        headers={'X-Timestamp': timestamp,
                                 'Content-Type': 'text/plain',
                                 'Content-Encoding': 'gzip'})
    req.body = 'VERIFY TWO'
    resp = self.object_controller.PUT(req)
    self.assertEquals(resp.status_int, 201)
    objfile = os.path.join(self.testdir, 'sda1',
                           storage_directory(object_server.DATADIR, 'p',
                                             hash_path('a', 'c', 'o')),
                           timestamp + '.data')
    self.assert_(os.path.isfile(objfile))
    self.assertEquals(open(objfile).read(), 'VERIFY TWO')
    self.assertEquals(
        pickle.loads(getxattr(objfile, object_server.METADATA_KEY)),
        {'X-Timestamp': timestamp,
         'Content-Length': '10',
         'ETag': 'b381a4c5dab1eaa1eb9711fa647cd039',
         'Content-Type': 'text/plain',
         'name': '/a/c/o',
         'Content-Encoding': 'gzip'})
def _create_object_metadata(self, file_path):
    if self._etag is None:
        self._etag = md5().hexdigest() if self._is_dir \
            else get_etag(file_path)

    if self._file_has_changed or (X_TIMESTAMP not in self._metadata):
        timestamp = normalize_timestamp(self._stat.st_mtime)
    else:
        timestamp = self._metadata[X_TIMESTAMP]

    metadata = {
        X_TYPE: OBJECT,
        X_TIMESTAMP: timestamp,
        X_CONTENT_TYPE: DIR_TYPE if self._is_dir else FILE_TYPE,
        X_OBJECT_TYPE: DIR_NON_OBJECT if self._is_dir else FILE,
        X_CONTENT_LENGTH: 0 if self._is_dir else self._stat.st_size,
        X_ETAG: self._etag}

    # Add X_MTIME key if object is a file
    if not self._is_dir:
        metadata[X_MTIME] = normalize_timestamp(self._stat.hpss_st_mtime)

    meta_new = self._metadata.copy()
    meta_new.update(metadata)
    if self._metadata != meta_new:
        write_metadata(file_path, meta_new)
        # Avoid additional read_metadata() later
        self._metadata = meta_new
def test_unicode(self):
    cu = container_updater.ContainerUpdater({
        'devices': self.devices_dir,
        'mount_check': 'false',
        'swift_dir': self.testdir,
        'interval': '1',
        'concurrency': '1',
        'node_timeout': '15',
    })
    containers_dir = os.path.join(self.sda1, container_server.DATADIR)
    os.mkdir(containers_dir)
    subdir = os.path.join(containers_dir, 'subdir')
    os.mkdir(subdir)
    cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
                         container='\xce\xa9')
    cb.initialize(normalize_timestamp(1))
    cb.put_object('\xce\xa9', normalize_timestamp(2), 3, 'text/plain',
                  '68b329da9893e34099c7d8ad5cb9c940')

    def accept(sock, addr):
        try:
            with Timeout(3):
                inc = sock.makefile('rb')
                out = sock.makefile('wb')
                out.write('HTTP/1.1 201 OK\r\nContent-Length: 0\r\n\r\n')
                out.flush()
                inc.read()
        except BaseException as err:
            import traceback
            traceback.print_exc()
            return err
        return None
def test_hash_cleanup_listdir(self):
    file_list = []

    def mock_listdir(path):
        return list(file_list)

    def mock_unlink(path):
        file_list.remove(os.path.basename(path))

    with unit_mock({"os.listdir": mock_listdir,
                    "os.unlink": mock_unlink}):
        # purge .data if there's a newer .ts
        file1 = normalize_timestamp(time()) + ".data"
        file2 = normalize_timestamp(time() + 1) + ".ts"
        file_list = [file1, file2]
        self.assertEquals(diskfile.hash_cleanup_listdir("/whatever"),
                          [file2])

        # purge .ts if there's a newer .data
        file1 = normalize_timestamp(time()) + ".ts"
        file2 = normalize_timestamp(time() + 1) + ".data"
        file_list = [file1, file2]
        self.assertEquals(diskfile.hash_cleanup_listdir("/whatever"),
                          [file2])

        # keep .meta and .data if meta newer than data
        file1 = normalize_timestamp(time()) + ".ts"
        file2 = normalize_timestamp(time() + 1) + ".data"
        file3 = normalize_timestamp(time() + 2) + ".meta"
        file_list = [file1, file2, file3]
        self.assertEquals(diskfile.hash_cleanup_listdir("/whatever"),
                          [file3, file2])

        # keep only latest of multiple .ts files
        file1 = normalize_timestamp(time()) + ".ts"
        file2 = normalize_timestamp(time() + 1) + ".ts"
        file3 = normalize_timestamp(time() + 2) + ".ts"
        file_list = [file1, file2, file3]
        self.assertEquals(diskfile.hash_cleanup_listdir("/whatever"),
                          [file3])
def test_DELETE_now_empty(self):
    req = Request.blank("/sda1/p/a",
                        environ={"REQUEST_METHOD": "PUT",
                                 "HTTP_X_TIMESTAMP": "0"})
    self.controller.PUT(req)
    req = Request.blank(
        "/sda1/p/a/c1",
        environ={"REQUEST_METHOD": "PUT"},
        headers={
            "X-Put-Timestamp": "1",
            "X-Delete-Timestamp": "0",
            "X-Object-Count": "0",
            "X-Bytes-Used": "0",
            "X-Timestamp": normalize_timestamp(0),
        },
    )
    self.controller.PUT(req)
    req = Request.blank(
        "/sda1/p/a/c1",
        environ={"REQUEST_METHOD": "PUT"},
        headers={
            "X-Put-Timestamp": "1",
            "X-Delete-Timestamp": "2",
            "X-Object-Count": "0",
            "X-Bytes-Used": "0",
            "X-Timestamp": normalize_timestamp(0),
        },
    )
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 204)
    req = Request.blank("/sda1/p/a",
                        environ={"REQUEST_METHOD": "DELETE",
                                 "HTTP_X_TIMESTAMP": "1"})
    resp = self.controller.DELETE(req)
    self.assertEquals(resp.status_int, 204)
def test_initialize(self):
    self.assertRaises(AttributeError,
                      DatabaseBroker(':memory:').initialize,
                      normalize_timestamp('1'))
    stub_dict = {}

    def stub(*args, **kwargs):
        for key in stub_dict.keys():
            del stub_dict[key]
        stub_dict['args'] = args
        for key, value in kwargs.items():
            stub_dict[key] = value

    broker = DatabaseBroker(':memory:')
    broker._initialize = stub
    broker.initialize(normalize_timestamp('1'))
    self.assert_(hasattr(stub_dict['args'][0], 'execute'))
    self.assertEquals(stub_dict['args'][1], '0000000001.00000')
    with broker.get() as conn:
        conn.execute('SELECT * FROM outgoing_sync')
        conn.execute('SELECT * FROM incoming_sync')

    broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
    broker._initialize = stub
    broker.initialize(normalize_timestamp('1'))
    self.assert_(hasattr(stub_dict['args'][0], 'execute'))
    self.assertEquals(stub_dict['args'][1], '0000000001.00000')
    with broker.get() as conn:
        conn.execute('SELECT * FROM outgoing_sync')
        conn.execute('SELECT * FROM incoming_sync')

    broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
    broker._initialize = stub
    self.assertRaises(DatabaseAlreadyExists,
                      broker.initialize, normalize_timestamp('1'))
def test_reap_delay(self):
    time_value = [100]

    def _time():
        return time_value[0]

    time_orig = reaper.time
    try:
        reaper.time = _time
        r = reaper.AccountReaper({'delay_reaping': '10'})
        b = FakeBroker()
        b.info['delete_timestamp'] = normalize_timestamp(110)
        self.assertFalse(r.reap_account(b, 0, None))
        b.info['delete_timestamp'] = normalize_timestamp(100)
        self.assertFalse(r.reap_account(b, 0, None))
        b.info['delete_timestamp'] = normalize_timestamp(90)
        self.assertFalse(r.reap_account(b, 0, None))
        # KeyError raised immediately as reap_account tries to get the
        # account's name to do the reaping.
        b.info['delete_timestamp'] = normalize_timestamp(89)
        self.assertRaises(KeyError, r.reap_account, b, 0, None)
        b.info['delete_timestamp'] = normalize_timestamp(1)
        self.assertRaises(KeyError, r.reap_account, b, 0, None)
    finally:
        reaper.time = time_orig
def get_info(self):
    name = os.path.basename(self.datadir)
    st = os.stat(self.datadir)
    if self._type == 2:
        cont_cnt_str = xattr.getxattr(self.datadir, CONTCNT_KEY)
        try:
            container_count = int(cont_cnt_str)
        except ValueError:
            # unparsable xattr value; fall back to a zero count
            cont_cnt_str = "0"
        # XXX object_count, bytes_used
        return {'account': name,
                'created_at': normalize_timestamp(st.st_ctime),
                'put_timestamp': normalize_timestamp(st.st_mtime),
                'delete_timestamp': '0',
                'container_count': cont_cnt_str,
                'object_count': '0',
                'bytes_used': '0',
                'hash': '-',
                'id': ''}
    else:
        # XXX container info has something different in it, what is it?
        return {'container': name,
                'created_at': normalize_timestamp(st.st_ctime),
                'put_timestamp': normalize_timestamp(st.st_mtime),
                'delete_timestamp': '0',
                'object_count': '0',
                'bytes_used': '0',
                'hash': '-',
                'id': ''}
def test_collect_object(self):
    t = normalize_timestamp(42)
    data = 'ContentHere'
    etag = md5()
    etag.update(data)
    testdir = self._create_test_file(
        data,
        timestamp=t,
        account='TEST_Acc',
        container='TEST_Con',
        obj='TEST_Obj',
        metadata={
            'X-Timestamp': t,
            'ETag': etag.hexdigest(),
            'Content-Length': str(len(data)),
            'delete_timestamp': normalize_timestamp(0),
            'Content-Type': 'text/plain',
            'Content-Encoding': 'gzip',
            'Content-Disposition': 'action',
            'Content-Language': 'en'})._datadir
    location = diskfile.AuditLocation(testdir, 'sda1', '0')

    metaDict = self.crawler.collect_object(location)
    self.assertEquals(metaDict['name'], '/TEST_Acc/TEST_Con/TEST_Obj')
    self.assertEquals(metaDict['X-Timestamp'], t)
    self.assertEquals(metaDict['ETag'], etag.hexdigest())
    self.assertEquals(metaDict['Content-Length'], str(len(data)))
    self.assertEquals(metaDict['Content-Type'], 'text/plain')
    self.assertEquals(metaDict['Content-Encoding'], 'gzip')
    self.assertEquals(metaDict['Content-Disposition'], 'action')
    self.assertEquals(metaDict['Content-Language'], 'en')
def test_max_upload_time(self):

    class SlowBody(object):
        def __init__(self):
            self.sent = 0

        def read(self, size=-1):
            if self.sent < 4:
                sleep(0.1)
                self.sent += 1
                return ' '
            return ''

    req = Request.blank(
        '/sda1/p/a/c/o',
        environ={'REQUEST_METHOD': 'PUT', 'wsgi.input': SlowBody()},
        headers={'X-Timestamp': normalize_timestamp(time()),
                 'Content-Length': '4', 'Content-Type': 'text/plain'})
    resp = self.object_controller.PUT(req)
    self.assertEquals(resp.status_int, 201)

    self.object_controller.max_upload_time = 0.1
    req = Request.blank(
        '/sda1/p/a/c/o',
        environ={'REQUEST_METHOD': 'PUT', 'wsgi.input': SlowBody()},
        headers={'X-Timestamp': normalize_timestamp(time()),
                 'Content-Length': '4', 'Content-Type': 'text/plain'})
    resp = self.object_controller.PUT(req)
    self.assertEquals(resp.status_int, 408)
def PUT(self, req):
    """Handle HTTP PUT request."""
    drive, part, account, container = split_and_validate_path(req, 3, 4)
    if self.mount_check and not check_mount(self.root, drive):
        return HTTPInsufficientStorage(drive=drive, request=req)
    if container:   # put account container
        pending_timeout = None
        container_policy_index = req.headers.get(POLICY_INDEX, 0)
        if 'x-trans-id' in req.headers:
            pending_timeout = 3
        broker = self._get_account_broker(drive, part, account,
                                          pending_timeout=pending_timeout)
        if account.startswith(self.auto_create_account_prefix) and \
                not os.path.exists(broker.db_file):
            try:
                broker.initialize(normalize_timestamp(
                    req.headers.get('x-timestamp') or time.time()))
            except DatabaseAlreadyExists:
                pass
        if req.headers.get('x-account-override-deleted', 'no').lower() != \
                'yes' and broker.is_deleted():
            return HTTPNotFound(request=req)
        broker.put_container(container, req.headers['x-put-timestamp'],
                             req.headers['x-delete-timestamp'],
                             req.headers['x-object-count'],
                             req.headers['x-bytes-used'],
                             container_policy_index)
        if req.headers['x-delete-timestamp'] > \
                req.headers['x-put-timestamp']:
            return HTTPNoContent(request=req)
        else:
            return HTTPCreated(request=req)
    else:   # put account
        broker = self._get_account_broker(drive, part, account)
        timestamp = normalize_timestamp(req.headers['x-timestamp'])
        if not os.path.exists(broker.db_file):
            try:
                broker.initialize(timestamp)
                created = True
            except DatabaseAlreadyExists:
                created = False
        elif broker.is_status_deleted():
            return self._deleted_response(broker, req, HTTPForbidden,
                                          body='Recently deleted')
        else:
            created = broker.is_deleted()
            broker.update_put_timestamp(timestamp)
            if broker.is_deleted():
                return HTTPConflict(request=req)
        metadata = {}
        metadata.update((key, (value, timestamp))
                        for key, value in req.headers.iteritems()
                        if is_sys_or_user_meta('account', key))
        if metadata:
            broker.update_metadata(metadata)
        if created:
            return HTTPCreated(request=req)
        else:
            return HTTPAccepted(request=req)
def test_unicode(self):
    cu = container_updater.ContainerUpdater(
        {
            "devices": self.devices_dir,
            "mount_check": "false",
            "swift_dir": self.testdir,
            "interval": "1",
            "concurrency": "1",
            "node_timeout": "15",
        }
    )
    containers_dir = os.path.join(self.sda1, DATADIR)
    os.mkdir(containers_dir)
    subdir = os.path.join(containers_dir, "subdir")
    os.mkdir(subdir)
    cb = ContainerBroker(os.path.join(subdir, "hash.db"),
                         account="a", container="\xce\xa9")
    cb.initialize(normalize_timestamp(1), 0)
    cb.put_object("\xce\xa9", normalize_timestamp(2), 3, "text/plain",
                  "68b329da9893e34099c7d8ad5cb9c940")

    def accept(sock, addr):
        try:
            with Timeout(3):
                inc = sock.makefile("rb")
                out = sock.makefile("wb")
                out.write("HTTP/1.1 201 OK\r\nContent-Length: 0\r\n\r\n")
                out.flush()
                inc.read()
        except BaseException as err:
            import traceback
            traceback.print_exc()
            return err
        return None

    bindsock = listen(("127.0.0.1", 0))

    def spawn_accepts():
        events = []
        for _junk in range(2):
            with Timeout(3):
                sock, addr = bindsock.accept()
                events.append(spawn(accept, sock, addr))
        return events

    spawned = spawn(spawn_accepts)
    for dev in cu.get_account_ring().devs:
        if dev is not None:
            dev["port"] = bindsock.getsockname()[1]
    cu.run_once()
    for event in spawned.wait():
        err = event.wait()
        if err:
            raise err
    info = cb.get_info()
    self.assertEqual(info["object_count"], 1)
    self.assertEqual(info["bytes_used"], 3)
    self.assertEqual(info["reported_object_count"], 1)
    self.assertEqual(info["reported_bytes_used"], 3)
def test_PUT_not_found(self):
    req = Request.blank(
        '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
        headers={'X-PUT-Timestamp': normalize_timestamp(1),
                 'X-DELETE-Timestamp': normalize_timestamp(0),
                 'X-Object-Count': '1',
                 'X-Bytes-Used': '1',
                 'X-Timestamp': normalize_timestamp(0)})
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 404)
def test_unicode(self):
    cu = container_updater.ContainerUpdater({
        'devices': self.devices_dir,
        'mount_check': 'false',
        'swift_dir': self.testdir,
        'interval': '1',
        'concurrency': '1',
        'node_timeout': '15',
    })
    containers_dir = os.path.join(self.sda1, DATADIR)
    os.mkdir(containers_dir)
    subdir = os.path.join(containers_dir, 'subdir')
    os.mkdir(subdir)
    cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
                         container='\xce\xa9')
    cb.initialize(normalize_timestamp(1), 0)
    cb.put_object('\xce\xa9', normalize_timestamp(2), 3, 'text/plain',
                  '68b329da9893e34099c7d8ad5cb9c940')

    def accept(sock, addr):
        try:
            with Timeout(3):
                inc = sock.makefile('rb')
                out = sock.makefile('wb')
                out.write('HTTP/1.1 201 OK\r\nContent-Length: 0\r\n\r\n')
                out.flush()
                inc.read()
        except BaseException as err:
            import traceback
            traceback.print_exc()
            return err
        return None

    bindsock = listen(('127.0.0.1', 0))

    def spawn_accepts():
        events = []
        for _junk in range(2):
            with Timeout(3):
                sock, addr = bindsock.accept()
                events.append(spawn(accept, sock, addr))
        return events

    spawned = spawn(spawn_accepts)
    for dev in cu.get_account_ring().devs:
        if dev is not None:
            dev['port'] = bindsock.getsockname()[1]
    cu.run_once()
    for event in spawned.wait():
        err = event.wait()
        if err:
            raise err
    info = cb.get_info()
    self.assertEquals(info['object_count'], 1)
    self.assertEquals(info['bytes_used'], 3)
    self.assertEquals(info['reported_object_count'], 1)
    self.assertEquals(info['reported_bytes_used'], 3)
def test_DELETE(self):
    """Test swift.object_server.ObjectController.DELETE."""
    req = Request.blank('/sda1/p/a/c',
                        environ={'REQUEST_METHOD': 'DELETE'})
    resp = self.object_controller.DELETE(req)
    self.assertEquals(resp.status_int, 400)

    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'DELETE'})
    resp = self.object_controller.DELETE(req)
    self.assertEquals(resp.status_int, 400)
    # self.assertRaises(KeyError, self.object_controller.DELETE, req)

    timestamp = normalize_timestamp(time())
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'DELETE'},
                        headers={'X-Timestamp': timestamp})
    resp = self.object_controller.DELETE(req)
    self.assertEquals(resp.status_int, 404)

    sleep(.00001)
    timestamp = normalize_timestamp(time())
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'PUT'},
                        headers={
                            'X-Timestamp': timestamp,
                            'Content-Type': 'application/octet-stream',
                            'Content-Length': '4',
                        })
    req.body = 'test'
    resp = self.object_controller.PUT(req)
    self.assertEquals(resp.status_int, 201)

    timestamp = normalize_timestamp(float(timestamp) - 1)
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'DELETE'},
                        headers={'X-Timestamp': timestamp})
    resp = self.object_controller.DELETE(req)
    self.assertEquals(resp.status_int, 204)
    objfile = os.path.join(self.testdir, 'sda1',
                           storage_directory(object_server.DATADIR, 'p',
                                             hash_path('a', 'c', 'o')),
                           timestamp + '.ts')
    self.assert_(os.path.isfile(objfile))

    sleep(.00001)
    timestamp = normalize_timestamp(time())
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'DELETE'},
                        headers={'X-Timestamp': timestamp})
    resp = self.object_controller.DELETE(req)
    self.assertEquals(resp.status_int, 204)
    objfile = os.path.join(self.testdir, 'sda1',
                           storage_directory(object_server.DATADIR, 'p',
                                             hash_path('a', 'c', 'o')),
                           timestamp + '.ts')
    self.assert_(os.path.isfile(objfile))
def test_empty(self):
    # Test AccountBroker.empty
    broker = AccountBroker(':memory:', account='a')
    broker.initialize(normalize_timestamp('1'))
    self.assert_(broker.empty())
    broker.put_container('o', normalize_timestamp(time()), 0, 0, 0)
    self.assert_(not broker.empty())
    sleep(.00001)
    broker.put_container('o', 0, normalize_timestamp(time()), 0, 0)
    self.assert_(broker.empty())
def test_POST_HEAD_metadata(self):
    req = Request.blank(
        "/sda1/p/a", environ={"REQUEST_METHOD": "PUT"},
        headers={"X-Timestamp": normalize_timestamp(1)})
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 201)
    # Set metadata header
    req = Request.blank(
        "/sda1/p/a", environ={"REQUEST_METHOD": "POST"},
        headers={"X-Timestamp": normalize_timestamp(1),
                 "X-Account-Meta-Test": "Value"})
    resp = self.controller.POST(req)
    self.assertEquals(resp.status_int, 204)
    req = Request.blank("/sda1/p/a", environ={"REQUEST_METHOD": "HEAD"})
    resp = self.controller.HEAD(req)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(resp.headers.get("x-account-meta-test"), "Value")
    # Update metadata header
    req = Request.blank(
        "/sda1/p/a", environ={"REQUEST_METHOD": "POST"},
        headers={"X-Timestamp": normalize_timestamp(3),
                 "X-Account-Meta-Test": "New Value"})
    resp = self.controller.POST(req)
    self.assertEquals(resp.status_int, 204)
    req = Request.blank("/sda1/p/a", environ={"REQUEST_METHOD": "HEAD"})
    resp = self.controller.HEAD(req)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(resp.headers.get("x-account-meta-test"),
                      "New Value")
    # Send old update to metadata header
    req = Request.blank(
        "/sda1/p/a", environ={"REQUEST_METHOD": "POST"},
        headers={"X-Timestamp": normalize_timestamp(2),
                 "X-Account-Meta-Test": "Old Value"})
    resp = self.controller.POST(req)
    self.assertEquals(resp.status_int, 204)
    req = Request.blank("/sda1/p/a", environ={"REQUEST_METHOD": "HEAD"})
    resp = self.controller.HEAD(req)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(resp.headers.get("x-account-meta-test"),
                      "New Value")
    # Remove metadata header (by setting it to empty)
    req = Request.blank(
        "/sda1/p/a", environ={"REQUEST_METHOD": "POST"},
        headers={"X-Timestamp": normalize_timestamp(4),
                 "X-Account-Meta-Test": ""})
    resp = self.controller.POST(req)
    self.assertEquals(resp.status_int, 204)
    req = Request.blank("/sda1/p/a", environ={"REQUEST_METHOD": "HEAD"})
    resp = self.controller.HEAD(req)
    self.assertEquals(resp.status_int, 204)
    self.assert_("x-account-meta-test" not in resp.headers)
def get_container_metadata(cont_path, memcache=None):
    objects = []
    object_count = 0
    bytes_used = 0
    objects, object_count, bytes_used = get_container_details(cont_path,
                                                              memcache)
    metadata = {X_TYPE: CONTAINER,
                X_TIMESTAMP: normalize_timestamp(
                    os_path.getctime(cont_path)),
                X_PUT_TIMESTAMP: normalize_timestamp(
                    os_path.getmtime(cont_path)),
                X_OBJECTS_COUNT: object_count,
                X_BYTES_USED: bytes_used}
    return _add_timestamp(metadata)
def get_account_metadata(acc_path, memcache=None):
    containers = []
    container_count = 0
    containers, container_count = get_account_details(acc_path, memcache)
    metadata = {X_TYPE: ACCOUNT,
                X_TIMESTAMP: normalize_timestamp(
                    os.path.getctime(acc_path)),
                X_PUT_TIMESTAMP: normalize_timestamp(
                    os.path.getmtime(acc_path)),
                X_OBJECTS_COUNT: 0,
                X_BYTES_USED: 0,
                X_CONTAINER_COUNT: container_count}
    return metadata
def test_run_once(self):
    cu = object_updater.ObjectUpdater({
        'devices': self.devices_dir,
        'mount_check': 'false',
        'swift_dir': self.testdir,
        'interval': '1',
        'concurrency': '1',
        'node_timeout': '15',
    })
    cu.run_once()
    async_dir = os.path.join(self.sda1, object_server.ASYNCDIR)
    os.mkdir(async_dir)
    cu.run_once()
    self.assert_(os.path.exists(async_dir))

    odd_dir = os.path.join(async_dir, 'not really supposed to be here')
    os.mkdir(odd_dir)
    cu.run_once()
    self.assert_(os.path.exists(async_dir))
    self.assert_(not os.path.exists(odd_dir))

    ohash = hash_path('a', 'c', 'o')
    odir = os.path.join(async_dir, ohash[-3:])
    mkdirs(odir)
    op_path = os.path.join(odir,
                           '%s-%s' % (ohash, normalize_timestamp(time())))
    pickle.dump({'op': 'PUT', 'account': 'a', 'container': 'c', 'obj': 'o',
                 'headers': {'X-Container-Timestamp':
                             normalize_timestamp(0)}},
                open(op_path, 'wb'))
    cu.run_once()
    self.assert_(os.path.exists(op_path))

    bindsock = listen(('127.0.0.1', 0))

    def accepter(sock, return_code):
        try:
            with Timeout(3):
                inc = sock.makefile('rb')
                out = sock.makefile('wb')
                out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                          return_code)
                out.flush()
                self.assertEquals(inc.readline(),
                                  'PUT /sda1/0/a/c/o HTTP/1.1\r\n')
                headers = {}
                line = inc.readline()
                while line and line != '\r\n':
                    headers[line.split(':')[0].lower()] = \
                        line.split(':')[1].strip()
                    line = inc.readline()
                self.assert_('x-container-timestamp' in headers)
        except BaseException as err:
            return err
        return None
def test_PUT_GET_metadata(self):
    # Set metadata header
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                        headers={'X-Timestamp': normalize_timestamp(1),
                                 'X-Container-Meta-Test': 'Value'})
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 201)
    req = Request.blank('/sda1/p/a/c')
    resp = self.controller.GET(req)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
    # Set another metadata header, ensuring old one doesn't disappear
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                        headers={'X-Timestamp': normalize_timestamp(1),
                                 'X-Container-Meta-Test2': 'Value2'})
    resp = self.controller.POST(req)
    self.assertEquals(resp.status_int, 204)
    req = Request.blank('/sda1/p/a/c')
    resp = self.controller.GET(req)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
    self.assertEquals(resp.headers.get('x-container-meta-test2'),
                      'Value2')
    # Update metadata header
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                        headers={'X-Timestamp': normalize_timestamp(3),
                                 'X-Container-Meta-Test': 'New Value'})
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 202)
    req = Request.blank('/sda1/p/a/c')
    resp = self.controller.GET(req)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(resp.headers.get('x-container-meta-test'),
                      'New Value')
    # Send old update to metadata header
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                        headers={'X-Timestamp': normalize_timestamp(2),
                                 'X-Container-Meta-Test': 'Old Value'})
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 202)
    req = Request.blank('/sda1/p/a/c')
    resp = self.controller.GET(req)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(resp.headers.get('x-container-meta-test'),
                      'New Value')
    # Remove metadata header (by setting it to empty)
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                        headers={'X-Timestamp': normalize_timestamp(4),
                                 'X-Container-Meta-Test': ''})
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 202)
    req = Request.blank('/sda1/p/a/c')
    resp = self.controller.GET(req)
    self.assertEquals(resp.status_int, 204)
    self.assert_('x-container-meta-test' not in resp.headers)
def test_reclaim(self):
    broker = AccountBroker(':memory:', account='test_account')
    broker.initialize(normalize_timestamp('1'))
    broker.put_container('c', normalize_timestamp(time()), 0, 0, 0)
    with broker.get() as conn:
        self.assertEqual(conn.execute(
            "SELECT count(*) FROM container "
            "WHERE deleted = 0").fetchone()[0], 1)
        self.assertEqual(conn.execute(
            "SELECT count(*) FROM container "
            "WHERE deleted = 1").fetchone()[0], 0)
    broker.reclaim(normalize_timestamp(time() - 999), time())
    with broker.get() as conn:
        self.assertEqual(conn.execute(
            "SELECT count(*) FROM container "
            "WHERE deleted = 0").fetchone()[0], 1)
        self.assertEqual(conn.execute(
            "SELECT count(*) FROM container "
            "WHERE deleted = 1").fetchone()[0], 0)
    sleep(.00001)
    broker.put_container('c', 0, normalize_timestamp(time()), 0, 0)
    with broker.get() as conn:
        self.assertEqual(conn.execute(
            "SELECT count(*) FROM container "
            "WHERE deleted = 0").fetchone()[0], 0)
        self.assertEqual(conn.execute(
            "SELECT count(*) FROM container "
            "WHERE deleted = 1").fetchone()[0], 1)
    broker.reclaim(normalize_timestamp(time() - 999), time())
    with broker.get() as conn:
        self.assertEqual(conn.execute(
            "SELECT count(*) FROM container "
            "WHERE deleted = 0").fetchone()[0], 0)
        self.assertEqual(conn.execute(
            "SELECT count(*) FROM container "
            "WHERE deleted = 1").fetchone()[0], 1)
    sleep(.00001)
    broker.reclaim(normalize_timestamp(time()), time())
    with broker.get() as conn:
        self.assertEqual(conn.execute(
            "SELECT count(*) FROM container "
            "WHERE deleted = 0").fetchone()[0], 0)
        self.assertEqual(conn.execute(
            "SELECT count(*) FROM container "
            "WHERE deleted = 1").fetchone()[0], 0)
    # Test reclaim after deletion. Create 3 test containers
    broker.put_container('x', 0, 0, 0, 0)
    broker.put_container('y', 0, 0, 0, 0)
    broker.put_container('z', 0, 0, 0, 0)
    broker.reclaim(normalize_timestamp(time()), time())
    # self.assertEqual(len(res), 2)
    # self.assert_(isinstance(res, tuple))
    # containers, account_name = res
    # self.assert_(containers is None)
    # self.assert_(account_name is None)
    # Now delete the account
    broker.delete_db(normalize_timestamp(time()))
    broker.reclaim(normalize_timestamp(time()), time())
def test_unicode(self):
    cu = self._get_container_updater()
    containers_dir = os.path.join(self.sda1, DATADIR)
    os.mkdir(containers_dir)
    subdir = os.path.join(containers_dir, 'subdir')
    os.mkdir(subdir)
    cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
                         container='\xce\xa9')
    cb.initialize(normalize_timestamp(1), 0)
    obj_name = u'\N{GREEK CAPITAL LETTER OMEGA}'
    if six.PY2:
        obj_name = obj_name.encode('utf-8')
    cb.put_object(obj_name, normalize_timestamp(2), 3, 'text/plain',
                  '68b329da9893e34099c7d8ad5cb9c940')

    def accept(sock, addr):
        try:
            with Timeout(3):
                inc = sock.makefile('rb')
                out = sock.makefile('wb')
                out.write(b'HTTP/1.1 201 OK\r\nContent-Length: 0\r\n\r\n')
                out.flush()
                inc.read()
        except BaseException as err:
            import traceback
            traceback.print_exc()
            return err
        return None

    bindsock = listen_zero()

    def spawn_accepts():
        events = []
        for _junk in range(2):
            with Timeout(3):
                sock, addr = bindsock.accept()
                events.append(spawn(accept, sock, addr))
        return events

    spawned = spawn(spawn_accepts)
    for dev in cu.get_account_ring().devs:
        if dev is not None:
            dev['port'] = bindsock.getsockname()[1]
    cu.run_once()
    for event in spawned.wait():
        err = event.wait()
        if err:
            raise err
    info = cb.get_info()
    self.assertEqual(info['object_count'], 1)
    self.assertEqual(info['bytes_used'], 3)
    self.assertEqual(info['reported_object_count'], 1)
    self.assertEqual(info['reported_bytes_used'], 3)
def test_PUT_not_found(self):
    req = Request.blank(
        "/sda1/p/a/c",
        environ={"REQUEST_METHOD": "PUT"},
        headers={
            "X-PUT-Timestamp": normalize_timestamp(1),
            "X-DELETE-Timestamp": normalize_timestamp(0),
            "X-Object-Count": "1",
            "X-Bytes-Used": "1",
            "X-Timestamp": normalize_timestamp(0),
        },
    )
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 404)
def test_report_up_to_date(self):
    repl = replicator.ContainerReplicator({})
    info = {'put_timestamp': normalize_timestamp(1),
            'delete_timestamp': normalize_timestamp(0),
            'object_count': 0,
            'bytes_used': 0,
            'reported_put_timestamp': normalize_timestamp(1),
            'reported_delete_timestamp': normalize_timestamp(0),
            'reported_object_count': 0,
            'reported_bytes_used': 0}
    self.assertTrue(repl.report_up_to_date(info))
    info['delete_timestamp'] = normalize_timestamp(2)
    self.assertFalse(repl.report_up_to_date(info))
    info['reported_delete_timestamp'] = normalize_timestamp(2)
    self.assertTrue(repl.report_up_to_date(info))
    info['object_count'] = 1
    self.assertFalse(repl.report_up_to_date(info))
    info['reported_object_count'] = 1
    self.assertTrue(repl.report_up_to_date(info))
    info['bytes_used'] = 1
    self.assertFalse(repl.report_up_to_date(info))
    info['reported_bytes_used'] = 1
    self.assertTrue(repl.report_up_to_date(info))
    info['put_timestamp'] = normalize_timestamp(3)
    self.assertFalse(repl.report_up_to_date(info))
    info['reported_put_timestamp'] = normalize_timestamp(3)
    self.assertTrue(repl.report_up_to_date(info))
def test_PUT_after_DELETE(self):
    req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
                        headers={'X-Timestamp': normalize_timestamp(1)})
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 201)
    req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE'},
                        headers={'X-Timestamp': normalize_timestamp(1)})
    resp = self.controller.DELETE(req)
    self.assertEquals(resp.status_int, 204)
    req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
                        headers={'X-Timestamp': normalize_timestamp(2)})
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 403)
    self.assertEquals(resp.body, 'Recently deleted')
def test_generic_exception_handling(self):
    auditor_worker = auditor.AuditorWorker(self.conf, self.logger)
    timestamp = str(normalize_timestamp(time.time()))
    pre_errors = auditor_worker.errors
    data = '0' * 1024
    etag = md5()
    with self.disk_file.create() as writer:
        writer.write(data)
        etag.update(data)
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': timestamp,
            'Content-Length': str(os.fstat(writer._fd).st_size),
        }
        writer.put(metadata)
    with mock.patch('swift.obj.diskfile.DiskFile',
                    lambda *_: 1 / 0):
        auditor_worker.audit_all_objects()
    self.assertEquals(auditor_worker.errors, pre_errors + 1)
def test_run_once_with_get_info_timeout(self, mock_dump_recon):
    cu = self._get_container_updater()
    containers_dir = os.path.join(self.sda1, DATADIR)
    os.mkdir(containers_dir)
    subdir = os.path.join(containers_dir, 'subdir')
    os.mkdir(subdir)
    db_file = os.path.join(subdir, 'hash.db')
    cb = ContainerBroker(db_file, account='a', container='c')
    cb.initialize(normalize_timestamp(1), 0)

    timeout = exceptions.LockTimeout(10, db_file)
    timeout.cancel()
    with mock.patch('swift.container.updater.ContainerBroker.get_info',
                    side_effect=timeout):
        cu.run_once()
    log_lines = self.logger.get_lines_for_level('info')
    self.assertIn('Failed to get container info (Lock timeout: '
                  '10 seconds: %s); skipping.' % db_file, log_lines)
def test_run_once_recover_from_failure(self):
    replicator = object_replicator.ObjectReplicator(
        dict(swift_dir=self.testdir, devices=self.devices,
             mount_check='false', timeout='300', stats_interval='1'))
    was_connector = object_replicator.http_connect
    object_replicator.http_connect = mock_http_connect(200)
    # Write some files into '1' and run replicate - they should be moved
    # to the other partitions and then the node should get deleted.
    cur_part = '1'
    df = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o')
    mkdirs(df.datadir)
    f = open(os.path.join(df.datadir,
                          normalize_timestamp(time.time()) + '.data'),
             'wb')
    f.write('1234567890')
    f.close()
    ohash = hash_path('a', 'c', 'o')
    data_dir = ohash[-3:]
    whole_path_from = os.path.join(self.objects, cur_part, data_dir)
    process_arg_checker = []
    nodes = [node for node in self.ring.get_part_nodes(int(cur_part))
             if node['ip'] not in _ips()]
    for node in nodes:
        rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], cur_part)
        process_arg_checker.append(
            (0, '', ['rsync', whole_path_from, rsync_mod]))
    self.assertTrue(os.access(os.path.join(self.objects,
                                           '1', data_dir, ohash),
                              os.F_OK))
    with _mock_process(process_arg_checker):
        replicator.run_once()
    self.assertFalse(process_errors)
    for i, result in [('0', True), ('1', False),
                      ('2', True), ('3', True)]:
        self.assertEquals(os.access(
            os.path.join(self.objects,
                         i, object_replicator.HASH_FILE),
            os.F_OK), result)
    object_replicator.http_connect = was_connector
def DELETE(self, req):
    """Handle HTTP DELETE request."""
    drive, part, account, container, obj = split_and_validate_path(
        req, 4, 5, True)
    if 'x-timestamp' not in req.headers or \
            not check_float(req.headers['x-timestamp']):
        return HTTPBadRequest(body='Missing timestamp', request=req,
                              content_type='text/plain')
    if self.mount_check and not check_mount(self.root, drive):
        return HTTPInsufficientStorage(drive=drive, request=req)
    broker = self._get_container_broker(drive, part, account, container)
    if account.startswith(self.auto_create_account_prefix) and obj and \
            not os.path.exists(broker.db_file):
        requested_policy_index = (self.get_and_validate_policy_index(req)
                                  or POLICIES.default.idx)
        try:
            broker.initialize(
                normalize_timestamp(
                    req.headers.get('x-timestamp') or time.time()),
                requested_policy_index)
        except DatabaseAlreadyExists:
            pass
    if not os.path.exists(broker.db_file):
        return HTTPNotFound()
    if obj:     # delete object
        broker.delete_object(obj, req.headers.get('x-timestamp'))
        return HTTPNoContent(request=req)
    else:
        # delete container
        if not broker.empty():
            return HTTPConflict(request=req)
        existed = float(broker.get_info()['put_timestamp']) and \
            not broker.is_deleted()
        broker.delete_db(req.headers['X-Timestamp'])
        if not broker.is_deleted():
            return HTTPConflict(request=req)
        resp = self.account_update(req, account, container, broker)
        if resp:
            return resp
        if existed:
            return HTTPNoContent(request=req)
        return HTTPNotFound()
def test_get_hashes_unmodified(self):
    df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
    mkdirs(df.datadir)
    with open(os.path.join(df.datadir,
                           normalize_timestamp(time.time()) + '.ts'),
              'wb') as f:
        f.write('1234567890')
    part = os.path.join(self.objects, '0')
    hashed, hashes = object_replicator.get_hashes(part)
    i = [0]

    def getmtime(filename):
        i[0] += 1
        return 1
    with mock({'os.path.getmtime': getmtime}):
        hashed, hashes = object_replicator.get_hashes(
            part, recalculate=['a83'])
    self.assertEquals(i[0], 2)
def test_get_hashes_modified(self):
    df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
                           FakeLogger())
    mkdirs(df.datadir)
    with open(os.path.join(df.datadir,
                           normalize_timestamp(time()) + '.ts'),
              'wb') as f:
        f.write('1234567890')
    part = os.path.join(self.objects, '0')
    hashed, hashes = diskfile.get_hashes(part)
    i = [0]

    def _getmtime(filename):
        if i[0] < 3:
            i[0] += 1
        return i[0]
    with unit_mock({'swift.obj.diskfile.getmtime': _getmtime}):
        hashed, hashes = diskfile.get_hashes(part, recalculate=['a83'])
    self.assertEquals(i[0], 3)
def put(self, metadata):
    """
    Finalize writing the file on disk.

    For this implementation, this method is responsible for renaming the
    temporary file to the final name and directory location.  This method
    should be called after the final call to
    :func:`swift.obj.diskfile.DiskFileWriter.write`.

    :param metadata: dictionary of metadata to be associated with the
                     object
    """
    if not self._tmppath:
        raise ValueError("tmppath is unusable.")
    timestamp = normalize_timestamp(metadata['X-Timestamp'])
    metadata['name'] = self._name
    target_path = join(self._datadir, timestamp + self._extension)

    self._threadpool.force_run_in_thread(
        self._finalize_put, metadata, target_path)
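# --- Illustrative usage sketch (an assumption, not from the source) ---
# put() above implies a two-step write/finalize protocol: stream data in
# through write(), then call put(), which renames the temp file into
# place as <datadir>/<timestamp><extension>. This mirrors the
# create()/write()/put() pattern the tests in this section use; `df`
# (an already-constructed DiskFile) and the exact metadata keys are
# assumed here.
import time
from hashlib import md5
from swift.common.utils import normalize_timestamp

def example_write_object(df, data):
    with df.create() as writer:           # yields a DiskFileWriter
        writer.write(data)                # stage data in the temp file
        writer.put({                      # finalize: rename into place
            'X-Timestamp': normalize_timestamp(time.time()),
            'Content-Length': str(len(data)),
            'ETag': md5(data).hexdigest(),
        })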
def test_unlinkold_file(self):
    td = tempfile.mkdtemp()
    the_path = os.path.join(td, "vol0", "bar")
    the_file = os.path.join(the_path, "z")
    try:
        os.makedirs(the_path)
        with open(the_file, "wb") as fd:
            fd.write("1234")
        gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar", "z",
                               self.lg)
        assert gdf._obj == "z"
        assert gdf.data_file == the_file
        assert not gdf._is_dir

        later = float(gdf.metadata['X-Timestamp']) + 1
        gdf.unlinkold(normalize_timestamp(later))
        assert os.path.isdir(gdf.datadir)
        assert not os.path.exists(os.path.join(gdf.datadir, gdf._obj))
    finally:
        shutil.rmtree(td)
def test_object_run_once_no_sda(self):
    self.auditor = auditor.AuditorWorker(self.conf)
    timestamp = str(normalize_timestamp(time.time()))
    pre_quarantines = self.auditor.quarantines
    data = '0' * 1024
    etag = md5()
    with self.disk_file.mkstemp() as (fd, tmppath):
        os.write(fd, data)
        etag.update(data)
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': timestamp,
            'Content-Length': str(os.fstat(fd).st_size),
        }
        self.disk_file.put(fd, tmppath, metadata)
        self.disk_file.close()
        # append garbage after the put so the audit sees a corrupt object
        os.write(fd, 'extra_data')
    self.auditor.audit_all_objects()
    self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_hash_suffix_multi_file_one(self):
    df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
    mkdirs(df.datadir)
    for tdiff in [1, 50, 100, 500]:
        for suff in ['.meta', '.data', '.ts']:
            f = open(
                os.path.join(
                    df.datadir,
                    normalize_timestamp(int(time.time()) - tdiff) + suff),
                'wb')
            f.write('1234567890')
            f.close()

    ohash = hash_path('a', 'c', 'o')
    data_dir = ohash[-3:]
    whole_path_from = os.path.join(self.objects, '0', data_dir)
    hsh_path = os.listdir(whole_path_from)[0]
    whole_hsh_path = os.path.join(whole_path_from, hsh_path)

    object_replicator.hash_suffix(whole_path_from, 99)
    # only the tombstone should be left
    self.assertEquals(len(os.listdir(whole_hsh_path)), 1)
def put(self, fd, tmppath, metadata, extension='.data'):
    """
    Finalize writing the file on disk, and renames it from the temp file
    to the real location.  This should be called after the data has been
    written to the temp file.

    :param fd: file descriptor of the temp file
    :param tmppath: path to the temporary file being used
    :param metadata: dictionary of metadata to be written
    :param extension: extension to be used when making the file
    """
    metadata['name'] = self.name
    timestamp = normalize_timestamp(metadata['X-Timestamp'])
    write_metadata(fd, metadata)
    if 'Content-Length' in metadata:
        self.drop_cache(fd, 0, int(metadata['Content-Length']))
    tpool.execute(os.fsync, fd)
    invalidate_hash(os.path.dirname(self.datadir))
    renamer(tmppath, os.path.join(self.datadir, timestamp + extension))
    self.metadata = metadata
def DELETE(self, req):
    """HTTP DELETE request handler."""
    if not self.app.allow_account_management:
        return HTTPMethodNotAllowed(
            request=req,
            headers={'Allow': ', '.join(self.allowed_methods)})
    account_partition, accounts = \
        self.app.account_ring.get_nodes(self.account_name)
    headers = {'X-Timestamp': normalize_timestamp(time.time()),
               'X-Trans-Id': self.trans_id,
               'Connection': 'close'}
    if self.app.memcache:
        self.app.memcache.delete(
            get_account_memcache_key(self.account_name))
    resp = self.make_requests(req, self.app.account_ring,
                              account_partition, 'DELETE',
                              req.path_info, [headers] * len(accounts))
    return resp
def test_repl_to_node(self):
    replicator = TestReplicator({})
    fake_node = {'ip': '127.0.0.1', 'device': 'sda1', 'port': 1000}
    fake_info = {'id': 'a', 'point': -1, 'max_row': 0, 'hash': 'b',
                 'created_at': 100, 'put_timestamp': 0,
                 'delete_timestamp': 0,
                 'metadata': {'Test': ('Value', normalize_timestamp(1))}}
    replicator._http_connect = lambda *args: ReplHttp(
        '{"id": 3, "point": -1}')
    self.assertEquals(replicator._repl_to_node(
        fake_node, FakeBroker(), '0', fake_info), True)
def autocreate_account(self, env, account):
    """
    Autocreate an account.

    :param env: the environment of the request leading to this autocreate
    :param account: the unquoted account name
    """
    partition, nodes = self.app.account_ring.get_nodes(account)
    path = '/%s' % account
    headers = {'X-Timestamp': normalize_timestamp(time.time()),
               'X-Trans-Id': self.trans_id,
               'Connection': 'close'}
    resp = self.make_requests(Request.blank('/v1' + path),
                              self.app.account_ring, partition, 'PUT',
                              path, [headers] * len(nodes))
    if is_success(resp.status_int):
        self.app.logger.info('autocreate account %r' % path)
        clear_info_cache(self.app, env, account)
    else:
        self.app.logger.warning('Could not autocreate account %r' % path)
def test_GET_accept_xml(self):
    req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                              'HTTP_X_TIMESTAMP': '0'})
    self.controller.PUT(req)
    req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                        headers={'X-Put-Timestamp': '1',
                                 'X-Delete-Timestamp': '0',
                                 'X-Object-Count': '0',
                                 'X-Bytes-Used': '0',
                                 'X-Timestamp': normalize_timestamp(0)})
    self.controller.PUT(req)
    req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
    req.accept = 'application/xml'
    resp = self.controller.GET(req)
    self.assertEquals(resp.status_int, 200)
    dom = xml.dom.minidom.parseString(resp.body)
    self.assertEquals(dom.firstChild.nodeName, 'account')
    listing = \
        [n for n in dom.firstChild.childNodes if n.nodeName != '#text']
    self.assertEquals(len(listing), 1)
def test_object_audit_will_not_swallow_errors_in_tests(self):
    timestamp = str(normalize_timestamp(time.time()))
    path = os.path.join(self.disk_file._datadir, timestamp + '.data')
    mkdirs(self.disk_file._datadir)
    with open(path, 'w') as f:
        write_metadata(f, {'name': '/a/c/o'})
    auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
                                           self.rcache, self.devices)

    def blowup(*args):
        raise NameError('tpyo')
    with mock.patch.object(DiskFileManager,
                           'get_diskfile_from_audit_location', blowup):
        self.assertRaises(NameError, auditor_worker.object_audit,
                          AuditLocation(os.path.dirname(path), 'sda', '0',
                                        policy=POLICIES.legacy))
def _create_test_file(self, data, keep_data_fp=True):
    df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
                           FakeLogger())
    mkdirs(df.datadir)
    f = open(os.path.join(df.datadir,
                          normalize_timestamp(time()) + '.data'), 'wb')
    f.write(data)
    setxattr(f.fileno(), diskfile.METADATA_KEY,
             pickle.dumps({}, diskfile.PICKLE_PROTOCOL))
    f.close()
    df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
                           FakeLogger(), keep_data_fp=keep_data_fp)
    return df
def test_object_run_once_no_sda(self):
    auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
                                           self.rcache, self.devices)
    timestamp = str(normalize_timestamp(time.time()))
    pre_quarantines = auditor_worker.quarantines
    data = '0' * 1024
    etag = md5()
    with self.disk_file.create() as writer:
        writer.write(data)
        etag.update(data)
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': timestamp,
            'Content-Length': str(os.fstat(writer._fd).st_size),
        }
        writer.put(metadata)
        # append garbage after the put so the audit sees a corrupt object
        os.write(writer._fd, 'extra_data')
    auditor_worker.audit_all_objects()
    self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_failsafe_object_audit_will_swallow_errors_in_tests(self):
    timestamp = str(normalize_timestamp(time.time()))
    path = os.path.join(self.disk_file._datadir, timestamp + '.data')
    mkdirs(self.disk_file._datadir)
    with open(path, 'w') as f:
        write_metadata(f, {'name': '/a/c/o'})
    auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
                                           self.rcache, self.devices)

    def blowup(*args):
        raise NameError('tpyo')
    with mock.patch('swift.obj.diskfile.DiskFileManager.diskfile_cls',
                    blowup):
        auditor_worker.failsafe_object_audit(
            AuditLocation(os.path.dirname(path), 'sda', '0',
                          policy=POLICIES.legacy))
    self.assertEqual(auditor_worker.errors, 1)
def _backend_requests(self, req, n_outgoing,
                      account_partition, accounts):
    headers = [{'Connection': 'close',
                'X-Timestamp': normalize_timestamp(time.time()),
                'x-trans-id': self.trans_id}
               for _junk in range(n_outgoing)]
    for header in headers:
        self.transfer_headers(req.headers, header)
    for i, account in enumerate(accounts):
        i = i % len(headers)
        headers[i]['X-Account-Partition'] = account_partition
        headers[i]['X-Account-Host'] = csv_append(
            headers[i].get('X-Account-Host'),
            '%(ip)s:%(port)s' % account)
        headers[i]['X-Account-Device'] = csv_append(
            headers[i].get('X-Account-Device'),
            account['device'])
    return headers
def test_object_run_once_multi_devices(self):
    auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
                                           self.rcache, self.devices)
    # pretend that we logged (and reset counters) just now
    auditor_worker.last_logged = time.time()
    timestamp = str(normalize_timestamp(time.time()))
    pre_quarantines = auditor_worker.quarantines
    data = '0' * 10
    etag = md5()
    with self.disk_file.create() as writer:
        writer.write(data)
        etag.update(data)
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': timestamp,
            'Content-Length': str(os.fstat(writer._fd).st_size),
        }
        writer.put(metadata)
    auditor_worker.audit_all_objects()
    self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'ob',
                                              policy=POLICIES.legacy)
    data = '1' * 10
    etag = md5()
    with self.disk_file.create() as writer:
        writer.write(data)
        etag.update(data)
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': timestamp,
            'Content-Length': str(os.fstat(writer._fd).st_size),
        }
        writer.put(metadata)
        os.write(writer._fd, 'extra_data')
    auditor_worker.audit_all_objects()
    self.assertEqual(auditor_worker.quarantines, pre_quarantines + 1)
def _make_open_diskfile(self, device='dev', partition='9',
                        account='a', container='c', obj='o',
                        body='test', extra_metadata=None,
                        policy_idx=0):
    object_parts = account, container, obj
    req_timestamp = utils.normalize_timestamp(time.time())
    df = self.sender.daemon._diskfile_mgr.get_diskfile(
        device, partition, *object_parts, policy_idx=policy_idx)
    content_length = len(body)
    etag = hashlib.md5(body).hexdigest()
    with df.create() as writer:
        writer.write(body)
        metadata = {
            'X-Timestamp': req_timestamp,
            'Content-Length': content_length,
            'ETag': etag,
        }
        if extra_metadata:
            metadata.update(extra_metadata)
        writer.put(metadata)
    df.open()
    return df
def POST(self, req):
    """Handle HTTP POST request."""
    drive, part, account = split_and_validate_path(req, 3)
    if 'x-timestamp' not in req.headers or \
            not check_float(req.headers['x-timestamp']):
        return HTTPBadRequest(body='Missing or bad timestamp',
                              request=req,
                              content_type='text/plain')
    if self.mount_check and not check_mount(self.root, drive):
        return HTTPInsufficientStorage(drive=drive, request=req)
    broker = self._get_account_broker(drive, part, account)
    if broker.is_deleted():
        return self._deleted_response(broker, req, HTTPNotFound)
    timestamp = normalize_timestamp(req.headers['x-timestamp'])
    metadata = {}
    metadata.update((key, (value, timestamp))
                    for key, value in req.headers.iteritems()
                    if is_sys_or_user_meta('account', key))
    if metadata:
        broker.update_metadata(metadata)
    return HTTPNoContent(request=req)
def get_object_metadata(obj_path):
    """
    Return metadata of object.
    """
    try:
        stats = os.stat(obj_path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        metadata = {}
    else:
        is_dir = (stats.st_mode & 0040000) != 0
        metadata = {
            X_TYPE: OBJECT,
            X_TIMESTAMP: normalize_timestamp(stats.st_ctime),
            X_CONTENT_TYPE: DIR_TYPE if is_dir else FILE_TYPE,
            X_OBJECT_TYPE: DIR if is_dir else FILE,
            X_CONTENT_LENGTH: 0 if is_dir else stats.st_size,
            X_ETAG: md5().hexdigest() if is_dir else _get_etag(obj_path),
        }
    return metadata
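# --- Illustrative behavior sketch (an assumption, not from the source) ---
# How this stat-based variant behaves for a regular file, a directory,
# and a missing path, in contrast with the os.path.exists-based variant
# at the top of this section. The paths are hypothetical; the constants
# (X_OBJECT_TYPE, FILE, DIR) are the module's own.
meta = get_object_metadata('/mnt/gluster-object/vol0/bar/z')
assert meta[X_OBJECT_TYPE] == FILE   # regular file: real size, content etag
meta = get_object_metadata('/mnt/gluster-object/vol0/bar')
assert meta[X_OBJECT_TYPE] == DIR    # directory: length 0, empty-md5 etag
meta = get_object_metadata('/mnt/gluster-object/nonexistent')
assert meta == {}                    # ENOENT is swallowed; empty dict returned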
def test_get_items_since(self):
    broker = DatabaseBroker(':memory:')
    broker.db_type = 'test'
    broker.db_contains_type = 'test'

    def _initialize(conn, timestamp):
        conn.execute('CREATE TABLE test (one TEXT)')
        conn.execute('INSERT INTO test (one) VALUES ("1")')
        conn.execute('INSERT INTO test (one) VALUES ("2")')
        conn.execute('INSERT INTO test (one) VALUES ("3")')
        conn.commit()

    broker._initialize = _initialize
    broker.initialize(normalize_timestamp('1'))
    self.assertEquals(broker.get_items_since(-1, 10),
                      [{'one': '1'}, {'one': '2'}, {'one': '3'}])
    self.assertEquals(broker.get_items_since(-1, 2),
                      [{'one': '1'}, {'one': '2'}])
    self.assertEquals(broker.get_items_since(1, 2),
                      [{'one': '2'}, {'one': '3'}])
    self.assertEquals(broker.get_items_since(3, 2), [])
    self.assertEquals(broker.get_items_since(999, 2), [])
def _backend_requests(self, req, n_outgoing,
                      account_partition, accounts,
                      policy_index=None):
    additional = {'X-Timestamp': normalize_timestamp(time.time())}
    if policy_index is not None:
        additional[POLICY_INDEX] = str(policy_index)
    headers = [self.generate_request_headers(req, transfer=True,
                                             additional=additional)
               for _junk in range(n_outgoing)]
    for i, account in enumerate(accounts):
        i = i % len(headers)
        headers[i]['X-Account-Partition'] = account_partition
        headers[i]['X-Account-Host'] = csv_append(
            headers[i].get('X-Account-Host'),
            '%(ip)s:%(port)s' % account)
        headers[i]['X-Account-Device'] = csv_append(
            headers[i].get('X-Account-Device'),
            account['device'])
    return headers
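# --- Standalone sketch of the round-robin host spreading (assumption) ---
# Both _backend_requests variants above spread the account nodes over the
# outgoing header dicts via `i % len(headers)`, with csv_append building
# comma-separated values when accounts outnumber outgoing requests. The
# local csv_append below mirrors swift.common.utils.csv_append; the node
# data is made up for illustration.
def csv_append(csv_string, item):
    # start a new CSV value, or extend an existing one
    return csv_string + ',' + item if csv_string else item

headers = [{} for _ in range(2)]                  # n_outgoing = 2
accounts = [{'ip': '10.0.0.%d' % n, 'port': 6002, 'device': 'sda%d' % n}
            for n in (1, 2, 3)]                   # three account replicas
for i, account in enumerate(accounts):
    i = i % len(headers)                          # round-robin target
    headers[i]['X-Account-Host'] = csv_append(
        headers[i].get('X-Account-Host'), '%(ip)s:%(port)s' % account)
    headers[i]['X-Account-Device'] = csv_append(
        headers[i].get('X-Account-Device'), account['device'])

# headers[0]['X-Account-Host'] == '10.0.0.1:6002,10.0.0.3:6002'
# headers[1]['X-Account-Host'] == '10.0.0.2:6002'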
def test_create_object_metadata_file(self):
    tf = tempfile.NamedTemporaryFile()
    tf.file.write('4567')
    tf.file.flush()
    r_md = utils.create_object_metadata(tf.name)

    xkey = _xkey(tf.name, utils.METADATA_KEY)
    assert len(_xattrs.keys()) == 1
    assert xkey in _xattrs
    assert _xattr_op_cnt['get'] == 1
    assert _xattr_op_cnt['set'] == 1
    md = pickle.loads(_xattrs[xkey])
    assert r_md == md

    for key in self.obj_keys:
        assert key in md, "Expected key %s in %r" % (key, md)
    assert md[utils.X_TYPE] == utils.OBJECT
    assert md[utils.X_OBJECT_TYPE] == utils.FILE
    assert md[utils.X_CONTENT_TYPE] == utils.FILE_TYPE
    assert md[utils.X_CONTENT_LENGTH] == os.path.getsize(tf.name)
    assert md[utils.X_TIMESTAMP] == normalize_timestamp(
        os.path.getctime(tf.name))
    assert md[utils.X_ETAG] == utils._get_etag(tf.name)
def test_put_auto_create(self):
    headers = {'x-timestamp': normalize_timestamp(1),
               'x-size': '0',
               'x-content-type': 'text/plain',
               'x-etag': 'd41d8cd98f00b204e9800998ecf8427e'}

    resp = self.controller.PUT(Request.blank(
        '/sda1/p/a/c/o',
        environ={'REQUEST_METHOD': 'PUT'},
        headers=dict(headers)))
    self.assertEquals(resp.status_int, 404)

    resp = self.controller.PUT(Request.blank(
        '/sda1/p/.a/c/o',
        environ={'REQUEST_METHOD': 'PUT'},
        headers=dict(headers)))
    self.assertEquals(resp.status_int, 201)

    resp = self.controller.PUT(Request.blank(
        '/sda1/p/a/.c/o',
        environ={'REQUEST_METHOD': 'PUT'},
        headers=dict(headers)))
    self.assertEquals(resp.status_int, 404)

    resp = self.controller.PUT(Request.blank(
        '/sda1/p/a/.c/.o',
        environ={'REQUEST_METHOD': 'PUT'},
        headers=dict(headers)))
    self.assertEquals(resp.status_int, 404)
def test_error_in_process(self, mock_process, mock_dump_recon):
    cu = self._get_container_updater()
    containers_dir = os.path.join(self.sda1, DATADIR)
    os.mkdir(containers_dir)
    subdir = os.path.join(containers_dir, 'subdir')
    os.mkdir(subdir)
    cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
                         container='c', pending_timeout=1)
    cb.initialize(normalize_timestamp(1), 0)

    cu.run_once()

    log_lines = self.logger.get_lines_for_level('error')
    self.assertTrue(log_lines)
    self.assertIn('Error processing container ', log_lines[0])
    self.assertIn('devices/sda1/containers/subdir/hash.db', log_lines[0])
    self.assertIn('Boom!', log_lines[0])
    self.assertFalse(log_lines[1:])
    self.assertEqual(1, len(mock_dump_recon.mock_calls))
def test_object_run_once_pass(self):
    self.auditor = auditor.AuditorWorker(self.conf, self.logger)
    self.auditor.log_time = 0
    timestamp = str(normalize_timestamp(time.time()))
    pre_quarantines = self.auditor.quarantines
    data = '0' * 1024
    etag = md5()
    with self.disk_file.writer() as writer:
        writer.write(data)
        etag.update(data)
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': timestamp,
            'Content-Length': str(os.fstat(writer.fd).st_size),
        }
        writer.put(metadata)
    self.auditor.audit_all_objects()
    self.assertEquals(self.auditor.quarantines, pre_quarantines)
    self.assertEquals(self.auditor.stats_buckets[1024], 1)
    self.assertEquals(self.auditor.stats_buckets[10240], 0)