def test_unicode(self):
    cu = container_updater.ContainerUpdater({
        'devices': self.devices_dir,
        'mount_check': 'false',
        'chase_dir': self.testdir,
        'interval': '1',
        'concurrency': '1',
        'node_timeout': '15',
    })
    containers_dir = os.path.join(self.sda1, container_server.DATADIR)
    os.mkdir(containers_dir)
    subdir = os.path.join(containers_dir, 'subdir')
    os.mkdir(subdir)
    cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
                         container='\xce\xa9')
    cb.initialize(normalize_timestamp(1))
    cb.put_object('\xce\xa9', normalize_timestamp(2), 3, 'text/plain',
                  '68b329da9893e34099c7d8ad5cb9c940')

    def accept(sock, addr):
        try:
            with Timeout(3):
                inc = sock.makefile('rb')
                out = sock.makefile('wb')
                out.write('HTTP/1.1 201 OK\r\nContent-Length: 0\r\n\r\n')
                out.flush()
                inc.read()
        except BaseException, err:
            import traceback
            traceback.print_exc()
            return err
        return None
def test_PUT_GET_metadata(self):
    # Set metadata header
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
        headers={'X-Timestamp': normalize_timestamp(1),
                 'X-Container-Meta-Test': 'Value'})
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 201)
    req = Request.blank('/sda1/p/a/c')
    resp = self.controller.GET(req)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
    # Set another metadata header, ensuring old one doesn't disappear
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
        headers={'X-Timestamp': normalize_timestamp(1),
                 'X-Container-Meta-Test2': 'Value2'})
    resp = self.controller.POST(req)
    self.assertEquals(resp.status_int, 204)
    req = Request.blank('/sda1/p/a/c')
    resp = self.controller.GET(req)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
    self.assertEquals(resp.headers.get('x-container-meta-test2'), 'Value2')
    # Update metadata header
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
        headers={'X-Timestamp': normalize_timestamp(3),
                 'X-Container-Meta-Test': 'New Value'})
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 202)
    req = Request.blank('/sda1/p/a/c')
    resp = self.controller.GET(req)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(resp.headers.get('x-container-meta-test'),
                      'New Value')
    # Send old update to metadata header
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
        headers={'X-Timestamp': normalize_timestamp(2),
                 'X-Container-Meta-Test': 'Old Value'})
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 202)
    req = Request.blank('/sda1/p/a/c')
    resp = self.controller.GET(req)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(resp.headers.get('x-container-meta-test'),
                      'New Value')
    # Remove metadata header (by setting it to empty)
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
        headers={'X-Timestamp': normalize_timestamp(4),
                 'X-Container-Meta-Test': ''})
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 202)
    req = Request.blank('/sda1/p/a/c')
    resp = self.controller.GET(req)
    self.assertEquals(resp.status_int, 204)
    self.assert_('x-container-meta-test' not in resp.headers)
def test_run_once(self):
    cu = object_updater.ObjectUpdater({
        'devices': self.devices_dir,
        'mount_check': 'false',
        'chase_dir': self.testdir,
        'interval': '1',
        'concurrency': '1',
        'node_timeout': '15',
    })
    cu.run_once()
    async_dir = os.path.join(self.sda1, object_server.ASYNCDIR)
    os.mkdir(async_dir)
    cu.run_once()
    self.assert_(os.path.exists(async_dir))

    odd_dir = os.path.join(async_dir, 'not really supposed to be here')
    os.mkdir(odd_dir)
    cu.run_once()
    self.assert_(os.path.exists(async_dir))
    self.assert_(not os.path.exists(odd_dir))

    ohash = hash_path('a', 'c', 'o')
    odir = os.path.join(async_dir, ohash[-3:])
    mkdirs(odir)
    op_path = os.path.join(odir,
                           '%s-%s' % (ohash, normalize_timestamp(time())))
    pickle.dump({'op': 'PUT', 'account': 'a', 'container': 'c', 'obj': 'o',
                 'headers': {'X-Container-Timestamp':
                             normalize_timestamp(0)}},
                open(op_path, 'wb'))
    cu.run_once()
    self.assert_(os.path.exists(op_path))

    bindsock = listen(('127.0.0.1', 0))

    def accepter(sock, return_code):
        try:
            with Timeout(3):
                inc = sock.makefile('rb')
                out = sock.makefile('wb')
                out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                          return_code)
                out.flush()
                self.assertEquals(inc.readline(),
                                  'PUT /sda1/0/a/c/o HTTP/1.1\r\n')
                headers = {}
                line = inc.readline()
                while line and line != '\r\n':
                    headers[line.split(':')[0].lower()] = \
                        line.split(':')[1].strip()
                    line = inc.readline()
                self.assert_('x-container-timestamp' in headers)
        except BaseException, err:
            return err
        return None
def direct_delete_container(node, part, account, container, conn_timeout=5,
                            response_timeout=15, headers=None):
    """
    Delete container directly from the container server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :raises ClientException: HTTP DELETE request failed
    """
    # Use None instead of a mutable {} default so the shared default dict
    # isn't mutated by the X-Timestamp assignment below.
    if headers is None:
        headers = {}
    path = '/%s/%s' % (account, container)
    headers['X-Timestamp'] = normalize_timestamp(time())
    with Timeout(conn_timeout):
        conn = http_connect(node['ip'], node['port'], node['device'], part,
                            'DELETE', path, headers)
    with Timeout(response_timeout):
        resp = conn.getresponse()
        resp.read()
    if resp.status < 200 or resp.status >= 300:
        raise ClientException(
            'Container server %s:%s direct DELETE %s gave status %s' %
            (node['ip'], node['port'],
             repr('/%s/%s%s' % (node['device'], part, path)), resp.status),
            http_host=node['ip'], http_port=node['port'],
            http_device=node['device'], http_status=resp.status,
            http_reason=resp.reason)
def test_run_once_recover_from_timeout(self):
    replicator = object_replicator.ObjectReplicator(
        dict(chase_dir=self.testdir, devices=self.devices,
             mount_check='false', timeout='300', stats_interval='1'))
    was_connector = object_replicator.http_connect
    was_get_hashes = object_replicator.get_hashes
    was_execute = tpool.execute
    self.get_hash_count = 0
    try:

        def fake_get_hashes(*args, **kwargs):
            self.get_hash_count += 1
            if self.get_hash_count == 3:
                # raise timeout on last call to get hashes
                raise Timeout()
            return 2, {'abc': 'def'}

        def fake_exc(tester, *args, **kwargs):
            if 'Error syncing partition' in args[0]:
                tester.i_failed = True

        self.i_failed = False
        object_replicator.http_connect = mock_http_connect(200)
        object_replicator.get_hashes = fake_get_hashes
        replicator.logger.exception = \
            lambda *args, **kwargs: fake_exc(self, *args, **kwargs)
        # Write some files into '1' and run replicate; they should be
        # moved to the other partitions and then the node should get
        # deleted.
        cur_part = '1'
        df = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o',
                      FakeLogger())
        mkdirs(df.datadir)
        f = open(os.path.join(df.datadir,
                              normalize_timestamp(time.time()) + '.data'),
                 'wb')
        f.write('1234567890')
        f.close()
        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, cur_part, data_dir)
        process_arg_checker = []
        nodes = [node for node in self.ring.get_part_nodes(int(cur_part))
                 if node['ip'] not in _ips()]
        for node in nodes:
            rsync_mod = '[%s]::object/sda/objects/%s' % (node['ip'],
                                                         cur_part)
            process_arg_checker.append(
                (0, '', ['rsync', whole_path_from, rsync_mod]))
        self.assertTrue(os.access(os.path.join(self.objects, '1',
                                               data_dir, ohash), os.F_OK))
        with _mock_process(process_arg_checker):
            replicator.run_once()
        self.assertFalse(process_errors)
        self.assertFalse(self.i_failed)
    finally:
        object_replicator.http_connect = was_connector
        object_replicator.get_hashes = was_get_hashes
        tpool.execute = was_execute
def test_run_once(self):
    replicator = object_replicator.ObjectReplicator(
        dict(chase_dir=self.testdir, devices=self.devices,
             mount_check='false', timeout='300', stats_interval='1'))
    was_connector = object_replicator.http_connect
    object_replicator.http_connect = mock_http_connect(200)
    cur_part = '0'
    df = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o',
                  FakeLogger())
    mkdirs(df.datadir)
    f = open(os.path.join(df.datadir,
                          normalize_timestamp(time.time()) + '.data'),
             'wb')
    f.write('1234567890')
    f.close()
    ohash = hash_path('a', 'c', 'o')
    data_dir = ohash[-3:]
    whole_path_from = os.path.join(self.objects, cur_part, data_dir)
    process_arg_checker = []
    nodes = [node for node in self.ring.get_part_nodes(int(cur_part))
             if node['ip'] not in _ips()]
    for node in nodes:
        rsync_mod = '[%s]::object/sda/objects/%s' % (node['ip'], cur_part)
        process_arg_checker.append(
            (0, '', ['rsync', whole_path_from, rsync_mod]))
    with _mock_process(process_arg_checker):
        replicator.run_once()
    self.assertFalse(process_errors)
    object_replicator.http_connect = was_connector
def test_object_run_fast_track_non_zero(self):
    self.auditor = auditor.ObjectAuditor(self.conf)
    self.auditor.log_time = 0
    data = '0' * 1024
    etag = md5()
    with self.disk_file.mkstemp() as (fd, tmppath):
        os.write(fd, data)
        etag.update(data)
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': str(normalize_timestamp(time.time())),
            'Content-Length': str(os.fstat(fd).st_size),
        }
        self.disk_file.put(fd, tmppath, metadata)
        etag = md5()
        etag.update('1' + '0' * 1023)
        etag = etag.hexdigest()
        metadata['ETag'] = etag
        write_metadata(fd, metadata)

    quarantine_path = os.path.join(self.devices, 'sda', 'quarantined',
                                   'objects')
    self.auditor.run_once(zero_byte_fps=50)
    self.assertFalse(os.path.isdir(quarantine_path))
    self.auditor.run_once()
    self.assertTrue(os.path.isdir(quarantine_path))
def direct_delete_object(node, part, account, container, obj,
                         conn_timeout=5, response_timeout=15, headers=None):
    """
    Delete object directly from the object server.

    :param node: node dictionary from the ring
    :param part: partition the object is on
    :param account: account name
    :param container: container name
    :param obj: object name
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :raises ClientException: HTTP DELETE request failed
    """
    # Use None instead of a mutable {} default so the shared default dict
    # isn't mutated by the X-Timestamp assignment below.
    if headers is None:
        headers = {}
    path = '/%s/%s/%s' % (account, container, obj)
    headers['X-Timestamp'] = normalize_timestamp(time())
    with Timeout(conn_timeout):
        conn = http_connect(node['ip'], node['port'], node['device'], part,
                            'DELETE', path, headers)
    with Timeout(response_timeout):
        resp = conn.getresponse()
        resp.read()
    if resp.status < 200 or resp.status >= 300:
        raise ClientException(
            'Object server %s:%s direct DELETE %s gave status %s' %
            (node['ip'], node['port'],
             repr('/%s/%s%s' % (node['device'], part, path)), resp.status),
            http_host=node['ip'], http_port=node['port'],
            http_device=node['device'], http_status=resp.status,
            http_reason=resp.reason)
def setup_bad_zero_byte(self, with_ts=False):
    self.auditor = auditor.ObjectAuditor(self.conf)
    self.auditor.log_time = 0
    ts_file_path = ''
    if with_ts:
        name_hash = hash_path('a', 'c', 'o')
        dir_path = os.path.join(self.devices, 'sda',
                                storage_directory(DATADIR, '0', name_hash))
        ts_file_path = os.path.join(dir_path, '99999.ts')
        if not os.path.exists(dir_path):
            mkdirs(dir_path)
        fp = open(ts_file_path, 'w')
        fp.close()

    etag = md5()
    with self.disk_file.mkstemp() as (fd, tmppath):
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': str(normalize_timestamp(time.time())),
            'Content-Length': 10,
        }
        self.disk_file.put(fd, tmppath, metadata)
        etag = md5()
        etag = etag.hexdigest()
        metadata['ETag'] = etag
        write_metadata(fd, metadata)
    if self.disk_file.data_file:
        return self.disk_file.data_file
    return ts_file_path
def direct_post_object(node, part, account, container, name, headers,
                       conn_timeout=5, response_timeout=15):
    """
    Direct update to object metadata on object server.

    :param node: node dictionary from the ring
    :param part: partition the object is on
    :param account: account name
    :param container: container name
    :param name: object name
    :param headers: headers to store as metadata
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :raises ClientException: HTTP POST request failed
    """
    path = '/%s/%s/%s' % (account, container, name)
    headers['X-Timestamp'] = normalize_timestamp(time())
    with Timeout(conn_timeout):
        conn = http_connect(node['ip'], node['port'], node['device'], part,
                            'POST', path, headers=headers)
    with Timeout(response_timeout):
        resp = conn.getresponse()
        resp.read()
    if resp.status < 200 or resp.status >= 300:
        raise ClientException(
            'Object server %s:%s direct POST %s gave status %s' %
            (node['ip'], node['port'],
             repr('/%s/%s%s' % (node['device'], part, path)), resp.status),
            http_host=node['ip'], http_port=node['port'],
            http_device=node['device'], http_status=resp.status,
            http_reason=resp.reason)
def test_object_audit_extra_data(self):
    self.auditor = auditor.AuditorWorker(self.conf)
    data = '0' * 1024
    etag = md5()
    with self.disk_file.mkstemp() as (fd, tmppath):
        os.write(fd, data)
        etag.update(data)
        etag = etag.hexdigest()
        timestamp = str(normalize_timestamp(time.time()))
        metadata = {
            'ETag': etag,
            'X-Timestamp': timestamp,
            'Content-Length': str(os.fstat(fd).st_size),
        }
        self.disk_file.put(fd, tmppath, metadata)
        pre_quarantines = self.auditor.quarantines

        self.auditor.object_audit(
            os.path.join(self.disk_file.datadir, timestamp + '.data'),
            'sda', '0')
        self.assertEquals(self.auditor.quarantines, pre_quarantines)

        os.write(fd, 'extra_data')
        self.auditor.object_audit(
            os.path.join(self.disk_file.datadir, timestamp + '.data'),
            'sda', '0')
        self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_object_audit_diff_data(self):
    self.auditor = auditor.AuditorWorker(self.conf)
    data = '0' * 1024
    etag = md5()
    timestamp = str(normalize_timestamp(time.time()))
    with self.disk_file.mkstemp() as (fd, tmppath):
        os.write(fd, data)
        etag.update(data)
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': timestamp,
            'Content-Length': str(os.fstat(fd).st_size),
        }
        self.disk_file.put(fd, tmppath, metadata)
        pre_quarantines = self.auditor.quarantines
        # remake so it will have metadata
        self.disk_file = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
                                  self.logger)

        self.auditor.object_audit(
            os.path.join(self.disk_file.datadir, timestamp + '.data'),
            'sda', '0')
        self.assertEquals(self.auditor.quarantines, pre_quarantines)
        etag = md5()
        etag.update('1' + '0' * 1023)
        etag = etag.hexdigest()
        metadata['ETag'] = etag
        write_metadata(fd, metadata)

        self.auditor.object_audit(
            os.path.join(self.disk_file.datadir, timestamp + '.data'),
            'sda', '0')
        self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_hash_suffix_multi_file_two(self):
    df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
    mkdirs(df.datadir)
    for tdiff in [1, 50, 100, 500]:
        suffs = ['.meta', '.data']
        if tdiff > 50:
            suffs.append('.ts')
        for suff in suffs:
            f = open(
                os.path.join(df.datadir,
                             normalize_timestamp(int(time.time()) - tdiff)
                             + suff),
                'wb')
            f.write('1234567890')
            f.close()

    ohash = hash_path('a', 'c', 'o')
    data_dir = ohash[-3:]
    whole_path_from = os.path.join(self.objects, '0', data_dir)
    hsh_path = os.listdir(whole_path_from)[0]
    whole_hsh_path = os.path.join(whole_path_from, hsh_path)

    object_replicator.hash_suffix(whole_path_from, 99)
    # only the meta and data should be left
    self.assertEquals(len(os.listdir(whole_hsh_path)), 2)
def test_POST_HEAD_metadata(self):
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
        headers={'X-Timestamp': normalize_timestamp(1)})
    resp = self.controller.PUT(req)
    self.assertEquals(resp.status_int, 201)
    # Set metadata header
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
        headers={'X-Timestamp': normalize_timestamp(1),
                 'X-Container-Meta-Test': 'Value'})
    resp = self.controller.POST(req)
    self.assertEquals(resp.status_int, 204)
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
    resp = self.controller.HEAD(req)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
    # Update metadata header
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
        headers={'X-Timestamp': normalize_timestamp(3),
                 'X-Container-Meta-Test': 'New Value'})
    resp = self.controller.POST(req)
    self.assertEquals(resp.status_int, 204)
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
    resp = self.controller.HEAD(req)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(resp.headers.get('x-container-meta-test'),
                      'New Value')
    # Send old update to metadata header
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
        headers={'X-Timestamp': normalize_timestamp(2),
                 'X-Container-Meta-Test': 'Old Value'})
    resp = self.controller.POST(req)
    self.assertEquals(resp.status_int, 204)
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
    resp = self.controller.HEAD(req)
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(resp.headers.get('x-container-meta-test'),
                      'New Value')
    # Remove metadata header (by setting it to empty)
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
        headers={'X-Timestamp': normalize_timestamp(4),
                 'X-Container-Meta-Test': ''})
    resp = self.controller.POST(req)
    self.assertEquals(resp.status_int, 204)
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
    resp = self.controller.HEAD(req)
    self.assertEquals(resp.status_int, 204)
    self.assert_('x-container-meta-test' not in resp.headers)
def test_repl_to_node(self):
    replicator = TestReplicator({})
    fake_node = {'ip': '127.0.0.1', 'device': 'sda1', 'port': 1000}
    fake_info = {'id': 'a', 'point': -1, 'max_row': 0, 'hash': 'b',
                 'created_at': 100, 'put_timestamp': 0,
                 'delete_timestamp': 0,
                 'metadata': {'Test': ('Value', normalize_timestamp(1))}}
    replicator._http_connect = lambda *args: ReplHttp(
        '{"id": 3, "point": -1}')
    self.assertEquals(replicator._repl_to_node(
        fake_node, FakeBroker(), '0', fake_info), True)
def direct_put_object(node, part, account, container, name, contents,
                      content_length=None, etag=None, content_type=None,
                      headers=None, conn_timeout=5, response_timeout=15,
                      resp_chunk_size=None):
    """
    Put object directly to the object server.

    :param node: node dictionary from the ring
    :param part: partition the object is on
    :param account: account name
    :param container: container name
    :param name: object name
    :param contents: a string to read object data from
    :param content_length: value to send as content-length header
    :param etag: etag of contents
    :param content_type: value to send as content-type header
    :param headers: additional headers to include in the request
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param resp_chunk_size: if defined, chunk size of data to send
    :returns: etag from the server response
    """
    # TODO: Add chunked puts
    path = '/%s/%s/%s' % (account, container, name)
    if headers is None:
        headers = {}
    if etag:
        headers['ETag'] = etag.strip('"')
    if content_length is not None:
        headers['Content-Length'] = str(content_length)
    if content_type is not None:
        headers['Content-Type'] = content_type
    else:
        headers['Content-Type'] = 'application/octet-stream'
    if not contents:
        headers['Content-Length'] = '0'
    headers['X-Timestamp'] = normalize_timestamp(time())
    with Timeout(conn_timeout):
        conn = http_connect(node['ip'], node['port'], node['device'], part,
                            'PUT', path, headers=headers)
    conn.send(contents)
    with Timeout(response_timeout):
        resp = conn.getresponse()
        resp.read()
    if resp.status < 200 or resp.status >= 300:
        raise ClientException(
            'Object server %s:%s direct PUT %s gave status %s' %
            (node['ip'], node['port'],
             repr('/%s/%s%s' % (node['device'], part, path)), resp.status),
            http_host=node['ip'], http_port=node['port'],
            http_device=node['device'], http_status=resp.status,
            http_reason=resp.reason)
    return resp.getheader('etag').strip('"')
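# The direct_* helpers above share one pattern: build the path, stamp
# X-Timestamp, talk to a single storage node, and raise ClientException on
# any non-2xx response. A hedged usage sketch follows; `ring.get_nodes()`
# mirrors the ring interface used by the proxy, but its availability here
# is an assumption, not something this section shows.
part, nodes = ring.get_nodes('a', 'c', 'o')
for node in nodes:
    try:
        etag = direct_put_object(node, part, 'a', 'c', 'o',
                                 contents='1234567890',
                                 content_type='text/plain')
    except ClientException, err:
        # err carries http_host/http_port/http_status for logging
        print 'PUT to %s:%s failed: %s' % (node['ip'], node['port'], err)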
def test_object_audit_no_meta(self):
    timestamp = str(normalize_timestamp(time.time()))
    path = os.path.join(self.disk_file.datadir, timestamp + '.data')
    mkdirs(self.disk_file.datadir)
    fp = open(path, 'w')
    fp.write('0' * 1024)
    fp.close()
    invalidate_hash(os.path.dirname(self.disk_file.datadir))
    self.auditor = auditor.AuditorWorker(self.conf)
    pre_quarantines = self.auditor.quarantines
    self.auditor.object_audit(
        os.path.join(self.disk_file.datadir, timestamp + '.data'),
        'sda', '0')
    self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_params_utf8(self):
    self.controller.PUT(Request.blank('/sda1/p/a/c',
        headers={'X-Timestamp': normalize_timestamp(1)},
        environ={'REQUEST_METHOD': 'PUT'}))
    for param in ('delimiter', 'format', 'limit', 'marker', 'path',
                  'prefix'):
        req = Request.blank('/sda1/p/a/c?%s=\xce' % param,
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 400)
        req = Request.blank('/sda1/p/a/c?%s=\xce\xa9' % param,
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assert_(resp.status_int in (204, 412), resp.status_int)
def unlinkold(self, timestamp):
    """
    Remove any older versions of the object file. Any file that has an
    older timestamp than timestamp will be deleted.

    :param timestamp: timestamp to compare with each file
    """
    timestamp = normalize_timestamp(timestamp)
    for fname in os.listdir(self.datadir):
        if fname < timestamp:
            try:
                os.unlink(os.path.join(self.datadir, fname))
            except OSError, err:    # pragma: no cover
                if err.errno != errno.ENOENT:
                    raise
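# Note: the plain string comparison in unlinkold() is safe because
# normalize_timestamp() produces fixed-width strings, so lexicographic
# order matches numeric order, and a filename that starts with an equal
# timestamp compares greater than the bare timestamp and is kept:
#
#     >>> '1253327593.48000.ts' < '1253327593.48174'     # older: deleted
#     True
#     >>> '1253327593.48174.data' < '1253327593.48174'   # current: kept
#     False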
def test_run_once_recover_from_failure(self):
    replicator = object_replicator.ObjectReplicator(
        dict(chase_dir=self.testdir, devices=self.devices,
             mount_check='false', timeout='300', stats_interval='1'))
    was_connector = object_replicator.http_connect
    try:
        object_replicator.http_connect = mock_http_connect(200)
        # Write some files into '1' and run replicate; they should be
        # moved to the other partitions and then the node should get
        # deleted.
        cur_part = '1'
        df = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o',
                      FakeLogger())
        mkdirs(df.datadir)
        f = open(os.path.join(df.datadir,
                              normalize_timestamp(time.time()) + '.data'),
                 'wb')
        f.write('1234567890')
        f.close()
        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, cur_part, data_dir)
        process_arg_checker = []
        nodes = [node for node in self.ring.get_part_nodes(int(cur_part))
                 if node['ip'] not in _ips()]
        for node in nodes:
            rsync_mod = '[%s]::object/sda/objects/%s' % (node['ip'],
                                                         cur_part)
            process_arg_checker.append(
                (0, '', ['rsync', whole_path_from, rsync_mod]))
        self.assertTrue(os.access(os.path.join(self.objects, '1',
                                               data_dir, ohash), os.F_OK))
        with _mock_process(process_arg_checker):
            replicator.run_once()
        self.assertFalse(process_errors)
        for i, result in [('0', True), ('1', False),
                          ('2', True), ('3', True)]:
            self.assertEquals(os.access(
                os.path.join(self.objects, i,
                             object_replicator.HASH_FILE), os.F_OK),
                result)
    finally:
        object_replicator.http_connect = was_connector
def test_hash_suffix_one_file(self):
    df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
    mkdirs(df.datadir)
    f = open(os.path.join(df.datadir,
                          normalize_timestamp(time.time() - 100) + '.ts'),
             'wb')
    f.write('1234567890')
    f.close()
    ohash = hash_path('a', 'c', 'o')
    data_dir = ohash[-3:]
    whole_path_from = os.path.join(self.objects, '0', data_dir)
    object_replicator.hash_suffix(whole_path_from, 101)
    self.assertEquals(len(os.listdir(self.parts['0'])), 1)

    object_replicator.hash_suffix(whole_path_from, 99)
    self.assertEquals(len(os.listdir(self.parts['0'])), 0)
def test_GET_accept_not_valid(self):
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                                'HTTP_X_TIMESTAMP': '0'})
    self.controller.PUT(req)
    req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
        headers={'X-Put-Timestamp': '1', 'X-Delete-Timestamp': '0',
                 'X-Object-Count': '0', 'X-Bytes-Used': '0',
                 'X-Timestamp': normalize_timestamp(0)})
    self.controller.PUT(req)
    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
    req.accept = 'application/xml*'
    resp = self.controller.GET(req)
    self.assertEquals(resp.status_int, 400)
    self.assertEquals(resp.body, 'bad accept header: application/xml*')
def test_get_hashes(self):
    df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
    mkdirs(df.datadir)
    with open(os.path.join(df.datadir,
                           normalize_timestamp(time.time()) + '.ts'),
              'wb') as f:
        f.write('1234567890')
    part = os.path.join(self.objects, '0')
    hashed, hashes = object_replicator.get_hashes(part)
    self.assertEquals(hashed, 1)
    self.assert_('a83' in hashes)
    hashed, hashes = object_replicator.get_hashes(part, do_listdir=True)
    self.assertEquals(hashed, 0)
    self.assert_('a83' in hashes)
    hashed, hashes = object_replicator.get_hashes(part,
                                                  recalculate=['a83'])
    self.assertEquals(hashed, 1)
    self.assert_('a83' in hashes)
def async_update(self, op, account, container, obj, host, partition,
                 contdevice, headers_out, objdevice):
    """
    Sends or saves an async update.

    :param op: operation performed (e.g. 'PUT' or 'DELETE')
    :param account: account name for the object
    :param container: container name for the object
    :param obj: object name
    :param host: host that the container is on
    :param partition: partition that the container is on
    :param contdevice: device name that the container is on
    :param headers_out: dictionary of headers to send in the container
                        request
    :param objdevice: device name that the object is in
    """
    full_path = '/%s/%s/%s' % (account, container, obj)
    if all([host, partition, contdevice]):
        try:
            with ConnectionTimeout(self.conn_timeout):
                ip, port = host.rsplit(':', 1)
                conn = http_connect(ip, port, contdevice, partition, op,
                                    full_path, headers_out)
            with Timeout(self.node_timeout):
                response = conn.getresponse()
                response.read()
                if 200 <= response.status < 300:
                    return
                else:
                    self.logger.error(_('ERROR Container update failed '
                        '(saving for async update later): %(status)d '
                        'response from %(ip)s:%(port)s/%(dev)s'),
                        {'status': response.status, 'ip': ip, 'port': port,
                         'dev': contdevice})
        except (Exception, Timeout):
            self.logger.exception(_('ERROR container update failed with '
                '%(ip)s:%(port)s/%(dev)s (saving for async update later)'),
                {'ip': ip, 'port': port, 'dev': contdevice})
    async_dir = os.path.join(self.devices, objdevice, ASYNCDIR)
    ohash = hash_path(account, container, obj)
    write_pickle(
        {'op': op, 'account': account, 'container': container,
         'obj': obj, 'headers': headers_out},
        os.path.join(async_dir, ohash[-3:], ohash + '-' +
                     normalize_timestamp(headers_out['x-timestamp'])),
        os.path.join(self.devices, objdevice, 'tmp'))
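# async_update() falls back to write_pickle() when the container server is
# unreachable. write_pickle itself is not shown in this section; a minimal
# sketch, assuming it lives in the common utils module and that renamer()
# is the usual makedirs-then-os.rename helper used elsewhere:
import cPickle as pickle
import os
from tempfile import mkstemp

PICKLE_PROTOCOL = 2  # assumption: the protocol used for async pendings


def write_pickle(obj, dest, tmp):
    # Write to a temp file on the same filesystem, fsync, then rename into
    # place so readers never observe a partially written pending file.
    fd, tmppath = mkstemp(dir=tmp, suffix='.tmp')
    try:
        os.write(fd, pickle.dumps(obj, PICKLE_PROTOCOL))
        os.fsync(fd)
        renamer(tmppath, dest)  # assumed helper: mkdirs(dirname) + rename
    finally:
        os.close(fd)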
def test_delete_auto_create(self):
    headers = {'x-timestamp': normalize_timestamp(1)}

    resp = self.controller.DELETE(Request.blank('/sda1/p/a/c/o',
        environ={'REQUEST_METHOD': 'DELETE'}, headers=dict(headers)))
    self.assertEquals(resp.status_int, 404)

    resp = self.controller.DELETE(Request.blank('/sda1/p/.a/c/o',
        environ={'REQUEST_METHOD': 'DELETE'}, headers=dict(headers)))
    self.assertEquals(resp.status_int, 204)

    resp = self.controller.DELETE(Request.blank('/sda1/p/a/.c/o',
        environ={'REQUEST_METHOD': 'DELETE'}, headers=dict(headers)))
    self.assertEquals(resp.status_int, 404)

    resp = self.controller.DELETE(Request.blank('/sda1/p/a/.c/.o',
        environ={'REQUEST_METHOD': 'DELETE'}, headers=dict(headers)))
    self.assertEquals(resp.status_int, 404)
def test_normalize_timestamp(self):
    """Test chase.common.utils.normalize_timestamp"""
    self.assertEquals(utils.normalize_timestamp('1253327593.48174'),
                      "1253327593.48174")
    self.assertEquals(utils.normalize_timestamp(1253327593.48174),
                      "1253327593.48174")
    self.assertEquals(utils.normalize_timestamp('1253327593.48'),
                      "1253327593.48000")
    self.assertEquals(utils.normalize_timestamp(1253327593.48),
                      "1253327593.48000")
    self.assertEquals(utils.normalize_timestamp('253327593.48'),
                      "0253327593.48000")
    self.assertEquals(utils.normalize_timestamp(253327593.48),
                      "0253327593.48000")
    self.assertEquals(utils.normalize_timestamp('1253327593'),
                      "1253327593.00000")
    self.assertEquals(utils.normalize_timestamp(1253327593),
                      "1253327593.00000")
    self.assertRaises(ValueError, utils.normalize_timestamp, '')
    self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
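# The assertions above pin down the normalized form: ten integer digits
# (zero-padded), a dot, and exactly five fractional digits, with ValueError
# propagating from float() on bad input. A minimal implementation that is
# consistent with these tests (the actual body in chase.common.utils may
# differ in detail):
def normalize_timestamp(timestamp):
    # 16 characters total, so lexicographic order matches numeric order
    return "%016.05f" % float(timestamp)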
def test_hash_suffix_multi_file_one(self):
    df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
    mkdirs(df.datadir)
    for tdiff in [1, 50, 100, 500]:
        for suff in ['.meta', '.data', '.ts']:
            f = open(
                os.path.join(df.datadir,
                             normalize_timestamp(int(time.time()) - tdiff)
                             + suff),
                'wb')
            f.write('1234567890')
            f.close()

    ohash = hash_path('a', 'c', 'o')
    data_dir = ohash[-3:]
    whole_path_from = os.path.join(self.objects, '0', data_dir)
    hsh_path = os.listdir(whole_path_from)[0]
    whole_hsh_path = os.path.join(whole_path_from, hsh_path)

    object_replicator.hash_suffix(whole_path_from, 99)
    # only the tombstone should be left
    self.assertEquals(len(os.listdir(whole_hsh_path)), 1)
def put(self, fd, tmppath, metadata, extension='.data'):
    """
    Finalize writing the file on disk and rename it from the temp file
    to the real location. This should be called after the data has been
    written to the temp file.

    :param fd: file descriptor of the temp file
    :param tmppath: path to the temporary file being used
    :param metadata: dictionary of metadata to be written
    :param extension: extension to be used when making the file
    """
    metadata['name'] = self.name
    timestamp = normalize_timestamp(metadata['X-Timestamp'])
    write_metadata(fd, metadata)
    if 'Content-Length' in metadata:
        self.drop_cache(fd, 0, int(metadata['Content-Length']))
    tpool.execute(os.fsync, fd)
    invalidate_hash(os.path.dirname(self.datadir))
    renamer(tmppath, os.path.join(self.datadir, timestamp + extension))
    self.metadata = metadata
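# Condensed usage sketch of DiskFile.put(), following the pattern the
# auditor tests above use; `disk_file` is assumed to be a DiskFile
# instance, with md5/normalize_timestamp imported as elsewhere in this
# section.
data = '0' * 1024
with disk_file.mkstemp() as (fd, tmppath):
    os.write(fd, data)                    # write object data to temp file
    metadata = {
        'ETag': md5(data).hexdigest(),    # checksum the auditor verifies
        'X-Timestamp': normalize_timestamp(time.time()),
        'Content-Length': str(len(data)),
    }
    disk_file.put(fd, tmppath, metadata)  # write metadata, fsync, rename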
def test_object_run_once_no_sda(self):
    self.auditor = auditor.AuditorWorker(self.conf)
    timestamp = str(normalize_timestamp(time.time()))
    pre_quarantines = self.auditor.quarantines
    data = '0' * 1024
    etag = md5()
    with self.disk_file.mkstemp() as (fd, tmppath):
        os.write(fd, data)
        etag.update(data)
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': timestamp,
            'Content-Length': str(os.fstat(fd).st_size),
        }
        self.disk_file.put(fd, tmppath, metadata)
        self.disk_file.close()
        os.write(fd, 'extra_data')
    self.auditor.audit_all_objects()
    self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_put_auto_create(self):
    headers = {'x-timestamp': normalize_timestamp(1),
               'x-size': '0',
               'x-content-type': 'text/plain',
               'x-etag': 'd41d8cd98f00b204e9800998ecf8427e'}

    resp = self.controller.PUT(Request.blank('/sda1/p/a/c/o',
        environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers)))
    self.assertEquals(resp.status_int, 404)

    resp = self.controller.PUT(Request.blank('/sda1/p/.a/c/o',
        environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers)))
    self.assertEquals(resp.status_int, 201)

    resp = self.controller.PUT(Request.blank('/sda1/p/a/.c/o',
        environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers)))
    self.assertEquals(resp.status_int, 404)

    resp = self.controller.PUT(Request.blank('/sda1/p/a/.c/.o',
        environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers)))
    self.assertEquals(resp.status_int, 404)
def test_object_sweep(self):
    prefix_dir = os.path.join(self.sda1, ASYNCDIR, 'abc')
    mkpath(prefix_dir)

    objects = {
        'a': [1089.3, 18.37, 12.83, 1.3],
        'b': [49.4, 49.3, 49.2, 49.1],
        'c': [109984.123],
    }

    expected = set()
    for o, timestamps in objects.iteritems():
        ohash = hash_path('account', 'container', o)
        for t in timestamps:
            o_path = os.path.join(prefix_dir,
                                  ohash + '-' + normalize_timestamp(t))
            if t == timestamps[0]:
                expected.add(o_path)
            write_pickle({}, o_path)

    seen = set()

    class MockObjectUpdater(object_updater.ObjectUpdater):
        def process_object_update(self, update_path, device):
            seen.add(update_path)
            os.unlink(update_path)

    cu = MockObjectUpdater({
        'devices': self.devices_dir,
        'mount_check': 'false',
        'chase_dir': self.testdir,
        'interval': '1',
        'concurrency': '1',
        'node_timeout': '5',
    })
    cu.object_sweep(self.sda1)
    self.assert_(not os.path.exists(prefix_dir))
    self.assertEqual(expected, seen)
def test_run_once(self):
    cu = container_updater.ContainerUpdater({
        'devices': self.devices_dir,
        'mount_check': 'false',
        'chase_dir': self.testdir,
        'interval': '1',
        'concurrency': '1',
        'node_timeout': '15',
        'account_suppression_time': 0,
    })
    cu.run_once()
    containers_dir = os.path.join(self.sda1, container_server.DATADIR)
    os.mkdir(containers_dir)
    cu.run_once()
    self.assert_(os.path.exists(containers_dir))

    subdir = os.path.join(containers_dir, 'subdir')
    os.mkdir(subdir)
    cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
                         container='c')
    cb.initialize(normalize_timestamp(1))
    cu.run_once()
    info = cb.get_info()
    self.assertEquals(info['object_count'], 0)
    self.assertEquals(info['bytes_used'], 0)
    self.assertEquals(info['reported_object_count'], 0)
    self.assertEquals(info['reported_bytes_used'], 0)

    cb.put_object('o', normalize_timestamp(2), 3, 'text/plain',
                  '68b329da9893e34099c7d8ad5cb9c940')
    cu.run_once()
    info = cb.get_info()
    self.assertEquals(info['object_count'], 1)
    self.assertEquals(info['bytes_used'], 3)
    self.assertEquals(info['reported_object_count'], 0)
    self.assertEquals(info['reported_bytes_used'], 0)

    def accept(sock, addr, return_code):
        try:
            with Timeout(3):
                inc = sock.makefile('rb')
                out = sock.makefile('wb')
                out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                          return_code)
                out.flush()
                self.assertEquals(inc.readline(),
                                  'PUT /sda1/0/a/c HTTP/1.1\r\n')
                headers = {}
                line = inc.readline()
                while line and line != '\r\n':
                    headers[line.split(':')[0].lower()] = \
                        line.split(':')[1].strip()
                    line = inc.readline()
                self.assert_('x-put-timestamp' in headers)
                self.assert_('x-delete-timestamp' in headers)
                self.assert_('x-object-count' in headers)
                self.assert_('x-bytes-used' in headers)
        except BaseException, err:
            import traceback
            traceback.print_exc()
            return err
        return None
            drive, part, account, container = split_path(
                unquote(req.path), 3, 4)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        broker = self._get_account_broker(drive, part, account)
        if container:   # put account container
            if 'x-trans-id' in req.headers:
                broker.pending_timeout = 3
            if account.startswith(self.auto_create_account_prefix) and \
                    not os.path.exists(broker.db_file):
                broker.initialize(normalize_timestamp(
                    req.headers.get('x-timestamp') or time.time()))
            if req.headers.get('x-account-override-deleted', 'no').lower() \
                    != 'yes' and broker.is_deleted():
                return HTTPNotFound(request=req)
            broker.put_container(container, req.headers['x-put-timestamp'],
                                 req.headers['x-delete-timestamp'],
                                 req.headers['x-object-count'],
                                 req.headers['x-bytes-used'])
            if req.headers['x-delete-timestamp'] > \
                    req.headers['x-put-timestamp']:
                return HTTPNoContent(request=req)
            else:
                return HTTPCreated(request=req)
        else:   # put account
            timestamp = normalize_timestamp(req.headers['x-timestamp'])
            if not os.path.exists(broker.db_file):
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if 'x-timestamp' not in req.headers or \
                not check_float(req.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=req,
                                  content_type='text/plain')
        if 'x-container-sync-to' in req.headers:
            err = validate_sync_to(req.headers['x-container-sync-to'],
                                   self.allowed_sync_hosts)
            if err:
                return HTTPBadRequest(err)
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        timestamp = normalize_timestamp(req.headers['x-timestamp'])
        broker = self._get_container_broker(drive, part, account, container)
        if obj:     # put container object
            if account.startswith(self.auto_create_account_prefix) and \
                    not os.path.exists(broker.db_file):
                broker.initialize(timestamp)
            if not os.path.exists(broker.db_file):
                return HTTPNotFound()
            broker.put_object(obj, timestamp, int(req.headers['x-size']),
                              req.headers['x-content-type'],
                              req.headers['x-etag'])
            return HTTPCreated(request=req)
        else:   # put container
            if not os.path.exists(broker.db_file):
                broker.initialize(timestamp)
                created = True
class ContainerController(object):
    """WSGI Controller for the container server."""

    # Ensure these are all lowercase
    save_headers = ['x-container-read', 'x-container-write',
                    'x-container-sync-key', 'x-container-sync-to']

    def __init__(self, conf):
        self.logger = get_logger(conf, log_route='container-server')
        self.root = conf.get('devices', '/srv/node/')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        self.node_timeout = int(conf.get('node_timeout', 3))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.allowed_sync_hosts = [h.strip()
            for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
            if h.strip()]
        self.replicator_rpc = ReplicatorRpc(self.root, DATADIR,
            ContainerBroker, self.mount_check, logger=self.logger)
        self.auto_create_account_prefix = \
            conf.get('auto_create_account_prefix') or '.'

    def _get_container_broker(self, drive, part, account, container):
        """
        Get a DB broker for the container.

        :param drive: drive that holds the container
        :param part: partition the container is in
        :param account: account name
        :param container: container name
        :returns: ContainerBroker object
        """
        hsh = hash_path(account, container)
        db_dir = storage_directory(DATADIR, part, hsh)
        db_path = os.path.join(self.root, drive, db_dir, hsh + '.db')
        return ContainerBroker(db_path, account=account,
                               container=container, logger=self.logger)

    def account_update(self, req, account, container, broker):
        """
        Update the account server with latest container info.

        :param req: webob.Request object
        :param account: account name
        :param container: container name
        :param broker: container DB broker object
        :returns: HTTPNotFound response object if the account request
                  returned a 404 error code, otherwise None
        """
        account_host = req.headers.get('X-Account-Host')
        account_partition = req.headers.get('X-Account-Partition')
        account_device = req.headers.get('X-Account-Device')
        if all([account_host, account_partition, account_device]):
            account_ip, account_port = account_host.rsplit(':', 1)
            new_path = '/' + '/'.join([account, container])
            info = broker.get_info()
            account_headers = {
                'x-put-timestamp': info['put_timestamp'],
                'x-delete-timestamp': info['delete_timestamp'],
                'x-object-count': info['object_count'],
                'x-bytes-used': info['bytes_used'],
                'x-trans-id': req.headers.get('x-trans-id', '-')}
            if req.headers.get('x-account-override-deleted',
                               'no').lower() == 'yes':
                account_headers['x-account-override-deleted'] = 'yes'
            try:
                with ConnectionTimeout(self.conn_timeout):
                    conn = http_connect(account_ip, account_port,
                        account_device, account_partition, 'PUT', new_path,
                        account_headers)
                with Timeout(self.node_timeout):
                    account_response = conn.getresponse()
                    account_response.read()
                    if account_response.status == 404:
                        return HTTPNotFound(request=req)
                    elif account_response.status < 200 or \
                            account_response.status > 299:
                        self.logger.error(_('ERROR Account update failed '
                            'with %(ip)s:%(port)s/%(device)s (will retry '
                            'later): Response %(status)s %(reason)s'),
                            {'ip': account_ip, 'port': account_port,
                             'device': account_device,
                             'status': account_response.status,
                             'reason': account_response.reason})
            except (Exception, Timeout):
                self.logger.exception(_('ERROR account update failed with '
                    '%(ip)s:%(port)s/%(device)s (will retry later)'),
                    {'ip': account_ip, 'port': account_port,
                     'device': account_device})
        return None

    def DELETE(self, req):
        """Handle HTTP DELETE request."""
        try:
            drive, part, account, container, obj = split_path(
                unquote(req.path), 4, 5, True)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if 'x-timestamp' not in req.headers or \
                not check_float(req.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=req,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        broker = self._get_container_broker(drive, part, account, container)
        if account.startswith(self.auto_create_account_prefix) and obj and \
                not os.path.exists(broker.db_file):
            broker.initialize(normalize_timestamp(
                req.headers.get('x-timestamp') or time.time()))
        if not os.path.exists(broker.db_file):
            return HTTPNotFound()
        if obj:     # delete object
            broker.delete_object(obj, req.headers.get('x-timestamp'))
            return HTTPNoContent(request=req)
        else:
            # delete container
            if not broker.empty():
                return HTTPConflict(request=req)
            existed = float(broker.get_info()['put_timestamp']) and \
                not broker.is_deleted()
            broker.delete_db(req.headers['X-Timestamp'])
            if not broker.is_deleted():
                return HTTPConflict(request=req)
            resp = self.account_update(req, account, container, broker)
            if resp:
                return resp
            if existed:
                return HTTPNoContent(request=req)
            return HTTPNotFound()
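# Minimal sketch of driving the DELETE handler directly, in the style of
# test_delete_auto_create above; `controller` is an assumed
# ContainerController instance with an 'sda1' device available.
req = Request.blank('/sda1/p/a/c',
                    environ={'REQUEST_METHOD': 'DELETE'},
                    headers={'X-Timestamp': normalize_timestamp(1)})
resp = controller.DELETE(req)
# Expected: 404 if the container DB does not exist, 409 (conflict) if it
# still holds objects, 204 once the delete succeeds.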