def test_DELETE(self):
    """Test swift.object_server.ObjectController.DELETE."""
    # DELETE on a container path (no object component) is a bad request.
    req = Request.blank('/sda1/p/a/c',
                        environ={'REQUEST_METHOD': 'DELETE'})
    resp = self.object_controller.DELETE(req)
    self.assertEquals(resp.status_int, 400)

    # DELETE without an X-Timestamp header is also a bad request.
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'DELETE'})
    resp = self.object_controller.DELETE(req)
    self.assertEquals(resp.status_int, 400)

    # DELETE of an object that was never PUT returns 404.
    timestamp = normalize_timestamp(time())
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'DELETE'},
                        headers={'X-Timestamp': timestamp})
    resp = self.object_controller.DELETE(req)
    self.assertEquals(resp.status_int, 404)

    # Create the object so the following DELETEs have something to act on.
    sleep(.00001)
    timestamp = normalize_timestamp(time())
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'PUT'},
                        headers={
                            'X-Timestamp': timestamp,
                            'Content-Type': 'application/octet-stream',
                            'Content-Length': '4',
                        })
    req.body = 'test'
    resp = self.object_controller.PUT(req)
    self.assertEquals(resp.status_int, 201)

    # DELETE with a timestamp older than the PUT: 204 and a tombstone
    # file at the older timestamp.
    timestamp = normalize_timestamp(float(timestamp) - 1)
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'DELETE'},
                        headers={'X-Timestamp': timestamp})
    resp = self.object_controller.DELETE(req)
    self.assertEquals(resp.status_int, 204)
    objfile = os.path.join(
        self.testdir, 'sda1',
        storage_directory(object_server.DATADIR, 'p',
                          hash_path('a', 'c', 'o')),
        timestamp + '.ts')
    self.assert_(os.path.isfile(objfile))

    # DELETE with a timestamp newer than the PUT: 204 and a tombstone
    # file at the newer timestamp.
    sleep(.00001)
    timestamp = normalize_timestamp(time())
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'DELETE'},
                        headers={'X-Timestamp': timestamp})
    resp = self.object_controller.DELETE(req)
    self.assertEquals(resp.status_int, 204)
    objfile = os.path.join(
        self.testdir, 'sda1',
        storage_directory(object_server.DATADIR, 'p',
                          hash_path('a', 'c', 'o')),
        timestamp + '.ts')
    self.assert_(os.path.isfile(objfile))
def test_PUT_overwrite(self):
    """A second PUT replaces both the object data and its metadata."""
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'PUT'},
                        headers={'X-Timestamp': normalize_timestamp(time()),
                                 'Content-Length': '6',
                                 'Content-Type': 'application/octet-stream'})
    req.body = 'VERIFY'
    resp = self.object_controller.PUT(req)
    self.assertEquals(resp.status_int, 201)

    # Overwrite at a later timestamp with different body and headers.
    sleep(.00001)
    timestamp = normalize_timestamp(time())
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'PUT'},
                        headers={'X-Timestamp': timestamp,
                                 'Content-Type': 'text/plain',
                                 'Content-Encoding': 'gzip'})
    req.body = 'VERIFY TWO'
    resp = self.object_controller.PUT(req)
    self.assertEquals(resp.status_int, 201)

    # The on-disk .data file must reflect only the second PUT.
    objfile = os.path.join(
        self.testdir, 'sda1',
        storage_directory(object_server.DATADIR, 'p',
                          hash_path('a', 'c', 'o')),
        timestamp + '.data')
    self.assert_(os.path.isfile(objfile))
    self.assertEquals(open(objfile).read(), 'VERIFY TWO')
    self.assertEquals(
        pickle.loads(getxattr(objfile, object_server.METADATA_KEY)),
        {'X-Timestamp': timestamp,
         'Content-Length': '10',
         'ETag': 'b381a4c5dab1eaa1eb9711fa647cd039',
         'Content-Type': 'text/plain',
         'name': '/a/c/o',
         'Content-Encoding': 'gzip'})
def __init__(self, path, device, partition, account, container, obj,
             logger, disk_chunk_size=65536,
             bytes_per_sync=(512 * 1024 * 1024),
             iter_hook=None, threadpool=None, obj_dir='objects',
             mount_check=False):
    """
    Set up path and per-request state for an on-disk object file.

    :param path: devices root directory
    :param device: device name holding the object
    :param partition: partition the object hashes to
    :param account: account name
    :param container: container name
    :param obj: object name
    :param logger: logger instance to use
    :param disk_chunk_size: chunk size used when reading from disk
    :param bytes_per_sync: stored sync-interval setting, in bytes
    :param iter_hook: optional callable stored for iteration hooks
    :param threadpool: optional ThreadPool for blocking disk operations
    :param obj_dir: top-level directory name for object data
    :param mount_check: when True, verify the device is mounted first
    :raises DiskFileDeviceUnavailable: if mount_check is on and fails
    """
    if mount_check and not check_mount(path, device):
        raise DiskFileDeviceUnavailable()
    self.disk_chunk_size = disk_chunk_size
    self.bytes_per_sync = bytes_per_sync
    self.iter_hook = iter_hook
    self.name = '/' + '/'.join((account, container, obj))
    name_hash = hash_path(account, container, obj)
    self.datadir = join(
        path, device, storage_directory(obj_dir, partition, name_hash))
    self.device_path = join(path, device)
    self.tmpdir = join(path, device, 'tmp')
    self.logger = logger
    self._metadata = None
    self.data_file = None
    self._data_file_size = None
    self.fp = None
    self.iter_etag = None
    self.started_at_0 = False
    self.read_to_eof = False
    self.quarantined_dir = None
    self.suppress_file_closing = False
    self._verify_close = False
    self.threadpool = threadpool or ThreadPool(nthreads=0)
    # FIXME(clayg): this attribute is set after open and affects the
    # behavior of the class (i.e. public interface)
    self.keep_cache = False
def setup_bad_zero_byte(self, with_ts=False):
    """
    Create a zero-byte object whose metadata claims Content-Length 10,
    for auditor tests; optionally also create an empty tombstone file.

    :param with_ts: when True, also create a "99999.ts" tombstone
    :returns: the data file path, or the tombstone path when no data
              file exists
    """
    self.auditor = auditor.ObjectAuditor(self.conf)
    self.auditor.log_time = 0
    ts_file_path = ""
    if with_ts:
        name_hash = hash_path("a", "c", "o")
        dir_path = os.path.join(
            self.devices, "sda",
            storage_directory(DATADIR, "0", name_hash))
        ts_file_path = os.path.join(dir_path, "99999.ts")
        if not os.path.exists(dir_path):
            mkdirs(dir_path)
        fp = open(ts_file_path, "w")
        fp.close()
    etag = md5()
    with self.disk_file.mkstemp() as (fd, tmppath):
        etag = etag.hexdigest()
        metadata = {
            "ETag": etag,
            "X-Timestamp": str(normalize_timestamp(time.time())),
            "Content-Length": 10,
        }
        self.disk_file.put(fd, tmppath, metadata)
        # Rewrite metadata with a fresh empty-content ETag so the
        # on-disk state is inconsistent with the declared length.
        etag = md5()
        etag = etag.hexdigest()
        metadata["ETag"] = etag
        write_metadata(fd, metadata)
    if self.disk_file.data_file:
        return self.disk_file.data_file
    return ts_file_path
def print_ring_locations(ring, datadir, account, container=None): """ print out ring locations of specified type :param ring: ring instance :param datadir: high level directory to store account/container/objects :param acount: account name :param container: container name """ if ring is None or datadir is None or account is None: raise ValueError("None type") storage_type = "account" if container: storage_type = "container" try: part, nodes = ring.get_nodes(account, container, None) except (ValueError, AttributeError): raise ValueError("Ring error") else: path_hash = hash_path(account, container, None) print "\nRing locations:" for node in nodes: print ( " %s:%s - /srv/node/%s/%s/%s.db" % (node["ip"], node["port"], node["device"], storage_directory(datadir, part, path_hash), path_hash) ) print "\nnote: /srv/node is used as default value of `devices`, the " "real value is set in the %s config file on each storage node." % storage_type
def __init__(self, path, device, partition, account, container, obj,
             logger, keep_data_fp=False, disk_chunk_size=65536,
             uid=DEFAULT_UID, gid=DEFAULT_GID):
    """
    Open an on-disk object whose name may contain '/' components and
    whose data lives under the container directory; reads, creates and
    validates the object's metadata on open.
    """
    self.disk_chunk_size = disk_chunk_size
    # Don't support obj_name ending/beginning with '/', like /a, a/, /a/b/
    obj = obj.strip('/')
    if '/' in obj:
        self.obj_path, self.obj = obj.rsplit('/', 1)
    else:
        self.obj_path = ''
        self.obj = obj
    if self.obj_path:
        self.name = '/'.join((container, self.obj_path))
    else:
        self.name = container
    # Absolute path for obj directory.
    self.datadir = os.path.join(
        path, device, storage_directory(DATADIR, partition, self.name))
    self.device_path = os.path.join(path, device)
    self.container_path = os.path.join(path, device, container)
    self.tmpdir = os.path.join(path, device, 'tmp')
    self.logger = logger
    self.metadata = {}
    self.meta_file = None
    self.data_file = None
    self.fp = None
    self.iter_etag = None
    self.started_at_0 = False
    self.read_to_eof = False
    self.quarantined_dir = None
    self.keep_cache = False
    self.is_dir = False
    self.is_valid = True
    self.uid = int(uid)
    self.gid = int(gid)
    if not os.path.exists(self.datadir + '/' + self.obj):
        return
    self.data_file = os.path.join(self.datadir, self.obj)
    self.metadata = read_metadata(self.datadir + '/' + self.obj)
    if not self.metadata:
        # No stored metadata yet; create it, then re-read.
        create_object_metadata(self.datadir + '/' + self.obj)
        self.metadata = read_metadata(self.datadir + '/' + self.obj)
    if not validate_object(self.metadata):
        self.logger.error('Metadata validation failed %s %s' %
                          (self.data_file, self.metadata))
        self.metadata = {}
        self.is_valid = False
        self.data_file = None
        return
    if os.path.isdir(self.datadir + '/' + self.obj):
        self.is_dir = True
    else:
        self.fp = do_open(self.data_file, 'rb')
        if not keep_data_fp:
            self.close(verify_file=False)
def get_reconciler_broker(self, timestamp):
    """
    Get a local instance of the reconciler container broker that is
    appropriate to enqueue the given timestamp.

    :param timestamp: the timestamp of the row to be enqueued
    :returns: a local reconciler broker
    :raises DeviceUnavailable: when no local handoff device is found
    """
    container = get_reconciler_container_name(timestamp)
    # Return a cached broker when one was already built for this container.
    if self.reconciler_containers and \
            container in self.reconciler_containers:
        return self.reconciler_containers[container][1]
    account = MISPLACED_OBJECTS_ACCOUNT
    part = self.ring.get_part(account, container)
    node = self.find_local_handoff_for_part(part)
    if not node:
        raise DeviceUnavailable(
            'No mounted devices found suitable to Handoff reconciler '
            'container %s in partition %s' % (container, part))
    hsh = hash_path(account, container)
    db_dir = storage_directory(DATADIR, part, hsh)
    db_path = os.path.join(self.root, node['device'], db_dir, hsh + '.db')
    broker = ContainerBroker(db_path, account=account, container=container)
    if not os.path.exists(broker.db_file):
        try:
            broker.initialize(timestamp, 0)
        except DatabaseAlreadyExists:
            # Another worker created it first; that is fine.
            pass
    if self.reconciler_containers is not None:
        self.reconciler_containers[container] = part, broker, node['id']
    return broker
def setup_bad_zero_byte(self, with_ts=False):
    """
    Create a zero-byte object whose metadata claims Content-Length 10,
    for auditor tests; optionally also create an empty tombstone file.

    :param with_ts: when True, also create a '99999.ts' tombstone
    :returns: the data file path, or the tombstone path when no data
              file exists
    """
    self.auditor = auditor.ObjectAuditor(self.conf)
    self.auditor.log_time = 0
    ts_file_path = ''
    if with_ts:
        name_hash = hash_path('a', 'c', 'o')
        dir_path = os.path.join(
            self.devices, 'sda',
            storage_directory(DATADIR, '0', name_hash))
        ts_file_path = os.path.join(dir_path, '99999.ts')
        if not os.path.exists(dir_path):
            mkdirs(dir_path)
        fp = open(ts_file_path, 'w')
        fp.close()
    etag = md5()
    with self.disk_file.mkstemp() as (fd, tmppath):
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': str(normalize_timestamp(time.time())),
            'Content-Length': 10,
        }
        self.disk_file.put(fd, tmppath, metadata)
        # Rewrite metadata with a fresh empty-content ETag so the
        # on-disk state is inconsistent with the declared length.
        etag = md5()
        etag = etag.hexdigest()
        metadata['ETag'] = etag
        write_metadata(fd, metadata)
    if self.disk_file.data_file:
        return self.disk_file.data_file
    return ts_file_path
def _get_account_broker(self, drive, part, account, **kwargs):
    """
    Build an AccountBroker for the account DB on the given drive.

    :param drive: drive that holds the account DB
    :param part: partition the account hashes to
    :param account: account name
    :returns: AccountBroker object
    """
    hsh = hash_path(account)
    db_dir = storage_directory(DATADIR, part, hsh)
    db_path = os.path.join(self.root, drive, db_dir, hsh + '.db')
    kwargs.setdefault('account', account)
    kwargs.setdefault('logger', self.logger)
    return AccountBroker(db_path, **kwargs)
def dispatch(self, replicate_args, args):
    """
    Dispatch a replication RPC to the named handler method.

    :param replicate_args: (drive, partition, hash) from the request
    :param args: list whose first element is the operation name; the
                 rest is handed to the handler
    :returns: an HTTP response object
    """
    if not hasattr(args, 'pop'):
        return HTTPBadRequest(body='Invalid object type')
    op = args.pop(0)
    drive, partition, hsh = replicate_args
    try:
        dev_path = check_drive(self.root, drive, self.mount_check)
    except ValueError:
        return Response(status='507 %s is not mounted' % drive)
    db_file = os.path.join(
        dev_path, storage_directory(self.datadir, partition, hsh),
        hsh + '.db')
    if op == 'rsync_then_merge':
        return self.rsync_then_merge(drive, db_file, args)
    if op == 'complete_rsync':
        return self.complete_rsync(drive, db_file, args)
    else:
        # someone might be about to rsync a db to us,
        # make sure there's a tmp dir to receive it.
        mkdirs(os.path.join(self.root, drive, 'tmp'))
        if not self._db_file_exists(db_file):
            return HTTPNotFound()
        return getattr(self, op)(
            self.broker_class(db_file, logger=self.logger), args)
def _get_account_broker(self, drive, part, account, **kwargs):
    """
    Build an AccountBroker for the account DB on the given drive.

    :param drive: drive that holds the account DB
    :param part: partition the account hashes to
    :param account: account name
    :returns: AccountBroker object
    """
    hsh = hash_path(account)
    db_dir = storage_directory(DATADIR, part, hsh)
    db_path = os.path.join(self.root, drive, db_dir, hsh + ".db")
    kwargs.setdefault("account", account)
    kwargs.setdefault("logger", self.logger)
    return AccountBroker(db_path, **kwargs)
def test_PUT_user_metadata(self):
    """PUT with X-Object-Meta-* headers persists them in the metadata."""
    timestamp = normalize_timestamp(time())
    req = Request.blank('/sda1/p/a/c/o',
                        environ={'REQUEST_METHOD': 'PUT'},
                        headers={'X-Timestamp': timestamp,
                                 'Content-Type': 'text/plain',
                                 'ETag': 'b114ab7b90d9ccac4bd5d99cc7ebb568',
                                 'X-Object-Meta-1': 'One',
                                 'X-Object-Meta-Two': 'Two'})
    req.body = 'VERIFY THREE'
    resp = self.object_controller.PUT(req)
    self.assertEquals(resp.status_int, 201)

    # The stored data and pickled xattr metadata must carry the user
    # metadata headers through unchanged.
    objfile = os.path.join(
        self.testdir, 'sda1',
        storage_directory(object_server.DATADIR, 'p',
                          hash_path('a', 'c', 'o')),
        timestamp + '.data')
    self.assert_(os.path.isfile(objfile))
    self.assertEquals(open(objfile).read(), 'VERIFY THREE')
    self.assertEquals(
        pickle.loads(getxattr(objfile, object_server.METADATA_KEY)),
        {'X-Timestamp': timestamp,
         'Content-Length': '12',
         'ETag': 'b114ab7b90d9ccac4bd5d99cc7ebb568',
         'Content-Type': 'text/plain',
         'name': '/a/c/o',
         'X-Object-Meta-1': 'One',
         'X-Object-Meta-Two': 'Two'})
def _get_container_broker(self, drive, part, account, container):
    """
    Get a DB broker for the container.

    :param drive: drive that holds the container
    :param part: partition the container is in
    :param account: account name
    :param container: container name
    :returns: DiskDir when a filesystem object is configured,
              otherwise a ContainerBroker object
    """
    if self.fs_object:
        return DiskDir(self.root, drive, part, account, container,
                       self.logger, fs_object=self.fs_object)
    hsh = hash_path(account, container)
    db_dir = storage_directory(DATADIR, part, hsh)
    db_path = os.path.join(self.root, drive, db_dir, hsh + '.db')
    return ContainerBroker(db_path, account=account, container=container,
                           logger=self.logger)
def print_ring_locations(ring, datadir, account, container=None): """ print out ring locations of specified type :param ring: ring instance :param datadir: high level directory to store account/container/objects :param account: account name :param container: container name """ if ring is None or datadir is None or account is None: raise ValueError('None type') storage_type = 'account' if container: storage_type = 'container' try: part, nodes = ring.get_nodes(account, container, None) except (ValueError, AttributeError): raise ValueError('Ring error') else: path_hash = hash_path(account, container, None) print '\nRing locations:' for node in nodes: print (' %s:%s - /srv/node/%s/%s/%s.db' % (node['ip'], node['port'], node['device'], storage_directory(datadir, part, path_hash), path_hash)) print '\nnote: /srv/node is used as default value of `devices`, the ' \ 'real value is set in the %s config file on each storage node.' % \ storage_type
def print_ring_locations(ring, datadir, account, container=None): """ print out ring locations of specified type :param ring: ring instance :param datadir: high level directory to store account/container/objects :param acount: account name :param container: container name """ if ring is None or datadir is None or account is None: raise ValueError('None type') storage_type = 'account' if container: storage_type = 'container' try: part, nodes = ring.get_nodes(account, container, None) except (ValueError, AttributeError): raise ValueError('Ring error') else: path_hash = hash_path(account, container, None) print '\nRing locations:' for node in nodes: print(' %s:%s - /srv/node/%s/%s/%s.db' % (node['ip'], node['port'], node['device'], storage_directory(datadir, part, path_hash), path_hash)) print '\nnote: /srv/node is used as default value of `devices`, the ' \ 'real value is set in the %s config file on each storage node.' % \ storage_type
def setup_bad_zero_byte(self, with_ts=False):
    """
    Create a zero-byte object whose metadata claims Content-Length 10,
    for auditor tests; optionally also create a tombstone with metadata.

    :param with_ts: when True, also create a '99999.ts' tombstone
    :returns: the tombstone path ('' when with_ts is False)
    """
    self.auditor = auditor.ObjectAuditor(self.conf)
    self.auditor.log_time = 0
    ts_file_path = ''
    if with_ts:
        name_hash = hash_path('a', 'c', 'o')
        dir_path = os.path.join(
            self.devices, 'sda',
            storage_directory(get_data_dir(0), '0', name_hash))
        ts_file_path = os.path.join(dir_path, '99999.ts')
        if not os.path.exists(dir_path):
            mkdirs(dir_path)
        fp = open(ts_file_path, 'w')
        write_metadata(fp, {'X-Timestamp': '99999', 'name': '/a/c/o'})
        fp.close()
    etag = md5()
    with self.disk_file.create() as writer:
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': str(normalize_timestamp(time.time())),
            'Content-Length': 10,
        }
        writer.put(metadata)
        # Rewrite metadata with a fresh empty-content ETag so the
        # on-disk state is inconsistent with the declared length.
        etag = md5()
        etag = etag.hexdigest()
        metadata['ETag'] = etag
        write_metadata(writer._fd, metadata)
    return ts_file_path
def tmp_dir(self, pool, partition, name_hash):
    """
    Return the tmp directory path for an object, chosen by whether a
    filesystem exists per object, per partition, or per service dir.
    """
    if self.fs_per_obj:
        return os.path.join(
            self.root, pool,
            storage_directory(self.srvdir, partition, name_hash), 'tmp')
    elif self.fs_per_part:
        return os.path.join(self.root, pool, self.srvdir, partition, 'tmp')
    else:
        return os.path.join(self.root, pool, self.srvdir, 'tmp')
def get_account_broker(drive, part, account): DATADIR = "accounts" root = os.getcwd() hsh = hash_path(account) db_dir = storage_directory(DATADIR, part, hsh) db_path = os.path.join(root, drive, db_dir, hsh + ".db") print root, hsh, db_dir, db_path return AccountBroker(db_path, account=account, logger=None)
def get_account_broker(drive, part, account): DATADIR = 'accounts' root = os.getcwd() hsh = hash_path(account) db_dir = storage_directory(DATADIR, part, hsh) db_path = os.path.join(root, drive, db_dir, hsh + '.db') print root, hsh, db_dir, db_path return AccountBroker(db_path, account=account, logger=None)
def _get_account_broker(self, drive, part, account):
    """
    Return a broker for the account: a DiskAccount when a filesystem
    object is configured, otherwise a sqlite-backed AccountBroker.
    """
    if self.fs_object:
        return DiskAccount(self.root, account, self.fs_object)
    hsh = hash_path(account)
    db_dir = storage_directory(DATADIR, part, hsh)
    db_path = os.path.join(self.root, drive, db_dir, hsh + '.db')
    return AccountBroker(db_path, account=account, logger=self.logger)
def __init__(self, path, device, partition, account, container, obj,
             logger, keep_data_fp=False, disk_chunk_size=65536,
             bytes_per_sync=(512 * 1024 * 1024), iter_hook=None,
             threadpool=None):
    """
    Open an on-disk object: locate the newest .ts/.meta/.data file in
    the hashed data directory, read its metadata, and verify the client
    path against the stored object name.

    :raises DiskFileCollision: when the stored 'name' metadata does not
                               match the requested path
    """
    self.disk_chunk_size = disk_chunk_size
    self.bytes_per_sync = bytes_per_sync
    self.iter_hook = iter_hook
    self.name = '/' + '/'.join((account, container, obj))
    name_hash = hash_path(account, container, obj)
    self.datadir = os.path.join(
        path, device, storage_directory(DATADIR, partition, name_hash))
    self.device_path = os.path.join(path, device)
    self.tmpdir = os.path.join(path, device, 'tmp')
    self.logger = logger
    self.metadata = {}
    self.meta_file = None
    self.data_file = None
    self.fp = None
    self.iter_etag = None
    self.started_at_0 = False
    self.read_to_eof = False
    self.quarantined_dir = None
    self.keep_cache = False
    self.suppress_file_closing = False
    self.threadpool = threadpool or ThreadPool(nthreads=0)
    if not os.path.exists(self.datadir):
        return
    # Newest file wins: a tombstone marks the object deleted; otherwise
    # remember the newest .meta and .data files.
    files = sorted(os.listdir(self.datadir), reverse=True)
    for afile in files:
        if afile.endswith('.ts'):
            self.data_file = self.meta_file = None
            self.metadata = {'deleted': True}
            return
        if afile.endswith('.meta') and not self.meta_file:
            self.meta_file = os.path.join(self.datadir, afile)
        if afile.endswith('.data') and not self.data_file:
            self.data_file = os.path.join(self.datadir, afile)
            break
    if not self.data_file:
        return
    self.fp = open(self.data_file, 'rb')
    self.metadata = read_metadata(self.fp)
    if not keep_data_fp:
        self.close(verify_file=False)
    if self.meta_file:
        # Overlay .meta metadata, dropping everything except the
        # disallowed (immutable) headers first.
        with open(self.meta_file) as mfp:
            for key in self.metadata.keys():
                if key.lower() not in DISALLOWED_HEADERS:
                    del self.metadata[key]
            self.metadata.update(read_metadata(mfp))
    if 'name' in self.metadata:
        if self.metadata['name'] != self.name:
            self.logger.error(_('Client path %(client)s does not match '
                                'path stored in object metadata %(meta)s'),
                              {'client': self.name,
                               'meta': self.metadata['name']})
            raise DiskFileCollision('Client path does not match path '
                                    'stored in object metadata')
def __init__(self, path, device, partition, account, container, obj,
             logger, keep_data_fp=False, disk_chunk_size=65536,
             iter_hook=None):
    """
    Open an on-disk object: locate the newest .ts/.meta/.data file in
    the hashed data directory and read its metadata.
    """
    self.disk_chunk_size = disk_chunk_size
    self.iter_hook = iter_hook
    self.name = "/" + "/".join((account, container, obj))
    name_hash = hash_path(account, container, obj)
    self.datadir = os.path.join(
        path, device, storage_directory(DATADIR, partition, name_hash))
    self.device_path = os.path.join(path, device)
    self.tmpdir = os.path.join(path, device, "tmp")
    self.tmppath = None
    self.logger = logger
    self.metadata = {}
    self.meta_file = None
    self.data_file = None
    self.fp = None
    self.iter_etag = None
    self.started_at_0 = False
    self.read_to_eof = False
    self.quarantined_dir = None
    self.keep_cache = False
    self.suppress_file_closing = False
    if not os.path.exists(self.datadir):
        return
    # Newest file wins: a tombstone marks the object deleted; otherwise
    # remember the newest .meta and .data files.
    fnames = sorted(os.listdir(self.datadir), reverse=True)
    for fname in fnames:
        if fname.endswith(".ts"):
            self.data_file = self.meta_file = None
            self.metadata = {"deleted": True}
            return
        if fname.endswith(".meta") and not self.meta_file:
            self.meta_file = os.path.join(self.datadir, fname)
        if fname.endswith(".data") and not self.data_file:
            self.data_file = os.path.join(self.datadir, fname)
            break
    if not self.data_file:
        return
    self.fp = open(self.data_file, "rb")
    self.metadata = read_metadata(self.fp)
    if not keep_data_fp:
        self.close(verify_file=False)
    if self.meta_file:
        # Overlay .meta metadata, dropping everything except the
        # disallowed (immutable) headers first.
        with open(self.meta_file) as mfp:
            for key in self.metadata.keys():
                if key.lower() not in DISALLOWED_HEADERS:
                    del self.metadata[key]
            self.metadata.update(read_metadata(mfp))
def __init__(self, path, device, partition, account, container, obj,
             logger, keep_data_fp=False, disk_chunk_size=65536,
             iter_hook=None):
    """
    Open an on-disk object: locate the newest .ts/.meta/.data file in
    the hashed data directory and read its metadata.
    """
    self.disk_chunk_size = disk_chunk_size
    self.iter_hook = iter_hook
    self.name = '/' + '/'.join((account, container, obj))
    name_hash = hash_path(account, container, obj)
    self.datadir = os.path.join(
        path, device, storage_directory(DATADIR, partition, name_hash))
    self.device_path = os.path.join(path, device)
    self.tmpdir = os.path.join(path, device, 'tmp')
    self.tmppath = None
    self.logger = logger
    self.metadata = {}
    self.meta_file = None
    self.data_file = None
    self.fp = None
    self.iter_etag = None
    self.started_at_0 = False
    self.read_to_eof = False
    self.quarantined_dir = None
    self.keep_cache = False
    self.suppress_file_closing = False
    if not os.path.exists(self.datadir):
        return
    # Newest file wins: a tombstone marks the object deleted; otherwise
    # remember the newest .meta and .data files.
    fnames = sorted(os.listdir(self.datadir), reverse=True)
    for fname in fnames:
        if fname.endswith('.ts'):
            self.data_file = self.meta_file = None
            self.metadata = {'deleted': True}
            return
        if fname.endswith('.meta') and not self.meta_file:
            self.meta_file = os.path.join(self.datadir, fname)
        if fname.endswith('.data') and not self.data_file:
            self.data_file = os.path.join(self.datadir, fname)
            break
    if not self.data_file:
        return
    self.fp = open(self.data_file, 'rb')
    self.metadata = read_metadata(self.fp)
    if not keep_data_fp:
        self.close(verify_file=False)
    if self.meta_file:
        # Overlay .meta metadata, dropping everything except the
        # disallowed (immutable) headers first.
        with open(self.meta_file) as mfp:
            for key in self.metadata.keys():
                if key.lower() not in DISALLOWED_HEADERS:
                    del self.metadata[key]
            self.metadata.update(read_metadata(mfp))
def __init__(self, path, device, partition, account, container, obj,
             logger, keep_data_fp=False, disk_chunk_size=65536,
             origin_disk_chunk_size=65536, iter_hook=None,
             encryption_context=None, crypto_driver=None):
    """
    Open an on-disk object: locate the newest .ts/.meta/.data file,
    read its metadata, and — when a crypto driver is configured and no
    encryption context was supplied — derive one from the stored key id.
    """
    self.disk_chunk_size = disk_chunk_size
    self.origin_disk_chunk_size = origin_disk_chunk_size
    self.iter_hook = iter_hook
    self.name = '/' + '/'.join((account, container, obj))
    name_hash = hash_path(account, container, obj)
    self.datadir = os.path.join(
        path, device, storage_directory(DATADIR, partition, name_hash))
    self.device_path = os.path.join(path, device)
    self.tmpdir = os.path.join(path, device, 'tmp')
    self.tmppath = None
    self.logger = logger
    self.metadata = {}
    self.meta_file = None
    self.data_file = None
    self.fp = None
    self.iter_etag = None
    self.started_at_0 = False
    self.read_to_eof = False
    self.quarantined_dir = None
    self.keep_cache = False
    self.suppress_file_closing = False
    self.encryption_context = encryption_context
    self.crypto_driver = crypto_driver
    if not os.path.exists(self.datadir):
        return
    # Newest file wins: a tombstone marks the object deleted; otherwise
    # remember the newest .meta and .data files.
    fnames = sorted(os.listdir(self.datadir), reverse=True)
    for fname in fnames:
        if fname.endswith('.ts'):
            self.data_file = self.meta_file = None
            self.metadata = {'deleted': True}
            return
        if fname.endswith('.meta') and not self.meta_file:
            self.meta_file = os.path.join(self.datadir, fname)
        if fname.endswith('.data') and not self.data_file:
            self.data_file = os.path.join(self.datadir, fname)
            break
    if not self.data_file:
        return
    self.fp = open(self.data_file, 'rb')
    self.metadata = read_metadata(self.fp)
    if not keep_data_fp:
        self.close(verify_file=False)
    if self.meta_file:
        # Overlay .meta metadata, dropping everything except the
        # disallowed (immutable) headers first.
        with open(self.meta_file) as mfp:
            for key in self.metadata.keys():
                if key.lower() not in DISALLOWED_HEADERS:
                    del self.metadata[key]
            self.metadata.update(read_metadata(mfp))
    if self.crypto_driver and not self.encryption_context:
        key_id = self.metadata.get('X-Object-Meta-Key-Id')
        self.encryption_context = \
            self.crypto_driver.encryption_context(key_id)
def delete_handoff_objs(self, job, delete_objs):
    """
    Remove the given object hash directories from a handoff partition
    and prune each suffix directory when it becomes empty.
    """
    for object_hash in delete_objs:
        object_path = storage_directory(job['obj_path'], job['partition'],
                                        object_hash)
        tpool.execute(shutil.rmtree, object_path, ignore_errors=True)
        suffix_dir = dirname(object_path)
        try:
            os.rmdir(suffix_dir)
        except OSError as e:
            # Missing or still-populated suffix dirs are expected;
            # anything else gets logged.
            if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
                self.logger.exception(
                    "Unexpected error trying to cleanup suffix dir:%r",
                    suffix_dir)
def _get_container_broker(self, drive, part, account, container):
    """
    Get a DB broker for the container.

    :param drive: drive that holds the container
    :param part: partition the container is in
    :param account: account name
    :param container: container name
    :returns: ContainerBroker object
    """
    hsh = hash_path(account, container)
    db_dir = storage_directory(DATADIR, part, hsh)
    db_path = os.path.join(self.root, drive, db_dir, hsh + ".db")
    return ContainerBroker(db_path, account=account, container=container,
                           logger=self.logger)
def __init__(self, path, device, partition, account, container, obj,
             logger, keep_data_fp=False, disk_chunk_size=65536,
             storage=None):
    """
    Open an on-disk object: locate the newest .ts/.meta/.data file and
    read its metadata. When a *storage* backend is supplied, it chooses
    and prepares the tmp directory for the object.
    """
    self.disk_chunk_size = disk_chunk_size
    self.name = '/' + '/'.join((account, container, obj))
    name_hash = hash_path(account, container, obj)
    self.datadir = os.path.join(
        path, device, storage_directory(DATADIR, partition, name_hash))
    self.device_path = os.path.join(path, device)
    if storage:
        storage.setup_objdir(device, partition, name_hash)
        self.tmpdir = storage.tmp_dir(device, partition, name_hash)
    else:
        self.tmpdir = os.path.join(path, device, DATADIR, partition, 'tmp')
    self.logger = logger
    self.metadata = {}
    self.meta_file = None
    self.data_file = None
    self.fp = None
    self.iter_etag = None
    self.started_at_0 = False
    self.read_to_eof = False
    self.quarantined_dir = None
    self.keep_cache = False
    if not os.path.exists(self.datadir):
        return
    # Newest file wins: a tombstone marks the object deleted; otherwise
    # remember the newest .meta and .data files.
    fnames = sorted(os.listdir(self.datadir), reverse=True)
    for fname in fnames:
        if fname.endswith('.ts'):
            self.data_file = self.meta_file = None
            self.metadata = {'deleted': True}
            return
        if fname.endswith('.meta') and not self.meta_file:
            self.meta_file = os.path.join(self.datadir, fname)
        if fname.endswith('.data') and not self.data_file:
            self.data_file = os.path.join(self.datadir, fname)
            break
    if not self.data_file:
        return
    self.fp = open(self.data_file, 'rb')
    self.metadata = read_metadata(self.fp)
    if not keep_data_fp:
        self.close(verify_file=False)
    if self.meta_file:
        # Overlay .meta metadata, dropping everything except the
        # disallowed (immutable) headers first.
        with open(self.meta_file) as mfp:
            for key in self.metadata.keys():
                if key.lower() not in DISALLOWED_HEADERS:
                    del self.metadata[key]
            self.metadata.update(read_metadata(mfp))
def delete_handoff_objs(self, job, delete_objs):
    """
    Remove the given object hash directories from a handoff partition,
    pruning each suffix directory when it becomes empty.

    :returns: (success_paths, error_paths) — object paths whose suffix
              dir was removed, and paths that hit an unexpected error
    """
    success_paths = []
    error_paths = []
    for object_hash in delete_objs:
        object_path = storage_directory(job["obj_path"], job["partition"],
                                        object_hash)
        tpool.execute(shutil.rmtree, object_path, ignore_errors=True)
        suffix_dir = dirname(object_path)
        try:
            os.rmdir(suffix_dir)
            success_paths.append(object_path)
        except OSError as e:
            # Missing or still-populated suffix dirs are expected;
            # anything else is recorded and logged.
            if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
                error_paths.append(object_path)
                self.logger.exception(
                    "Unexpected error trying to cleanup suffix dir:%r",
                    suffix_dir)
    return success_paths, error_paths
def __call__(self, env, start_response):
    """
    WSGI entry point: when this host is the object's LXC host, check
    the container at the object's on-disk location before passing the
    request down the pipeline.
    """
    req = Request(env)
    lxc_host = env.get("HTTP_X_OBJECT_META_LXC_HOST")
    addresses = whataremyips()
    if lxc_host in addresses:
        ring = Ring(self.object_ring_path)
        # RAW_PATH_INFO parts: ['', device, partition, account,
        # container, object]
        raw_path = env.get("RAW_PATH_INFO").split("/")
        path_hash = hash_path(raw_path[3], raw_path[4], raw_path[5])
        f_location = storage_directory("objects", raw_path[2], path_hash)
        path = "%s/%s/%s" % (self.root, raw_path[1], f_location)
        # Check if container exists and is running
        self.check_container(path, raw_path[5])
    return self.app(env, start_response)
def __init__(self, path, device, partition, account, container, obj,
             logger, keep_data_fp=False, disk_chunk_size=65536,
             bytes_per_sync=(512 * 1024 * 1024), iter_hook=None,
             threadpool=None, obj_dir='objects', mount_check=False):
    """
    Set up path state for an on-disk object, then construct the
    instance from whichever of .data/.ts files is found on disk.

    :raises DiskFileDeviceUnavailable: if mount_check is on and fails
    """
    if mount_check and not check_mount(path, device):
        raise DiskFileDeviceUnavailable()
    self.disk_chunk_size = disk_chunk_size
    self.bytes_per_sync = bytes_per_sync
    self.iter_hook = iter_hook
    self.name = '/' + '/'.join((account, container, obj))
    name_hash = hash_path(account, container, obj)
    self.datadir = join(
        path, device, storage_directory(obj_dir, partition, name_hash))
    self.device_path = join(path, device)
    self.tmpdir = join(path, device, 'tmp')
    self.logger = logger
    self.metadata = {}
    self.data_file = None
    self.fp = None
    self.iter_etag = None
    self.started_at_0 = False
    self.read_to_eof = False
    self.quarantined_dir = None
    self.keep_cache = False
    self.suppress_file_closing = False
    self.threadpool = threadpool or ThreadPool(nthreads=0)
    data_file, meta_file, ts_file = self._get_ondisk_file()
    if not data_file:
        if ts_file:
            self._construct_from_ts_file(ts_file)
    else:
        fp = self._construct_from_data_file(data_file, meta_file)
        # Keep the data file pointer open only when the caller asked.
        if keep_data_fp:
            self.fp = fp
        else:
            fp.close()
def get_data_dir(vertigo):
    """
    Gets the data directory full path

    :param vertigo: swift_vertigo.vertigo_handler.VertigoObjectHandler instance
    :returns: the data directory path
    """
    devices = vertigo.conf.get('devices')
    device, partition, account, container, obj, policy = \
        get_name_and_placement(vertigo.request, 5, 5, True)
    name_hash = hash_path(account, container, obj)
    device_path = os.path.join(devices, device)
    storage_dir = storage_directory(df_data_dir(policy), partition,
                                    name_hash)
    return os.path.join(device_path, storage_dir)
def test_delete_partition_ssync(self):
    """
    With ssync, the handoff object and suffix dir disappear on the
    second replicate() pass and the partition dir on the third.
    """
    with mock.patch('swift.obj.replicator.http_connect',
                    mock_http_connect(200)):
        df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
        mkdirs(df._datadir)
        f = open(os.path.join(df._datadir,
                              normalize_timestamp(time.time()) + '.data'),
                 'wb')
        f.write('0')
        f.close()
        ohash = hash_path('a', 'c', 'o')
        whole_path_from = storage_directory(self.objects, 1, ohash)
        suffix_dir_path = os.path.dirname(whole_path_from)
        part_path = os.path.join(self.objects, '1')
        self.assertTrue(os.access(part_path, os.F_OK))
        self.call_nums = 0
        self.conf['sync_method'] = 'ssync'

        def _fake_ssync(node, job, suffixes, **kwargs):
            success = True
            ret_val = [whole_path_from]
            if self.call_nums == 2:
                # ssync should return (True, []) only when the second
                # candidate node has not get the replica yet.
                success = False
                ret_val = []
            self.call_nums += 1
            return success, set(ret_val)

        self.replicator.sync_method = _fake_ssync
        self.replicator.replicate()
        # The file should still exist
        self.assertTrue(os.access(whole_path_from, os.F_OK))
        self.assertTrue(os.access(suffix_dir_path, os.F_OK))
        self.assertTrue(os.access(part_path, os.F_OK))
        self.replicator.replicate()
        # The file should be deleted at the second replicate call
        self.assertFalse(os.access(whole_path_from, os.F_OK))
        self.assertFalse(os.access(suffix_dir_path, os.F_OK))
        self.assertTrue(os.access(part_path, os.F_OK))
        self.replicator.replicate()
        # The partition should be deleted at the third replicate call
        self.assertFalse(os.access(whole_path_from, os.F_OK))
        self.assertFalse(os.access(suffix_dir_path, os.F_OK))
        self.assertFalse(os.access(part_path, os.F_OK))
        del self.call_nums
def __init__(self, mgr, device_path, threadpool, partition, account,
             container, obj):
    """
    Initialize per-object state, pulling chunk-size and sync settings
    from the manager and computing the hashed data directory path.
    """
    self._mgr = mgr
    self._device_path = device_path
    self._threadpool = threadpool or ThreadPool(nthreads=0)
    self._logger = mgr.logger
    self._disk_chunk_size = mgr.disk_chunk_size
    self._bytes_per_sync = mgr.bytes_per_sync
    self._name = '/' + '/'.join((account, container, obj))
    name_hash = hash_path(account, container, obj)
    self._datadir = join(
        device_path, storage_directory(DATADIR, partition, name_hash))
    self._tmpdir = join(device_path, 'tmp')
    self._metadata = None
    self._data_file = None
    self._fp = None
    self._quarantined_dir = None
def _get_container_broker(self, drive, part, account, container, **kwargs):
    """
    Get a DB broker for the container.

    :param drive: drive that holds the container
    :param part: partition the container is in
    :param account: account name
    :param container: container name
    :returns: ContainerMetaBroker object
    """
    hsh = hash_path(account, container)
    db_dir = storage_directory(DATADIR, part, hsh)
    db_path = os.path.join(self.root, drive, db_dir, hsh + '.db')
    kwargs.setdefault('account', account)
    kwargs.setdefault('container', container)
    kwargs.setdefault('logger', self.logger)
    return ContainerMetaBroker(db_path, **kwargs)
def _get_container_broker(self, drive, part, account, container, **kwargs):
    """
    Get a DB broker for the container.

    :param drive: drive that holds the container
    :param part: partition the container is in
    :param account: account name
    :param container: container name
    :returns: ContainerBroker object
    """
    hashed = hash_path(account, container)
    db_dir = storage_directory(DATADIR, part, hashed)
    # Let explicit kwargs from the caller win over our defaults.
    kwargs.setdefault('account', account)
    kwargs.setdefault('container', container)
    kwargs.setdefault('logger', self.logger)
    return ContainerBroker(
        os.path.join(self.root, drive, db_dir, hashed + '.db'), **kwargs)
def __init__(self, mgr, device_path, threadpool, partition, account,
             container, obj):
    """
    Initialize per-object disk-file state.

    :param mgr: manager object providing logger and tuning knobs
    :param device_path: device mount point containing the object
    :param threadpool: I/O thread pool (a no-thread pool is created
        when None/falsy is given)
    :param partition: ring partition
    :param account: account name
    :param container: container name
    :param obj: object name
    """
    # Attributes filled in later by open(); start them out empty.
    self._metadata = None
    self._data_file = None
    self._fp = None
    self._quarantined_dir = None

    self._mgr = mgr
    self._device_path = device_path
    self._threadpool = threadpool or ThreadPool(nthreads=0)
    self._logger = mgr.logger
    self._disk_chunk_size = mgr.disk_chunk_size
    self._bytes_per_sync = mgr.bytes_per_sync

    self._name = '/' + '/'.join((account, container, obj))
    obj_hash = hash_path(account, container, obj)
    self._datadir = join(
        device_path, storage_directory(DATADIR, partition, obj_hash))
    self._tmpdir = join(device_path, 'tmp')
def dispatch(self, replicate_args, args):
    """
    Route a replication request to the matching handler.

    :param replicate_args: (drive, partition, hash) tuple locating the DB
    :param args: list whose first element is the operation name; the
        remainder is passed through to the handler
    :returns: an HTTP response object
    """
    if not hasattr(args, "pop"):
        return HTTPBadRequest(body="Invalid object type")
    op = args.pop(0)
    drive, partition, hsh = replicate_args
    device_root = os.path.join(self.root, drive)
    if self.mount_check and not os.path.ismount(device_root):
        return Response(status="507 %s is not mounted" % drive)
    db_file = os.path.join(device_root,
                           storage_directory(self.datadir, partition, hsh),
                           hsh + ".db")
    # rsync-related ops get (drive, db_file, args) rather than a broker.
    rsync_handlers = {"rsync_then_merge": self.rsync_then_merge,
                      "complete_rsync": self.complete_rsync}
    handler = rsync_handlers.get(op)
    if handler is not None:
        return handler(drive, db_file, args)
    # Someone might be about to rsync a db to us; make sure there is a
    # tmp dir to receive it.
    mkdirs(os.path.join(device_root, "tmp"))
    if not os.path.exists(db_file):
        return HTTPNotFound()
    return getattr(self, op)(self.broker_class(db_file), args)
def __init__(self, path, device, partition, account, container, logger,
             uid=DEFAULT_UID, gid=DEFAULT_GID):
    """
    Bind to an account or container directory on disk and load (or
    create) its metadata.

    When ``container`` is given this represents a container directory;
    otherwise it represents the account's device directory.  If the
    directory does not exist, ``dir_exists`` is left False and metadata
    stays empty.

    :param path: devices root path
    :param device: device name
    :param partition: ring partition (only used for containers)
    :param account: account name
    :param container: container name, or falsy for an account
    :param logger: logger to use
    :param uid: numeric owner uid for created entries
    :param gid: numeric owner gid for created entries
    """
    if container:
        # Container directories are named by the container alone, not
        # account/container.
        self.name = container
    else:
        self.name = None
    if self.name:
        self.datadir = os.path.join(path, device,
                                    storage_directory(DATADIR, partition,
                                                      self.name))
    else:
        # Account case: the device directory itself is the datadir.
        self.datadir = os.path.join(path, device)
    self.device_path = os.path.join(path, device)
    self.logger = logger
    self.metadata = {}
    self.uid = int(uid)
    self.gid = int(gid)
    self.dir_exists = os.path.isdir(self.datadir)
    if self.dir_exists:
        self.metadata = read_metadata(self.datadir)
    else:
        # Nothing on disk; leave metadata empty for the caller to handle.
        return
    if container:
        if not self.metadata:
            # First access: synthesize container metadata on disk, then
            # reload it so self.metadata reflects what was written.
            create_container_metadata(self.datadir)
            self.metadata = read_metadata(self.datadir)
        ret = validate_container(self.metadata)
    else:
        if not self.metadata:
            # Same bootstrap path for accounts.
            create_account_metadata(self.datadir)
            self.metadata = read_metadata(self.datadir)
        ret = validate_account(self.metadata)
    if not ret:
        # Invalid metadata: present this directory as nonexistent.
        self.dir_exists = False
        self.metadata = {}
def __init__(self, path, device, partition, account, container, obj,
             logger, disk_chunk_size=65536,
             bytes_per_sync=(512 * 1024 * 1024), iter_hook=None,
             threadpool=None, obj_dir='objects', mount_check=False):
    """
    Set up disk-file state for one object.

    :param path: devices root path
    :param device: device name
    :param partition: ring partition of the object
    :param account: account name
    :param container: container name
    :param obj: object name
    :param logger: logger to use
    :param disk_chunk_size: chunk size for disk I/O
    :param bytes_per_sync: bytes written between fdatasync calls
    :param iter_hook: optional callable invoked per read chunk
    :param threadpool: I/O pool; a zero-thread pool is used when None
    :param obj_dir: name of the objects directory on the device
    :param mount_check: when True, verify the device is mounted first
    :raises DiskFileDeviceUnavailable: if mount_check fails
    """
    if mount_check and not check_mount(path, device):
        raise DiskFileDeviceUnavailable()
    self.disk_chunk_size = disk_chunk_size
    self.bytes_per_sync = bytes_per_sync
    self.iter_hook = iter_hook
    self.name = '/' + '/'.join((account, container, obj))
    obj_hash = hash_path(account, container, obj)
    self.device_path = join(path, device)
    self.datadir = join(self.device_path,
                        storage_directory(obj_dir, partition, obj_hash))
    self.tmpdir = join(self.device_path, 'tmp')
    self.logger = logger
    self.threadpool = threadpool or ThreadPool(nthreads=0)
    # State that open()/read paths populate later.
    self._metadata = None
    self.data_file = None
    self._data_file_size = None
    self.fp = None
    self.iter_etag = None
    self.started_at_0 = False
    self.read_to_eof = False
    self.quarantined_dir = None
    self.suppress_file_closing = False
    self._verify_close = False
    # FIXME(clayg): this attribute is set after open and affects the
    # behavior of the class (i.e. public interface)
    self.keep_cache = False
def dispatch(self, replicate_args, args):
    """
    Dispatch a replication operation to its handler method.

    :param replicate_args: (drive, partition, hash) locating the DB
    :param args: list whose head is the operation name; tail is passed
        to the handler
    :returns: an HTTP response object
    """
    if not hasattr(args, 'pop'):
        return HTTPBadRequest(body='Invalid object type')
    op = args.pop(0)
    drive, partition, hsh = replicate_args
    if not check_drive(self.root, drive, self.mount_check):
        return Response(status='507 %s is not mounted' % drive)
    db_file = os.path.join(
        self.root, drive, storage_directory(self.datadir, partition, hsh),
        hsh + '.db')
    # These two ops operate on the raw db path, not a broker.
    if op in ('rsync_then_merge', 'complete_rsync'):
        return getattr(self, op)(drive, db_file, args)
    # someone might be about to rsync a db to us,
    # make sure there's a tmp dir to receive it.
    mkdirs(os.path.join(self.root, drive, 'tmp'))
    if not os.path.exists(db_file):
        return HTTPNotFound()
    return getattr(self, op)(self.broker_class(db_file), args)
def _get_db_info(self, account, container):
    """
    Returns the database path of the container

    :param account: UTF-8 encoded account name
    :param container: UTF-8 encoded container name
    :returns: a tuple of (db path, nodes count, index of replica), or
        (None, None, None) when no replica lives on this host
    """
    part, nodes = self.container_ring.get_nodes(account, container)
    replica_count = len(nodes)
    hashed = hash_path(account, container)
    partition_dir = storage_directory(DATADIR, part, hashed)
    for replica_index, node in enumerate(nodes):
        # Only report a path for a replica held by this machine.
        if is_local_device(self.myips, None, node['ip'], node['port']):
            db_path = os.path.join(self.root, node['device'],
                                   partition_dir, hashed + '.db')
            return db_path, replica_count, replica_index
    return None, None, None
def __init__(self, path, device, partition, account, container, obj,
             keep_data_fp=False, disk_chunk_size=65536):
    """
    Open the on-disk state for one object.

    Scans the object's hash directory newest-first for the current
    .ts/.meta/.data file: a tombstone newer than any data file marks the
    object deleted; otherwise the newest .data file's metadata is loaded
    and, if a newer .meta file exists, mutable keys are refreshed from it.

    :param path: devices root path
    :param device: device name
    :param partition: ring partition of the object
    :param account: account name
    :param container: container name
    :param obj: object name
    :param keep_data_fp: if True, leave the .data file handle open on
        ``self.fp`` for the caller to read from
    :param disk_chunk_size: chunk size used for disk I/O
    """
    self.disk_chunk_size = disk_chunk_size
    self.name = '/' + '/'.join((account, container, obj))
    name_hash = hash_path(account, container, obj)
    self.datadir = os.path.join(
        path, device, storage_directory(DATADIR, partition, name_hash))
    self.tmpdir = os.path.join(path, device, 'tmp')
    self.metadata = {}
    self.meta_file = None
    self.data_file = None
    self.fp = None
    self.keep_cache = False
    if not os.path.exists(self.datadir):
        return
    # Newest-first so the most recent .ts/.data wins.
    files = sorted(os.listdir(self.datadir), reverse=True)
    # NOTE: loop variable renamed from `file` -- it shadowed the builtin.
    for afile in files:
        if afile.endswith('.ts'):
            # Tombstone newer than any data file: object is deleted.
            self.data_file = self.meta_file = None
            self.metadata = {'deleted': True}
            return
        if afile.endswith('.meta') and not self.meta_file:
            self.meta_file = os.path.join(self.datadir, afile)
        if afile.endswith('.data') and not self.data_file:
            self.data_file = os.path.join(self.datadir, afile)
            break
    if not self.data_file:
        return
    self.fp = open(self.data_file, 'rb')
    self.metadata = read_metadata(self.fp)
    if not keep_data_fp:
        self.close()
    if self.meta_file:
        with open(self.meta_file) as mfp:
            # Keep only the immutable object attributes from the .data
            # metadata; everything else is replaced by the newer .meta.
            # Iterate a snapshot: deleting from a dict while iterating
            # its live view is unsafe.
            for key in list(self.metadata.keys()):
                if key.lower() not in ('content-type', 'content-encoding',
                                       'deleted', 'content-length',
                                       'etag'):
                    del self.metadata[key]
            self.metadata.update(read_metadata(mfp))
def _get_db_info(self, account, container, number):
    """
    Return get_info() from the container DB held by server ``number``.

    :param account: account name
    :param container: container name
    :param number: index into the configured container-server configs
    :returns: the broker's info dict, or None when no ring node maps to
        this server's bind port
    """
    server_type = 'container'
    config_path = self.configs['%s-server' % server_type][number]
    options = utils.readconf(config_path, 'app:container-server')
    root = options.get('devices')
    swift_dir = options.get('swift_dir', '/etc/swift')
    ring = Ring(swift_dir, ring_name=server_type)
    part, nodes = ring.get_nodes(account, container)
    bind_port = int(options.get('bind_port'))
    # assumes a one-to-one mapping between bind port and device
    device = next((node['device'] for node in nodes
                   if node['port'] == bind_port), None)
    if device is None:
        return None
    path_hash = utils.hash_path(account, container)
    suffix_dir = utils.storage_directory('%ss' % server_type, part,
                                         path_hash)
    db_file = os.path.join(root, device, suffix_dir, '%s.db' % path_hash)
    return ContainerBroker(db_file).get_info()
def __init__(self, path, device, partition, account, container, obj,
             logger, keep_data_fp=False, disk_chunk_size=65536,
             bytes_per_sync=(512 * 1024 * 1024), iter_hook=None):
    """
    Open the on-disk state for one object.

    Scans the object's hash directory newest-first; a .ts tombstone
    marks the object deleted, otherwise the newest .data file's
    metadata is loaded and refreshed from any newer .meta file.

    :param path: devices root path
    :param device: device name
    :param partition: ring partition of the object
    :param account: account name
    :param container: container name
    :param obj: object name
    :param logger: logger to use
    :param keep_data_fp: if True, leave the .data handle open on
        ``self.fp`` for the caller
    :param disk_chunk_size: chunk size for disk I/O
    :param bytes_per_sync: bytes written between sync calls
    :param iter_hook: optional callable invoked per read chunk
    :raises DiskFileCollision: if the metadata's stored name does not
        match the requested object path
    """
    self.disk_chunk_size = disk_chunk_size
    self.bytes_per_sync = bytes_per_sync
    self.iter_hook = iter_hook
    self.name = '/' + '/'.join((account, container, obj))
    name_hash = hash_path(account, container, obj)
    self.datadir = os.path.join(
        path, device, storage_directory(DATADIR, partition, name_hash))
    self.device_path = os.path.join(path, device)
    self.tmpdir = os.path.join(path, device, 'tmp')
    self.logger = logger
    self.metadata = {}
    self.meta_file = None
    self.data_file = None
    self.fp = None
    self.iter_etag = None
    self.started_at_0 = False
    self.read_to_eof = False
    self.quarantined_dir = None
    self.keep_cache = False
    self.suppress_file_closing = False
    if not os.path.exists(self.datadir):
        return
    # Newest-first so the most recent .ts/.data wins.
    files = sorted(os.listdir(self.datadir), reverse=True)
    for afile in files:
        if afile.endswith('.ts'):
            # Tombstone newer than any data file: object is deleted.
            self.data_file = self.meta_file = None
            self.metadata = {'deleted': True}
            return
        if afile.endswith('.meta') and not self.meta_file:
            self.meta_file = os.path.join(self.datadir, afile)
        if afile.endswith('.data') and not self.data_file:
            self.data_file = os.path.join(self.datadir, afile)
            break
    if not self.data_file:
        return
    self.fp = open(self.data_file, 'rb')
    self.metadata = read_metadata(self.fp)
    if not keep_data_fp:
        self.close(verify_file=False)
    if self.meta_file:
        with open(self.meta_file) as mfp:
            # Drop every .data metadata key except the protected ones in
            # DISALLOWED_HEADERS, then overlay the newer .meta contents.
            # NOTE(review): deleting while iterating .keys() is only safe
            # because keys() returns a list on Python 2.
            for key in self.metadata.keys():
                if key.lower() not in DISALLOWED_HEADERS:
                    del self.metadata[key]
            self.metadata.update(read_metadata(mfp))
    if 'name' in self.metadata:
        if self.metadata['name'] != self.name:
            # Hash collision (or corrupted metadata): the path stored on
            # disk belongs to a different object.
            self.logger.error(
                _('Client path %(client)s does not match '
                  'path stored in object metadata %(meta)s'),
                {'client': self.name, 'meta': self.metadata['name']})
            raise DiskFileCollision('Client path does not match path '
                                    'stored in object metadata')
def print_ring_locations(ring, datadir, account, container=None, obj=None,
                         tpart=None, all_nodes=False, policy_index=None):
    """
    print out ring locations of specified type

    :param ring: ring instance
    :param datadir: name of directory where things are stored. Usually one
        of "accounts", "containers", "objects", or "objects-N".
    :param account: account name
    :param container: container name
    :param obj: object name
    :param tpart: target partition in ring
    :param all_nodes: include all handoff nodes. If false, only the N
        primary nodes and first N handoffs will be printed.
    :param policy_index: include policy_index in curl headers
    :raises ValueError: on inconsistent or missing arguments
    """
    # Argument sanity checks: need a ring, a datadir, and either a
    # partition or an account-rooted path.
    if not ring:
        raise ValueError("No ring specified")
    if not datadir:
        raise ValueError("No datadir specified")
    if tpart is None and not account:
        raise ValueError("No partition or account/container/object specified")
    if not account and (container or obj):
        raise ValueError("Container/object specified without account")
    if obj and not container:
        raise ValueError('Object specified without container')
    # Build the URL path suffix for the curl examples.
    if obj:
        target = '%s/%s/%s' % (account, container, obj)
    elif container:
        target = '%s/%s' % (account, container)
    else:
        target = '%s' % (account)
    # An explicit partition overrides the one computed from the path.
    if tpart:
        part = int(tpart)
    else:
        part = ring.get_part(account, container, obj)
    primary_nodes = ring.get_part_nodes(part)
    handoff_nodes = ring.get_more_nodes(part)
    if not all_nodes:
        # Limit handoffs to as many as there are primaries.
        handoff_nodes = itertools.islice(handoff_nodes, len(primary_nodes))
    handoff_nodes = list(handoff_nodes)
    # With a raw partition there is no name to hash, so per-hash
    # directory listings are replaced by partition listings below.
    if account and not tpart:
        path_hash = hash_path(account, container, obj)
    else:
        path_hash = None
    print 'Partition\t%s' % part
    print 'Hash \t%s\n' % path_hash
    for node in primary_nodes:
        print 'Server:Port Device\t%s:%s %s' % (node['ip'], node['port'],
                                                node['device'])
    for node in handoff_nodes:
        print 'Server:Port Device\t%s:%s %s\t [Handoff]' % (
            node['ip'], node['port'], node['device'])
    print "\n"
    # Emit ready-to-run curl HEAD commands for every node.
    for node in primary_nodes:
        cmd = 'curl -I -XHEAD "http://%s:%s/%s/%s/%s"' \
            % (node['ip'], node['port'], node['device'], part,
               urllib.quote(target))
        if policy_index is not None:
            cmd += ' -H "%s: %s"' % ('X-Backend-Storage-Policy-Index',
                                     policy_index)
        print cmd
    for node in handoff_nodes:
        cmd = 'curl -I -XHEAD "http://%s:%s/%s/%s/%s"' \
            % (node['ip'], node['port'], node['device'], part,
               urllib.quote(target))
        if policy_index is not None:
            cmd += ' -H "%s: %s"' % ('X-Backend-Storage-Policy-Index',
                                     policy_index)
        cmd += ' # [Handoff]'
        print cmd
    print "\n\nUse your own device location of servers:"
    print "such as \"export DEVICE=/srv/node\""
    # Emit ssh+ls commands: per-hash directory when we have a hash,
    # otherwise the whole partition directory.
    if path_hash:
        for node in primary_nodes:
            print ('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s"' %
                   (node['ip'], node['device'],
                    storage_directory(datadir, part, path_hash)))
        for node in handoff_nodes:
            print ('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s" # [Handoff]'
                   % (node['ip'], node['device'],
                      storage_directory(datadir, part, path_hash)))
    else:
        for node in primary_nodes:
            print ('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s/%d"' %
                   (node['ip'], node['device'], datadir, part))
        for node in handoff_nodes:
            print ('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s/%d"'
                   ' # [Handoff]' % (node['ip'], node['device'], datadir,
                                     part))
    print '\nnote: `/srv/node*` is used as default value of `devices`, the ' \
        'real value is set in the config file on each storage node.'
def PUT(self, req):
    """
    Mirror a container or object PUT into the metadata store.

    For a container path, locates the container DB on a local device,
    reads its info plus user/sys metadata, and inserts a formatted
    system-metadata row via self.broker.  For an object path, reads the
    diskfile's metadata and inserts an object row.  All per-device
    failures are logged and skipped (best-effort across replicas).

    :param req: the incoming swob/webob-style request; must carry a
        'storage_policy' header naming the policy ring to use
    """
    version, acc, con, obj = split_path(req.path, 1, 4, True)
    stor_policy = req.headers['storage_policy']
    ring = POLICIES.get_object_ring(stor_policy, '/etc/swift')
    # Handle Container PUT
    if not obj:
        hsh = hash_path(acc, con)
        part = ring.get_part(acc, con)
        db_dir = storage_directory(swift.container.backend.DATADIR, part,
                                   hsh)
        nodes = ring.get_part_nodes(part)
        # Try each replica that lives on one of our devices until one
        # insert succeeds; failures fall through to the next candidate.
        for node in nodes:
            for item in self.devicelist:
                if node['device'] in item:
                    try:
                        path = os.path.join(self.root + item, db_dir,
                                            hsh + '.db')
                        # TODO: move kwargs
                        kwargs = {'account': acc, 'container': con,
                                  'logger': self.logger}
                        md_broker = swift.container.backend.ContainerBroker(
                            path, **kwargs)
                        md = md_broker.get_info()
                        # Overlay non-empty user/sys metadata values from
                        # the broker's (value, timestamp) pairs.
                        md.update(
                            (key, value)
                            for key, (value, timestamp)
                            in md_broker.metadata.iteritems()
                            if value != '' and is_sys_or_user_meta(
                                'container', key))
                        sys_md = format_con_metadata(md)
                        user_md = format_custom_metadata(md)
                        if 'X-Container-Read' in req.headers:
                            sys_md['container_read_permissions'] = \
                                req.headers['X-Container-Read']
                        if 'X-Container-Write' in req.headers:
                            sys_md['container_write_permissions'] = \
                                req.headers['X-Container-Write']
                        # TODO: insert container_last_activity_time
                        # TODO: split meta user/sys
                        # TODO: insert meta
                        self.broker.insert_container_md([sys_md])
                        return
                    except DatabaseConnectionError as e:
                        self.logger.warn("DatabaseConnectionError: " +
                                         e.path + "\n")
                        pass
                    except:
                        # Best-effort: log and try the next device.
                        self.logger.warn("%s: %s\n" % (
                            str(sys.exc_info()[0]),
                            str(sys.exc_info()[1])))
                        pass
    # handle object PUT
    else:
        part = ring.get_part(acc, con, obj)
        nodes = ring.get_part_nodes(part)
        for node in nodes:
            for item in self.devicelist:
                if node['device'] in item:
                    try:
                        df = self.diskfile_mgr.get_diskfile(
                            item, part, acc, con, obj, stor_policy)
                        md = df.read_metadata()
                        sys_md = format_obj_metadata(md)
                        # df._data_file is a direct path to the object's
                        # data file.
                        sys_md['object_location'] = df._data_file
                        user_md = format_custom_metadata(md)
                        # TODO: insert user meta and sys meta
                        self.broker.insert_object_md([sys_md])
                    except:
                        # Best-effort: log and try the next device.
                        self.logger.warn("%s: %s\n" % (
                            str(sys.exc_info()[0]),
                            str(sys.exc_info()[1])))
                        pass
    return
def POST(self, req):
    """
    Mirror an account, container, or object POST into the metadata store.

    Depending on path depth, locates the matching account or container
    DB (or object diskfile) on a local device, reads its info plus
    user/sys metadata, and formats it for an overwrite in self.broker
    (the overwrite calls themselves are still TODO).  All per-device
    failures are logged and skipped (best-effort across replicas).

    :param req: the incoming request; must carry a 'storage_policy'
        header naming the policy ring to use
    """
    version, acc, con, obj = split_path(req.path, 1, 4, True)
    stor_policy = req.headers['storage_policy']
    ring = POLICIES.get_object_ring(stor_policy, '/etc/swift')
    # Handle account POST
    if not con and not obj:
        meta_type = 'account'
        kwargs = {'account': acc, 'logger': self.logger}
        data_dir = swift.account.backend.DATADIR
        hsh = hash_path(acc)
        part = ring.get_part(acc)
        db_dir = storage_directory(data_dir, part, hsh)
        nodes = ring.get_part_nodes(part)
        # Try each replica that lives on one of our devices until one
        # succeeds; failures fall through to the next candidate.
        for node in nodes:
            for item in self.devicelist:
                if node['device'] in item:
                    try:
                        path = os.path.join(self.root + item, db_dir,
                                            hsh + '.db')
                        broker = swift.account.backend.AccountBroker(
                            path, **kwargs)
                        md = broker.get_info()
                        # Overlay non-empty user/sys metadata values from
                        # the broker's (value, timestamp) pairs.
                        md.update(
                            (key, value)
                            for key, (value, timestamp)
                            in broker.metadata.iteritems()
                            if value != '' and is_sys_or_user_meta(
                                meta_type, key))
                        sys_md = format_acc_metadata(md)
                        user_md = format_custom_metadata(md)
                        # TODO: call overwrite_account_metadata
                        # TODO: call overwrite_custom_metadata
                        return
                    except:
                        # Best-effort: log and try the next device.
                        self.logger.warn("%s: %s\n" % (
                            str(sys.exc_info()[0]),
                            str(sys.exc_info()[1])))
                        pass
    # Handle Container POST
    elif not obj:
        meta_type = 'container'
        kwargs = {'account': acc, 'container': con, 'logger': self.logger}
        data_dir = swift.container.backend.DATADIR
        try:
            hsh = hash_path(acc, con)
            part = ring.get_part(acc, con)
            db_dir = storage_directory(data_dir, part, hsh)
            nodes = ring.get_part_nodes(part)
            for node in nodes:
                for item in self.devicelist:
                    if node['device'] in item:
                        try:
                            path = os.path.join(self.root + item, db_dir,
                                                hsh + '.db')
                            broker = \
                                swift.container.backend.ContainerBroker(
                                    path, **kwargs)
                            md = broker.get_info()
                            md.update(
                                (key, value)
                                for key, (value, timestamp)
                                in broker.metadata.iteritems()
                                if value != '' and is_sys_or_user_meta(
                                    'container', key))
                            sys_md = format_con_metadata(md)
                            user_md = format_custom_metadata(md)
                            if 'X-Container-Read' in req.headers:
                                sys_md['container_read_permissions'] = \
                                    req.headers['X-Container-Read']
                            if 'X-Container-Write' in req.headers:
                                sys_md['container_write_permissions'] = \
                                    req.headers['X-Container-Write']
                            # TODO: call overwrite_container_metadata
                            # TODO: call overwrite_custom_metadata
                            return
                        except DatabaseConnectionError as e:
                            self.logger.warn("DatabaseConnectionError: " +
                                             e.path + "\n")
                            pass
        except:
            # Best-effort: log and give up on the container branch.
            self.logger.warn(
                "%s: %s\n" % (str(sys.exc_info()[0]),
                              str(sys.exc_info()[1])))
            pass
    # Handle object POST
    else:
        part = ring.get_part(acc, con, obj)
        nodes = ring.get_part_nodes(part)
        for node in nodes:
            for item in self.devicelist:
                if node['device'] in item:
                    try:
                        df = self.diskfile_mgr.get_diskfile(
                            item, part, acc, con, obj, stor_policy)
                        md = df.read_metadata()
                        sys_md = format_obj_metadata(md)
                        user_md = format_custom_metadata(md)
                        # TODO: call overwrite_object_metadata
                        # TODO: call overwrite_custom_metadata
                    except:
                        # Best-effort: log and try the next device.
                        self.logger.warn("%s: %s\n" % (
                            str(sys.exc_info()[0]),
                            str(sys.exc_info()[1])))
                        pass
    return
def _get_storage_dir(self, part, node, account=None, container=None):
    """
    Return (storage directory, container hash) for a container.

    :param part: ring partition of the container
    :param node: ring node dict (currently unused; path is hard-coded)
    :param account: account name
    :param container: container name
    """
    # TODO: fix hard-coded path
    datadir = os.path.join('/srv/1/node/sdb1', 'containers')
    hashed = swift_utils.hash_path(account, container)
    storage_dir = swift_utils.storage_directory(datadir, part, hashed)
    return storage_dir, hashed
def __init__(self, path, device, partition, account, container, obj,
             logger, keep_data_fp=False, disk_chunk_size=65536,
             bytes_per_sync=(512 * 1024 * 1024), iter_hook=None,
             threadpool=None, obj_dir='objects', mount_check=False,
             disallowed_metadata_keys=None):
    """
    Open the on-disk state for one object.

    Scans the object's hash directory newest-first: a .ts tombstone
    marks the object deleted (its metadata is loaded and flagged),
    otherwise the newest .data file's metadata is loaded and refreshed
    from any newer .meta file.

    :param path: devices root path
    :param device: device name
    :param partition: ring partition of the object
    :param account: account name
    :param container: container name
    :param obj: object name
    :param logger: logger to use
    :param keep_data_fp: if True, leave the .data handle open on
        ``self.fp`` for the caller
    :param disk_chunk_size: chunk size for disk I/O
    :param bytes_per_sync: bytes written between sync calls
    :param iter_hook: optional callable invoked per read chunk
    :param threadpool: I/O pool; a zero-thread pool is used when None
    :param obj_dir: name of the objects directory on the device
    :param mount_check: when True, verify the device is mounted first
    :param disallowed_metadata_keys: metadata keys preserved from the
        .data file when overlaying .meta contents
    :raises DiskFileDeviceUnavailable: if mount_check fails
    :raises DiskFileCollision: if the stored name does not match the
        requested object path
    """
    if mount_check and not check_mount(path, device):
        raise DiskFileDeviceUnavailable()
    self.disk_chunk_size = disk_chunk_size
    self.bytes_per_sync = bytes_per_sync
    self.iter_hook = iter_hook
    self.name = '/' + '/'.join((account, container, obj))
    name_hash = hash_path(account, container, obj)
    self.datadir = join(path, device,
                        storage_directory(obj_dir, partition, name_hash))
    self.device_path = join(path, device)
    self.tmpdir = join(path, device, 'tmp')
    self.logger = logger
    self.disallowed_metadata_keys = disallowed_metadata_keys or []
    self.metadata = {}
    self.data_file = None
    self.fp = None
    self.iter_etag = None
    self.started_at_0 = False
    self.read_to_eof = False
    self.quarantined_dir = None
    self.keep_cache = False
    self.suppress_file_closing = False
    self.threadpool = threadpool or ThreadPool(nthreads=0)
    if not exists(self.datadir):
        return
    # Newest-first so the most recent .ts/.data wins.
    files = sorted(os.listdir(self.datadir), reverse=True)
    meta_file = None
    for afile in files:
        if afile.endswith('.ts'):
            # Tombstone: record its metadata and mark deleted.
            self.data_file = None
            with open(join(self.datadir, afile)) as mfp:
                self.metadata = read_metadata(mfp)
            self.metadata['deleted'] = True
            break
        if afile.endswith('.meta') and not meta_file:
            meta_file = join(self.datadir, afile)
        if afile.endswith('.data') and not self.data_file:
            self.data_file = join(self.datadir, afile)
            break
    if not self.data_file:
        return
    self.fp = open(self.data_file, 'rb')
    self.metadata = read_metadata(self.fp)
    if not keep_data_fp:
        self.close(verify_file=False)
    if meta_file:
        with open(meta_file) as mfp:
            # Drop every .data metadata key except the configured
            # protected ones, then overlay the newer .meta contents.
            # NOTE(review): deleting while iterating .keys() is only
            # safe because keys() returns a list on Python 2.
            for key in self.metadata.keys():
                if key.lower() not in self.disallowed_metadata_keys:
                    del self.metadata[key]
            self.metadata.update(read_metadata(mfp))
    if 'name' in self.metadata:
        if self.metadata['name'] != self.name:
            # Hash collision (or corrupted metadata): the stored path
            # belongs to a different object.
            self.logger.error(
                _('Client path %(client)s does not match '
                  'path stored in object metadata %(meta)s'),
                {'client': self.name, 'meta': self.metadata['name']})
            raise DiskFileCollision('Client path does not match path '
                                    'stored in object metadata')
def test_storage_directory(self):
    """storage_directory joins datadir, partition, and hash-suffix dir."""
    expected = 'objects/1/DEF/ABCDEF'
    got = utils.storage_directory('objects', '1', 'ABCDEF')
    self.assertEquals(got, expected)
def get_broker(self, account, container, part, node):
    """
    Build a ContainerBroker for the container DB on the given node.

    :param account: account name (unicode; hashed as UTF-8)
    :param container: container name (unicode; hashed as UTF-8)
    :param part: ring partition of the container
    :param node: ring node dict supplying the device name
    """
    hashed = hash_path(account.encode('utf-8'),
                       container.encode('utf-8'))
    db_path = os.path.join(self.root, node['device'],
                           storage_directory(DATADIR, part, hashed),
                           hashed + '.db')
    return ContainerBroker(db_path, account=account, container=container)