def __init__(self, path, device, partition, account, container, logger,
             uid=DEFAULT_UID, gid=DEFAULT_GID, fs_object=None):
    self.root = path
    # The account name doubles as the device name here.
    device = account
    if container:
        self.name = container
    else:
        self.name = None
    if self.name:
        self.datadir = os.path.join(path, account, self.name)
    else:
        self.datadir = os.path.join(path, device)
    self.account = account
    self.device_path = os.path.join(path, device)
    if not check_mount(path, device):
        check_valid_account(account, fs_object)
    self.logger = logger
    self.metadata = {}
    self.uid = int(uid)
    self.gid = int(gid)
    # Create a dummy db_file in /etc/swift
    self.db_file = '/etc/swift/db_file.db'
    if not os.path.exists(self.db_file):
        open(self.db_file, 'w+').close()
    self.dir_exists = os.path.exists(self.datadir)
    if self.dir_exists:
        try:
            self.metadata = read_metadata(self.datadir)
        except EOFError:
            create_container_metadata(self.datadir)
    else:
        # Nothing more to do until a PUT creates the directory.
        return
    if container:
        if not self.metadata:
            create_container_metadata(self.datadir)
            self.metadata = read_metadata(self.datadir)
        elif not validate_container(self.metadata):
            create_container_metadata(self.datadir)
            self.metadata = read_metadata(self.datadir)
    else:
        if not self.metadata:
            create_account_metadata(self.datadir)
            self.metadata = read_metadata(self.datadir)
        elif not validate_account(self.metadata):
            create_account_metadata(self.datadir)
            self.metadata = read_metadata(self.datadir)
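# Usage sketch (illustrative, not from the source): the constructor above
# serves both roles. The class name `DiskDir` and the paths are assumptions.
# Passing a container name yields a container broker rooted at
# <path>/<account>/<container>; passing container=None yields an account
# broker rooted at <path>/<account>.
#
#   cont_broker = DiskDir('/mnt/gluster-object', 'vol0', 0, 'vol0',
#                         'pictures', logger)
#   acct_broker = DiskDir('/mnt/gluster-object', 'vol0', 0, 'vol0',
#                         None, logger)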
def get_info(self, include_metadata=False):
    """
    Get global data for the account.

    :returns: dict with keys: account, created_at, put_timestamp,
              delete_timestamp, container_count, object_count,
              bytes_used, hash, id
    """
    metadata = {}
    if os.path.exists(self.datadir):
        metadata = read_metadata(self.datadir)
        if not metadata:
            metadata = create_account_metadata(self.datadir)
    data = {'account': self.account, 'created_at': '1',
            'put_timestamp': '1', 'delete_timestamp': '1',
            'container_count': metadata.get(X_CONTAINER_COUNT, 0),
            'object_count': metadata.get(X_OBJECTS_COUNT, 0),
            'bytes_used': metadata.get(X_BYTES_USED, 0),
            'hash': '', 'id': ''}
    if include_metadata:
        data['metadata'] = metadata
    return data
def get_info(self, include_metadata=False):
    """
    Get global data for the container.

    :returns: dict with keys: account, container, created_at,
              put_timestamp, delete_timestamp, object_count, bytes_used,
              reported_put_timestamp, reported_delete_timestamp,
              reported_object_count, reported_bytes_used, hash, id,
              x_container_sync_point1, and x_container_sync_point2.
              If include_metadata is set, metadata is included as a key
              pointing to a dict of tuples of the metadata.
    """
    # TODO: delete_timestamp, reported_put_timestamp,
    # reported_delete_timestamp, reported_object_count,
    # reported_bytes_used, created_at
    metadata = {}
    if os.path.exists(self.datadir):
        metadata = read_metadata(self.datadir)
    data = {'account': self.account, 'container': self.name,
            'object_count': metadata.get(X_OBJECTS_COUNT, '0'),
            'bytes_used': metadata.get(X_BYTES_USED, '0'),
            'hash': '', 'id': '', 'created_at': '1',
            'put_timestamp': metadata.get(X_PUT_TIMESTAMP, '0'),
            'delete_timestamp': '1',
            'reported_put_timestamp': '1',
            'reported_delete_timestamp': '1',
            'reported_object_count': '1',
            'reported_bytes_used': '1'}
    if include_metadata:
        data['metadata'] = metadata
    return data
def list_objects_iter(self, limit, marker, end_marker, prefix,
                      delimiter, path):
    """
    Returns tuple of name, created_at, size, content_type, etag.
    """
    if path:
        prefix = path = path.rstrip('/') + '/'
        delimiter = '/'
    if delimiter and not prefix:
        prefix = ''

    container_list = []

    objects, object_count, bytes_used = get_container_details(self.datadir)
    # Keep the cached counts in sync with what is actually on disk.
    if int(self.metadata[X_OBJECTS_COUNT][0]) != object_count or \
            int(self.metadata[X_BYTES_USED][0]) != bytes_used:
        self.metadata[X_OBJECTS_COUNT] = (object_count, 0)
        self.metadata[X_BYTES_USED] = (bytes_used, 0)
        self.update_container(self.metadata)

    if objects:
        objects.sort()
    if objects and prefix:
        objects = self.filter_prefix(objects, prefix)
    if objects and delimiter:
        objects = self.filter_delimiter(objects, delimiter, prefix)
    if objects and marker:
        objects = self.filter_marker(objects, marker)
    if objects and end_marker:
        objects = self.filter_end_marker(objects, end_marker)
    if objects and limit:
        if len(objects) > limit:
            objects = self.filter_limit(objects, limit)

    for obj in objects:
        list_item = [obj]
        obj_path = os.path.join(self.datadir, obj)
        metadata = read_metadata(obj_path)
        if not metadata or not validate_object(metadata):
            metadata = create_object_metadata(obj_path)
        if metadata:
            list_item.append(metadata[X_TIMESTAMP])
            list_item.append(int(metadata[X_CONTENT_LENGTH]))
            list_item.append(metadata[X_CONTENT_TYPE])
            list_item.append(metadata[X_ETAG])
        container_list.append(list_item)
    return container_list
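# Usage sketch (illustrative): `broker` is assumed to be an instance of the
# container class above; the argument values are made up. When object
# metadata is readable, each entry unpacks as
# [name, created_at, size, content_type, etag].
def _example_list_objects(broker):
    listing = broker.list_objects_iter(limit=100, marker='', end_marker='',
                                       prefix='photos/', delimiter='/',
                                       path=None)
    for name, created_at, size, content_type, etag in listing:
        print '%s (%s bytes, etag %s)' % (name, size, etag)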
def test_read_metadata(self):
    path = "/tmp/foo/r"
    expected_d = {'a': 'y'}
    xkey = _xkey(path, utils.METADATA_KEY)
    _xattrs[xkey] = pickle.dumps(expected_d, utils.PICKLE_PROTOCOL)
    res_d = utils.read_metadata(path)
    assert res_d == expected_d, "Expected %r, result %r" % (expected_d, res_d)
    assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
def list_objects_iter(self, limit, marker, end_marker, prefix,
                      delimiter, path):
    """
    Returns tuple of name, created_at, size, content_type, etag.
    """
    if path:
        prefix = path = path.rstrip('/') + '/'
        delimiter = '/'
    if delimiter and not prefix:
        prefix = ''

    container_list = []

    objects, object_count, bytes_used = get_container_details(self.datadir)
    if int(self.metadata[X_OBJECTS_COUNT]) != object_count or \
            int(self.metadata[X_BYTES_USED]) != bytes_used:
        self.metadata[X_OBJECTS_COUNT] = object_count
        self.metadata[X_BYTES_USED] = bytes_used
        self.update_container(self.metadata)

    if objects:
        objects.sort()
    if objects and prefix:
        objects = self.filter_prefix(objects, prefix)
    if objects and delimiter:
        objects = self.filter_delimiter(objects, delimiter, prefix)
    if objects and marker:
        objects = self.filter_marker(objects, marker)
    if objects and end_marker:
        objects = self.filter_end_marker(objects, end_marker)
    if objects and limit:
        if len(objects) > limit:
            objects = self.filter_limit(objects, limit)

    for obj in objects:
        list_item = [obj]
        obj_path = os.path.join(self.datadir, obj)
        metadata = read_metadata(obj_path)
        if not metadata or not validate_object(metadata):
            metadata = create_object_metadata(obj_path)
        if metadata:
            list_item.append(metadata[X_TIMESTAMP])
            list_item.append(int(metadata[X_CONTENT_LENGTH]))
            list_item.append(metadata[X_CONTENT_TYPE])
            list_item.append(metadata[X_ETAG])
        container_list.append(list_item)
    return container_list
def __init__(self, root, account, fs_object=None):
    self.root = root
    self.account = account
    self.datadir = os.path.join(self.root, self.account)
    if not check_mount(root, account):
        check_valid_account(account, fs_object)
    self.metadata = read_metadata(self.datadir)
    if not self.metadata or not validate_account(self.metadata):
        self.metadata = create_account_metadata(self.datadir)
def list_containers_iter(self, limit, marker, end_marker, prefix,
                         delimiter):
    """
    Return tuple of name, object_count, bytes_used, 0(is_subdir).
    Used by account server.
    """
    if delimiter and not prefix:
        prefix = ''

    account_list = []

    containers, container_count = get_account_details(self.datadir)
    # Refresh the cached container count if it has drifted from disk.
    if int(self.metadata[X_CONTAINER_COUNT]) != container_count:
        self.metadata[X_CONTAINER_COUNT] = container_count
        self.update_account(self.metadata)

    if containers:
        containers.sort()
    if containers and prefix:
        containers = self.filter_prefix(containers, prefix)
    if containers and delimiter:
        containers = self.filter_delimiter(containers, delimiter, prefix)
    if containers and marker:
        containers = self.filter_marker(containers, marker)
    if containers and end_marker:
        containers = self.filter_end_marker(containers, end_marker)
    if containers and limit:
        if len(containers) > limit:
            containers = self.filter_limit(containers, limit)

    for cont in containers:
        list_item = [cont]
        cont_path = os.path.join(self.datadir, cont)
        metadata = read_metadata(cont_path)
        if not metadata or not validate_container(metadata):
            metadata = create_container_metadata(cont_path)
        if metadata:
            list_item.append(metadata[X_OBJECTS_COUNT])
            list_item.append(metadata[X_BYTES_USED])
            list_item.append(0)
        account_list.append(list_item)
    return account_list
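# Usage sketch (illustrative): `broker` is assumed to be an instance of the
# account class above. Entries normally unpack as
# (name, object_count, bytes_used, is_subdir), matching what the Swift
# account server expects.
def _example_list_containers(broker):
    for name, object_count, bytes_used, is_subdir in \
            broker.list_containers_iter(limit=100, marker='',
                                        end_marker='', prefix='',
                                        delimiter=None):
        print '%s: %s objects, %s bytes' % (name, object_count, bytes_used)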
def test_read_metadata_multiple(self):
    path = "/tmp/foo/r"
    expected_d = {'a': 'y' * 150000}
    expected_p = pickle.dumps(expected_d, utils.PICKLE_PROTOCOL)
    for i in range(3):
        xkey = _xkey(path, "%s%s" % (utils.METADATA_KEY, i or ''))
        _xattrs[xkey] = expected_p[:utils.MAX_XATTR_SIZE]
        expected_p = expected_p[utils.MAX_XATTR_SIZE:]
    assert not expected_p
    res_d = utils.read_metadata(path)
    assert res_d == expected_d, "Expected %r, result %r" % (expected_d, res_d)
    assert _xattr_op_cnt['get'] == 3, "%r" % _xattr_op_cnt
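# The test above encodes the chunking convention read_metadata expects: the
# pickled dict is split into MAX_XATTR_SIZE slices stored under the keys
# METADATA_KEY, METADATA_KEY + '1', METADATA_KEY + '2', and so on. A minimal
# sketch of the matching write side, assuming the pyxattr module backs the
# real store (the helper name `write_metadata_sketch` is ours, not the
# library's):
import xattr  # assumed; the tests above stub the xattr layer instead

def write_metadata_sketch(path, metadata):
    serialized = pickle.dumps(metadata, utils.PICKLE_PROTOCOL)
    key = 0
    while serialized:
        # First chunk gets the bare key, later chunks get a numeric suffix.
        xattr.setxattr(path, '%s%s' % (utils.METADATA_KEY, key or ''),
                       serialized[:utils.MAX_XATTR_SIZE])
        serialized = serialized[utils.MAX_XATTR_SIZE:]
        key += 1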
def test_read_metadata_err(self):
    path = "/tmp/foo/r"
    expected_d = {'a': 'y'}
    xkey = _xkey(path, utils.METADATA_KEY)
    _xattrs[xkey] = pickle.dumps(expected_d, utils.PICKLE_PROTOCOL)
    _xattr_err[xkey] = errno.EOPNOTSUPP
    try:
        utils.read_metadata(path)
    except IOError as e:
        assert e.errno == errno.EOPNOTSUPP
        assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
    else:
        self.fail("Expected an IOError exception on get")
def test_read_metadata_multiple_one_missing(self):
    path = "/tmp/foo/r"
    expected_d = {'a': 'y' * 150000}
    expected_p = pickle.dumps(expected_d, utils.PICKLE_PROTOCOL)
    # Store only the first two of the three expected chunks; the partial
    # pickle must be treated as absent and the stale keys cleaned up.
    for i in range(2):
        xkey = _xkey(path, "%s%s" % (utils.METADATA_KEY, i or ''))
        _xattrs[xkey] = expected_p[:utils.MAX_XATTR_SIZE]
        expected_p = expected_p[utils.MAX_XATTR_SIZE:]
    assert len(expected_p) <= utils.MAX_XATTR_SIZE
    res_d = utils.read_metadata(path)
    assert res_d == {}
    assert _xattr_op_cnt['get'] == 3, "%r" % _xattr_op_cnt
    assert len(_xattrs.keys()) == 0, \
        "Expected 0 keys, found %d" % len(_xattrs.keys())
def _read_metadata(dd):
    """
    Filter read metadata so that it always returns a tuple that includes
    some kind of timestamp. With 1.4.8 of the Swift integration the
    timestamps were not stored. Here we fabricate timestamps for volumes
    where the existing data has no timestamp (that is, stored data is not
    a tuple), allowing us a measure of backward compatibility.

    FIXME: At this time it does not appear that the timestamps on each
    metadata are used for much, so this should not hurt anything.
    """
    metadata_i = read_metadata(dd)
    metadata = {}
    timestamp = 0
    for key, value in metadata_i.iteritems():
        if not isinstance(value, tuple):
            value = (value, timestamp)
        metadata[key] = value
    return metadata
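# Illustration of the shim above (key names and values are made up): bare
# 1.4.8-era values get wrapped with a fabricated timestamp of 0, while
# values that are already tuples pass through untouched.
#
#   read_metadata(dd)  -> {'X-Type': 'Account', 'X-PUT-Timestamp': ('1', 0)}
#   _read_metadata(dd) -> {'X-Type': ('Account', 0), 'X-PUT-Timestamp': ('1', 0)}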
def test_read_metadata_notfound(self):
    path = "/tmp/foo/r"
    res_d = utils.read_metadata(path)
    assert res_d == {}
    assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
def __init__(self, path, device, partition, account, container, obj,
             logger, keep_data_fp=False, disk_chunk_size=65536,
             uid=DEFAULT_UID, gid=DEFAULT_GID):
    self.disk_chunk_size = disk_chunk_size
    device = account
    # Object names must not begin or end with '/', e.g. /a, a/, /a/b/.
    obj = obj.strip("/")
    if "/" in obj:
        self.obj_path, self.obj = obj.rsplit("/", 1)
    else:
        self.obj_path = ""
        self.obj = obj

    if self.obj_path:
        self.name = "/".join((container, self.obj_path))
    else:
        self.name = container
    # Absolute path for the obj directory.
    self.datadir = os.path.join(path, device, self.name)
    self.device_path = os.path.join(path, device)
    if not check_mount(path, device):
        check_valid_account(account)
    self.container_path = os.path.join(path, device, container)
    self.tmpdir = os.path.join(path, device, "tmp")
    self.logger = logger
    self.metadata = {}
    self.meta_file = None
    self.data_file = None
    self.fp = None
    self.iter_etag = None
    self.started_at_0 = False
    self.read_to_eof = False
    self.quarantined_dir = None
    self.keep_cache = False
    self.is_dir = False
    self.is_valid = True
    self.uid = int(uid)
    self.gid = int(gid)

    obj_path = os.path.join(self.datadir, self.obj)
    if not os.path.exists(obj_path):
        return

    self.data_file = obj_path
    self.metadata = read_metadata(obj_path)
    if not self.metadata:
        create_object_metadata(obj_path)
        self.metadata = read_metadata(obj_path)
    if not validate_object(self.metadata):
        create_object_metadata(obj_path)
        self.metadata = read_metadata(obj_path)
    self.filter_metadata()

    if os.path.isdir(obj_path):
        self.is_dir = True
    else:
        self.fp = do_open(self.data_file, "rb")
        if not keep_data_fp:
            self.close(verify_file=False)
def __init__(self, path, device, partition, account, container, obj,
             logger, keep_data_fp=False, disk_chunk_size=65536,
             uid=DEFAULT_UID, gid=DEFAULT_GID, fs_object=None):
    self.disk_chunk_size = disk_chunk_size
    device = account
    # Object names must not begin or end with '/', e.g. /a, a/, /a/b/.
    obj = obj.strip('/')
    if '/' in obj:
        self.obj_path, self.obj = obj.rsplit('/', 1)
    else:
        self.obj_path = ''
        self.obj = obj

    if self.obj_path:
        self.name = '/'.join((container, self.obj_path))
    else:
        self.name = container
    # Absolute path for the obj directory.
    self.datadir = os.path.join(path, device, self.name)
    self.device_path = os.path.join(path, device)
    if not check_mount(path, device):
        check_valid_account(account, fs_object)
    self.container_path = os.path.join(path, device, container)
    self.tmpdir = os.path.join(path, device, 'tmp')
    self.logger = logger
    self.metadata = {}
    self.meta_file = None
    self.data_file = None
    self.fp = None
    self.iter_etag = None
    self.started_at_0 = False
    self.read_to_eof = False
    self.quarantined_dir = None
    self.keep_cache = False
    self.is_dir = False
    self.is_valid = True
    self.uid = int(uid)
    self.gid = int(gid)

    obj_path = os.path.join(self.datadir, self.obj)
    if not os.path.exists(obj_path):
        return

    self.data_file = obj_path
    self.metadata = read_metadata(obj_path)
    if not self.metadata:
        create_object_metadata(obj_path)
        self.metadata = read_metadata(obj_path)
    if not validate_object(self.metadata):
        create_object_metadata(obj_path)
        self.metadata = read_metadata(obj_path)
    self.filter_metadata()

    if os.path.isdir(obj_path):
        self.is_dir = True
    else:
        self.fp = do_open(self.data_file, 'rb')
        if not keep_data_fp:
            self.close(verify_file=False)
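# Usage sketch (illustrative): constructing the object-file wrapper above and
# streaming its contents. The class name `DiskFile` and every argument value
# here are assumptions, not taken from the source.
import logging

def _example_read_object():
    disk_file = DiskFile('/mnt/gluster-object', 'vol0', '0', 'vol0',
                         'pictures', 'cat.jpg', logging.getLogger(),
                         keep_data_fp=True)
    if disk_file.data_file and not disk_file.is_dir:
        try:
            while True:
                chunk = disk_file.fp.read(disk_file.disk_chunk_size)
                if not chunk:
                    break
        finally:
            disk_file.close(verify_file=False)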