def test_get_container_metadata(self):
    """get_container_metadata() must combine the mocked container
    details with the directory's ctime/mtime into tuple-valued
    metadata entries."""
    def _fake_container_details(path):
        # (object list, object count, bytes used)
        return ['a', 'b', 'c'], 3, 47

    saved_gcd = utils.get_container_details
    utils.get_container_details = _fake_container_details
    tmpdir = tempfile.mkdtemp()
    try:
        expected = {
            utils.X_TYPE: (utils.CONTAINER, 0),
            utils.X_TIMESTAMP: (
                utils.normalize_timestamp(os.path.getctime(tmpdir)), 0),
            utils.X_PUT_TIMESTAMP: (
                utils.normalize_timestamp(os.path.getmtime(tmpdir)), 0),
            utils.X_OBJECTS_COUNT: (3, 0),
            utils.X_BYTES_USED: (47, 0),
        }
        assert utils.get_container_metadata(tmpdir) == expected
    finally:
        # Always restore the patched helper and remove the temp dir.
        utils.get_container_details = saved_gcd
        os.rmdir(tmpdir)
def test_create_account_metadata(self):
    # Verify create_account_metadata() writes a complete, freshly
    # initialized account metadata blob to the directory's xattrs and
    # also returns the same mapping.
    td = tempfile.mkdtemp()
    try:
        r_md = utils.create_account_metadata(td)
        xkey = _xkey(td, utils.METADATA_KEY)
        # Exactly one xattr key written, via one get and one set.
        assert len(_xattrs.keys()) == 1
        assert xkey in _xattrs
        assert _xattr_op_cnt['get'] == 1
        assert _xattr_op_cnt['set'] == 1
        # NOTE(review): sibling tests decode the stored blob via
        # deserialize_metadata(); this one assumes a raw pickle —
        # confirm which serialization this version of utils uses.
        md = pickle.loads(_xattrs[xkey])
        assert r_md == md
        for key in self.acct_keys:
            assert key in md, "Expected key %s in %r" % (key, md)
        # A brand-new account: zeroed counters, timestamps taken from
        # the directory's ctime/mtime.
        assert md[utils.X_TYPE] == (utils.ACCOUNT, 0)
        assert md[utils.X_TIMESTAMP] == (utils.normalize_timestamp(
            os.path.getctime(td)), 0)
        assert md[utils.X_PUT_TIMESTAMP] == (utils.normalize_timestamp(
            os.path.getmtime(td)), 0)
        assert md[utils.X_OBJECTS_COUNT] == (0, 0)
        assert md[utils.X_BYTES_USED] == (0, 0)
        assert md[utils.X_CONTAINER_COUNT] == (0, 0)
    finally:
        os.rmdir(td)
def __init__(self, root, drive, account, logger, **kwargs):
    # Account resource backed by a directory on a Gluster volume.
    super(DiskAccount, self).__init__(root, drive, account, logger,
                                      **kwargs)
    if self.account == 'gsexpiring':
        # Do not bother updating object count, container count and bytes
        # used. Return immediately before metadata validation and
        # creation happens.
        info = do_stat(self.datadir)
        if info and stat.S_ISDIR(info.st_mode):
            self._dir_exists = True
        # Stand-in metadata with zeroed counters; timestamps come from
        # the directory's ctime/mtime.
        # NOTE(review): no `if not info` guard here (unlike DiskDir) —
        # this relies on the account directory always existing, since
        # info.st_ctime would raise otherwise; confirm against callers.
        semi_fake_md = {
            'X-Object-Count': (0, 0),
            'X-Container-Count': (0, 0),
            'X-Timestamp': ((normalize_timestamp(info.st_ctime)), 0),
            'X-Type': ('Account', 0),
            'X-PUT-Timestamp': ((normalize_timestamp(info.st_mtime)), 0),
            'X-Bytes-Used': (0, 0)
        }
        self.metadata = semi_fake_md
        return
    # Since accounts should always exist (given an account maps to a
    # gluster volume directly, and the mount has already been checked at
    # the beginning of the REST API handling), just assert that that
    # assumption still holds.
    assert self._dir_exists_read_metadata()
    assert self._dir_exists
    # Re-create on-disk metadata if it is missing or fails validation.
    if not self.metadata or not validate_account(self.metadata):
        create_account_metadata(self.datadir)
        self.metadata = _read_metadata(self.datadir)
def test_get_account_metadata(self):
    """get_account_metadata() must merge the mocked account details
    with the directory's timestamps into tuple-valued metadata."""
    def _fake_account_details(path):
        # (container list, container count)
        return ['123', 'abc'], 2

    saved_gad = utils.get_account_details
    utils.get_account_details = _fake_account_details
    tmpdir = tempfile.mkdtemp()
    try:
        expected = {
            utils.X_TYPE: (utils.ACCOUNT, 0),
            utils.X_TIMESTAMP: (
                utils.normalize_timestamp(os.path.getctime(tmpdir)), 0),
            utils.X_PUT_TIMESTAMP: (
                utils.normalize_timestamp(os.path.getmtime(tmpdir)), 0),
            utils.X_OBJECTS_COUNT: (0, 0),
            utils.X_BYTES_USED: (0, 0),
            utils.X_CONTAINER_COUNT: (2, 0),
        }
        assert utils.get_account_metadata(tmpdir) == expected
    finally:
        # Always restore the patched helper and remove the temp dir.
        utils.get_account_details = saved_gad
        os.rmdir(tmpdir)
def test_open_no_metadata(self):
    """open() on a file with no stored metadata must generate it on
    the fly, and accessors must refuse calls once the file is
    closed."""
    obj_dir = os.path.join(self.td, "vol0", "bar")
    obj_path = os.path.join(obj_dir, "z")
    os.makedirs(obj_dir)
    with open(obj_path, "wb") as fd:
        fd.write("1234")
    file_stats = os.stat(obj_path)
    expected_md = {
        'Content-Length': 4,
        'ETag': md5("1234").hexdigest(),
        'X-Timestamp': normalize_timestamp(file_stats.st_ctime),
        'Content-Type': 'application/octet-stream'
    }
    gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
    # Nothing is opened or read before open() is entered.
    assert gdf._obj == "z"
    assert gdf._fd is None
    assert gdf._metadata is None
    assert not gdf._is_dir
    with gdf.open():
        assert gdf._data_file == obj_path
        assert not gdf._is_dir
        assert gdf._fd is not None
        assert gdf._metadata == expected_md
    # After the context exits, metadata/reader access must raise.
    self.assertRaises(DiskFileNotOpen, gdf.get_metadata)
    self.assertRaises(DiskFileNotOpen, gdf.reader)
    self.assertRaises(DiskFileNotOpen, gdf.__enter__)
def test_delete_file_unlink_error(self):
    """delete() must propagate EACCES raised by os.unlink() and leave
    the object file in place.

    Fix: the failure message read "Excepted an OSError" — corrected
    to "Expected an OSError".
    """
    the_path = os.path.join(self.td, "vol0", "bar")
    the_file = os.path.join(the_path, "z")
    os.makedirs(the_path)
    with open(the_file, "wb") as fd:
        fd.write("1234")
    gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
    assert gdf._obj == "z"
    assert gdf._data_file == the_file
    assert not gdf._is_dir
    later = float(gdf.read_metadata()['X-Timestamp']) + 1

    def _mock_os_unlink_eacces_err(f):
        raise OSError(errno.EACCES, os.strerror(errno.EACCES))

    stats = os.stat(the_path)
    try:
        # Drop write permission on the parent so the setup mirrors a
        # genuine permission failure.
        os.chmod(the_path, stats.st_mode & (~stat.S_IWUSR))
        # Handle the case os_unlink() raises an OSError
        with patch("os.unlink", _mock_os_unlink_eacces_err):
            try:
                gdf.delete(normalize_timestamp(later))
            except OSError as e:
                assert e.errno == errno.EACCES
            else:
                self.fail("Expected an OSError when unlinking file")
    finally:
        # Restore permissions regardless of outcome.
        os.chmod(the_path, stats.st_mode)
    # The failed delete must leave the object on disk.
    assert os.path.isdir(gdf._datadir)
    assert os.path.exists(os.path.join(gdf._datadir, gdf._obj))
def test_open_no_metadata(self):
    """open() must build metadata on the fly for a bare file and track
    the open/closed state via _disk_file_open."""
    obj_dir = os.path.join(self.td, "vol0", "bar")
    obj_path = os.path.join(obj_dir, "z")
    os.makedirs(obj_dir)
    with open(obj_path, "wb") as fd:
        fd.write("1234")
    ctime = os.stat(obj_path).st_ctime
    digest = md5()
    digest.update("1234")
    expected_md = {
        'Content-Length': 4,
        'ETag': digest.hexdigest(),
        'X-Timestamp': normalize_timestamp(ctime),
        'Content-Type': 'application/octet-stream'}
    gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
    # Closed state before open(): no fd, no metadata.
    assert gdf._obj == "z"
    assert gdf._fd is None
    assert gdf._disk_file_open is False
    assert gdf._metadata is None
    with gdf.open():
        assert gdf._data_file == obj_path
        assert gdf._fd is not None
        assert gdf._metadata == expected_md
        assert gdf._disk_file_open is True
    # Context exit flips the open flag back and disables accessors.
    assert gdf._disk_file_open is False
    self.assertRaises(DiskFileNotOpen, gdf.get_metadata)
    self.assertRaises(DiskFileNotOpen, gdf.reader)
    self.assertRaises(DiskFileNotOpen, gdf.__enter__)
def test_delete_file_unlink_error(self):
    """delete() must surface EACCES raised by os.unlink() and leave the
    object file untouched.

    Fix: the failure message read "Excepted an OSError" — corrected
    to "Expected an OSError".
    """
    the_path = os.path.join(self.td, "vol0", "bar")
    the_file = os.path.join(the_path, "z")
    os.makedirs(the_path)
    with open(the_file, "wb") as fd:
        fd.write("1234")
    gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
    assert gdf._obj == "z"
    assert gdf._data_file == the_file
    later = float(gdf.read_metadata()['X-Timestamp']) + 1

    def _mock_os_unlink_eacces_err(f):
        raise OSError(errno.EACCES, os.strerror(errno.EACCES))

    stats = os.stat(the_path)
    try:
        # Drop write permission on the parent so the setup mirrors a
        # genuine permission failure.
        os.chmod(the_path, stats.st_mode & (~stat.S_IWUSR))
        # Handle the case os_unlink() raises an OSError
        with patch("os.unlink", _mock_os_unlink_eacces_err):
            try:
                gdf.delete(normalize_timestamp(later))
            except OSError as e:
                assert e.errno == errno.EACCES
            else:
                self.fail("Expected an OSError when unlinking file")
    finally:
        # Restore permissions regardless of outcome.
        os.chmod(the_path, stats.st_mode)
    # The failed delete must leave the object on disk.
    assert os.path.isdir(gdf._put_datadir)
    assert os.path.exists(os.path.join(gdf._put_datadir, gdf._obj))
def test_delete_is_dir(self):
    """Deleting a directory object removes it from its datadir while
    the parent directory survives."""
    container_path = os.path.join(self.td, "vol0", "bar")
    dir_object = os.path.join(container_path, "d")
    os.makedirs(dir_object)
    gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "d")
    assert gdf._data_file == dir_object
    later = float(gdf.read_metadata()['X-Timestamp']) + 1
    gdf.delete(normalize_timestamp(later))
    assert os.path.isdir(gdf._datadir)
    assert not os.path.exists(os.path.join(gdf._datadir, gdf._obj))
def test_delete_is_dir(self):
    """A directory object deleted with a later timestamp disappears
    from the put datadir, which itself remains."""
    parent = os.path.join(self.td, "vol0", "bar")
    dpath = os.path.join(parent, "d")
    os.makedirs(dpath)
    gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "d")
    assert gdf._data_file == dpath
    delete_ts = float(gdf.read_metadata()['X-Timestamp']) + 1
    gdf.delete(normalize_timestamp(delete_ts))
    assert os.path.isdir(gdf._put_datadir)
    assert not os.path.exists(os.path.join(gdf._put_datadir, gdf._obj))
def __init__(self, path, drive, account, container, logger,
             uid=DEFAULT_UID, gid=DEFAULT_GID, **kwargs):
    # Container resource backed by a directory on a Gluster volume.
    super(DiskDir, self).__init__(path, drive, account, logger, **kwargs)
    self.uid = int(uid)
    self.gid = int(gid)
    self.container = container
    # Container directory lives under the account's datadir.
    self.datadir = os.path.join(self.datadir, self.container)
    if self.account == 'gsexpiring':
        # Do not bother crawling the entire container tree just to update
        # object count and bytes used. Return immediately before metadata
        # validation and creation happens.
        info = do_stat(self.datadir)
        if info and stat.S_ISDIR(info.st_mode):
            self._dir_exists = True
        if not info:
            # Container no longer exists.
            return
        # Stand-in metadata with zeroed counters; timestamps come from
        # the directory's ctime/mtime.
        semi_fake_md = {
            'X-Object-Count': (0, 0),
            'X-Timestamp': ((normalize_timestamp(info.st_ctime)), 0),
            'X-Type': ('container', 0),
            'X-PUT-Timestamp': ((normalize_timestamp(info.st_mtime)), 0),
            'X-Bytes-Used': (0, 0)
        }
        self.metadata = semi_fake_md
        return
    if not self._dir_exists_read_metadata():
        return
    # Re-create on-disk metadata if it is missing or fails validation.
    if not self.metadata:
        create_container_metadata(self.datadir)
        self.metadata = _read_metadata(self.datadir)
    else:
        if not validate_container(self.metadata):
            create_container_metadata(self.datadir)
            self.metadata = _read_metadata(self.datadir)
def test_create_container_metadata(self):
    """create_container_metadata() writes a zero-initialized container
    metadata blob to xattrs and returns the same mapping."""
    tmpdir = tempfile.mkdtemp()
    try:
        returned = utils.create_container_metadata(tmpdir)
        xkey = _xkey(tmpdir, utils.METADATA_KEY)
        # One xattr key written, via one get and one set.
        assert len(_xattrs.keys()) == 1
        assert xkey in _xattrs
        assert _xattr_op_cnt['get'] == 1
        assert _xattr_op_cnt['set'] == 1
        stored = deserialize_metadata(_xattrs[xkey])
        assert returned == stored
        for key in self.cont_keys:
            assert key in stored, "Expected key %s in %r" % (key, stored)
        ctime_ts = utils.normalize_timestamp(os.path.getctime(tmpdir))
        mtime_ts = utils.normalize_timestamp(os.path.getmtime(tmpdir))
        assert stored[utils.X_TYPE] == (utils.CONTAINER, 0)
        assert stored[utils.X_TIMESTAMP] == (ctime_ts, 0)
        assert stored[utils.X_PUT_TIMESTAMP] == (mtime_ts, 0)
        assert stored[utils.X_OBJECTS_COUNT] == (0, 0)
        assert stored[utils.X_BYTES_USED] == (0, 0)
    finally:
        os.rmdir(tmpdir)
def test_delete_same_timestamp(self):
    """delete() with a timestamp equal to the object's own must be a
    no-op: the file stays on disk."""
    container_path = os.path.join(self.td, "vol0", "bar")
    obj_path = os.path.join(container_path, "z")
    os.makedirs(container_path)
    with open(obj_path, "wb") as fd:
        fd.write("1234")
    gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
    assert gdf._obj == "z"
    assert gdf._data_file == obj_path
    now = float(gdf.read_metadata()['X-Timestamp'])
    gdf.delete(normalize_timestamp(now))
    assert os.path.isdir(gdf._put_datadir)
    assert os.path.exists(os.path.join(gdf._put_datadir, gdf._obj))
def test_get_object_metadata_file(self):
    """get_object_metadata() on a regular file reports FILE-typed
    metadata with the file's size, ctime timestamp and etag."""
    tf = tempfile.NamedTemporaryFile()
    tf.file.write('123')
    tf.file.flush()
    meta = utils.get_object_metadata(tf.name)
    for key in self.obj_keys:
        assert key in meta, "Expected key %s in %r" % (key, meta)
    assert meta[utils.X_TYPE] == utils.OBJECT
    assert meta[utils.X_OBJECT_TYPE] == utils.FILE
    assert meta[utils.X_CONTENT_TYPE] == utils.FILE_TYPE
    assert meta[utils.X_CONTENT_LENGTH] == os.path.getsize(tf.name)
    assert meta[utils.X_TIMESTAMP] == utils.normalize_timestamp(
        os.path.getctime(tf.name))
    assert meta[utils.X_ETAG] == utils._get_etag(tf.name)
def test_get_object_metadata_dir(self):
    """A directory yields DIR_NON_OBJECT metadata with zero length and
    the md5 of empty content as its etag."""
    tmpdir = tempfile.mkdtemp()
    try:
        meta = utils.get_object_metadata(tmpdir)
        for key in self.obj_keys:
            assert key in meta, "Expected key %s in %r" % (key, meta)
        assert meta[utils.X_TYPE] == utils.OBJECT
        assert meta[utils.X_OBJECT_TYPE] == utils.DIR_NON_OBJECT
        assert meta[utils.X_CONTENT_TYPE] == utils.DIR_TYPE
        assert meta[utils.X_CONTENT_LENGTH] == 0
        assert meta[utils.X_TIMESTAMP] == utils.normalize_timestamp(
            os.path.getctime(tmpdir))
        assert meta[utils.X_ETAG] == hashlib.md5().hexdigest()
    finally:
        os.rmdir(tmpdir)
def test_get_object_metadata_file(self):
    """Metadata generated for a plain file must describe it as a FILE
    object with matching size, timestamp and etag."""
    tmpfile = tempfile.NamedTemporaryFile()
    tmpfile.file.write('123')
    tmpfile.file.flush()
    obj_md = utils.get_object_metadata(tmpfile.name)
    for key in self.obj_keys:
        assert key in obj_md, "Expected key %s in %r" % (key, obj_md)
    expected_ts = utils.normalize_timestamp(
        os.path.getctime(tmpfile.name))
    assert obj_md[utils.X_TYPE] == utils.OBJECT
    assert obj_md[utils.X_OBJECT_TYPE] == utils.FILE
    assert obj_md[utils.X_CONTENT_TYPE] == utils.FILE_TYPE
    assert obj_md[utils.X_CONTENT_LENGTH] == os.path.getsize(tmpfile.name)
    assert obj_md[utils.X_TIMESTAMP] == expected_ts
    assert obj_md[utils.X_ETAG] == utils._get_etag(tmpfile.name)
def test_get_object_metadata_dir(self):
    """Metadata generated for a directory must mark it DIR_NON_OBJECT
    with zero content length and the empty-content etag."""
    dirpath = tempfile.mkdtemp()
    try:
        obj_md = utils.get_object_metadata(dirpath)
        for key in self.obj_keys:
            assert key in obj_md, "Expected key %s in %r" % (key, obj_md)
        expected_ts = utils.normalize_timestamp(os.path.getctime(dirpath))
        assert obj_md[utils.X_TYPE] == utils.OBJECT
        assert obj_md[utils.X_OBJECT_TYPE] == utils.DIR_NON_OBJECT
        assert obj_md[utils.X_CONTENT_TYPE] == utils.DIR_TYPE
        assert obj_md[utils.X_CONTENT_LENGTH] == 0
        assert obj_md[utils.X_TIMESTAMP] == expected_ts
        assert obj_md[utils.X_ETAG] == hashlib.md5().hexdigest()
    finally:
        os.rmdir(dirpath)
def test_read_metadata_optimize_open_close(self):
    # read_metadata() for non-GET paths must avoid an open()/close()
    # round-trip while the stored metadata is still valid, but must
    # re-open and re-read the file once the metadata has gone stale.
    the_path = os.path.join(self.td, "vol0", "bar")
    the_file = os.path.join(the_path, "z")
    os.makedirs(the_path)
    with open(the_file, "wb") as fd:
        fd.write("1234")
    init_md = {
        'X-Type': 'Object',
        'X-Object-Type': 'file',
        'Content-Length': 4,
        'ETag': md5("1234").hexdigest(),
        'X-Timestamp': normalize_timestamp(os.stat(the_file).st_ctime),
        'Content-Type': 'application/octet-stream'
    }
    # Seed the fake xattr store so the diskfile sees valid metadata.
    _metadata[_mapit(the_file)] = init_md
    gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
    assert gdf._obj == "z"
    assert gdf._fd is None
    assert gdf._disk_file_open is False
    assert gdf._metadata is None
    # Case 1
    # Ensure that reading metadata for non-GET requests
    # does not incur opening and closing the file when
    # metadata is NOT stale.
    mock_open = Mock()
    mock_close = Mock()
    with mock.patch("gluster.swift.obj.diskfile.do_open", mock_open):
        with mock.patch("gluster.swift.obj.diskfile.do_close",
                        mock_close):
            md = gdf.read_metadata()
            self.assertEqual(md, init_md)
            self.assertFalse(mock_open.called)
            self.assertFalse(mock_close.called)
    # Case 2
    # Ensure that reading metadata for non-GET requests
    # still opens and reads the file when metadata is stale
    with open(the_file, "a") as fd:
        # Append to the existing file to make the stored metadata
        # invalid/stale.
        fd.write("5678")
    md = gdf.read_metadata()
    # Check that the stale metadata is recalculated to account for
    # change in file content
    self.assertNotEqual(md, init_md)
    self.assertEqual(md['Content-Length'], 8)
    self.assertEqual(md['ETag'], md5("12345678").hexdigest())
def test_delete_file_not_found(self):
    """delete() must succeed quietly when the data file has already
    vanished from disk."""
    container_path = os.path.join(self.td, "vol0", "bar")
    obj_path = os.path.join(container_path, "z")
    os.makedirs(container_path)
    with open(obj_path, "wb") as fd:
        fd.write("1234")
    gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
    assert gdf._obj == "z"
    assert gdf._data_file == obj_path
    later = float(gdf.read_metadata()['X-Timestamp']) + 1
    # Handle the case the file is not in the directory listing.
    os.unlink(obj_path)
    gdf.delete(normalize_timestamp(later))
    assert os.path.isdir(gdf._put_datadir)
    assert not os.path.exists(os.path.join(gdf._put_datadir, gdf._obj))
def test_read_metadata_optimize_open_close(self):
    # read_metadata() for non-GET paths must avoid an open()/close()
    # round-trip while the stored metadata is still valid, but must
    # re-open and re-read the file once the metadata has gone stale.
    the_path = os.path.join(self.td, "vol0", "bar")
    the_file = os.path.join(the_path, "z")
    os.makedirs(the_path)
    with open(the_file, "wb") as fd:
        fd.write("1234")
    init_md = {
        'X-Type': 'Object',
        'X-Object-Type': 'file',
        'Content-Length': 4,
        'ETag': md5("1234").hexdigest(),
        'X-Timestamp': normalize_timestamp(os.stat(the_file).st_ctime),
        'Content-Type': 'application/octet-stream'}
    # Seed the fake xattr store so the diskfile sees valid metadata.
    _metadata[_mapit(the_file)] = init_md
    gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
    assert gdf._obj == "z"
    assert gdf._fd is None
    assert gdf._disk_file_open is False
    assert gdf._metadata is None
    # Case 1
    # Ensure that reading metadata for non-GET requests
    # does not incur opening and closing the file when
    # metadata is NOT stale.
    mock_open = Mock()
    mock_close = Mock()
    with mock.patch("gluster.swift.obj.diskfile.do_open", mock_open):
        with mock.patch("gluster.swift.obj.diskfile.do_close",
                        mock_close):
            md = gdf.read_metadata()
            self.assertEqual(md, init_md)
            self.assertFalse(mock_open.called)
            self.assertFalse(mock_close.called)
    # Case 2
    # Ensure that reading metadata for non-GET requests
    # still opens and reads the file when metadata is stale
    with open(the_file, "a") as fd:
        # Append to the existing file to make the stored metadata
        # invalid/stale.
        fd.write("5678")
    md = gdf.read_metadata()
    # Check that the stale metadata is recalculated to account for
    # change in file content
    self.assertNotEqual(md, init_md)
    self.assertEqual(md['Content-Length'], 8)
    self.assertEqual(md['ETag'], md5("12345678").hexdigest())
def test_create_object_metadata_file(self):
    """create_object_metadata() on a file stores FILE-typed metadata
    in xattrs and returns the same mapping."""
    tf = tempfile.NamedTemporaryFile()
    tf.file.write('4567')
    tf.file.flush()
    returned = utils.create_object_metadata(tf.name)
    xkey = _xkey(tf.name, utils.METADATA_KEY)
    # Exactly one xattr key written via a single set.
    assert len(_xattrs.keys()) == 1
    assert xkey in _xattrs
    assert _xattr_op_cnt['set'] == 1
    stored = deserialize_metadata(_xattrs[xkey])
    assert returned == stored
    for key in self.obj_keys:
        assert key in stored, "Expected key %s in %r" % (key, stored)
    assert stored[utils.X_TYPE] == utils.OBJECT
    assert stored[utils.X_OBJECT_TYPE] == utils.FILE
    assert stored[utils.X_CONTENT_TYPE] == utils.FILE_TYPE
    assert stored[utils.X_CONTENT_LENGTH] == os.path.getsize(tf.name)
    assert stored[utils.X_TIMESTAMP] == utils.normalize_timestamp(
        os.path.getctime(tf.name))
    assert stored[utils.X_ETAG] == utils._get_etag(tf.name)
def test_create_object_metadata_file(self):
    """Creating object metadata for a regular file must persist and
    return identical FILE-typed metadata."""
    tmpfile = tempfile.NamedTemporaryFile()
    tmpfile.file.write('4567')
    tmpfile.file.flush()
    ret_md = utils.create_object_metadata(tmpfile.name)
    xkey = _xkey(tmpfile.name, utils.METADATA_KEY)
    assert len(_xattrs.keys()) == 1
    assert xkey in _xattrs
    assert _xattr_op_cnt['set'] == 1
    disk_md = deserialize_metadata(_xattrs[xkey])
    assert ret_md == disk_md
    for key in self.obj_keys:
        assert key in disk_md, "Expected key %s in %r" % (key, disk_md)
    expected_ts = utils.normalize_timestamp(
        os.path.getctime(tmpfile.name))
    assert disk_md[utils.X_TYPE] == utils.OBJECT
    assert disk_md[utils.X_OBJECT_TYPE] == utils.FILE
    assert disk_md[utils.X_CONTENT_TYPE] == utils.FILE_TYPE
    assert disk_md[utils.X_CONTENT_LENGTH] == os.path.getsize(
        tmpfile.name)
    assert disk_md[utils.X_TIMESTAMP] == expected_ts
    assert disk_md[utils.X_ETAG] == utils._get_etag(tmpfile.name)
def test_create_object_metadata_dir(self):
    """create_object_metadata() on a directory stores DIR_NON_OBJECT
    metadata in xattrs and returns the same mapping."""
    tmpdir = tempfile.mkdtemp()
    try:
        returned = utils.create_object_metadata(tmpdir)
        xkey = _xkey(tmpdir, utils.METADATA_KEY)
        # Exactly one xattr key written via a single set.
        assert len(_xattrs.keys()) == 1
        assert xkey in _xattrs
        assert _xattr_op_cnt['set'] == 1
        stored = deserialize_metadata(_xattrs[xkey])
        assert returned == stored
        for key in self.obj_keys:
            assert key in stored, "Expected key %s in %r" % (key, stored)
        assert stored[utils.X_TYPE] == utils.OBJECT
        assert stored[utils.X_OBJECT_TYPE] == utils.DIR_NON_OBJECT
        assert stored[utils.X_CONTENT_TYPE] == utils.DIR_TYPE
        assert stored[utils.X_CONTENT_LENGTH] == 0
        assert stored[utils.X_TIMESTAMP] == utils.normalize_timestamp(
            os.path.getctime(tmpdir))
        assert stored[utils.X_ETAG] == hashlib.md5().hexdigest()
    finally:
        os.rmdir(tmpdir)
def test_create_object_metadata_dir(self):
    """Creating object metadata for a directory must persist and
    return identical DIR_NON_OBJECT metadata."""
    dirpath = tempfile.mkdtemp()
    try:
        ret_md = utils.create_object_metadata(dirpath)
        xkey = _xkey(dirpath, utils.METADATA_KEY)
        assert len(_xattrs.keys()) == 1
        assert xkey in _xattrs
        assert _xattr_op_cnt['set'] == 1
        disk_md = deserialize_metadata(_xattrs[xkey])
        assert ret_md == disk_md
        for key in self.obj_keys:
            assert key in disk_md, "Expected key %s in %r" % (key,
                                                              disk_md)
        expected_ts = utils.normalize_timestamp(os.path.getctime(dirpath))
        assert disk_md[utils.X_TYPE] == utils.OBJECT
        assert disk_md[utils.X_OBJECT_TYPE] == utils.DIR_NON_OBJECT
        assert disk_md[utils.X_CONTENT_TYPE] == utils.DIR_TYPE
        assert disk_md[utils.X_CONTENT_LENGTH] == 0
        assert disk_md[utils.X_TIMESTAMP] == expected_ts
        assert disk_md[utils.X_ETAG] == hashlib.md5().hexdigest()
    finally:
        os.rmdir(dirpath)