def test_pyfile():
    """Source a temporary .py file via :pyfile and check its side effect."""
    fname = 'pyfile.py'
    text = 'vim.command("let set_by_pyfile = 123")'
    with open(fname, 'w') as f:
        f.write(text)
    try:
        vim.command('pyfile pyfile.py')
        eq(vim.vars['set_by_pyfile'], 123)
    finally:
        # always remove the temp file, even if the assertion above fails
        os.unlink(fname)
def test_constructor(self):
    """UniqueIndex construction: rejects bad input, accepts unique 1-D labels."""
    # the data argument is required
    with self.assertRaises(TypeError):
        # noinspection PyArgumentList
        UniqueIndex()
    # 2-D input is rejected
    with self.assertRaises(TypeError):
        UniqueIndex([['A', 'C'], ['B', 'F']])
    # duplicate labels are rejected
    with self.assertRaises(ValueError):
        UniqueIndex(['A', 'B', 'D', 'B'])
    # well-formed input
    labels = ['A', 'C', 'B', 'F']
    idx = UniqueIndex(labels)
    aeq(labels, idx)
    eq(1, idx.ndim)
    eq(4, len(idx))
    # well-formed input with an explicit dtype
    typed = np.array(['A', 'C', 'B', 'F'], dtype='S1')
    idx = UniqueIndex(typed, dtype='S1')
    aeq(typed, idx)
def test_constructor(self):
    """HaplotypeArray construction: reject invalid inputs, accept haploid data."""
    # missing data arg
    with self.assertRaises(TypeError):
        # noinspection PyArgumentList
        HaplotypeArray()
    # data has wrong dtype
    data = 'foo bar'
    with self.assertRaises(TypeError):
        HaplotypeArray(data)
    # data has wrong dtype
    data = [4., 5., 3.7]
    with self.assertRaises(TypeError):
        HaplotypeArray(data)
    # data has wrong dimensions
    data = [1, 2, 3]
    with self.assertRaises(TypeError):
        HaplotypeArray(data)
    # data has wrong dimensions
    data = diploid_genotype_data  # use GenotypeArray instead
    with self.assertRaises(TypeError):
        HaplotypeArray(data)
    # haploid data (typed)
    h = HaplotypeArray(haplotype_data, dtype='i1')
    aeq(haplotype_data, h)
    eq(np.int8, h.dtype)
def test_shortname_default():
    """Without an explicit shortname, Remote falls back to its name."""
    fake_ssh = fudge.Fake('SSHConnection')
    r = remote.Remote(name='*****@*****.**', ssh=fake_ssh)
    eq(r.shortname, '*****@*****.**')
    eq(str(r), '*****@*****.**')
def test_constructor(self):
    """AlleleCountsArray construction: reject invalid inputs, accept counts."""
    # missing data arg
    with self.assertRaises(TypeError):
        # noinspection PyArgumentList
        AlleleCountsArray()
    # data has wrong dtype
    data = 'foo bar'
    with self.assertRaises(TypeError):
        AlleleCountsArray(data)
    # data has wrong dtype
    data = [4., 5., 3.7]
    with self.assertRaises(TypeError):
        AlleleCountsArray(data)
    # data has wrong dimensions
    data = [1, 2, 3]
    with self.assertRaises(TypeError):
        AlleleCountsArray(data)
    # data has wrong dimensions
    data = diploid_genotype_data
    with self.assertRaises(TypeError):
        AlleleCountsArray(data)
    # valid data (typed)
    ac = AlleleCountsArray(allele_counts_data, dtype='u1')
    aeq(allele_counts_data, ac)
    eq(np.uint8, ac.dtype)
def test_fast_import_parent():
    """A second fast_import chained onto an explicit parent keeps prior files."""
    tmp = maketemp()
    path = os.path.join(tmp, 'repo.git')
    repository.init(path=path)
    repository.fast_import(
        git_dir=path,
        commit_msg='foo initial bar',
        committer='Mr. Unit Test <*****@*****.**>',
        files=[
            ('foo', 'bar\n'),
        ],
    )
    # second commit, explicitly parented on master^0
    repository.fast_import(
        git_dir=path,
        commit_msg='another',
        committer='Sam One Else <*****@*****.**>',
        parent='refs/heads/master^0',
        files=[
            ('quux', 'thud\n'),
        ],
    )
    export = os.path.join(tmp, 'export')
    repository.export(
        git_dir=path,
        path=export,
    )
    # files from both commits must be present in the export
    eq(sorted(os.listdir(export)), sorted(['foo', 'quux']))
def test_init_templates():
    """repository.init with a template dir installs custom hooks and files."""
    tmp = maketemp()
    path = os.path.join(tmp, 'repo.git')
    templatedir = os.path.join(
        os.path.dirname(__file__),
        'mocktemplates',
    )
    repository.init(path, template=templatedir)
    # re-init without a template must not clobber the templated content
    repository.init(path)
    got = readFile(os.path.join(path, 'no-confusion'))
    eq(got, 'i should show up\n')
    check_mode(
        os.path.join(path, 'hooks', 'post-update'),
        # 0o755 works on Python 2.6+ and Python 3; the old 0755 spelling
        # is a syntax error on Python 3
        0o755,
        is_file=True,
    )
    got = readFile(os.path.join(path, 'hooks', 'post-update'))
    eq(got, '#!/bin/sh\n# i can override standard templates\n')
    # standard templates are there, too
    assert (
        # compatibility with git <1.6.0
        os.path.isfile(os.path.join(path, 'hooks', 'pre-rebase'))
        # for git >=1.6.0
        or os.path.isfile(os.path.join(path, 'hooks', 'pre-rebase.sample'))
    )
def test_read_yes_all():
    """@all membership grants readonly access to the listed repository."""
    cfg = RawConfigParser()
    section = 'group fooers'
    cfg.add_section(section)
    cfg.set(section, 'members', '@all')
    cfg.set(section, 'readonly', 'foo/bar')
    got = access.haveAccess(config=cfg, user='******', mode='readonly', path='foo/bar')
    eq(got, ('repositories', 'foo/bar'))
def test_has_initial_commit_fail_notAGitDir():
    """has_initial_commit on a directory that is not a git repo raises."""
    tmp = maketemp()
    err = assert_raises(
        repository.GitRevParseError,
        repository.has_initial_commit,
        git_dir=tmp,
    )
    eq(str(err), 'rev-parse failed: exit status 128')
def test_write_yes_map():
    """A 'map writable' entry rewrites the requested path to its target."""
    cfg = RawConfigParser()
    group = 'group fooers'
    cfg.add_section(group)
    cfg.set(group, 'members', 'jdoe')
    cfg.set(group, 'map writable foo/bar', 'quux/thud')
    result = access.haveAccess(config=cfg, user='******', mode='writable', path='foo/bar')
    eq(result, ('repositories', 'quux/thud'))
def test_read_yes_map_wouldHaveWritable():
    """A writable mapping must not grant readonly access."""
    cfg = RawConfigParser()
    group = 'group fooers'
    cfg.add_section(group)
    cfg.set(group, 'members', 'jdoe')
    cfg.set(group, 'map writable foo/bar', 'quux/thud')
    result = access.haveAccess(config=cfg, user='******', mode='readonly', path='foo/bar')
    eq(result, None)
def test_write_ops(self):
    """Exercise each WriteOp primitive; ops only apply on operate_write_op."""
    with WriteOpCtx(self.ioctx) as write_op:
        # create an empty object
        write_op.new(0)
        self.ioctx.operate_write_op(write_op, "write_ops")
        eq(self.ioctx.read('write_ops'), b'')
        # full write followed by append
        write_op.write_full(b'1')
        write_op.append(b'2')
        self.ioctx.operate_write_op(write_op, "write_ops")
        eq(self.ioctx.read('write_ops'), b'12')
        # write at offset 2 overwrites a single byte
        write_op.write_full(b'12345')
        write_op.write(b'x', 2)
        self.ioctx.operate_write_op(write_op, "write_ops")
        eq(self.ioctx.read('write_ops'), b'12x45')
        # zero out 2 bytes starting at offset 2
        write_op.write_full(b'12345')
        write_op.zero(2, 2)
        self.ioctx.operate_write_op(write_op, "write_ops")
        eq(self.ioctx.read('write_ops'), b'12\x00\x005')
        # truncate to 2 bytes
        write_op.write_full(b'12345')
        write_op.truncate(2)
        self.ioctx.operate_write_op(write_op, "write_ops")
        eq(self.ioctx.read('write_ops'), b'12')
        # remove the object; subsequent reads must fail
        write_op.remove()
        self.ioctx.operate_write_op(write_op, "write_ops")
        with assert_raises(ObjectNotFound):
            self.ioctx.read('write_ops')
def test_write_no_simple_wouldHaveReadonly():
    """A readonly grant does not satisfy a writable request."""
    cfg = RawConfigParser()
    group = 'group fooers'
    cfg.add_section(group)
    cfg.set(group, 'members', 'jdoe')
    cfg.set(group, 'readonly', 'foo/bar')
    result = access.haveAccess(config=cfg, user='******', mode='writable', path='foo/bar')
    eq(result, None)
def cb(arg, line, who, sec, nsec, seq, level, msg):
    """Log-monitor callback: verify the opaque arg round-trips, then signal."""
    # NOTE(sileht): the old pyrados API received the pointer as an int
    # instead of the value of arg
    eq(arg, "arg")
    with lock:
        # wake the waiting test thread
        lock.notify()
    return 0
def test_list_objects(self):
    """Objects created via write/write_full/append all appear in the listing."""
    self.ioctx.write('a', b'')
    self.ioctx.write('b', b'foo')
    self.ioctx.write_full('c', b'bar')
    self.ioctx.append('d', b'jazz')
    names = sorted(obj.key for obj in self.ioctx.list_objects())
    eq(names, ['a', 'b', 'c', 'd'])
def validate_response(r, **options): " Validate webtest response " # Validate: status, status_int, status_code ok(r.status.startswith("%d " % r.status_int)) ok(r.status_int >= 100) eq(r.status_int, r.status_code) eq(r.status_int, options.get("status_int", 200)) # Validate: headers " Ref: https://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters " message = "Must be printable ASCII characters: %s" for key, value in r.headers.iteritems(): ok(re.match("^[\x20-\x7e]*$", value), message % repr(value)) # endfold eq(r.content_type, options.get("content_type", "text/plain")) # Validate: status: 200 if r.status_int == 200: ok("location" not in r.headers) # Validate: status: 301, 302 if r.status_int in [301, 302]: eq(urllib.unquote(r.headers["location"]), options["location"]) eq(r.normal_body, "")
def test_route_error():
    """Custom :error-404 / :error-500 handlers are used for failing routes."""
    app = natrix.Application([
        # deliberately broken handler: attribute access on None raises at
        # request time, which should surface as a 500 (Python 2 syntax;
        # `None.None` is a SyntaxError on Python 3)
        ("/500", lambda x: x.response(None.None)),
    ])

    @app.route(":error-404")
    def error_404(x):
        x.response("Custom error 404")

    @app.route(":error-500")
    def error_500(x):
        x.response("Custom error 500")
    # endfold
    testapp = webtest.TestApp(app)
    # unknown path -> custom 404 body
    response = testapp.get("/", status=404)
    eq(response.normal_body, "Custom error 404")

    def _error(*args, **kwargs):
        pass
    # endfold
    # silence natrix's error logging while we trigger the 500, then restore
    natrix_error = natrix.error
    natrix.error = _error
    response = testapp.get("/500", status=500)
    eq(response.normal_body, "Custom error 500")
    natrix.error = natrix_error
def test_list_children(self):
    """list_children tracks clones as they are created and removed."""
    global ioctx
    global features
    self.image.set_snap('snap1')
    # setUp created one clone of snap1
    self.check_children([(pool_name, self.clone_name)])
    self.clone.close()
    self.rbd.remove(ioctx, self.clone_name)
    eq(self.image.list_children(), [])
    # create ten clones and verify the child list grows accordingly
    clone_name = get_temp_image_name() + '_'
    expected_children = []
    for i in xrange(10):
        self.rbd.clone(ioctx, image_name, 'snap1', ioctx,
                       clone_name + str(i), features)
        expected_children.append((pool_name, clone_name + str(i)))
        self.check_children(expected_children)
    # remove them one at a time, verifying the child list shrinks
    for i in xrange(10):
        self.rbd.remove(ioctx, clone_name + str(i))
        expected_children.pop(0)
        self.check_children(expected_children)
    eq(self.image.list_children(), [])
    # restore the fixture clone for tearDown
    self.rbd.clone(ioctx, image_name, 'snap1', ioctx, self.clone_name,
                   features)
    self.check_children([(pool_name, self.clone_name)])
    self.clone = Image(ioctx, self.clone_name)
def test_follower_flatten(self):
    """Flattening via a second handle detaches the clone for both handles."""
    with Image(ioctx, image_name) as image:
        image.create_snap('snap')
        image.protect_snap('snap')
    try:
        RBD().clone(ioctx, image_name, 'snap', ioctx, 'clone', features)
        # open the same clone through two ioctxs (leader + follower)
        with nested(Image(ioctx, 'clone'), Image(ioctx2, 'clone')) as (
                image1, image2):
            data = rand_data(256)
            image1.write(data, 0)
            # flatten through the second handle
            image2.flatten()
            # the first handle should immediately see no parent
            assert_raises(ImageNotFound, image1.parent_info)
            # poll the flattening handle until its parent info disappears
            parent = True
            for x in xrange(30):
                try:
                    image2.parent_info()
                except ImageNotFound:
                    parent = False
                    break
            eq(False, parent)
    finally:
        # always clean up the clone and the protected snapshot
        RBD().remove(ioctx, 'clone')
        with Image(ioctx, image_name) as image:
            image.unprotect_snap('snap')
            image.remove_snap('snap')
def test_aio_stat(self):
    """aio_stat: missing object returns -2 (-ENOENT), existing returns 0."""
    lock = threading.Condition()
    # list so the nested callback can mutate it
    count = [0]

    def cb(_, size, mtime):
        with lock:
            count[0] += 1
            lock.notify()

    # stat a non-existent object
    comp = self.ioctx.aio_stat("foo", cb)
    comp.wait_for_complete()
    with lock:
        while count[0] < 1:
            lock.wait()
    # -2 corresponds to -ENOENT
    eq(comp.get_return_value(), -2)

    # create the object and stat again
    self.ioctx.write("foo", b"bar")
    comp = self.ioctx.aio_stat("foo", cb)
    comp.wait_for_complete()
    with lock:
        while count[0] < 2:
            lock.wait()
    eq(comp.get_return_value(), 0)

    # clean up everything written by this test
    [i.remove() for i in self.ioctx.list_objects()]
def test_rename():
    """Renaming an image back and forth updates the pool listing each time."""
    rbd = RBD()
    new_name = get_temp_image_name()
    rbd.rename(ioctx, image_name, new_name)
    eq([new_name], rbd.list(ioctx))
    rbd.rename(ioctx, new_name, image_name)
    eq([image_name], rbd.list(ioctx))
def test_applications(self):
    """Exercise pool application enable/metadata APIs (luminous+ only)."""
    cmd = {"prefix":"osd dump", "format":"json"}
    ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'')
    eq(ret, 0)
    assert len(buf) > 0
    release = json.loads(buf.decode("utf-8")).get("require_osd_release",
                                                  None)
    # application tagging only exists from luminous ('l') onwards
    if not release or release[0] < 'l':
        raise SkipTest
    eq([], self.ioctx.application_list())
    self.ioctx.application_enable("app1")
    # enabling a second application plainly fails ...
    assert_raises(Error, self.ioctx.application_enable, "app2")
    # ... but succeeds with the second arg set (presumably a force flag
    # — confirm against the rados bindings)
    self.ioctx.application_enable("app2", True)
    # metadata operations on an unknown application fail
    assert_raises(Error, self.ioctx.application_metadata_list, "dne")
    eq([], self.ioctx.application_metadata_list("app1"))
    assert_raises(Error, self.ioctx.application_metadata_set, "dne",
                  "key", "key")
    self.ioctx.application_metadata_set("app1", "key1", "val1")
    self.ioctx.application_metadata_set("app1", "key2", "val2")
    self.ioctx.application_metadata_set("app2", "key1", "val1")
    eq([("key1", "val1"), ("key2", "val2")],
       self.ioctx.application_metadata_list("app1"))
    self.ioctx.application_metadata_remove("app1", "key1")
    eq([("key2", "val2")], self.ioctx.application_metadata_list("app1"))
def test_getsize():
    """getsize sums stored value sizes, optionally restricted to a prefix."""
    store = {
        'foo': b'aaa',
        'bar': b'bbbb',
        'baz/quux': b'ccccc',
    }
    eq(7, getsize(store))
    eq(5, getsize(store, 'baz'))
def test_init_group_overwrite_path(self):
    """init_group at a path replaces array metadata only with overwrite=True."""
    # setup
    path = 'foo/bar'
    store = self.create_store()
    meta = dict(shape=(2000,), chunks=(200,),
                dtype=np.dtype('u1'), compressor=None,
                fill_value=0, order='F', filters=None)
    store[array_meta_key] = encode_array_metadata(meta)
    store[path + '/' + array_meta_key] = encode_array_metadata(meta)

    # don't overwrite
    with assert_raises(ValueError):
        init_group(store, path=path)

    # do overwrite
    try:
        init_group(store, overwrite=True, path=path)
    except NotImplementedError:
        # overwrite support is optional for store implementations
        pass
    else:
        assert array_meta_key not in store
        assert group_meta_key in store
        assert (path + '/' + array_meta_key) not in store
        assert (path + '/' + group_meta_key) in store
        # should have been overwritten
        meta = decode_group_metadata(store[path + '/' + group_meta_key])
        eq(ZARR_FORMAT, meta['zarr_format'])
def setdel_hierarchy_checks(store):
    """Check hierarchy-aware __setitem__/__delitem__ behaviour of a store."""
    # these tests are for stores that are aware of hierarchy levels; this
    # behaviour is not strictly required by Zarr but these tests are included
    # to define behaviour of DictStore and DirectoryStore classes

    # check __setitem__ and __delitem__ blocked by leaf
    store['a/b'] = b'aaa'
    with assert_raises(KeyError):
        store['a/b/c'] = b'xxx'
    with assert_raises(KeyError):
        del store['a/b/c']
    store['d'] = b'ddd'
    with assert_raises(KeyError):
        store['d/e/f'] = b'xxx'
    with assert_raises(KeyError):
        del store['d/e/f']

    # test __setitem__ overwrite level
    store['x/y/z'] = b'xxx'
    store['x/y'] = b'yyy'
    eq(b'yyy', store['x/y'])
    assert 'x/y/z' not in store
    store['x'] = b'zzz'
    eq(b'zzz', store['x'])
    assert 'x/y' not in store

    # test __delitem__ overwrite level
    store['r/s/t'] = b'xxx'
    del store['r/s']
    assert 'r/s/t' not in store
    store['r/s'] = b'xxx'
    del store['r']
    assert 'r/s' not in store
def test_get_set_del_contains(self): store = self.create_store() # test __contains__, __getitem__, __setitem__ assert 'foo' not in store with assert_raises(KeyError): # noinspection PyStatementEffect store['foo'] store['foo'] = b'bar' assert 'foo' in store eq(b'bar', store['foo']) # test __delitem__ (optional) try: del store['foo'] except NotImplementedError: pass else: assert 'foo' not in store with assert_raises(KeyError): # noinspection PyStatementEffect store['foo'] with assert_raises(KeyError): # noinspection PyStatementEffect del store['foo']
def test_init_group_overwrite(self):
    """init_group replaces existing array metadata only with overwrite=True."""
    # setup
    store = self.create_store()
    store[array_meta_key] = encode_array_metadata(
        dict(shape=(2000,), chunks=(200,),
             dtype=np.dtype('u1'), compressor=None,
             fill_value=0, order='F', filters=None)
    )

    # don't overwrite array (default)
    with assert_raises(ValueError):
        init_group(store)

    # do overwrite
    try:
        init_group(store, overwrite=True)
    except NotImplementedError:
        # overwrite support is optional for store implementations
        pass
    else:
        assert array_meta_key not in store
        assert group_meta_key in store
        meta = decode_group_metadata(store[group_meta_key])
        eq(ZARR_FORMAT, meta['zarr_format'])

    # don't overwrite group
    with assert_raises(ValueError):
        init_group(store)
def test_update(self):
    """Store.update sets multiple keys in one call."""
    store = self.create_store()
    for key in ('foo', 'baz'):
        assert key not in store
    store.update(foo=b'bar', baz=b'quux')
    eq(b'bar', store['foo'])
    eq(b'quux', store['baz'])
def test_comments(self):
    """End-to-end comment lifecycle: create, update, reply, query, delete."""
    now = datetime.datetime.now()
    now_ts = int(time.mktime(now.timetuple()))
    before_ts = int(time.mktime((now - datetime.timedelta(minutes=15)).timetuple()))
    message = 'test message ' + str(now_ts)

    # create a comment and verify it shows up as an event
    comment_id = dog.Comment.create(handle=TEST_USER, message=message)['comment']['id']
    time.sleep(self.wait_time)
    event = dog.Event.get(comment_id)
    eq(event['event']['text'], message)

    # update the comment text
    dog.Comment.update(comment_id, handle=TEST_USER, message=message + ' updated')
    time.sleep(self.wait_time)
    event = dog.Event.get(comment_id)
    eq(event['event']['text'], message + ' updated')

    # reply to the comment and find the reply in the event stream
    reply_id = dog.Comment.create(handle=TEST_USER, message=message + ' reply',
                                  related_event_id=comment_id)['comment']['id']
    time.sleep(3)
    stream = dog.Event.query(start=before_ts, end=now_ts + 100)['events']
    ok(stream is not None, msg="No events found in stream")
    ok(isinstance(stream, list), msg="Event stream is not a list")
    ok(len(stream) > 0, msg="No events found in stream")
    comment_ids = [x['id'] for x in stream[0]['comments']]
    ok(reply_id in comment_ids,
       msg="Should find {0} in {1}".format(reply_id, comment_ids))

    # Delete the reply
    dog.Comment.delete(reply_id)
    # Then the post itself
    dog.Comment.delete(comment_id)
    time.sleep(self.wait_time)
    try:
        dog.Event.get(comment_id)
    except Exception:
        # expected: the event is gone. Was a bare `except:`, which would
        # also swallow KeyboardInterrupt/SystemExit.
        pass
    else:
        assert False
def put_and_compare_file(size, content_func):
    """
    Create file with `size` and content generated by `content_func`.
    Use CLI to PUT and GET that file. Compare afterwards
    """
    obj = random_id()
    src = prepare_input_file(size, content_func)
    dst = prepare_output_file()

    eq(0, call(["./veintidos.py", "--pool", POOL_NAME, "put", obj, src]))
    eq(0, call(["./veintidos.py", "--pool", POOL_NAME, "get", obj, dst]))

    eq_file(src, dst)

    os.unlink(src)
    os.unlink(dst)
def check_children(self, expected):
    """Assert the image's children match `expected` after pool-name dedup."""
    actual = self.image.list_children()
    # dedup for cache pools until
    # http://tracker.ceph.com/issues/8187 is fixed
    seen = set(('rbd', child[1]) for child in actual)
    eq(seen, set(expected))
def test_absolute_subdir():
    """A leading slash on a nested path is stripped."""
    eq(safepath.munge('/evil/here'), 'evil/here')
def test_absolute():
    """A leading slash on a single component is stripped."""
    eq(safepath.munge('/evil'), 'evil')
def test_slashslash():
    """A path of only slashes munges to the underscore placeholder."""
    eq(safepath.munge('//'), '_')
def test_init_no_simple():
    """An empty config grants no init access."""
    cfg = RawConfigParser()
    result = access.haveAccess(config=cfg, user='******', mode='init', path='foo/bar')
    eq(result, None)
def test_fnmatch_write_yes_map():
    """Wildcard map entries rewrite matching paths into the target prefix."""
    cfg = RawConfigParser()
    cfg.add_section('group fooers')
    cfg.set('group fooers', 'members', 'jiangxin')
    # NOTE(review): presumably 'foo/*' matches sub-paths only while 'bar**'
    # also matches the bare name and crosses slashes — confirm against the
    # access module's pattern handling
    cfg.set('group fooers', 'map writable foo/*', 'ossxp/\\1')
    cfg.set('group fooers', 'map read bar**', 'ossxp/\\1')
    # 'foo' itself and 'foo-bar' do not match 'foo/*'
    eq(
        access.haveAccess(config=cfg, user='******', mode='write', path='foo'),
        None)
    eq(
        access.haveAccess(config=cfg, user='******', mode='write', path='foo-bar'),
        None)
    eq(
        access.haveAccess(config=cfg, user='******', mode='write', path='foo/bar'),
        ('repositories', 'ossxp/foo/bar', 'write'))
    # 'bar**' matches 'bar', 'bar-foo', and nested paths
    eq(access.haveAccess(config=cfg, user='******', mode='read', path='bar'),
       ('repositories', 'ossxp/bar', 'read'))
    eq(
        access.haveAccess(config=cfg, user='******', mode='read', path='bar-foo'),
        ('repositories', 'ossxp/bar-foo', 'read'))
    eq(
        access.haveAccess(config=cfg, user='******', mode='read', path='bar/foo'),
        ('repositories', 'ossxp/bar/foo', 'read'))
    eq(
        access.haveAccess(config=cfg, user='******', mode='read', path='bar/foo/1'),
        ('repositories', 'ossxp/bar/foo/1', 'read'))
def check_object_eq(k1, k2, check_extra = True):
    """Assert two object keys are equivalent in content and metadata."""
    assert k1
    assert k2
    log(10, 'comparing key name=', k1.name)
    eq(k1.name, k2.name)
    eq(k1.get_contents_as_string(), k2.get_contents_as_string())
    eq(k1.metadata, k2.metadata)
    eq(k1.cache_control, k2.cache_control)
    eq(k1.content_type, k2.content_type)
    eq(k1.content_encoding, k2.content_encoding)
    eq(k1.content_disposition, k2.content_disposition)
    eq(k1.content_language, k2.content_language)
    eq(k1.etag, k2.etag)
    # eq(k1.last_modified, k2.last_modified)
    if check_extra:
        # owner checks are optional for callers that can't resolve owners
        eq(k1.owner.id, k2.owner.id)
        eq(k1.owner.display_name, k2.owner.display_name)
    eq(k1.storage_class, k2.storage_class)
    eq(k1.size, k2.size)
    eq(k1.version_id, k2.version_id)
    eq(k1.encrypted, k2.encrypted)
def test_resize_stat(self):
    """Resizing a clone changes its size; overlap stays at the shrunken size."""
    # shrink the clone below the parent size
    # NOTE(review): `/` relies on Python 2 integer division — confirm if
    # this file is ever run under Python 3
    self.clone.resize(IMG_SIZE / 2)
    image_info = self.image.stat()
    clone_info = self.clone.stat()
    eq(clone_info['size'], IMG_SIZE / 2)
    eq(image_info['size'], IMG_SIZE)
    eq(self.clone.overlap(), IMG_SIZE / 2)
    # grow beyond the parent; overlap must not grow back
    self.clone.resize(IMG_SIZE * 2)
    image_info = self.image.stat()
    clone_info = self.clone.stat()
    eq(clone_info['size'], IMG_SIZE * 2)
    eq(image_info['size'], IMG_SIZE)
    eq(self.clone.overlap(), IMG_SIZE / 2)
def test_unprotect_with_children(self): global features # can't remove a snapshot that has dependent clones assert_raises(ImageBusy, self.image.remove_snap, 'snap1') # validate parent info of clone created by TestClone.setUp (pool, image, snap) = self.clone.parent_info() eq(pool, 'rbd') eq(image, IMG_NAME) eq(snap, 'snap1') # create a new pool... rados.create_pool('rbd2') other_ioctx = rados.open_ioctx('rbd2') # ...with a clone of the same parent self.rbd.clone(ioctx, IMG_NAME, 'snap1', other_ioctx, 'other_clone', features) self.other_clone = Image(other_ioctx, 'other_clone') # validate its parent info (pool, image, snap) = self.other_clone.parent_info() eq(pool, 'rbd') eq(image, IMG_NAME) eq(snap, 'snap1') # can't unprotect snap with children assert_raises(ImageBusy, self.image.unprotect_snap, 'snap1') # 2 children, check that cannot remove the parent snap assert_raises(ImageBusy, self.image.remove_snap, 'snap1') # close and remove other pool's clone self.other_clone.close() self.rbd.remove(other_ioctx, 'other_clone') # check that we cannot yet remove the parent snap assert_raises(ImageBusy, self.image.remove_snap, 'snap1') other_ioctx.close() rados.delete_pool('rbd2')
def test_read(self):
    """A clone reads through to the parent's data."""
    expected = self.image.read(IMG_SIZE / 2, 256)
    actual = self.clone.read(IMG_SIZE / 2, 256)
    eq(actual, expected)
def test_remove_with_watcher(self):
    """An open image handle blocks removal; the data stays intact."""
    payload = rand_data(256)
    self.image.write(payload, 0)
    assert_raises(ImageBusy, remove_image)
    eq(self.image.read(0, 256), payload)
def test_stat(self):
    """A fresh clone reports the parent's size and is fully overlapped."""
    parent_info = self.image.stat()
    info = self.clone.stat()
    eq(info['size'], parent_info['size'])
    eq(info['size'], self.clone.overlap())
def test_write_read(self):
    """Data written at an offset reads back identically."""
    payload = rand_data(256)
    pos = 50
    self.image.write(payload, pos)
    eq(payload, self.image.read(pos, 256))
def test_list_lockers(self):
    """Exclusive and shared locks are both reflected by list_lockers."""
    eq([], self.image.list_lockers())
    # take and release an exclusive lock
    self.image.lock_exclusive('test')
    lockers = self.image.list_lockers()
    eq(1, len(lockers['lockers']))
    _, cookie, _ = lockers['lockers'][0]
    eq(cookie, 'test')
    eq('', lockers['tag'])
    assert lockers['exclusive']
    self.image.unlock('test')
    eq([], self.image.list_lockers())

    # take several shared locks under one tag
    num_shared = 10
    for i in xrange(num_shared):
        self.image.lock_shared(str(i), 'tag')
    lockers = self.image.list_lockers()
    eq('tag', lockers['tag'])
    assert not lockers['exclusive']
    eq(num_shared, len(lockers['lockers']))
    # cookies were the stringified indices; check and release each
    cookies = sorted(map(lambda x: x[1], lockers['lockers']))
    for i in xrange(num_shared):
        eq(str(i), cookies[i])
        self.image.unlock(str(i))
    eq([], self.image.list_lockers())
def test_read(self):
    """A new image reads back as zero bytes."""
    eq(self.image.read(0, 20), '\0' * 20)
def test_remove_snap(self):
    """Creating then removing a snapshot leaves the snap list empty."""
    eq([], list(self.image.list_snaps()))
    self.image.create_snap('snap1')
    names = [snap['name'] for snap in self.image.list_snaps()]
    eq(['snap1'], names)
    self.image.remove_snap('snap1')
    eq([], list(self.image.list_snaps()))
def test_rename():
    """Rename to a temp name and back, checking the listing each time."""
    rbd = RBD()
    tmp_name = IMG_NAME + '2'
    rbd.rename(ioctx, IMG_NAME, tmp_name)
    eq([tmp_name], rbd.list(ioctx))
    rbd.rename(ioctx, tmp_name, IMG_NAME)
    eq([IMG_NAME], rbd.list(ioctx))
def test_large_read(self):
    """A full-image read of a fresh image is all zero bytes."""
    eq(self.image.read(0, IMG_SIZE), '\0' * IMG_SIZE)
def test_list_empty():
    """An empty pool lists no images."""
    images = RBD().list(ioctx)
    eq([], images)
def check_stat(info, size, order):
    """Validate an image stat dict against the expected size and order."""
    assert 'block_name_prefix' in info
    obj_size = 1 << order
    eq(info['size'], size)
    eq(info['order'], order)
    eq(info['num_objs'], size / obj_size)
    eq(info['obj_size'], obj_size)
def test_write(self):
    """After writing and rewinding, the data reads back in two chunks."""
    self.object.write(b'barbaz')
    self.object.seek(0)
    eq(self.object.read(3), b'bar')
    eq(self.object.read(3), b'baz')
def test_list():
    """The pool lists exactly the one test image."""
    images = RBD().list(ioctx)
    eq([IMG_NAME], images)
def test_read(self):
    """Read returns the object body, then empty bytes at EOF."""
    eq(self.object.read(3), b'bar')
    eq(self.object.read(100), b'')
def test_monmap_dump(self):
    """Exercise 'mon dump' in plain and JSON form, plus targeted delivery."""
    # check for success and some plain output with epoch in it
    cmd = {"prefix": "mon dump"}
    ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30)
    eq(ret, 0)
    assert len(buf) > 0
    assert (b'epoch' in buf)

    # JSON, and grab current epoch
    cmd['format'] = 'json'
    ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30)
    eq(ret, 0)
    assert len(buf) > 0
    d = json.loads(buf.decode("utf-8"))
    assert ('epoch' in d)
    epoch = d['epoch']

    # assume epoch + 1000 does not exist; test for ENOENT
    cmd['epoch'] = epoch + 1000
    ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30)
    eq(ret, -errno.ENOENT)
    eq(len(buf), 0)
    del cmd['epoch']

    # send to specific target by name
    target = d['mons'][0]['name']
    print(target)
    ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30,
                                            target=target)
    eq(ret, 0)
    assert len(buf) > 0
    d = json.loads(buf.decode("utf-8"))
    assert ('epoch' in d)

    # and by rank
    target = d['mons'][0]['rank']
    print(target)
    ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30,
                                            target=target)
    eq(ret, 0)
    assert len(buf) > 0
    d = json.loads(buf.decode("utf-8"))
    assert ('epoch' in d)
def _let_osds_back_up(self):
    """Clear the 'noup' flag so OSDs may rejoin the cluster."""
    unset_cmd = {"prefix": "osd unset", "key": "noup"}
    ret, _, _ = self.rados.mon_command(json.dumps(unset_cmd), b'')
    eq(ret, 0)
def test_seek(self):
    """Seeking repositions subsequent reads within the object."""
    self.object.write(b'blah')
    self.object.seek(0)
    eq(self.object.read(4), b'blah')
    self.object.seek(1)
    eq(self.object.read(3), b'lah')
def test_remove_snap(self):
    """Create a pool snapshot, confirm it, remove it, confirm it is gone."""
    self.ioctx.create_snap('foo')
    (snap,) = self.ioctx.list_snaps()
    eq(snap.name, 'foo')
    self.ioctx.remove_snap('foo')
    eq(list(self.ioctx.list_snaps()), [])
def test_aio_read(self):
    """aio_read completion vs. callback ordering while OSDs are down."""
    # this is a list so that the local cb() can modify it
    retval = [None]
    lock = threading.Condition()

    def cb(_, buf):
        with lock:
            retval[0] = buf
            lock.notify()

    payload = b"bar\000frob"
    self.ioctx.write("foo", payload)
    # test1: use wait_for_complete() and wait for cb by
    # watching retval[0]
    self._take_down_acting_set('test_pool', 'foo')
    comp = self.ioctx.aio_read("foo", len(payload), 0, cb)
    eq(False, comp.is_complete())
    time.sleep(3)
    # with the acting set down, the read must still be pending
    eq(False, comp.is_complete())
    with lock:
        eq(None, retval[0])
    self._let_osds_back_up()
    comp.wait_for_complete()
    loops = 0
    with lock:
        while retval[0] is None and loops <= 10:
            lock.wait(timeout=5)
            loops += 1
    # the callback must have fired within the polling budget
    assert (loops <= 10)
    eq(retval[0], payload)
    # test2: use wait_for_complete_and_cb(), verify retval[0] is
    # set by the time we regain control
    retval[0] = None
    self._take_down_acting_set('test_pool', 'foo')
    comp = self.ioctx.aio_read("foo", len(payload), 0, cb)
    eq(False, comp.is_complete())
    time.sleep(3)
    eq(False, comp.is_complete())
    with lock:
        eq(None, retval[0])
    self._let_osds_back_up()
    comp.wait_for_complete_and_cb()
    assert (retval[0] is not None)
    eq(retval[0], payload)
    # clean up everything written by this test
    [i.remove() for i in self.ioctx.list_objects()]
def test_lookup_snap(self):
    """lookup_snap returns the snapshot by name."""
    self.ioctx.create_snap('foo')
    found = self.ioctx.lookup_snap('foo')
    eq(found.name, 'foo')
def test_locator(self):
    """Locator keys control placement; reads need the matching locator set."""
    self.ioctx.set_locator_key("bar")
    self.ioctx.write('foo', b'contents1')
    objects = [i for i in self.ioctx.list_objects()]
    eq(len(objects), 1)
    eq(self.ioctx.get_locator_key(), "bar")
    self.ioctx.set_locator_key("")
    # NOTE(review): the write via the listed object succeeds even though
    # the ioctx locator was cleared — presumably listed objects carry
    # their own locator; confirm against the rados bindings
    objects[0].seek(0)
    objects[0].write(b"contents2")
    eq(self.ioctx.get_locator_key(), "")
    # reading by name requires the locator to be set again
    self.ioctx.set_locator_key("bar")
    contents = self.ioctx.read("foo")
    eq(contents, b"contents2")
    eq(self.ioctx.get_locator_key(), "bar")
    objects[0].remove()
    objects = [i for i in self.ioctx.list_objects()]
    eq(objects, [])
    # restore the default (empty) locator for subsequent tests
    self.ioctx.set_locator_key("")