def test_save_lazy(rk):
    """Lazily-saved objects are served from cache, flushed on session_end,
    and a fresh (non-equal) copy is loaded after the cache is cleared."""
    key_a = next(rk)
    key_b = next(rk)
    obj_a = M1.from_dict(key_a, {'f1': 'm1'})
    obj_b = M1.from_dict(key_b, {'f1': 'm2'})
    obj_a.save_lazy()
    obj_b.save_lazy()
    cached_a = M1.get(key_a)
    cached_b = M1.get(key_b)
    # get() must serve the not-yet-persisted cached objects
    assert obj_a == cached_a
    assert obj_b == cached_b
    assert {o.key for o in M1._c.lazy_save} == {obj_a.key, obj_b.key}
    M1.session_end()
    # ending the session flushes the pending lazy saves
    assert M1._c.lazy_save == set()
    clear_cache()
    fresh_a = M1.get(key_a)
    fresh_b = M1.get(key_b)
    assert cached_a != fresh_a
    assert cached_b != fresh_b
def test_several_updates(simple_plan):
    """Failing tasks one after another is reflected in wait_finish summaries."""
    failing = next(t for t in simple_plan.nodes() if t.name == 'just_fail')
    failing.status = states.ERROR.name
    failing.save()
    first_summary = {
        'SKIPPED': 0,
        'SUCCESS': 0,
        'NOOP': 0,
        'ERROR': 1,
        'INPROGRESS': 0,
        'PENDING': 1,
        'ERROR_RETRY': 0,
    }
    assert next(graph.wait_finish(simple_plan.graph['uid'], 10)) == first_summary
    echoing = next(t for t in simple_plan.nodes() if t.name == 'echo_stuff')
    echoing.status = states.ERROR.name
    echoing.save()
    # drop cached state so the second poll sees the new status
    clear_cache()
    second_summary = {
        'SKIPPED': 0,
        'SUCCESS': 0,
        'NOOP': 0,
        'ERROR': 2,
        'INPROGRESS': 0,
        'PENDING': 0,
        'ERROR_RETRY': 0,
    }
    assert next(graph.wait_finish(simple_plan.graph['uid'], 10)) == second_summary
def test_conflict_resolution_called(rk):
    """A conflicting second save must invoke the bucket's sibling resolver
    exactly once, with both siblings present."""
    pytest.importorskip('riak')
    uid = next(rk)
    lock = Lock.from_dict(uid, {'identity': uid})
    lock.save()
    clear_cache()

    # manual mock like because riak_bucket disallow delattr
    # which is used by mock
    class PseudoMock(object):

        def __init__(self):
            # how many times the wrapped resolver was invoked
            self.call_count = 0

        def __enter__(self):
            def _manual_pseudo_mock(riak_object):
                self.call_count += 1
                # the conflicting write must have produced exactly 2 siblings
                assert len(riak_object.siblings) == 2
                # delegate to the real resolver so resolution still happens
                return Lock.bucket._orig_resolver(riak_object)
            # stash the original resolver and install the counting wrapper
            Lock.bucket._orig_resolver = Lock.bucket.resolver
            Lock.bucket.resolver = _manual_pseudo_mock
            return self

        def __exit__(self, *exc_info):
            # restore the original resolver and drop the stash
            Lock.bucket.resolver = Lock.bucket._orig_resolver
            del Lock.bucket._orig_resolver
            return False

    with PseudoMock() as m:
        # second write to the same key creates a sibling conflict
        lock1 = Lock.from_dict(uid, {'identity': uid})
        lock1.save()
    assert m.call_count == 1
def test_raise_error_if_acquired():
    """Entering Lock() raises RuntimeError when another identity holds it."""
    lock_uid = '11'
    Lock._acquire(lock_uid, '12', 'a')
    clear_cache()
    with pytest.raises(RuntimeError):
        with Lock(lock_uid, '13'):
            assert True
def test_save_lazy(rk):
    """Identity variant: the cache returns the very same objects until it is
    cleared, and session_end drains the lazy-save set."""
    key_a, key_b = next(rk), next(rk)
    first = M1.from_dict(key_a, {'f1': 'm1'})
    second = M1.from_dict(key_b, {'f1': 'm2'})
    first.save_lazy()
    second.save_lazy()
    got_a = M1.get(key_a)
    got_b = M1.get(key_b)
    # same objects, not copies
    assert first is got_a
    assert second is got_b
    assert M1._c.lazy_save == {first, second}
    M1.session_end()
    assert M1._c.lazy_save == set()
    clear_cache()
    # after clearing the cache a reload yields distinct instances
    assert got_a is not M1.get(key_a)
    assert got_b is not M1.get(key_b)
def session_end(self, result=True):
    """Finish the SQL session: commit on success, roll back otherwise,
    then drop all cached objects."""
    session = self._sql_session
    if result:
        session.commit()
    else:
        session.rollback()
    clear_cache()
def test_concurrent_sequences_with_no_handler(scale, clients):
    """End-to-end run: all staged resources finish SUCCESS and the staged
    log is empty afterwards (session_end flush variant)."""
    expected_total = scale * 3
    deadline = scale * 2
    scheduler_client = clients['scheduler']
    assert len(change.staged_log()) == expected_total
    ModelMeta.session_end()
    plan = change.send_to_orchestration()
    scheduler_client.next({}, plan.graph['uid'])

    def poll(limit):
        # keep polling until the plan settles or the poller times out;
        # no task may ever be in ERROR while we wait
        try:
            for summary in wait_finish(plan.graph['uid'], limit):
                assert summary[states.ERROR.name] == 0
                time.sleep(0.5)
        except ExecutionTimeout:
            pass
        return summary

    worker = gevent.spawn(poll, deadline)
    worker.join(timeout=deadline)
    outcome = worker.get(block=True)
    assert outcome[states.SUCCESS.name] == expected_total
    assert len(data.CL()) == expected_total
    clear_cache()
    assert len(change.staged_log()) == 0
def test_concurrent_sequences_with_no_handler(scale, clients):
    """End-to-end run: all staged resources finish SUCCESS and the staged
    log is empty afterwards (save_all_lazy flush variant)."""
    expected_total = scale * 3
    deadline = scale * 2
    scheduler_client = clients['scheduler']
    assert len(change.staged_log()) == expected_total
    # flush pending lazy saves both before and after building the plan
    ModelMeta.save_all_lazy()
    plan = change.send_to_orchestration()
    ModelMeta.save_all_lazy()
    scheduler_client.next({}, plan.graph['uid'])

    def poll(limit):
        # keep polling until the plan settles or the poller times out;
        # no task may ever be in ERROR while we wait
        try:
            for summary in wait_finish(plan.graph['uid'], limit):
                assert summary[states.ERROR.name] == 0
                time.sleep(0.5)
        except ExecutionTimeout:
            pass
        return summary

    worker = gevent.spawn(poll, deadline)
    worker.join(timeout=deadline)
    outcome = worker.get(block=True)
    assert outcome[states.SUCCESS.name] == expected_total
    assert len(data.CL()) == expected_total
    clear_cache()
    assert len(change.staged_log()) == 0
def session_start(self):
    """Start a session: drop cached objects and open a SQL transaction.

    begin() is guarded by the `_started` flag so that nested invocations
    (pytest calling session_start twice) do not begin twice.
    """
    clear_cache()
    sess = self._sql_session
    # TODO: (jnowak) remove this, it's a hack
    # because of pytest nested calls
    # FIX: the guard was inverted (`if getattr(...)`) — it only called
    # begin() when a transaction was ALREADY started, so the first call
    # never opened one and nested calls double-begun. Begin only when no
    # transaction is running yet.
    if not getattr(sess, '_started', False):
        sess.begin()
    setattr(sess, '_started', True)
def session_end(self, result=True):
    """Finish the SQL session (commit or rollback by `result`), clear the
    cache, and mark the session as no longer started."""
    session = self._sql_session
    if result:
        session.commit()
    else:
        session.rollback()
    clear_cache()
    # allow the next session_start to begin() again
    session._started = False
def test_lazy(rk):
    """An M1 constructed by key alone has no backing riak object, so
    attribute access raises DBLayerNoRiakObj."""
    key = next(rk)
    M1.from_dict(key, {'f1': 'blah', 'f2': 150}).save()
    clear_cache()
    stub = M1(key)
    with pytest.raises(DBLayerNoRiakObj):
        assert stub.f1 == 'blah'
def test_time_sleep_called(msleep):
    """While waiting on a held lock, the mocked sleep hook is invoked once
    with the lock uid and the contender identity."""
    lock_uid = '11'
    Lock._acquire(lock_uid, '12', 'a')
    clear_cache()
    wait_seconds = 5
    with pytest.raises(RuntimeError):
        with Lock(lock_uid, '13', 1, waiter=Waiter(wait_seconds)):
            assert True
    msleep.assert_called_once_with(lock_uid, '13')
def test_time_sleep_called(msleep):
    """While waiting on a held lock, the mocked sleep is called once with
    the configured sleep interval."""
    lock_uid = '11'
    Lock._acquire(lock_uid, '12', 'a')
    clear_cache()
    wait_seconds = 5
    with pytest.raises(RuntimeError):
        with Lock(lock_uid, '13', 1, wait_seconds):
            assert True
    msleep.assert_called_once_with(wait_seconds)
def test_acquire_release_logic():
    """A held lock keeps reporting its holder until released; after release
    a contender can take it over."""
    lock_uid = '2131'
    holder = '1111'
    contender = '2222'
    assert Lock._acquire(lock_uid, holder, 'a').who_is_locking() == holder
    clear_cache()
    # contender's attempt must still report the original holder
    assert Lock._acquire(lock_uid, contender, 'a').who_is_locking() == holder
    Lock._release(lock_uid, holder, 'a')
    # once released, the contender wins
    assert Lock._acquire(lock_uid, contender, 'a').who_is_locking() == contender
def test_non_unique_key(rk):
    """Force-inserting a second row under the same key violates the unique
    constraint (peewee backend)."""
    peewee = pytest.importorskip('peewee')
    key = next(rk)
    Lock.from_dict(key, {'identity': '1'}).save(force_insert=True)
    clear_cache()
    duplicate = Lock.from_dict(key, {'identity': '2'})
    with pytest.raises(peewee.IntegrityError):
        duplicate.save(force_insert=True)
def test_raise_riak_error_on_incorrect_update(rk):
    """Saving a second object under an existing key without the stored
    vclock raises RiakError."""
    riak = pytest.importorskip('riak')
    key = next(rk)
    Lock.from_dict(key, {'identity': key}).save()
    clear_cache()
    with pytest.raises(riak.RiakError):
        stale = Lock.from_dict(key, {'identity': key})
        stale.save()
def test_delete_cache_behaviour(rk):
    """Deleting a freshly-fetched object makes subsequent get() raise
    DBLayerNotFound."""
    key = next(rk)
    M1.from_dict(key, {'f1': 'm1'}).save()
    clear_cache()
    M1.get(key).delete()
    with pytest.raises(DBLayerNotFound):
        M1.get(key)
def test_return_siblings_on_write(rk):
    """A conflicting write raises SiblingsError and leaves both siblings
    (with identical data) on the riak object."""
    pytest.importorskip('riak')
    key = next(rk)
    Lock.from_dict(key, {'identity': key}).save()
    clear_cache()
    with pytest.raises(SiblingsError):
        conflicting = Lock.from_dict(key, {'identity': key})
        conflicting.save()
    sibling_a, sibling_b = conflicting._riak_object.siblings
    assert sibling_a.data == sibling_b.data
def test_update(rk):
    """A field update survives save(), a cached get(), and a fresh read
    after the cache is cleared."""
    key = next(rk)
    model = M1.from_dict(key, {'f1': 'blah', 'f2': 150})
    model.save()
    model.f1 = 'blub'
    assert model.f1 == 'blub'
    model.save()
    assert model.f1 == 'blub'
    # cached read
    assert M1.get(key).f1 == 'blub'
    clear_cache()
    # fresh read from storage
    assert M1.get(key).f1 == 'blub'
def test_cache_behaviour(rk):
    """get() compares equal to the cached object before and after save;
    clearing the cache yields a non-equal fresh instance."""
    key = next(rk)
    original = M1.from_dict(key, {'f1': 'm1'})
    cached = M1.get(key)
    assert original == cached
    original.save()
    assert original == cached
    assert original == M1.get(key)
    clear_cache()
    assert original != M1.get(key)
def test_revert_removal():
    """Removing a committed resource and then reverting the removal must
    restore the resource with its original inputs."""
    # stage a resource with one input a='9'
    res = DBResource.from_dict('test1',
                               {'name': 'test1',
                                'base_path': 'x',
                                'state': RESOURCE_STATE.created.name,
                                'meta_inputs': {'a': {'value': None,
                                                      'schema': 'str'}}})
    res.inputs['a'] = '9'
    res.save_lazy()
    # pretend it was already committed as operational
    commited = CommitedResource.from_dict('test1',
                                          {'inputs': {'a': '9'},
                                           'state': 'operational'})
    commited.save_lazy()
    resource_obj = resource.load(res.name)
    resource_obj.remove()
    ModelMeta.save_all_lazy()
    changes = change.stage_changes()
    assert len(changes) == 1
    # the staged change is the removal of input a='9'
    assert changes[0].diff == [['remove', '', [['a', '9']]]]
    operations.move_to_commited(changes[0].log_action)
    clear_cache()
    assert DBResource._c.obj_cache == {}
    # assert DBResource.bucket.get('test1').siblings == []

    # revert needs the resource meta; patch the repository so it does not
    # hit the filesystem
    with mock.patch.object(repository.Repository, 'read_meta') as mread:
        mread.return_value = {
            'input': {'a': {'schema': 'str!'}},
            'id': 'mocked'
        }
        with mock.patch.object(repository.Repository, 'get_path') as mpath:
            mpath.return_value = 'x'
            change.revert(changes[0].uid)
    ModelMeta.save_all_lazy()
    # assert len(DBResource.bucket.get('test1').siblings) == 1

    # the reverted resource is back with its original args
    resource_obj = resource.load('test1')
    assert resource_obj.args == {
        'a': '9',
        'location_id': '',
        'transports_id': ''
    }
def wait_finish(uid, timeout):
    """Check if graph is finished

    Will return when no PENDING or INPROGRESS
    otherwise yields summary
    """
    deadline = time.time() + timeout
    while time.time() <= deadline:
        # need to clear cache before fetching updated status
        clear_cache()
        graph_obj = get_graph(uid)
        tally = Counter({s.name: 0 for s in states})
        tally.update(node['status'] for node in graph_obj.node.values())
        yield tally
        if tally[states.PENDING.name] + tally[states.INPROGRESS.name] == 0:
            return
    raise errors.ExecutionTimeout('Run %s wasnt able to finish' % uid)
def test_update_behaviour(rk):
    """changed() tracks dirtiness across saves; saving an unchanged object
    raises DBLayerException."""
    key = next(rk)
    model = M1.from_dict(key, {'f1': 'blah', 'f2': 150})
    assert model.changed() is True
    model.save()
    assert model.changed() is False
    # a no-op save must be rejected
    with pytest.raises(DBLayerException):
        model.save()
    model.f1 = 'updated'
    assert model.changed() is True
    model.save()
    assert model.f1 == 'updated'
    clear_cache()
    assert M1.get(key).f1 == 'updated'
def wait_finish(uid, timeout):
    """Check if graph is finished

    Will return when no PENDING or INPROGRESS
    otherwise yields summary
    """
    started = time.time()
    while True:
        if time.time() > started + timeout:
            raise errors.ExecutionTimeout(
                'Run %s wasnt able to finish' % uid)
        # need to clear cache before fetching updated status
        clear_cache()
        dg = get_graph(uid)
        counts = Counter()
        for state in states:
            counts[state.name] = 0
        for node in dg.node.values():
            counts[node['status']] += 1
        yield counts
        if counts[states.PENDING.name] + counts[states.INPROGRESS.name] == 0:
            return
def test_double_create(self):
    """Creating the same resource twice fails: first against the object
    cache, then (after clearing the cache) against the database."""
    # NOTE(review): the YAML body's line breaks were reconstructed from a
    # whitespace-mangled source — verify indentation against the original.
    sample_meta_dir = self.make_resource_meta(
        """
id: sample
handler: ansible
version: 1.0.0
input:
  value:
    schema: int
    value: 0
        """
    )
    self.create_resource("sample1", sample_meta_dir, {"value": 1})
    # duplicate while still cached
    with self.assertRaisesRegexp(DBLayerException,
                                 "Object already exists in cache cannot create second"):
        self.create_resource("sample1", sample_meta_dir, {"value": 1})
    clear_cache()
    # duplicate against the persisted copy
    with self.assertRaisesRegexp(DBLayerException,
                                 "Object already exists in database cannot create second"):
        self.create_resource("sample1", sample_meta_dir, {"value": 1})
def test_double_create(self):
    """Creating the same resource twice fails: first against the object
    cache, then (after clearing the cache) against the database."""
    # NOTE(review): the YAML body's line breaks were reconstructed from a
    # whitespace-mangled source — verify indentation against the original.
    sample_meta_dir = self.make_resource_meta("""
id: sample
handler: ansible
version: 1.0.0
input:
  value:
    schema: int
    value: 0
        """)
    self.create_resource('sample1', sample_meta_dir, {'value': 1})
    # duplicate while still cached
    with self.assertRaisesRegexp(
            DBLayerException,
            "Object already exists in cache cannot create second"):
        self.create_resource('sample1', sample_meta_dir, {'value': 1})
    clear_cache()
    # duplicate against the persisted copy
    with self.assertRaisesRegexp(
            DBLayerException,
            "Object already exists in database cannot create second"):
        self.create_resource('sample1', sample_meta_dir, {'value': 1})
def session_start(self):
    """Drop all cached objects, then open a new SQL transaction."""
    clear_cache()
    self._sql_session.begin()
def test_lock_acquired_released():
    """Inside the context the holder keeps the lock; after exit a new
    identity can acquire it (explicit waiter variant)."""
    holder = '11'
    with Lock(holder, holder, waiter=Waiter(1)):
        clear_cache()
        # contender sees the lock still held by '11'
        assert Lock._acquire(holder, '12', 'a').who_is_locking() == '11'
    # after release, '12' takes over
    assert Lock._acquire(holder, '12', 'a').who_is_locking() == '12'
def session_end(self, result=True):
    """End the session by clearing the cache.

    The `result` flag is intentionally ignored; it is accepted only to
    keep the session interface uniform across backends.
    """
    # ignore result
    clear_cache()
def session_start(self):
    """Start a session by dropping all cached objects."""
    clear_cache()
def test_lock_acquired_released():
    """Inside the context the holder keeps the lock; after exit a new
    identity can acquire it."""
    holder = '11'
    with Lock(holder, holder):
        clear_cache()
        # contender sees the lock still held by '11'
        assert Lock._acquire(holder, '12', 'a').who_is_locking() == '11'
    # after release, '12' takes over
    assert Lock._acquire(holder, '12', 'a').who_is_locking() == '12'