# NOTE(review): an identical class named ProvisionerCoreTests is defined again
# later in this file; that later definition shadows this one at import time,
# so these tests likely never run. Looks like a merge/paste artifact — confirm
# and deduplicate.
class ProvisionerCoreTests(unittest.TestCase):
    """Tests for ProvisionerCore provisioning, query, and termination logic.

    All collaborators are fakes: the notifier records state notifications,
    the context client fakes the context broker, the DTRS fakes deployable
    type lookups, and FakeNodeDriver fakes the IaaS driver per site.
    """

    def setUp(self):
        # Fresh fakes per test so notification/record state never leaks
        # between test methods.
        self.notifier = FakeProvisionerNotifier()
        self.store = ProvisionerStore()
        self.ctx = FakeContextClient()
        self.dtrs = FakeDTRS()
        self.site1_driver = FakeNodeDriver()
        self.site2_driver = FakeNodeDriver()

        drivers = {'site1': self.site1_driver, 'site2': self.site2_driver}
        self.core = ProvisionerCore(store=self.store,
                                    notifier=self.notifier,
                                    dtrs=self.dtrs,
                                    context=self.ctx,
                                    site_drivers=drivers)

    @defer.inlineCallbacks
    def test_prepare_dtrs_error(self):
        # A deployable-type lookup failure during prepare should mark the
        # request's nodes FAILED rather than raising.
        self.dtrs.error = DeployableTypeLookupError()
        nodes = {
            "i1": dict(ids=[_new_id()], site="chicago", allocation="small")
        }
        request = dict(launch_id=_new_id(), deployable_type="foo",
                       subscribers=('blah', ), nodes=nodes)
        yield self.core.prepare_provision(request)
        self.assertTrue(self.notifier.assure_state(states.FAILED))

    @defer.inlineCallbacks
    def test_prepare_broker_error(self):
        # A context-broker create failure during prepare should likewise
        # result in FAILED notifications.
        self.ctx.create_error = BrokerError("fake ctx create failed")
        self.dtrs.result = {
            'document': "<fake>document</fake>",
            "nodes": {
                "i1": {}
            }
        }
        nodes = {"i1": dict(ids=[_new_id()], site="site1", allocation="small")}
        request = dict(launch_id=_new_id(), deployable_type="foo",
                       subscribers=('blah', ), nodes=nodes)
        yield self.core.prepare_provision(request)
        self.assertTrue(self.notifier.assure_state(states.FAILED))

    @defer.inlineCallbacks
    def test_prepare_execute(self):
        # Happy path: prepare + execute leaves nodes PENDING.
        yield self._prepare_execute()
        self.assertTrue(self.notifier.assure_state(states.PENDING))

    @defer.inlineCallbacks
    def test_prepare_execute_iaas_fail(self):
        # An IaaS credential error during node creation should mark the
        # launch FAILED instead of propagating.
        self.site1_driver.create_node_error = InvalidCredsError()
        yield self._prepare_execute()
        self.assertTrue(self.notifier.assure_state(states.FAILED))

    @defer.inlineCallbacks
    def _prepare_execute(self):
        """Shared helper: prepare a one-node launch, verify the prepared
        records, then run execute_provision on them."""
        self.dtrs.result = {
            'document': _get_one_node_cluster_doc("node1", "image1"),
            "nodes": {
                "node1": {}
            }
        }
        request_node = dict(ids=[_new_id()], site="site1", allocation="small")
        request_nodes = {"node1": request_node}
        request = dict(launch_id=_new_id(), deployable_type="foo",
                       subscribers=('blah', ), nodes=request_nodes)

        launch, nodes = yield self.core.prepare_provision(request)
        self.assertEqual(len(nodes), 1)
        node = nodes[0]
        self.assertEqual(node['node_id'], request_node['ids'][0])
        self.assertEqual(launch['launch_id'], request['launch_id'])

        # The launch record must carry the context created via the fake
        # broker, with all the expected context fields present.
        self.assertTrue(self.ctx.last_create)
        self.assertEqual(launch['context'], self.ctx.last_create)
        for key in ('uri', 'secret', 'context_id', 'broker_uri'):
            self.assertIn(key, launch['context'])
        self.assertTrue(self.notifier.assure_state(states.REQUESTED))

        yield self.core.execute_provision(launch, nodes)

    @defer.inlineCallbacks
    def test_execute_bad_doc(self):
        # An unparseable context document should fail the launch.
        ctx = yield self.ctx.create()
        launch_record = {
            'launch_id': "thelaunchid",
            'document': "<this><isnt><a><real><doc>",
            'deployable_type': "dt",
            'context': ctx,
            'subscribers': [],
            'state': states.PENDING,
            'node_ids': ['node1']
        }
        nodes = [{
            'node_id': 'node1',
            'launch_id': "thelaunchid",
            'state': states.REQUESTED
        }]
        yield self.core.execute_provision(launch_record, nodes)
        self.assertTrue(self.notifier.assure_state(states.FAILED))

        # TODO this should be a better error coming from nimboss
        #self.assertEqual(self.notifier.nodes['node1']['state_desc'], "CONTEXT_DOC_INVALID")

    @defer.inlineCallbacks
    def test_execute_bad_doc_nodes(self):
        # A node whose ctx_name does not appear in the document should
        # fail the launch.
        ctx = yield self.ctx.create()
        launch_record = {
            'launch_id': "thelaunchid",
            'document': _get_one_node_cluster_doc("node1", "image1"),
            'deployable_type': "dt",
            'context': ctx,
            'subscribers': [],
            'state': states.PENDING,
            'node_ids': ['node1']
        }
        nodes = [{
            'node_id': 'node1',
            'launch_id': "thelaunchid",
            'state': states.REQUESTED,
            'ctx_name': "adifferentname"
        }]
        yield self.core.execute_provision(launch_record, nodes)
        self.assertTrue(self.notifier.assure_state(states.FAILED))

    @defer.inlineCallbacks
    def test_execute_bad_doc_node_count(self):
        ctx = yield self.ctx.create()
        launch_record = {
            'launch_id': "thelaunchid",
            'document': _get_one_node_cluster_doc("node1", "image1"),
            'deployable_type': "dt",
            'context': ctx,
            'subscribers': [],
            'state': states.PENDING,
            'node_ids': ['node1']
        }

        # two nodes where doc expects 1
        # NOTE(review): both records share node_id 'node1' — looks like a
        # copy-paste slip, though the count mismatch is what matters here.
        nodes = [{
            'node_id': 'node1',
            'launch_id': "thelaunchid",
            'state': states.REQUESTED,
            'ctx_name': "node1"
        }, {
            'node_id': 'node1',
            'launch_id': "thelaunchid",
            'state': states.REQUESTED,
            'ctx_name': "node1"
        }]
        yield self.core.execute_provision(launch_record, nodes)
        self.assertTrue(self.notifier.assure_state(states.FAILED))

    @defer.inlineCallbacks
    def test_query_missing_node_within_window(self):
        # A PENDING node missing from the IaaS listing is tolerated while
        # still within the grace window (30s here): no notification sent.
        launch_id = _new_id()
        node_id = _new_id()
        ts = time.time() - 30.0
        launch = {
            'launch_id': launch_id,
            'node_ids': [node_id],
            'state': states.PENDING,
            'subscribers': 'fake-subscribers'
        }
        node = {
            'launch_id': launch_id,
            'node_id': node_id,
            'state': states.PENDING,
            'pending_timestamp': ts
        }
        yield self.store.put_launch(launch)
        yield self.store.put_node(node)
        yield self.core.query_one_site('fake-site', [node],
                                       driver=FakeEmptyNodeQueryDriver())
        self.assertEqual(len(self.notifier.nodes), 0)

    @defer.inlineCallbacks
    def test_query_missing_node_past_window(self):
        # The same missing node past the grace window (120s) is declared
        # FAILED.
        launch_id = _new_id()
        node_id = _new_id()
        ts = time.time() - 120.0
        launch = {
            'launch_id': launch_id,
            'node_ids': [node_id],
            'state': states.PENDING,
            'subscribers': 'fake-subscribers'
        }
        node = {
            'launch_id': launch_id,
            'node_id': node_id,
            'state': states.PENDING,
            'pending_timestamp': ts
        }
        yield self.store.put_launch(launch)
        yield self.store.put_node(node)
        yield self.core.query_one_site('fake-site', [node],
                                       driver=FakeEmptyNodeQueryDriver())
        self.assertEqual(len(self.notifier.nodes), 1)
        self.assertTrue(self.notifier.assure_state(states.FAILED))

    @defer.inlineCallbacks
    def test_query(self):
        """Full lifecycle through query_one_site: PENDING -> STARTED ->
        TERMINATED, with IP info copied from the IaaS node."""
        launch_id = _new_id()
        node_id = _new_id()
        iaas_node = self.site1_driver.create_node()[0]
        self.site1_driver.set_node_running(iaas_node.id)
        ts = time.time() - 120.0
        launch = {
            'launch_id': launch_id,
            'node_ids': [node_id],
            'state': states.PENDING,
            'subscribers': 'fake-subscribers'
        }
        node = {
            'launch_id': launch_id,
            'node_id': node_id,
            'state': states.PENDING,
            'pending_timestamp': ts,
            'iaas_id': iaas_node.id,
            'site': 'site1'
        }
        # A still-REQUESTED node rides along to verify it is left alone.
        req_node = {
            'launch_id': launch_id,
            'node_id': _new_id(),
            'state': states.REQUESTED
        }
        nodes = [node, req_node]
        yield self.store.put_launch(launch)
        yield self.store.put_node(node)
        yield self.store.put_node(req_node)

        yield self.core.query_one_site('site1', nodes)

        node = yield self.store.get_node(node_id)
        self.assertEqual(node['public_ip'], iaas_node.public_ip)
        self.assertEqual(node['private_ip'], iaas_node.private_ip)
        self.assertEqual(node['state'], states.STARTED)

        # query again should detect no changes
        yield self.core.query_one_site('site1', nodes)

        # now destroy
        yield self.core.terminate_nodes([node_id])
        node = yield self.store.get_node(node_id)
        yield self.core.query_one_site('site1', [node])

        node = yield self.store.get_node(node_id)
        self.assertEqual(node['public_ip'], iaas_node.public_ip)
        self.assertEqual(node['private_ip'], iaas_node.private_ip)
        self.assertEqual(node['state'], states.TERMINATED)

    @defer.inlineCallbacks
    def test_query_ctx(self):
        # Nodes move to RUNNING one at a time as they report to the
        # context broker.
        node_count = 3
        launch_id = _new_id()
        node_records = [
            make_node(launch_id, states.STARTED) for i in range(node_count)
        ]
        launch_record = make_launch(launch_id, states.PENDING, node_records)
        yield self.store.put_launch(launch_record)
        yield self.store.put_nodes(node_records)

        self.ctx.expected_count = len(node_records)
        self.ctx.complete = False
        self.ctx.error = False

        #first query with no ctx nodes. zero records should be updated
        yield self.core.query_contexts()
        self.assertTrue(self.notifier.assure_record_count(0))

        # all but 1 node have reported ok
        self.ctx.nodes = [
            _one_fake_ctx_node_ok(node_records[i]['public_ip'], _new_id(),
                                  _new_id()) for i in range(node_count - 1)
        ]
        yield self.core.query_contexts()
        self.assertTrue(self.notifier.assure_state(states.RUNNING))
        self.assertEqual(len(self.notifier.nodes), node_count - 1)

        #last node reports ok
        self.ctx.nodes.append(
            _one_fake_ctx_node_ok(node_records[-1]['public_ip'], _new_id(),
                                  _new_id()))
        self.ctx.complete = True
        yield self.core.query_contexts()
        self.assertTrue(self.notifier.assure_state(states.RUNNING))
        # Only the one newly-reported node gets a fresh record.
        self.assertTrue(self.notifier.assure_record_count(1))

    @defer.inlineCallbacks
    def test_query_ctx_error(self):
        # A node that reports an error to the broker ends RUNNING_FAILED
        # while its healthy siblings end RUNNING.
        node_count = 3
        launch_id = _new_id()
        node_records = [
            make_node(launch_id, states.STARTED) for i in range(node_count)
        ]
        launch_record = make_launch(launch_id, states.PENDING, node_records)
        yield self.store.put_launch(launch_record)
        yield self.store.put_nodes(node_records)

        self.ctx.expected_count = len(node_records)
        self.ctx.complete = False
        self.ctx.error = False

        # all but 1 node have reported ok
        self.ctx.nodes = [
            _one_fake_ctx_node_ok(node_records[i]['public_ip'], _new_id(),
                                  _new_id()) for i in range(node_count - 1)
        ]
        self.ctx.nodes.append(
            _one_fake_ctx_node_error(node_records[-1]['public_ip'], _new_id(),
                                     _new_id()))
        ok_ids = [node_records[i]['node_id'] for i in range(node_count - 1)]
        error_ids = [node_records[-1]['node_id']]
        self.ctx.complete = True
        self.ctx.error = True

        yield self.core.query_contexts()
        self.assertTrue(self.notifier.assure_state(states.RUNNING, ok_ids))
        self.assertTrue(
            self.notifier.assure_state(states.RUNNING_FAILED, error_ids))

    @defer.inlineCallbacks
    def test_query_ctx_nodes_not_started(self):
        # With some nodes still PENDING, no broker query should be issued.
        launch_id = _new_id()
        node_records = [make_node(launch_id, states.PENDING) for i in range(3)]
        node_records.append(make_node(launch_id, states.STARTED))
        launch_record = make_launch(launch_id, states.PENDING, node_records)
        yield self.store.put_launch(launch_record)
        yield self.store.put_nodes(node_records)

        yield self.core.query_contexts()

        # ensure that no context was actually queried. See the note in
        # _query_one_context for the reason why this is important.
        self.assertEqual(len(self.ctx.queried_uris), 0)

    @defer.inlineCallbacks
    def test_query_ctx_permanent_broker_error(self):
        # ContextNotFoundError is permanent: nodes become RUNNING_FAILED
        # and the launch itself is marked FAILED.
        node_count = 3
        launch_id = _new_id()
        node_records = [
            make_node(launch_id, states.STARTED) for i in range(node_count)
        ]
        node_ids = [node['node_id'] for node in node_records]
        launch_record = make_launch(launch_id, states.PENDING, node_records)
        yield self.store.put_launch(launch_record)
        yield self.store.put_nodes(node_records)

        self.ctx.query_error = ContextNotFoundError()
        yield self.core.query_contexts()

        self.assertTrue(
            self.notifier.assure_state(states.RUNNING_FAILED, node_ids))
        launch = yield self.store.get_launch(launch_id)
        self.assertEqual(launch['state'], states.FAILED)

    def test_update_node_ip_info(self):
        # IPs are copied from the IaaS node when present (first element of
        # the list), and existing values are preserved when the IaaS node
        # has none.
        node = dict(public_ip=None)
        iaas_node = Mock(public_ip=None, private_ip=None)
        update_node_ip_info(node, iaas_node)
        self.assertEqual(node['public_ip'], None)
        self.assertEqual(node['private_ip'], None)

        iaas_node = Mock(public_ip=["pub1"], private_ip=["priv1"])
        update_node_ip_info(node, iaas_node)
        self.assertEqual(node['public_ip'], "pub1")
        self.assertEqual(node['private_ip'], "priv1")

        # Empty lists must not clobber previously-recorded IPs.
        iaas_node = Mock(public_ip=[], private_ip=[])
        update_node_ip_info(node, iaas_node)
        self.assertEqual(node['public_ip'], "pub1")
        self.assertEqual(node['private_ip'], "priv1")

    def test_update_nodes_from_ctx(self):
        # Every node should match a ctx node keyed by public_ip.
        launch_id = _new_id()
        nodes = [make_node(launch_id, states.STARTED) for i in range(5)]
        ctx_nodes = [
            _one_fake_ctx_node_ok(node['public_ip'], _new_id(), _new_id())
            for node in nodes
        ]
        self.assertEquals(len(nodes),
                          len(update_nodes_from_context(nodes, ctx_nodes)))

    def test_update_nodes_from_ctx_with_hostname(self):
        launch_id = _new_id()
        nodes = [make_node(launch_id, states.STARTED) for i in range(5)]

        #libcloud puts the hostname in the public_ip field
        ctx_nodes = [
            _one_fake_ctx_node_ok(ip=_new_id(),
                                  hostname=node['public_ip'],
                                  pubkey=_new_id()) for node in nodes
        ]
        self.assertEquals(len(nodes),
                          len(update_nodes_from_context(nodes, ctx_nodes)))

    @defer.inlineCallbacks
    def test_query_broker_exception(self):
        for i in range(2):
            launch_id = _new_id()
            node_records = [make_node(launch_id, states.STARTED)]
            launch_record = make_launch(launch_id, states.PENDING,
                                        node_records)
            yield self.store.put_launch(launch_record)
            yield self.store.put_nodes(node_records)

        # no guaranteed order here so grabbing first launch from store
        # and making that one return a BrokerError during context query.
        # The goal is to ensure that one error doesn't prevent querying
        # for other contexts.
        launches = yield self.store.get_launches(state=states.PENDING)
        error_launch = launches[0]
        error_launch_ctx = error_launch['context']['uri']
        ok_node_id = launches[1]['node_ids'][0]
        ok_node = yield self.store.get_node(ok_node_id)

        self.ctx.uri_query_error[error_launch_ctx] = BrokerError("bad broker")
        self.ctx.nodes = [
            _one_fake_ctx_node_ok(ok_node['public_ip'], _new_id(), _new_id())
        ]
        self.ctx.complete = True
        yield self.core.query_contexts()

        # Both contexts must have been queried; only the healthy launch
        # advances to RUNNING.
        launches = yield self.store.get_launches()
        for launch in launches:
            self.assertIn(launch['context']['uri'], self.ctx.queried_uris)
            if launch['launch_id'] == error_launch['launch_id']:
                self.assertEqual(launch['state'], states.PENDING)
                expected_node_state = states.STARTED
            else:
                self.assertEqual(launch['state'], states.RUNNING)
                expected_node_state = states.RUNNING
            node = yield self.store.get_node(launch['node_ids'][0])
            self.assertEqual(node['state'], expected_node_state)

    @defer.inlineCallbacks
    def test_query_ctx_without_valid_nodes(self):

        # if there are no nodes < TERMINATING, no broker query should happen
        for i in range(3):
            launch_id = _new_id()
            node_records = [make_node(launch_id, states.STARTED)]
            launch_record = make_launch(launch_id, states.PENDING,
                                        node_records)
            yield self.store.put_launch(launch_record)
            yield self.store.put_nodes(node_records)

        launches = yield self.store.get_launches(state=states.PENDING)
        error_launch = launches[0]

        # mark first launch's node as TERMINATING, should prevent
        # context query and result in launch being marked FAILED
        error_launch_node = yield self.store.get_node(
            error_launch['node_ids'][0])
        error_launch_node['state'] = states.TERMINATING
        yield self.store.put_node(error_launch_node)

        yield self.core.query_contexts()
        self.assertNotIn(error_launch['context']['uri'],
                         self.ctx.queried_uris)

        launches = yield self.store.get_launches()
        for launch in launches:
            if launch['launch_id'] == error_launch['launch_id']:
                self.assertEqual(launch['state'], states.FAILED)
                expected_node_state = states.TERMINATING
            else:
                self.assertEqual(launch['state'], states.PENDING)
                expected_node_state = states.STARTED
            node = yield self.store.get_node(launch['node_ids'][0])
            self.assertEqual(node['state'], expected_node_state)

    @defer.inlineCallbacks
    def test_query_unexpected_exception(self):
        launch_id = _new_id()
        node_records = [make_node(launch_id, states.STARTED)]
        launch_record = make_launch(launch_id, states.PENDING, node_records)
        yield self.store.put_launch(launch_record)
        yield self.store.put_nodes(node_records)
        self.ctx.query_error = ValueError("bad programmer")

        # digging into internals a bit: patching one of the methods query()
        # calls to raise an exception. This will let us ensure exceptions do
        # not bubble up
        def raiser(self):
            raise KeyError("notreallyaproblem")

        self.patch(self.core, 'query_nodes', raiser)

        yield self.core.query()  # ensure that exception doesn't bubble up

    @defer.inlineCallbacks
    def test_dump_state(self):
        # dump_state should notify only for the requested node ids.
        node_ids = []
        node_records = []
        for i in range(3):
            launch_id = _new_id()
            nodes = [make_node(launch_id, states.PENDING)]
            node_ids.append(nodes[0]['node_id'])
            node_records.extend(nodes)
            launch = make_launch(launch_id, states.PENDING, nodes)
            yield self.store.put_launch(launch)
            yield self.store.put_nodes(nodes)

        yield self.core.dump_state(node_ids[:2])

        # should have gotten notifications about the 2 nodes
        self.assertEqual(self.notifier.nodes_rec_count[node_ids[0]], 1)
        self.assertEqual(node_records[0], self.notifier.nodes[node_ids[0]])
        self.assertEqual(node_records[1], self.notifier.nodes[node_ids[1]])
        self.assertEqual(self.notifier.nodes_rec_count[node_ids[1]], 1)
        self.assertNotIn(node_ids[2], self.notifier.nodes)

    @defer.inlineCallbacks
    def test_mark_nodes_terminating(self):
        # Only the explicitly-listed nodes should move to TERMINATING.
        launch_id = _new_id()
        node_records = [make_node(launch_id, states.RUNNING) for i in range(3)]
        launch_record = make_launch(launch_id, states.PENDING, node_records)
        yield self.store.put_launch(launch_record)
        yield self.store.put_nodes(node_records)

        first_two_node_ids = [
            node_records[0]['node_id'], node_records[1]['node_id']
        ]
        yield self.core.mark_nodes_terminating(first_two_node_ids)

        self.assertTrue(
            self.notifier.assure_state(states.TERMINATING,
                                       nodes=first_two_node_ids))
        self.assertNotIn(node_records[2]['node_id'], self.notifier.nodes)

        for node_id in first_two_node_ids:
            terminating_node = yield self.store.get_node(node_id)
            self.assertEqual(terminating_node['state'], states.TERMINATING)
class BaseProvisionerStoreTests(unittest.TestCase):
    """Tests and shared helpers for ProvisionerStore record persistence.

    Fixes over the prior revision: the helper varargs were named ``*states``,
    which shadowed the module-level ``states`` import used throughout this
    class; and a local named ``all`` shadowed the builtin. Both are renamed
    (local names only — callers pass varargs positionally, so the helper
    interface is unchanged).
    """

    def setUp(self):
        self.store = ProvisionerStore()

    def tearDown(self):
        self.store = None

    @defer.inlineCallbacks
    def test_put_get_launches(self):
        """Exercise put/get of launch records, monotonic state updates, and
        state-range queries."""

        launch_id_1 = new_id()
        l1 = {'launch_id': launch_id_1, 'state': states.REQUESTED}
        yield self.store.put_launch(l1)

        latest = yield self.store.get_launch(launch_id_1)
        self.assertEqual(launch_id_1, latest['launch_id'])
        self.assertEqual(states.REQUESTED, latest['state'])

        # A higher state overwrites the stored record.
        l2 = l1.copy()
        l2['state'] = states.PENDING
        yield self.store.put_launch(l2)
        latest = yield self.store.get_launch(launch_id_1)
        self.assertEqual(launch_id_1, latest['launch_id'])
        self.assertEqual(states.PENDING, latest['state'])

        # store another launch altogether
        launch_id_2 = new_id()
        l3 = {'launch_id': launch_id_2, 'state': states.REQUESTED}
        yield self.store.put_launch(l3)

        latest = yield self.store.get_launch(launch_id_2)
        self.assertEqual(launch_id_2, latest['launch_id'])
        self.assertEqual(states.REQUESTED, latest['state'])

        # put the first launch record again, should not overwrite l2
        # because state is lower
        yield self.store.put_launch(l2)
        latest = yield self.store.get_launch(launch_id_1)
        self.assertEqual(launch_id_1, latest['launch_id'])
        self.assertEqual(states.PENDING, latest['state'])

        latest = yield self.store.get_launch(launch_id_2)
        self.assertEqual(launch_id_2, latest['launch_id'])
        self.assertEqual(states.REQUESTED, latest['state'])

        # add a third launch with request, pending, and running records
        launch_id_3 = new_id()
        l4 = {'launch_id': launch_id_3, 'state': states.REQUESTED}
        yield self.store.put_launch(l4)
        l5 = {'launch_id': launch_id_3, 'state': states.PENDING}
        yield self.store.put_launch(l5)
        l6 = {'launch_id': launch_id_3, 'state': states.RUNNING}
        yield self.store.put_launch(l6)

        all_launches = yield self.store.get_launches()
        self.assertEqual(3, len(all_launches))
        for launch in all_launches:
            self.assertIn(launch['launch_id'],
                          (launch_id_1, launch_id_2, launch_id_3))

        # try some range queries
        requested = yield self.store.get_launches(state=states.REQUESTED)
        self.assertEqual(1, len(requested))
        self.assertEqual(launch_id_2, requested[0]['launch_id'])

        requested = yield self.store.get_launches(
            min_state=states.REQUESTED, max_state=states.REQUESTED)
        self.assertEqual(1, len(requested))
        self.assertEqual(launch_id_2, requested[0]['launch_id'])

        at_least_requested = yield self.store.get_launches(
            min_state=states.REQUESTED)
        self.assertEqual(3, len(at_least_requested))
        for launch in at_least_requested:
            self.assertIn(launch['launch_id'],
                          (launch_id_1, launch_id_2, launch_id_3))

        at_least_pending = yield self.store.get_launches(
            min_state=states.PENDING)
        self.assertEqual(2, len(at_least_pending))
        for launch in at_least_pending:
            self.assertIn(launch['launch_id'], (launch_id_1, launch_id_3))

        at_most_pending = yield self.store.get_launches(
            max_state=states.PENDING)
        self.assertEqual(2, len(at_most_pending))
        for launch in at_most_pending:
            self.assertIn(launch['launch_id'], (launch_id_1, launch_id_2))

    @defer.inlineCallbacks
    def put_node(self, node_id, *node_states):
        """Store one record per state, in order, for the given node id."""
        for state in node_states:
            record = {'node_id': node_id, 'state': state}
            yield self.store.put_node(record)

    @defer.inlineCallbacks
    def put_many_nodes(self, count, *node_states):
        """Create ``count`` nodes, each walked through ``node_states``.

        Returns the set of generated node ids.
        """
        node_ids = set(str(uuid.uuid4()) for i in range(count))
        for node_id in node_ids:
            yield self.put_node(node_id, *node_states)
        defer.returnValue(node_ids)

    def assertNodesInSet(self, nodes, *sets):
        """Assert node ids are unique and each appears in at least one set."""
        node_ids = set(node["node_id"] for node in nodes)
        self.assertEqual(len(nodes), len(node_ids))

        for node_id in node_ids:
            if not any(node_id in aset for aset in sets):
                self.fail("node %s not in any set" % node_id)
# NOTE(review): this class duplicates the ProvisionerCoreTests defined earlier
# in this file, token-for-token; this later definition shadows the earlier one
# at import time. Likely a merge/paste artifact — confirm and deduplicate.
class ProvisionerCoreTests(unittest.TestCase):
    """Tests for ProvisionerCore provisioning, query, and termination logic.

    All collaborators are fakes: the notifier records state notifications,
    the context client fakes the context broker, the DTRS fakes deployable
    type lookups, and FakeNodeDriver fakes the IaaS driver per site.
    """

    def setUp(self):
        # Fresh fakes per test so notification/record state never leaks
        # between test methods.
        self.notifier = FakeProvisionerNotifier()
        self.store = ProvisionerStore()
        self.ctx = FakeContextClient()
        self.dtrs = FakeDTRS()
        self.site1_driver = FakeNodeDriver()
        self.site2_driver = FakeNodeDriver()

        drivers = {'site1' : self.site1_driver, 'site2' : self.site2_driver}
        self.core = ProvisionerCore(store=self.store,
                                    notifier=self.notifier,
                                    dtrs=self.dtrs,
                                    context=self.ctx,
                                    site_drivers=drivers)

    @defer.inlineCallbacks
    def test_prepare_dtrs_error(self):
        # A deployable-type lookup failure during prepare should mark the
        # request's nodes FAILED rather than raising.
        self.dtrs.error = DeployableTypeLookupError()
        nodes = {"i1" : dict(ids=[_new_id()], site="chicago",
                             allocation="small")}
        request = dict(launch_id=_new_id(), deployable_type="foo",
                       subscribers=('blah',), nodes=nodes)
        yield self.core.prepare_provision(request)
        self.assertTrue(self.notifier.assure_state(states.FAILED))

    @defer.inlineCallbacks
    def test_prepare_broker_error(self):
        # A context-broker create failure during prepare should likewise
        # result in FAILED notifications.
        self.ctx.create_error = BrokerError("fake ctx create failed")
        self.dtrs.result = {'document' : "<fake>document</fake>",
                            "nodes" : {"i1" : {}}}
        nodes = {"i1" : dict(ids=[_new_id()], site="site1",
                             allocation="small")}
        request = dict(launch_id=_new_id(), deployable_type="foo",
                       subscribers=('blah',), nodes=nodes)
        yield self.core.prepare_provision(request)
        self.assertTrue(self.notifier.assure_state(states.FAILED))

    @defer.inlineCallbacks
    def test_prepare_execute(self):
        # Happy path: prepare + execute leaves nodes PENDING.
        yield self._prepare_execute()
        self.assertTrue(self.notifier.assure_state(states.PENDING))

    @defer.inlineCallbacks
    def test_prepare_execute_iaas_fail(self):
        # An IaaS credential error during node creation should mark the
        # launch FAILED instead of propagating.
        self.site1_driver.create_node_error = InvalidCredsError()
        yield self._prepare_execute()
        self.assertTrue(self.notifier.assure_state(states.FAILED))

    @defer.inlineCallbacks
    def _prepare_execute(self):
        """Shared helper: prepare a one-node launch, verify the prepared
        records, then run execute_provision on them."""
        self.dtrs.result = {'document' : _get_one_node_cluster_doc("node1",
                                                                   "image1"),
                            "nodes" : {"node1" : {}}}
        request_node = dict(ids=[_new_id()], site="site1", allocation="small")
        request_nodes = {"node1" : request_node}
        request = dict(launch_id=_new_id(), deployable_type="foo",
                       subscribers=('blah',), nodes=request_nodes)

        launch, nodes = yield self.core.prepare_provision(request)
        self.assertEqual(len(nodes), 1)
        node = nodes[0]
        self.assertEqual(node['node_id'], request_node['ids'][0])
        self.assertEqual(launch['launch_id'], request['launch_id'])

        # The launch record must carry the context created via the fake
        # broker, with all the expected context fields present.
        self.assertTrue(self.ctx.last_create)
        self.assertEqual(launch['context'], self.ctx.last_create)
        for key in ('uri', 'secret', 'context_id', 'broker_uri'):
            self.assertIn(key, launch['context'])
        self.assertTrue(self.notifier.assure_state(states.REQUESTED))

        yield self.core.execute_provision(launch, nodes)

    @defer.inlineCallbacks
    def test_execute_bad_doc(self):
        # An unparseable context document should fail the launch.
        ctx = yield self.ctx.create()
        launch_record = {
                'launch_id' : "thelaunchid",
                'document' : "<this><isnt><a><real><doc>",
                'deployable_type' : "dt",
                'context' : ctx,
                'subscribers' : [],
                'state' : states.PENDING,
                'node_ids' : ['node1']}
        nodes = [{'node_id' : 'node1', 'launch_id' : "thelaunchid",
                  'state' : states.REQUESTED}]

        yield self.core.execute_provision(launch_record, nodes)
        self.assertTrue(self.notifier.assure_state(states.FAILED))

        # TODO this should be a better error coming from nimboss
        #self.assertEqual(self.notifier.nodes['node1']['state_desc'], "CONTEXT_DOC_INVALID")

    @defer.inlineCallbacks
    def test_execute_bad_doc_nodes(self):
        # A node whose ctx_name does not appear in the document should
        # fail the launch.
        ctx = yield self.ctx.create()
        launch_record = {
                'launch_id' : "thelaunchid",
                'document' : _get_one_node_cluster_doc("node1", "image1"),
                'deployable_type' : "dt",
                'context' : ctx,
                'subscribers' : [],
                'state' : states.PENDING,
                'node_ids' : ['node1']}
        nodes = [{'node_id' : 'node1', 'launch_id' : "thelaunchid",
                  'state' : states.REQUESTED, 'ctx_name' : "adifferentname"}]

        yield self.core.execute_provision(launch_record, nodes)
        self.assertTrue(self.notifier.assure_state(states.FAILED))

    @defer.inlineCallbacks
    def test_execute_bad_doc_node_count(self):
        ctx = yield self.ctx.create()
        launch_record = {
                'launch_id' : "thelaunchid",
                'document' : _get_one_node_cluster_doc("node1", "image1"),
                'deployable_type' : "dt",
                'context' : ctx,
                'subscribers' : [],
                'state' : states.PENDING,
                'node_ids' : ['node1']}

        # two nodes where doc expects 1
        # NOTE(review): both records share node_id 'node1' — looks like a
        # copy-paste slip, though the count mismatch is what matters here.
        nodes = [{'node_id' : 'node1', 'launch_id' : "thelaunchid",
                  'state' : states.REQUESTED, 'ctx_name' : "node1"},
                 {'node_id' : 'node1', 'launch_id' : "thelaunchid",
                  'state' : states.REQUESTED, 'ctx_name' : "node1"}]

        yield self.core.execute_provision(launch_record, nodes)
        self.assertTrue(self.notifier.assure_state(states.FAILED))

    @defer.inlineCallbacks
    def test_query_missing_node_within_window(self):
        # A PENDING node missing from the IaaS listing is tolerated while
        # still within the grace window (30s here): no notification sent.
        launch_id = _new_id()
        node_id = _new_id()
        ts = time.time() - 30.0
        launch = {'launch_id' : launch_id, 'node_ids' : [node_id],
                  'state' : states.PENDING,
                  'subscribers' : 'fake-subscribers'}
        node = {'launch_id' : launch_id,
                'node_id' : node_id,
                'state' : states.PENDING,
                'pending_timestamp' : ts}
        yield self.store.put_launch(launch)
        yield self.store.put_node(node)

        yield self.core.query_one_site('fake-site', [node],
                                       driver=FakeEmptyNodeQueryDriver())
        self.assertEqual(len(self.notifier.nodes), 0)

    @defer.inlineCallbacks
    def test_query_missing_node_past_window(self):
        # The same missing node past the grace window (120s) is declared
        # FAILED.
        launch_id = _new_id()
        node_id = _new_id()

        ts = time.time() - 120.0
        launch = {
                'launch_id' : launch_id, 'node_ids' : [node_id],
                'state' : states.PENDING,
                'subscribers' : 'fake-subscribers'}
        node = {'launch_id' : launch_id,
                'node_id' : node_id,
                'state' : states.PENDING,
                'pending_timestamp' : ts}
        yield self.store.put_launch(launch)
        yield self.store.put_node(node)

        yield self.core.query_one_site('fake-site', [node],
                                       driver=FakeEmptyNodeQueryDriver())
        self.assertEqual(len(self.notifier.nodes), 1)
        self.assertTrue(self.notifier.assure_state(states.FAILED))

    @defer.inlineCallbacks
    def test_query(self):
        """Full lifecycle through query_one_site: PENDING -> STARTED ->
        TERMINATED, with IP info copied from the IaaS node."""
        launch_id = _new_id()
        node_id = _new_id()

        iaas_node = self.site1_driver.create_node()[0]
        self.site1_driver.set_node_running(iaas_node.id)

        ts = time.time() - 120.0
        launch = {
                'launch_id' : launch_id, 'node_ids' : [node_id],
                'state' : states.PENDING,
                'subscribers' : 'fake-subscribers'}
        node = {'launch_id' : launch_id,
                'node_id' : node_id,
                'state' : states.PENDING,
                'pending_timestamp' : ts,
                'iaas_id' : iaas_node.id,
                'site':'site1'}

        # A still-REQUESTED node rides along to verify it is left alone.
        req_node = {'launch_id' : launch_id,
                    'node_id' : _new_id(),
                    'state' : states.REQUESTED}
        nodes = [node, req_node]
        yield self.store.put_launch(launch)
        yield self.store.put_node(node)
        yield self.store.put_node(req_node)

        yield self.core.query_one_site('site1', nodes)

        node = yield self.store.get_node(node_id)
        self.assertEqual(node['public_ip'], iaas_node.public_ip)
        self.assertEqual(node['private_ip'], iaas_node.private_ip)
        self.assertEqual(node['state'], states.STARTED)

        # query again should detect no changes
        yield self.core.query_one_site('site1', nodes)

        # now destroy
        yield self.core.terminate_nodes([node_id])
        node = yield self.store.get_node(node_id)
        yield self.core.query_one_site('site1', [node])

        node = yield self.store.get_node(node_id)
        self.assertEqual(node['public_ip'], iaas_node.public_ip)
        self.assertEqual(node['private_ip'], iaas_node.private_ip)
        self.assertEqual(node['state'], states.TERMINATED)

    @defer.inlineCallbacks
    def test_query_ctx(self):
        # Nodes move to RUNNING one at a time as they report to the
        # context broker.
        node_count = 3
        launch_id = _new_id()
        node_records = [make_node(launch_id, states.STARTED)
                        for i in range(node_count)]
        launch_record = make_launch(launch_id, states.PENDING, node_records)

        yield self.store.put_launch(launch_record)
        yield self.store.put_nodes(node_records)

        self.ctx.expected_count = len(node_records)
        self.ctx.complete = False
        self.ctx.error = False

        #first query with no ctx nodes. zero records should be updated
        yield self.core.query_contexts()
        self.assertTrue(self.notifier.assure_record_count(0))

        # all but 1 node have reported ok
        self.ctx.nodes = [_one_fake_ctx_node_ok(node_records[i]['public_ip'],
                                                _new_id(), _new_id())
                          for i in range(node_count-1)]

        yield self.core.query_contexts()
        self.assertTrue(self.notifier.assure_state(states.RUNNING))
        self.assertEqual(len(self.notifier.nodes), node_count-1)

        #last node reports ok
        self.ctx.nodes.append(_one_fake_ctx_node_ok(node_records[-1]['public_ip'],
                                                    _new_id(), _new_id()))
        self.ctx.complete = True

        yield self.core.query_contexts()
        self.assertTrue(self.notifier.assure_state(states.RUNNING))
        # Only the one newly-reported node gets a fresh record.
        self.assertTrue(self.notifier.assure_record_count(1))

    @defer.inlineCallbacks
    def test_query_ctx_error(self):
        # A node that reports an error to the broker ends RUNNING_FAILED
        # while its healthy siblings end RUNNING.
        node_count = 3
        launch_id = _new_id()
        node_records = [make_node(launch_id, states.STARTED)
                        for i in range(node_count)]
        launch_record = make_launch(launch_id, states.PENDING, node_records)

        yield self.store.put_launch(launch_record)
        yield self.store.put_nodes(node_records)

        self.ctx.expected_count = len(node_records)
        self.ctx.complete = False
        self.ctx.error = False

        # all but 1 node have reported ok
        self.ctx.nodes = [_one_fake_ctx_node_ok(node_records[i]['public_ip'],
                                                _new_id(), _new_id())
                          for i in range(node_count-1)]
        self.ctx.nodes.append(_one_fake_ctx_node_error(node_records[-1]['public_ip'],
                                                       _new_id(), _new_id()))
        ok_ids = [node_records[i]['node_id'] for i in range(node_count-1)]
        error_ids = [node_records[-1]['node_id']]
        self.ctx.complete = True
        self.ctx.error = True

        yield self.core.query_contexts()
        self.assertTrue(self.notifier.assure_state(states.RUNNING, ok_ids))
        self.assertTrue(self.notifier.assure_state(states.RUNNING_FAILED,
                                                   error_ids))

    @defer.inlineCallbacks
    def test_query_ctx_nodes_not_started(self):
        # With some nodes still PENDING, no broker query should be issued.
        launch_id = _new_id()
        node_records = [make_node(launch_id, states.PENDING)
                        for i in range(3)]
        node_records.append(make_node(launch_id, states.STARTED))
        launch_record = make_launch(launch_id, states.PENDING, node_records)
        yield self.store.put_launch(launch_record)
        yield self.store.put_nodes(node_records)

        yield self.core.query_contexts()

        # ensure that no context was actually queried. See the note in
        # _query_one_context for the reason why this is important.
        self.assertEqual(len(self.ctx.queried_uris), 0)

    @defer.inlineCallbacks
    def test_query_ctx_permanent_broker_error(self):
        # ContextNotFoundError is permanent: nodes become RUNNING_FAILED
        # and the launch itself is marked FAILED.
        node_count = 3
        launch_id = _new_id()
        node_records = [make_node(launch_id, states.STARTED)
                        for i in range(node_count)]
        node_ids = [node['node_id'] for node in node_records]
        launch_record = make_launch(launch_id, states.PENDING, node_records)
        yield self.store.put_launch(launch_record)
        yield self.store.put_nodes(node_records)

        self.ctx.query_error = ContextNotFoundError()
        yield self.core.query_contexts()

        self.assertTrue(self.notifier.assure_state(states.RUNNING_FAILED,
                                                   node_ids))
        launch = yield self.store.get_launch(launch_id)
        self.assertEqual(launch['state'], states.FAILED)

    def test_update_node_ip_info(self):
        # IPs are copied from the IaaS node when present (first element of
        # the list), and existing values are preserved when the IaaS node
        # has none.
        node = dict(public_ip=None)
        iaas_node = Mock(public_ip=None, private_ip=None)
        update_node_ip_info(node, iaas_node)
        self.assertEqual(node['public_ip'], None)
        self.assertEqual(node['private_ip'], None)

        iaas_node = Mock(public_ip=["pub1"], private_ip=["priv1"])
        update_node_ip_info(node, iaas_node)
        self.assertEqual(node['public_ip'], "pub1")
        self.assertEqual(node['private_ip'], "priv1")

        # Empty lists must not clobber previously-recorded IPs.
        iaas_node = Mock(public_ip=[], private_ip=[])
        update_node_ip_info(node, iaas_node)
        self.assertEqual(node['public_ip'], "pub1")
        self.assertEqual(node['private_ip'], "priv1")

    def test_update_nodes_from_ctx(self):
        # Every node should match a ctx node keyed by public_ip.
        launch_id = _new_id()
        nodes = [make_node(launch_id, states.STARTED)
                 for i in range(5)]
        ctx_nodes = [_one_fake_ctx_node_ok(node['public_ip'], _new_id(),
                                           _new_id()) for node in nodes]

        self.assertEquals(len(nodes),
                          len(update_nodes_from_context(nodes, ctx_nodes)))

    def test_update_nodes_from_ctx_with_hostname(self):
        launch_id = _new_id()
        nodes = [make_node(launch_id, states.STARTED)
                 for i in range(5)]

        #libcloud puts the hostname in the public_ip field
        ctx_nodes = [_one_fake_ctx_node_ok(ip=_new_id(),
                                           hostname=node['public_ip'],
                                           pubkey=_new_id())
                     for node in nodes]

        self.assertEquals(len(nodes),
                          len(update_nodes_from_context(nodes, ctx_nodes)))

    @defer.inlineCallbacks
    def test_query_broker_exception(self):
        for i in range(2):
            launch_id = _new_id()
            node_records = [make_node(launch_id, states.STARTED)]
            launch_record = make_launch(launch_id, states.PENDING,
                                        node_records)

            yield self.store.put_launch(launch_record)
            yield self.store.put_nodes(node_records)

        # no guaranteed order here so grabbing first launch from store
        # and making that one return a BrokerError during context query.
        # THe goal is to ensure that one error doesn't prevent querying
        # for other contexts.

        launches = yield self.store.get_launches(state=states.PENDING)
        error_launch = launches[0]
        error_launch_ctx = error_launch['context']['uri']
        ok_node_id = launches[1]['node_ids'][0]
        ok_node = yield self.store.get_node(ok_node_id)

        self.ctx.uri_query_error[error_launch_ctx] = BrokerError("bad broker")
        self.ctx.nodes = [_one_fake_ctx_node_ok(ok_node['public_ip'],
                                                _new_id(), _new_id())]
        self.ctx.complete = True
        yield self.core.query_contexts()

        # Both contexts must have been queried; only the healthy launch
        # advances to RUNNING.
        launches = yield self.store.get_launches()
        for launch in launches:
            self.assertIn(launch['context']['uri'], self.ctx.queried_uris)
            if launch['launch_id'] == error_launch['launch_id']:
                self.assertEqual(launch['state'], states.PENDING)
                expected_node_state = states.STARTED
            else:
                self.assertEqual(launch['state'], states.RUNNING)
                expected_node_state = states.RUNNING

            node = yield self.store.get_node(launch['node_ids'][0])
            self.assertEqual(node['state'], expected_node_state)

    @defer.inlineCallbacks
    def test_query_ctx_without_valid_nodes(self):

        # if there are no nodes < TERMINATING, no broker query should happen
        for i in range(3):
            launch_id = _new_id()
            node_records = [make_node(launch_id, states.STARTED)]
            launch_record = make_launch(launch_id, states.PENDING,
                                        node_records)

            yield self.store.put_launch(launch_record)
            yield self.store.put_nodes(node_records)

        launches = yield self.store.get_launches(state=states.PENDING)
        error_launch = launches[0]

        # mark first launch's node as TERMINATING, should prevent
        # context query and result in launch being marked FAILED
        error_launch_node = yield self.store.get_node(error_launch['node_ids'][0])
        error_launch_node['state'] = states.TERMINATING
        yield self.store.put_node(error_launch_node)

        yield self.core.query_contexts()
        self.assertNotIn(error_launch['context']['uri'],
                         self.ctx.queried_uris)

        launches = yield self.store.get_launches()
        for launch in launches:
            if launch['launch_id'] == error_launch['launch_id']:
                self.assertEqual(launch['state'], states.FAILED)
                expected_node_state = states.TERMINATING
            else:
                self.assertEqual(launch['state'], states.PENDING)
                expected_node_state = states.STARTED

            node = yield self.store.get_node(launch['node_ids'][0])
            self.assertEqual(node['state'], expected_node_state)

    @defer.inlineCallbacks
    def test_query_unexpected_exception(self):
        launch_id = _new_id()
        node_records = [make_node(launch_id, states.STARTED)]
        launch_record = make_launch(launch_id, states.PENDING, node_records)
        yield self.store.put_launch(launch_record)
        yield self.store.put_nodes(node_records)
        self.ctx.query_error = ValueError("bad programmer")

        # digging into internals a bit: patching one of the methods query()
        # calls to raise an exception. This will let us ensure exceptions do
        # not bubble up
        def raiser(self):
            raise KeyError("notreallyaproblem")

        self.patch(self.core, 'query_nodes', raiser)

        yield self.core.query()  # ensure that exception doesn't bubble up

    @defer.inlineCallbacks
    def test_dump_state(self):
        # dump_state should notify only for the requested node ids.
        node_ids = []
        node_records = []
        for i in range(3):
            launch_id = _new_id()
            nodes = [make_node(launch_id, states.PENDING)]
            node_ids.append(nodes[0]['node_id'])
            node_records.extend(nodes)
            launch = make_launch(launch_id, states.PENDING, nodes)
            yield self.store.put_launch(launch)
            yield self.store.put_nodes(nodes)

        yield self.core.dump_state(node_ids[:2])

        # should have gotten notifications about the 2 nodes
        self.assertEqual(self.notifier.nodes_rec_count[node_ids[0]], 1)
        self.assertEqual(node_records[0], self.notifier.nodes[node_ids[0]])
        self.assertEqual(node_records[1], self.notifier.nodes[node_ids[1]])
        self.assertEqual(self.notifier.nodes_rec_count[node_ids[1]], 1)
        self.assertNotIn(node_ids[2], self.notifier.nodes)

    @defer.inlineCallbacks
    def test_mark_nodes_terminating(self):
        # Only the explicitly-listed nodes should move to TERMINATING.
        launch_id = _new_id()
        node_records = [make_node(launch_id, states.RUNNING)
                        for i in range(3)]
        launch_record = make_launch(launch_id, states.PENDING, node_records)
        yield self.store.put_launch(launch_record)
        yield self.store.put_nodes(node_records)

        first_two_node_ids = [node_records[0]['node_id'],
                              node_records[1]['node_id']]
        yield self.core.mark_nodes_terminating(first_two_node_ids)

        self.assertTrue(self.notifier.assure_state(states.TERMINATING,
                                                   nodes=first_two_node_ids))
        self.assertNotIn(node_records[2]['node_id'], self.notifier.nodes)

        for node_id in first_two_node_ids:
            terminating_node = yield self.store.get_node(node_id)
            self.assertEqual(terminating_node['state'], states.TERMINATING)
class BaseProvisionerStoreTests(unittest.TestCase):
    """Tests for ProvisionerStore launch/node record storage."""

    def setUp(self):
        self.store = ProvisionerStore()

    def tearDown(self):
        self.store = None

    @defer.inlineCallbacks
    def test_put_get_launches(self):
        """Launch records advance by state and support range queries."""
        launch_id_1 = new_id()
        l1 = {'launch_id': launch_id_1, 'state': states.REQUESTED}
        yield self.store.put_launch(l1)

        latest = yield self.store.get_launch(launch_id_1)
        self.assertEqual(launch_id_1, latest['launch_id'])
        self.assertEqual(states.REQUESTED, latest['state'])

        l2 = l1.copy()
        l2['state'] = states.PENDING
        yield self.store.put_launch(l2)
        latest = yield self.store.get_launch(launch_id_1)
        self.assertEqual(launch_id_1, latest['launch_id'])
        self.assertEqual(states.PENDING, latest['state'])

        # store another launch altogether
        launch_id_2 = new_id()
        l3 = {'launch_id': launch_id_2, 'state': states.REQUESTED}
        yield self.store.put_launch(l3)
        latest = yield self.store.get_launch(launch_id_2)
        self.assertEqual(launch_id_2, latest['launch_id'])
        self.assertEqual(states.REQUESTED, latest['state'])

        # put the first launch record again, should not overwrite l2
        # because state is lower
        # NOTE(review): the comment suggests re-putting the REQUESTED record
        # (l1) here, but the code puts l2 — confirm intent against the store's
        # overwrite rules before changing.
        yield self.store.put_launch(l2)
        latest = yield self.store.get_launch(launch_id_1)
        self.assertEqual(launch_id_1, latest['launch_id'])
        self.assertEqual(states.PENDING, latest['state'])

        latest = yield self.store.get_launch(launch_id_2)
        self.assertEqual(launch_id_2, latest['launch_id'])
        self.assertEqual(states.REQUESTED, latest['state'])

        # add a third launch with request, pending, and running records
        launch_id_3 = new_id()
        l4 = {'launch_id': launch_id_3, 'state': states.REQUESTED}
        yield self.store.put_launch(l4)
        l5 = {'launch_id': launch_id_3, 'state': states.PENDING}
        yield self.store.put_launch(l5)
        l6 = {'launch_id': launch_id_3, 'state': states.RUNNING}
        yield self.store.put_launch(l6)

        # renamed from `all`, which shadowed the builtin
        all_launches = yield self.store.get_launches()
        self.assertEqual(3, len(all_launches))
        for launch in all_launches:
            self.assertTrue(launch['launch_id'] in
                            (launch_id_1, launch_id_2, launch_id_3))

        # try some range queries
        requested = yield
self.store.get_launches(state=states.REQUESTED) self.assertEqual(1, len(requested)) self.assertEqual(launch_id_2, requested[0]['launch_id']) requested = yield self.store.get_launches(min_state=states.REQUESTED, max_state=states.REQUESTED) self.assertEqual(1, len(requested)) self.assertEqual(launch_id_2, requested[0]['launch_id']) at_least_requested = yield self.store.get_launches( min_state=states.REQUESTED) self.assertEqual(3, len(at_least_requested)) for l in at_least_requested: self.assertTrue(l['launch_id'] in (launch_id_1, launch_id_2, launch_id_3)) at_least_pending = yield self.store.get_launches( min_state=states.PENDING) self.assertEqual(2, len(at_least_pending)) for l in at_least_pending: self.assertTrue(l['launch_id'] in (launch_id_1, launch_id_3)) at_most_pending = yield self.store.get_launches( max_state=states.PENDING) self.assertEqual(2, len(at_most_pending)) for l in at_most_pending: self.assertTrue(l['launch_id'] in (launch_id_1, launch_id_2)) @defer.inlineCallbacks def put_node(self, node_id, *states): for state in states: record = {'node_id': node_id, 'state': state} yield self.store.put_node(record) @defer.inlineCallbacks def put_many_nodes(self, count, *states): node_ids = set(str(uuid.uuid4()) for i in range(count)) for node_id in node_ids: yield self.put_node(node_id, *states) defer.returnValue(node_ids) def assertNodesInSet(self, nodes, *sets): node_ids = set(node["node_id"] for node in nodes) self.assertEqual(len(nodes), len(node_ids)) for node_id in node_ids: found = False for aset in sets: if node_id in aset: found = True break if not found: self.fail("node %s not in any set" % node_id)