def _check_retries(self, get_pairs_data):
    """
    After a 409 whose errors cover only some pairs, bulk_delete retries
    with the remaining pairs and returns the successful response.
    """
    # Errors for the pairs that must be dropped before the retry.
    error_body = {
        "errors": [
            server_not_member(self.lbs[0].upper(), self.nodes[0]),
            "Cloud Server {} does not exist".format(self.nodes[1]),
            "Load Balancer Pool {} does not exist".format(
                self.lbs[2].upper())
        ]
    }
    retry_lb1 = "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2"
    retry_node1 = "a95ae0c4-6ab8-4873-b82f-f8433840cff2"
    retry_lb2 = "e6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2"
    retry_node2 = "e95ae0c4-6ab8-4873-b82f-f8433840cff2"
    pairs, data = get_pairs_data(
        retry_lb1, retry_node1, retry_lb2, retry_node2)
    retried_data = r._sorted_data(
        [(retry_lb1, retry_node1), (retry_lb2, retry_node2)])
    success_resp = {"good": "response"}
    seq = [
        (self.svc_req_intent(data),
         const(stub_json_response(error_body, 409))),
        (log_intent("request-rcv3-bulk", error_body,
                    req_body=("jsonified", data)), noop),
        (self.svc_req_intent(retried_data),
         const(stub_json_response(success_resp, 204))),
        (log_intent("request-rcv3-bulk", success_resp,
                    req_body=("jsonified", retried_data)), noop)
    ]
    self.assertEqual(
        perform_sequence(seq, r.bulk_delete(pairs)), success_resp)
def _check_retries(self, get_pairs_data):
    """
    bulk_delete retries only the pairs left over after ignorable 409
    errors and returns the response of the retried request.
    """
    errs = {"errors": [
        server_not_member(self.lbs[0].upper(), self.nodes[0]),
        "Cloud Server {} does not exist".format(self.nodes[1]),
        "Load Balancer Pool {} does not exist".format(
            self.lbs[2].upper()),
    ]}
    lbr1 = "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2"
    noder1 = "a95ae0c4-6ab8-4873-b82f-f8433840cff2"
    lbr2 = "e6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2"
    noder2 = "e95ae0c4-6ab8-4873-b82f-f8433840cff2"
    pairs, data = get_pairs_data(lbr1, noder1, lbr2, noder2)
    retried_data = r._sorted_data([(lbr1, noder1), (lbr2, noder2)])
    good = {"good": "response"}
    # Build the sequence as named steps for readability.
    first_req = (self.svc_req_intent(data),
                 const(stub_json_response(errs, 409)))
    first_log = (log_intent("request-rcv3-bulk", errs,
                            req_body=("jsonified", data)), noop)
    retry_req = (self.svc_req_intent(retried_data),
                 const(stub_json_response(good, 204)))
    retry_log = (log_intent("request-rcv3-bulk", good,
                            req_body=("jsonified", retried_data)), noop)
    result = perform_sequence(
        [first_req, first_log, retry_req, retry_log],
        r.bulk_delete(pairs))
    self.assertEqual(result, good)
def _check_retries(self, pairs, data, retried_data, errors):
    """
    After ``errors`` come back with a 409, bulk_add is retried with
    ``retried_data`` and the final response is returned.
    """
    final_resp = {"response": "yo"}
    seq = [
        (self.svc_req_intent(data),
         const(stub_json_response(errors, 409))),
        (log_intent("request-rcv3-bulk", errors,
                    req_body=("jsonified", data)), noop),
        (self.svc_req_intent(retried_data),
         const(stub_json_response(final_resp, 201))),
        (log_intent("request-rcv3-bulk", final_resp,
                    req_body=("jsonified", retried_data)), noop),
    ]
    self.assertEqual(perform_sequence(seq, r.bulk_add(pairs)), final_resp)
def test_all_retries(self):
    """
    When every attempted pair is reported as "server not a member",
    deleted server or deleted LB, bulk_delete does not retry and
    returns None.
    """
    errs = {"errors": [
        server_not_member(self.lbs[0].upper(), self.nodes[0]),
        "Cloud Server {} does not exist".format(self.nodes[1]),
        "Load Balancer Pool {} does not exist".format(
            self.lbs[2].upper()),
    ]}
    # Add a same-server pair and a same-lb pair to the base set.
    extra = pset([(self.lbs[0], self.nodes[1]),
                  (self.lbs[2], self.nodes[0])])
    all_pairs = self.pairs | extra
    data = r._sorted_data(all_pairs)
    seq = [
        (self.svc_req_intent(data),
         const(stub_json_response(errs, 409))),
        (log_intent("request-rcv3-bulk", errs,
                    req_body=("jsonified", data)), noop),
    ]
    self.assertIsNone(perform_sequence(seq, r.bulk_delete(all_pairs)))
def test_multiple_errors(self):
    """
    A 409 from bulk add carrying several known errors is collected and
    raised as a single `BulkErrors`.
    """
    error_body = {"errors": [
        lb_inactive(self.lbs[0]),
        "Load Balancer Pool {} does not exist".format(self.lbs[1]),
        "Cloud Server {} is unprocessable".format(self.nodes[2]),
    ]}
    seq = [
        (self.svc_req_intent(self.data),
         const(stub_json_response(error_body, 409))),
        (log_intent("request-rcv3-bulk", error_body,
                    req_body=("jsonified", self.data)), noop),
    ]
    with self.assertRaises(r.BulkErrors) as ec:
        perform_sequence(seq, r.bulk_add(self.pairs))
    expected = pset([r.LBInactive(self.lbs[0]),
                     r.NoSuchLBError(self.lbs[1]),
                     r.ServerUnprocessableError(self.nodes[2])])
    self.assertEqual(ec.exception.errors, expected)
def test_all_retries(self):
    """
    If bulk_delete returns "server not a member", lb or server deleted
    for all attempted pairs then there is no retry and None is returned.
    """
    error_body = {
        "errors": [
            server_not_member(self.lbs[0].upper(), self.nodes[0]),
            "Cloud Server {} does not exist".format(self.nodes[1]),
            "Load Balancer Pool {} does not exist".format(
                self.lbs[2].upper())
        ]
    }
    # Exercise a pair sharing a server and a pair sharing an LB.
    attempted = self.pairs | pset([
        (self.lbs[0], self.nodes[1]),
        (self.lbs[2], self.nodes[0]),
    ])
    payload = r._sorted_data(attempted)
    steps = [
        (self.svc_req_intent(payload),
         const(stub_json_response(error_body, 409))),
        (log_intent("request-rcv3-bulk", error_body,
                    req_body=("jsonified", payload)), noop),
    ]
    self.assertIsNone(perform_sequence(steps, r.bulk_delete(attempted)))
def test_multiple_errors(self):
    """
    If bulk add returns 409 then multiple errors returned are collected
    and raised as a single `BulkErrors`.
    """
    errs = {
        "errors": [
            lb_inactive(self.lbs[0]),
            "Load Balancer Pool {} does not exist".format(self.lbs[1]),
            "Cloud Server {} is unprocessable".format(self.nodes[2])
        ]
    }
    steps = [
        (self.svc_req_intent(self.data),
         const(stub_json_response(errs, 409))),
        (log_intent("request-rcv3-bulk", errs,
                    req_body=("jsonified", self.data)), noop),
    ]
    with self.assertRaises(r.BulkErrors) as ec:
        perform_sequence(steps, r.bulk_add(self.pairs))
    self.assertEqual(
        ec.exception.errors,
        pset([r.LBInactive(self.lbs[0]),
              r.NoSuchLBError(self.lbs[1]),
              r.ServerUnprocessableError(self.nodes[2])]))
def _check_retries(self, pairs, data, retried_data, errors):
    """
    Verify bulk_add retries with ``retried_data`` after ``errors`` and
    returns the retried response.
    """
    resp = {"response": "yo"}
    # First request fails with 409; the retry succeeds with 201.
    failing = (self.svc_req_intent(data),
               const(stub_json_response(errors, 409)))
    fail_log = (log_intent("request-rcv3-bulk", errors,
                           req_body=("jsonified", data)), noop)
    retrying = (self.svc_req_intent(retried_data),
                const(stub_json_response(resp, 201)))
    retry_log = (log_intent("request-rcv3-bulk", resp,
                            req_body=("jsonified", retried_data)), noop)
    result = perform_sequence(
        [failing, fail_log, retrying, retry_log], r.bulk_add(pairs))
    self.assertEqual(result, resp)
def test_get_clbs(self):
    """Returns all the load balancer details from the LBs endpoint."""
    body = {'loadBalancers': 'lbs!'}
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers')
    seq = [
        (expected.intent, lambda _: stub_json_response(body)),
        (log_intent('request-list-clbs', body), lambda _: None),
    ]
    self.assertEqual(perform_sequence(seq, get_clbs()), 'lbs!')
def test_get_clb_nodes(self):
    """:func:`get_clb_nodes` returns all the nodes for a LB."""
    body = {'nodes': 'nodes!'}
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'GET',
        'loadbalancers/123456/nodes')
    seq = [
        (expected.intent, lambda _: stub_json_response(body)),
        (log_intent('request-list-clb-nodes', body), lambda _: None),
    ]
    self.assertEqual(
        perform_sequence(seq, get_clb_nodes(self.lb_id)), 'nodes!')
def test_add_clb_nodes(self):
    """
    Produce a request for adding nodes to a load balancer, which returns
    a successful result on a 202.

    Parse the common CLB errors, and a :class:`CLBDuplicateNodesError`.
    """
    nodes = [
        {"address": "1.1.1.1", "port": 80, "condition": "ENABLED"},
        {"address": "1.1.1.2", "port": 80, "condition": "ENABLED"},
        {"address": "1.1.1.5", "port": 81, "condition": "ENABLED"},
    ]
    eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes)
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'POST',
        'loadbalancers/{0}/nodes'.format(self.lb_id),
        data={'nodes': nodes},
        success_pred=has_code(202))

    # Success path: a 202 with an empty JSON body.
    seq = [(expected.intent, lambda i: stub_json_response({}, 202, {})),
           (log_intent('request-add-clb-nodes', {}), lambda _: None)]
    self.assertEqual(perform_sequence(seq, eff),
                     (StubResponse(202, {}), {}))

    # A 422 with the duplicate-nodes message maps to
    # CLBDuplicateNodesError.
    msg = ("Duplicate nodes detected. One or more nodes already "
           "configured on load balancer.")
    dup_resp = stub_pure_response(
        json.dumps({'message': msg, 'code': 422}), 422)
    dispatcher = EQFDispatcher(
        [(expected.intent, service_request_eqf(dup_resp))])
    with self.assertRaises(CLBDuplicateNodesError) as cm:
        sync_perform(dispatcher, eff)
    self.assertEqual(
        cm.exception,
        CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id)))

    # A 413 with the node-limit message maps to CLBNodeLimitError.
    msg = "Nodes must not exceed 25 per load balancer."
    limit_resp = stub_pure_response(
        json.dumps({'message': msg, 'code': 413}), 413)
    dispatcher = EQFDispatcher(
        [(expected.intent, service_request_eqf(limit_resp))])
    with self.assertRaises(CLBNodeLimitError) as cm:
        sync_perform(dispatcher, eff)
    self.assertEqual(
        cm.exception,
        CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id),
                          node_limit=25))

    # all the common failures
    assert_parses_common_clb_errors(self, expected.intent, eff, "123456")
def test_get_clbs(self):
    """Returns all the load balancer details from the LBs endpoint."""
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers')
    req = get_clbs()
    response_body = {'loadBalancers': 'lbs!'}
    steps = [
        (expected.intent, lambda i: stub_json_response(response_body)),
        (log_intent('request-list-clbs', response_body),
         lambda _: None),
    ]
    result = perform_sequence(steps, req)
    self.assertEqual(result, 'lbs!')
def test_get_clb_nodes(self):
    """:func:`get_clb_nodes` returns all the nodes for a LB."""
    req = get_clb_nodes(self.lb_id)
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'GET',
        'loadbalancers/123456/nodes')
    response_body = {'nodes': 'nodes!'}
    steps = [
        (expected.intent, lambda i: stub_json_response(response_body)),
        (log_intent('request-list-clb-nodes', response_body),
         lambda _: None),
    ]
    result = perform_sequence(steps, req)
    self.assertEqual(result, 'nodes!')
def test_single_page(self, rel):
    """
    Collects entries and goes to next link if there are entries and
    returns if next one is empty.
    """
    first_feed = self.feed(rel, "https://url?page=2",
                           ["summary1", "summ2"])
    empty_feed = self.feed(rel, "link", [])
    seq = [
        (self.svc_intent({"a": "b"}),
         const(stub_json_response(first_feed))),
        (self.svc_intent({"page": ['2']}),
         const(stub_json_response(empty_feed))),
    ]
    entries, params = perform_sequence(
        seq,
        cf.read_entries(self.service_type, self.url, {"a": "b"},
                        self.directions[rel]))
    self.assertEqual([atom.summary(e) for e in entries],
                     ["summary1", "summ2"])
    self.assertEqual(params, {"page": ["2"]})
def test_success(self):
    """
    Bulk add resulting in 201 returns the parsed response body.

    (The old docstring claimed None was returned, which contradicted the
    assertion below.)
    """
    resp = {"resp": "yo"}
    seq = [
        (self.svc_req_intent(self.data),
         const(stub_json_response(resp, 201))),
        (log_intent("request-rcv3-bulk", resp,
                    req_body=("jsonified", self.data)), noop),
    ]
    # A 201 means every pair was added; bulk_add yields the JSON body.
    self.assertEqual(perform_sequence(seq, r.bulk_add(self.pairs)), resp)
def test_empty(self, rel):
    """
    Does not go further when there are no entries and returns ([], {}).
    """
    empty_feed = self.feed(rel, "link-doesnt-matter", [])
    steps = [(self.svc_intent(), const(stub_json_response(empty_feed)))]
    eff = cf.read_entries(
        self.service_type, self.url, {}, self.directions[rel])
    self.assertEqual(perform_sequence(steps, eff), ([], {}))
def test_success(self):
    """
    bulk add resulting in 201 returns the response body.

    Note: corrected from "returns Effect of None" — the assertion below
    checks that the JSON response is returned.
    """
    resp = {"resp": "yo"}
    request_step = (self.svc_req_intent(self.data),
                    const(stub_json_response(resp, 201)))
    log_step = (log_intent("request-rcv3-bulk", resp,
                           req_body=("jsonified", self.data)), noop)
    self.assertEqual(
        perform_sequence([request_step, log_step],
                         r.bulk_add(self.pairs)),
        resp)
def test_empty_errors(self):
    """
    If bulk_delete returns 409 with empty errors then
    `UnknownBulkResponse` is raised.
    """
    no_errors = {"errors": []}
    steps = [
        (self.svc_req_intent(self.data),
         const(stub_json_response(no_errors, 409))),
        (log_intent("request-rcv3-bulk", no_errors,
                    req_body=("jsonified", self.data)), noop),
    ]
    with self.assertRaises(r.UnknownBulkResponse):
        perform_sequence(steps, r.bulk_delete(self.pairs))
def test_log_responses(self, rel):
    """
    Each request sent is logged if `log_msg_type` is given, and the
    entries/params are still collected as usual.
    """
    feed1_str = self.feed(rel, "https://url?page=2", ["summ1", "summ2"])
    feed2_str = self.feed(rel, "https://url?page=3", ["summ3", "summ4"])
    feed3_str = self.feed(rel, "link", [])
    seq = [
        (self.svc_intent(), const(stub_json_response(feed1_str))),
        (log_intent("nodemsg", feed1_str, False), noop),
        (self.svc_intent({"page": ['2']}),
         const(stub_json_response(feed2_str))),
        (log_intent("nodemsg", feed2_str, False), noop),
        (self.svc_intent({"page": ['3']}),
         const(stub_json_response(feed3_str))),
        (log_intent("nodemsg", feed3_str, False), noop)
    ]
    entries, params = perform_sequence(
        seq,
        cf.read_entries(
            self.service_type, self.url, {}, self.directions[rel],
            log_msg_type="nodemsg"))
    # Previously the collected results were silently discarded; verify
    # them too, mirroring test_multiple_pages which uses the same feeds.
    self.assertEqual(
        [atom.summary(entry) for entry in entries],
        ["summ1", "summ2", "summ3", "summ4"])
    self.assertEqual(params, {"page": ["3"]})
def test_unknown_errors(self):
    """
    If any of the errors returned with 409 are unknown then
    `UnknownBulkResponse` is raised.
    """
    mixed_errors = {"errors": ["unknown error", lb_inactive(self.lbs[0])]}
    steps = [
        (self.svc_req_intent(self.data),
         const(stub_json_response(mixed_errors, 409))),
        (log_intent("request-rcv3-bulk", mixed_errors,
                    req_body=("jsonified", self.data)), noop),
    ]
    with self.assertRaises(r.UnknownBulkResponse):
        perform_sequence(steps, r.bulk_delete(self.pairs))
def test_multiple_pages(self, rel):
    """
    Collects entries and follows the next link until a page with no
    entries is returned.
    """
    page1 = self.feed(rel, "https://url?page=2", ["summ1", "summ2"])
    page2 = self.feed(rel, "https://url?page=3", ["summ3", "summ4"])
    page3 = self.feed(rel, "link", [])
    steps = [
        (self.svc_intent(), const(stub_json_response(page1))),
        (self.svc_intent({"page": ['2']}),
         const(stub_json_response(page2))),
        (self.svc_intent({"page": ['3']}),
         const(stub_json_response(page3))),
    ]
    entries, params = perform_sequence(
        steps,
        cf.read_entries(self.service_type, self.url, {},
                        self.directions[rel]))
    self.assertEqual([atom.summary(e) for e in entries],
                     ["summ1", "summ2", "summ3", "summ4"])
    self.assertEqual(params, {"page": ["3"]})
def test_follow_limit(self, rel):
    """
    Collects entries and keeps following the rel link until
    `follow_limit` is reached.
    """
    # Five one-entry pages; only the first three should be fetched.
    pages = [self.feed(rel, "https://url?page={}".format(i + 1),
                       ["summ{}".format(i + 1)])
             for i in range(5)]
    steps = [
        (self.svc_intent(), const(stub_json_response(pages[0]))),
        (self.svc_intent({"page": ['1']}),
         const(stub_json_response(pages[1]))),
        (self.svc_intent({"page": ['2']}),
         const(stub_json_response(pages[2]))),
    ]
    entries, params = perform_sequence(
        steps,
        cf.read_entries(self.service_type, self.url, {},
                        self.directions[rel], 3))
    self.assertEqual([atom.summary(e) for e in entries],
                     ["summ1", "summ2", "summ3"])
    self.assertEqual(params, {"page": ["3"]})
def test_all_already_member(self):
    """
    If bulk_add returns 409 with all attempted pairs as "lb node already
    member" then it will return None.
    """
    already = {
        "errors": [node_already_member(lb.upper(), node)
                   for lb, node in self.pairs]
    }
    steps = [
        (self.svc_req_intent(self.data),
         const(stub_json_response(already, 409))),
        (log_intent("request-rcv3-bulk", already,
                    req_body=("jsonified", self.data)), noop),
    ]
    self.assertIsNone(perform_sequence(steps, r.bulk_add(self.pairs)))
def test_no_link(self, rel):
    """
    Returns entries collected till now if there is no rel link.
    """
    # Minimal atom feed with one entry and no pagination link.
    linkless_feed = (
        '<feed xmlns="http://www.w3.org/2005/Atom">'
        '<entry><summary>summary</summary></entry></feed>')
    steps = [
        (self.svc_intent({"a": "b"}),
         const(stub_json_response(linkless_feed))),
    ]
    entries, params = perform_sequence(
        steps,
        cf.read_entries(self.service_type, self.url, {"a": "b"},
                        self.directions[rel]))
    self.assertEqual(atom.summary(entries[0]), "summary")
    self.assertEqual(params, {"a": "b"})
def test_empty_errors(self):
    """
    If bulk_delete returns 409 with empty errors then
    `UnknownBulkResponse` is raised.
    """
    empty = {"errors": []}
    request_step = (self.svc_req_intent(self.data),
                    const(stub_json_response(empty, 409)))
    log_step = (log_intent("request-rcv3-bulk", empty,
                           req_body=("jsonified", self.data)), noop)
    self.assertRaises(
        r.UnknownBulkResponse, perform_sequence,
        [request_step, log_step], r.bulk_delete(self.pairs))
def test_get_clb_health_mon(self):
    """
    :func:`get_clb_health_monitor` calls
    ``GET .../loadbalancers/lb_id/healthmonitor`` and returns the
    setting found under the "healthMonitor" key.
    """
    settings = {
        "type": "CONNECT",
        "delay": 10,
        "timeout": 10,
        "attemptsBeforeDeactivation": 3
    }
    body = {"healthMonitor": settings}
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'GET',
        'loadbalancers/123456/healthmonitor')
    steps = [
        (expected.intent, const(stub_json_response(body))),
        (log_intent('request-get-clb-healthmon', body), noop),
    ]
    self.assertEqual(
        perform_sequence(steps, get_clb_health_monitor(self.lb_id)),
        settings)
def test_all_already_member(self):
    """
    If bulk_add returns 409 with all attempted pairs as "lb node already
    member" then it will return None.
    """
    membership_errors = {
        "errors": [
            node_already_member(lb.upper(), node)
            for lb, node in self.pairs
        ]
    }
    request_step = (self.svc_req_intent(self.data),
                    const(stub_json_response(membership_errors, 409)))
    log_step = (log_intent("request-rcv3-bulk", membership_errors,
                           req_body=("jsonified", self.data)), noop)
    result = perform_sequence([request_step, log_step],
                              r.bulk_add(self.pairs))
    self.assertIsNone(result)
def test_bulk_and_retry_error(self):
    """
    If bulk adding returns "LB node already member" error along with
    other errors then there is no retry and BulkErrors is raised.
    """
    mixed = {"errors": [
        node_already_member(self.lbs[0].upper(), self.nodes[0]),
        lb_inactive(self.lbs[1]),
    ]}
    steps = [
        (self.svc_req_intent(self.data),
         const(stub_json_response(mixed, 409))),
        (log_intent("request-rcv3-bulk", mixed,
                    req_body=("jsonified", self.data)), noop),
    ]
    with self.assertRaises(r.BulkErrors) as ec:
        perform_sequence(steps, r.bulk_add(self.pairs))
    # Only the real error survives; the "already member" one is dropped.
    self.assertEqual(ec.exception.errors,
                     pset([r.LBInactive(self.lbs[1])]))
def test_lb_inactive(self):
    """
    If bulk_delete returns 409 with only LB inactive errors then it
    raises `BulkErrors` with LBInActive errors in it.
    """
    inactive = {
        "errors": [lb_inactive(self.lbs[0]), lb_inactive(self.lbs[1])]
    }
    steps = [
        (self.svc_req_intent(self.data),
         const(stub_json_response(inactive, 409))),
        (log_intent("request-rcv3-bulk", inactive,
                    req_body=("jsonified", self.data)), noop),
    ]
    with self.assertRaises(r.BulkErrors) as ec:
        perform_sequence(steps, r.bulk_delete(self.pairs))
    expected = pset([r.LBInactive(self.lbs[0]),
                     r.LBInactive(self.lbs[1])])
    self.assertEqual(ec.exception.errors, expected)
def test_unknown_errors(self):
    """
    If any of the errors returned with 409 are unknown then
    `UnknownBulkResponse` is raised.
    """
    errs = {
        "errors": [
            "unknown error",
            lb_inactive(self.lbs[0])
        ]
    }
    request_step = (self.svc_req_intent(self.data),
                    const(stub_json_response(errs, 409)))
    log_step = (log_intent("request-rcv3-bulk", errs,
                           req_body=("jsonified", self.data)), noop)
    self.assertRaises(
        r.UnknownBulkResponse, perform_sequence,
        [request_step, log_step], r.bulk_delete(self.pairs))
def test_bulk_and_retry_error(self):
    """
    If bulk adding returns "LB node already member" error along with
    other errors then there is no retry and BulkErrors is raised.
    """
    error_body = {
        "errors": [
            node_already_member(self.lbs[0].upper(), self.nodes[0]),
            lb_inactive(self.lbs[1])
        ]
    }
    request_step = (self.svc_req_intent(self.data),
                    const(stub_json_response(error_body, 409)))
    log_step = (log_intent("request-rcv3-bulk", error_body,
                           req_body=("jsonified", self.data)), noop)
    with self.assertRaises(r.BulkErrors) as ec:
        perform_sequence([request_step, log_step],
                         r.bulk_add(self.pairs))
    self.assertEqual(ec.exception.errors,
                     pset([r.LBInactive(self.lbs[1])]))
def test_get_clb_health_mon(self):
    """
    :func:`get_clb_health_monitor` calls
    ``GET .../loadbalancers/lb_id/healthmonitor`` and returns setting
    inside {"healthMonitor": ...}.
    """
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'GET',
        'loadbalancers/123456/healthmonitor')
    monitor = {
        "type": "CONNECT",
        "delay": 10,
        "timeout": 10,
        "attemptsBeforeDeactivation": 3
    }
    response_body = {"healthMonitor": monitor}
    steps = [
        (expected.intent, const(stub_json_response(response_body))),
        (log_intent('request-get-clb-healthmon', response_body), noop),
    ]
    result = perform_sequence(steps, get_clb_health_monitor(self.lb_id))
    self.assertEqual(result, monitor)
def test_lb_inactive(self):
    """
    If bulk_delete returns 409 with only LB inactive errors then it
    raises `BulkErrors` with LBInActive errors in it.
    """
    error_body = {
        "errors": [
            lb_inactive(self.lbs[0]),
            lb_inactive(self.lbs[1])
        ]
    }
    request_step = (self.svc_req_intent(self.data),
                    const(stub_json_response(error_body, 409)))
    log_step = (log_intent("request-rcv3-bulk", error_body,
                           req_body=("jsonified", self.data)), noop)
    with self.assertRaises(r.BulkErrors) as ec:
        perform_sequence([request_step, log_step],
                         r.bulk_delete(self.pairs))
    self.assertEqual(
        ec.exception.errors,
        pset([r.LBInactive(self.lbs[0]), r.LBInactive(self.lbs[1])]))
def test_add_clb_nodes(self):
    """
    Produce a request for adding nodes to a load balancer, which returns
    a successful result on a 202.

    Parse the common CLB errors, and a :class:`CLBDuplicateNodesError`.
    """
    nodes = [
        {"address": "1.1.1.1", "port": 80, "condition": "ENABLED"},
        {"address": "1.1.1.2", "port": 80, "condition": "ENABLED"},
        {"address": "1.1.1.5", "port": 81, "condition": "ENABLED"},
    ]
    eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes)
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'POST',
        'loadbalancers/{0}/nodes'.format(self.lb_id),
        data={'nodes': nodes}, success_pred=has_code(202))

    # success
    success_seq = [
        (expected.intent, lambda i: stub_json_response({}, 202, {})),
        (log_intent('request-add-clb-nodes', {}), lambda _: None),
    ]
    self.assertEqual(perform_sequence(success_seq, eff),
                     (StubResponse(202, {}), {}))

    # CLBDuplicateNodesError failure
    msg = ("Duplicate nodes detected. One or more nodes already "
           "configured on load balancer.")
    duplicate_nodes = stub_pure_response(
        json.dumps({'message': msg, 'code': 422}), 422)
    dispatcher = EQFDispatcher(
        [(expected.intent, service_request_eqf(duplicate_nodes))])
    with self.assertRaises(CLBDuplicateNodesError) as cm:
        sync_perform(dispatcher, eff)
    self.assertEqual(
        cm.exception,
        CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id)))

    # CLBNodeLimitError failure
    msg = "Nodes must not exceed 25 per load balancer."
    limit = stub_pure_response(
        json.dumps({'message': msg, 'code': 413}), 413)
    dispatcher = EQFDispatcher(
        [(expected.intent, service_request_eqf(limit))])
    with self.assertRaises(CLBNodeLimitError) as cm:
        sync_perform(dispatcher, eff)
    self.assertEqual(
        cm.exception,
        CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id),
                          node_limit=25))

    # all the common failures
    assert_parses_common_clb_errors(self, expected.intent, eff, "123456")