def test_returns_flat_list_of_rcv3nodes(self):
    """
    All the nodes returned are in a flat list.
    """
    dispatcher = self.get_dispatcher([
        # Listing the pools yields two pools, "0" and "1".
        (service_request(ServiceType.RACKCONNECT_V3, 'GET',
                         'load_balancer_pools').intent,
         (None, [{'id': str(i)} for i in range(2)])),
        # Each pool has two nodes, each attached to a distinct server.
        (service_request(ServiceType.RACKCONNECT_V3, 'GET',
                         'load_balancer_pools/0/nodes').intent,
         (None, [{'id': "0node{0}".format(i),
                  'cloud_server': {'id': '0server{0}'.format(i)}}
                 for i in range(2)])),
        (service_request(ServiceType.RACKCONNECT_V3, 'GET',
                         'load_balancer_pools/1/nodes').intent,
         (None, [{'id': "1node{0}".format(i),
                  'cloud_server': {'id': '1server{0}'.format(i)}}
                 for i in range(2)])),
    ])
    self.assertEqual(
        sorted(sync_perform(dispatcher, get_rcv3_contents())),
        sorted([
            RCv3Node(node_id='0node0', cloud_server_id='0server0',
                     description=RCv3Description(lb_id='0')),
            RCv3Node(node_id='0node1', cloud_server_id='0server1',
                     description=RCv3Description(lb_id='0')),
            RCv3Node(node_id='1node0', cloud_server_id='1server0',
                     description=RCv3Description(lb_id='1')),
            RCv3Node(node_id='1node1', cloud_server_id='1server1',
                     description=RCv3Description(lb_id='1'))]))
def test_change_clb_node_default_type(self):
    """
    Produce a request for modifying a node on a load balancer with the
    default type, which returns a successful result on 202.
    """
    eff = change_clb_node(lb_id=self.lb_id, node_id='1234',
                          condition="DRAINING", weight=50)
    # Omitting the type argument must default to 'PRIMARY' in the body.
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'PUT',
        'loadbalancers/{0}/nodes/1234'.format(self.lb_id),
        data={'node': {'condition': 'DRAINING',
                       'weight': 50,
                       'type': 'PRIMARY'}},
        success_pred=has_code(202))
    dispatcher = EQFDispatcher([(
        expected.intent,
        service_request_eqf(stub_pure_response('', 202)))])
    self.assertEqual(sync_perform(dispatcher, eff),
                     stub_pure_response(None, 202))
def _perform_add_event(self, response_sequence):
    """
    Given a sequence of functions that take an intent and returns a
    response (or raises an exception), perform :func:`add_event` and
    return the result.
    """
    log = object()
    eff = add_event(self.event, 'tid', 'ord', log)
    uid = '00000000-0000-0000-0000-000000000000'
    svrq = service_request(
        ServiceType.CLOUD_FEEDS, 'POST', 'autoscale/events',
        headers={'content-type': ['application/vnd.rackspace.atom+json']},
        data=self._get_request('INFO', uid, 'tid'), log=log,
        success_pred=has_code(201), json_response=False)
    # The request runs inside a tenant scope and is wrapped in a retry.
    seq = [
        (TenantScope(mock.ANY, 'tid'), nested_sequence([
            retry_sequence(
                Retry(effect=svrq,
                      should_retry=ShouldDelayAndRetry(
                          can_retry=mock.ANY,
                          next_interval=exponential_backoff_interval(2))),
                response_sequence)
        ]))
    ]
    return perform_sequence(seq, eff)
def _perform_add_event(self, response_sequence):
    """
    Given a sequence of functions that take an intent and returns a
    response (or raises an exception), perform :func:`add_event` and
    return the result.
    """
    log = object()
    eff = add_event(self.event, 'tid', 'ord', log)
    uid = '00000000-0000-0000-0000-000000000000'
    svrq = service_request(
        ServiceType.CLOUD_FEEDS, 'POST', 'autoscale/events',
        headers={
            'content-type': ['application/vnd.rackspace.atom+json']},
        data=self._get_request('INFO', uid, 'tid'), log=log,
        success_pred=has_code(201), json_response=False)
    # Retried, exponentially-backed-off request inside a tenant scope.
    retried_request = Retry(
        effect=svrq,
        should_retry=ShouldDelayAndRetry(
            can_retry=mock.ANY,
            next_interval=exponential_backoff_interval(2)))
    seq = [
        (TenantScope(mock.ANY, 'tid'), nested_sequence([
            retry_sequence(retried_request, response_sequence)
        ]))
    ]
    return perform_sequence(seq, eff)
def test_params(self):
    """Params are passed through."""
    svcreq = service_request(
        ServiceType.CLOUD_SERVERS, "GET", "servers",
        params={"foo": ["bar"]}).intent
    eff = self._concrete(svcreq)
    pure_request_eff = resolve_authenticate(eff)
    # The concrete HTTP request must carry the same query params.
    self.assertEqual(pure_request_eff.intent.params, {"foo": ["bar"]})
def delete_and_verify(server_id): """ Check the status of the server to see if it's actually been deleted. Succeeds only if it has been either deleted (404) or acknowledged by Nova to be deleted (task_state = "deleted"). Note that ``task_state`` is in the server details key ``OS-EXT-STS:task_state``, which is supported by Openstack but available only when looking at the extended status of a server. """ def check_task_state((resp, server_blob)): if resp.code == 404: return server_details = server_blob['server'] is_deleting = server_details.get("OS-EXT-STS:task_state", "") if is_deleting.strip().lower() != "deleting": raise UnexpectedServerStatus(server_id, is_deleting, "deleting") def verify((_type, error, traceback)): if error.code != 204: raise _type, error, traceback ver_eff = service_request( ServiceType.CLOUD_SERVERS, 'GET', append_segments('servers', server_id), success_pred=has_code(200, 404)) return ver_eff.on(check_task_state) return service_request( ServiceType.CLOUD_SERVERS, 'DELETE', append_segments('servers', server_id), success_pred=has_code(404)).on(error=catch(APIError, verify))
def _delete_stack_intent(self, stack_name, stack_id):
    """Expected intent for a stack DELETE (204, no JSON body)."""
    return service_request(
        ServiceType.CLOUD_ORCHESTRATION, 'DELETE',
        'stacks/{0}/{1}'.format(stack_name, stack_id),
        success_pred=has_code(204),
        reauth_codes=(401,),
        json_response=False).intent
def get_rcv3_contents():
    """
    Get Rackspace Cloud Load Balancer contents as list of `RCv3Node`.
    """
    pools_eff = service_request(ServiceType.RACKCONNECT_V3, 'GET',
                                'load_balancer_pools')

    def on_listing_nodes(rcv3_description, lbnodes_result):
        _, body = lbnodes_result
        return [RCv3Node(node_id=node['id'],
                         description=rcv3_description,
                         cloud_server_id=get_in(('cloud_server', 'id'),
                                                node))
                for node in body]

    def on_listing_pools(lblist_result):
        _, body = lblist_result
        # Fan out one nodes-listing request per pool, in parallel.
        return parallel([
            service_request(
                ServiceType.RACKCONNECT_V3, 'GET',
                append_segments('load_balancer_pools',
                                lb_pool['id'], 'nodes')).on(
                partial(on_listing_nodes,
                        RCv3Description(lb_id=lb_pool['id'])))
            for lb_pool in body])

    # Flatten the per-pool node lists; a missing RCv3 endpoint means no
    # contents at all.
    return pools_eff.on(on_listing_pools).on(
        success=compose(list, concat),
        error=catch(NoSuchEndpoint, lambda _: []))
def test_filters_on_user_criteria(self):
    """
    Considers user provided filter if provided
    """
    as_servers = (
        [{'metadata': {'rax:auto_scaling_group_id': 'a'}, 'id': i}
         for i in range(5)] +
        [{'metadata': {'rax:auto_scaling_group_id': 'b'}, 'id': i}
         for i in range(5, 8)])
    servers = as_servers + [{'metadata': 'junk'}] * 3
    eff = get_all_scaling_group_servers(
        server_predicate=lambda s: s['id'] % 3 == 0)
    body = {'servers': servers}
    sequence = [
        (service_request(*self.req).intent,
         lambda i: (StubResponse(200, None), body)),
        (Log(mock.ANY, mock.ANY), lambda i: None)]
    result = perform_sequence(sequence, eff)
    # Only ids divisible by 3 pass the predicate: 0, 3 -> 'a'; 6 -> 'b'.
    self.assertEqual(
        result,
        {'a': [as_servers[0], as_servers[3]], 'b': [as_servers[6]]})
def verify((_type, error, traceback)): if error.code != 204: raise _type, error, traceback ver_eff = service_request( ServiceType.CLOUD_SERVERS, "GET", append_segments("servers", server_id), success_pred=has_code(200, 404) ) return ver_eff.on(check_task_state)
def setUp(self):
    """Save some common parameters."""
    self.log = object()
    self.authenticator = object()
    self.service_configs = make_service_configs()
    # Canonical "list servers" intent reused by the tests below.
    self.svcreq = service_request(
        ServiceType.CLOUD_SERVERS, 'GET', 'servers').intent
def test_default_arguments(self):
    """
    :func:`get_all_server_details` called with no arguments will use a
    default batch size of 100.
    """
    self.assertEqual(get_all_server_details().intent,
                     service_request(**svc_request_args(limit=100)).intent)
def test_returns_as_servers(self):
    """
    Returns servers with AS metadata in it grouped by scaling group ID
    """
    as_servers = (
        [{'metadata': {'rax:auto_scaling_group_id': 'a'}, 'id': i}
         for i in range(5)] +
        [{'metadata': {'rax:auto_scaling_group_id': 'b'}, 'id': i}
         for i in range(5, 8)] +
        [{'metadata': {'rax:auto_scaling_group_id': 'a'}, 'id': 10}])
    servers = as_servers + [{'metadata': 'junk'}] * 3
    eff = get_all_scaling_group_servers()
    body = {'servers': servers}
    sequence = [
        (service_request(*self.req).intent,
         lambda i: (StubResponse(200, None), body)),
        (Log(mock.ANY, mock.ANY), lambda i: None)]
    result = perform_sequence(sequence, eff)
    # Servers without proper AS metadata are dropped; the rest are
    # grouped by their scaling group id.
    self.assertEqual(
        result,
        {'a': as_servers[:5] + [as_servers[-1]],
         'b': as_servers[5:8]})
def _create_stack_intent(self, data):
    """Expected intent for a stack-create POST (201)."""
    return service_request(
        ServiceType.CLOUD_ORCHESTRATION, 'POST', 'stacks',
        data=data,
        success_pred=has_code(201),
        reauth_codes=(401,)).intent
def _list_stacks_all_intent(self, params):
    """Return the expected intent for listing stacks given parameters."""
    return service_request(
        ServiceType.CLOUD_ORCHESTRATION, 'GET', 'stacks',
        reauth_codes=(401,),
        params=params).intent
def lb_req(url, json_response, response):
    """
    Return a SequenceDispatcher two-tuple that matches a service request
    to a particular load balancer endpoint (using GET), and returns the
    given ``response`` as the content in an HTTP 200 ``StubResponse``.
    """
    if isinstance(response, Exception):
        # Error case: the handler raises and no log intent is expected.
        def handler(i):
            raise response
        log_seq = []
    else:
        # Success case: the response is logged after being returned.
        def handler(i):
            return (StubResponse(200, {}), response)
        log_seq = [(Log(mock.ANY, mock.ANY), lambda i: None)]
    return (
        Retry(
            effect=mock.ANY,
            should_retry=ShouldDelayAndRetry(
                can_retry=retry_times(5),
                next_interval=exponential_backoff_interval(2))),
        nested_sequence([
            (service_request(
                ServiceType.CLOUD_LOAD_BALANCERS, 'GET', url,
                json_response=json_response).intent,
             handler)
        ] + log_seq))
def _list_stacks_all_intent(self, params):
    """Return the expected intent for listing stacks given parameters."""
    return service_request(ServiceType.CLOUD_ORCHESTRATION,
                           'GET',
                           'stacks',
                           reauth_codes=(401,),
                           params=params).intent
def test_added(self):
    """
    total desired, pending and actual are added to cloud metrics
    """
    td, ta, tp, tt, tg = 10, 20, 3, 7, 13
    m = {'collectionTime': 100000, 'ttlInSeconds': 5 * 24 * 60 * 60}
    md = merge(m, {'metricValue': td, 'metricName': 'ord.desired'})
    ma = merge(m, {'metricValue': ta, 'metricName': 'ord.actual'})
    mp = merge(m, {'metricValue': tp, 'metricName': 'ord.pending'})
    mt = merge(m, {'metricValue': tt, 'metricName': 'ord.tenants'})
    mg = merge(m, {'metricValue': tg, 'metricName': 'ord.groups'})
    req_data = [md, ma, mp, mt, mg]
    log = object()
    seq = [
        (Func(time.time), const(100)),
        (service_request(
            ServiceType.CLOUD_METRICS_INGEST, "POST", "ingest",
            data=req_data, log=log).intent,
         noop)]
    eff = add_to_cloud_metrics(
        m['ttlInSeconds'], 'ord', td, ta, tp, tt, tg, log=log)
    self.assertIsNone(perform_sequence(seq, eff))
def test_throttling(self):
    """
    When the throttler function returns a bracketing function, it's used
    to throttle the request.
    """
    bracket = object()

    def throttler(stype, method, tid):
        # Only match the exact (service, method, tenant) triple.
        if (stype == ServiceType.CLOUD_SERVERS and method == 'get' and
                tid == 1):
            return bracket

    svcreq = service_request(
        ServiceType.CLOUD_SERVERS, 'GET', 'servers').intent
    response = stub_pure_response({}, 200)
    seq = SequenceDispatcher([
        (_Throttle(bracket=bracket, effect=mock.ANY),
         nested_sequence([
             (Authenticate(authenticator=self.authenticator,
                           tenant_id=1, log=self.log),
              lambda i: ('token', fake_service_catalog)),
             (Request(method='GET', url='http://dfw.openstack/servers',
                      headers=headers('token'), log=self.log),
              lambda i: response),
         ])),
    ])
    eff = self._concrete(svcreq, throttler=throttler)
    with seq.consume():
        self.assertEqual(sync_perform(seq, eff), (response[0], {}))
def _create_stack_intent(self, data):
    """Expected intent for a stack-create POST (201)."""
    return service_request(ServiceType.CLOUD_ORCHESTRATION,
                           'POST',
                           'stacks',
                           data=data,
                           success_pred=has_code(201),
                           reauth_codes=(401,)).intent
def test_added(self):
    """
    total desired, pending and actual are added to cloud metrics
    """
    metrics = [GroupMetrics('t1', 'g1', 3, 2, 0),
               GroupMetrics('t2', 'g1', 4, 4, 1),
               GroupMetrics('t2', 'g', 100, 20, 0)]
    m = {'collectionTime': 100000, 'ttlInSeconds': 5 * 24 * 60 * 60}
    # Region-wide totals followed by per-tenant breakdowns.
    md = merge(m, {'metricValue': 107, 'metricName': 'ord.desired'})
    ma = merge(m, {'metricValue': 26, 'metricName': 'ord.actual'})
    mp = merge(m, {'metricValue': 1, 'metricName': 'ord.pending'})
    mt = merge(m, {'metricValue': 2, 'metricName': 'ord.tenants'})
    mg = merge(m, {'metricValue': 3, 'metricName': 'ord.groups'})
    mt1d = merge(m, {'metricValue': 3, 'metricName': 'ord.t1.desired'})
    mt1a = merge(m, {'metricValue': 2, 'metricName': 'ord.t1.actual'})
    mt1p = merge(m, {'metricValue': 0, 'metricName': 'ord.t1.pending'})
    mt2d = merge(m, {'metricValue': 104, 'metricName': 'ord.t2.desired'})
    mt2a = merge(m, {'metricValue': 24, 'metricName': 'ord.t2.actual'})
    mt2p = merge(m, {'metricValue': 1, 'metricName': 'ord.t2.pending'})
    req_data = [md, ma, mp, mt, mg,
                mt1d, mt1a, mt1p, mt2d, mt2a, mt2p]
    log = mock_log()
    seq = [
        (Func(time.time), const(100)),
        (service_request(
            ServiceType.CLOUD_METRICS_INGEST, "POST", "ingest",
            data=req_data, log=log).intent,
         noop)]
    eff = add_to_cloud_metrics(m['ttlInSeconds'], 'ord', metrics, 2, log)
    self.assertIsNone(perform_sequence(seq, eff))
    log.msg.assert_called_once_with(
        'total desired: {td}, total_actual: {ta}, total pending: {tp}',
        td=107, ta=26, tp=1)
def test_publish_autoscale_event(self):
    """
    Publish an event to cloudfeeds. Successfully handle non-JSON data.
    """
    _log = object()
    eff = cf.publish_autoscale_event({'event': 'stuff'}, log=_log)
    expected = service_request(
        ServiceType.CLOUD_FEEDS, 'POST', 'autoscale/events',
        headers={'content-type': ['application/vnd.rackspace.atom+json']},
        data={'event': 'stuff'}, log=_log,
        success_pred=has_code(201), json_response=False)

    # success
    dispatcher = EQFDispatcher([(
        expected.intent,
        service_request_eqf(stub_pure_response('<this is xml>', 201)))])
    resp, body = sync_perform(dispatcher, eff)
    self.assertEqual(body, '<this is xml>')

    # Add regression test that 202 should be an API error because this
    # is a bug in CF
    dispatcher = EQFDispatcher([(
        expected.intent,
        service_request_eqf(stub_pure_response('<this is xml>', 202)))])
    self.assertRaises(APIError, sync_perform, dispatcher, eff)
def test_get_clb_nodes_error_handling(self):
    """:func:`get_clb_nodes` parses the common CLB errors."""
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'GET',
        'loadbalancers/123456/nodes')
    assert_parses_common_clb_errors(
        self, expected.intent, get_clb_nodes(self.lb_id), "123456")
def test_stack_tag(self):
    """The query parameters include `tags` when `stack_tag` is passed."""
    tag = 'footag'
    svc_intent = service_request(
        ServiceType.CLOUD_ORCHESTRATION, 'GET', 'stacks',
        reauth_codes=(401,),
        params={'tags': tag}).intent
    self.assertEqual(get_all_stacks(stack_tag=tag).intent, svc_intent)
def test_throttling(self):
    """
    When the throttler function returns a bracketing function, it's used
    to throttle the request.
    """
    bracket = object()

    def throttler(stype, method, tid):
        matches = (stype == ServiceType.CLOUD_SERVERS and
                   method == 'get' and tid == 1)
        if matches:
            return bracket

    svcreq = service_request(
        ServiceType.CLOUD_SERVERS, 'GET', 'servers').intent
    response = stub_pure_response({}, 200)
    # The request is performed inside the _Throttle intent's bracket.
    seq = SequenceDispatcher([
        (_Throttle(bracket=bracket, effect=mock.ANY),
         nested_sequence([
             (Authenticate(authenticator=self.authenticator,
                           tenant_id=1, log=self.log),
              lambda i: ('token', fake_service_catalog)),
             (Request(method='GET', url='http://dfw.openstack/servers',
                      headers=headers('token'), log=self.log),
              lambda i: response),
         ])),
    ])
    eff = self._concrete(svcreq, throttler=throttler)
    with seq.consume():
        result = sync_perform(seq, eff)
    self.assertEqual(result, (response[0], {}))
def _delete_stack_intent(self, stack_name, stack_id):
    """Expected intent for a stack DELETE (204, no JSON body)."""
    return service_request(ServiceType.CLOUD_ORCHESTRATION,
                           'DELETE',
                           'stacks/{0}/{1}'.format(stack_name, stack_id),
                           success_pred=has_code(204),
                           reauth_codes=(401,),
                           json_response=False).intent
def get_rcv3_contents():
    """
    Get Rackspace Cloud Load Balancer contents as list of `RCv3Node`.
    """
    eff = service_request(ServiceType.RACKCONNECT_V3, 'GET',
                          'load_balancer_pools')

    def on_listing_nodes(rcv3_description, lbnodes_result):
        _, body = lbnodes_result
        return [
            RCv3Node(node_id=node['id'], description=rcv3_description,
                     cloud_server_id=get_in(('cloud_server', 'id'), node))
            for node in body]

    def on_listing_pools(lblist_result):
        _, body = lblist_result
        # One per-pool node listing, performed in parallel.
        return parallel([
            service_request(ServiceType.RACKCONNECT_V3, 'GET',
                            append_segments('load_balancer_pools',
                                            lb_pool['id'], 'nodes')).on(
                partial(on_listing_nodes,
                        RCv3Description(lb_id=lb_pool['id'])))
            for lb_pool in body
        ])

    return eff.on(on_listing_pools).on(
        success=compose(list, concat),
        error=catch(NoSuchEndpoint, lambda _: []))
def test_remove_nodes_from_clb_retry(self):
    """
    :obj:`RemoveNodesFromCLB`, on receiving a 400, parses out the nodes
    that are no longer on the load balancer, and retries the bulk
    delete with those nodes removed.

    TODO: this has been left in as a regression test - this can probably
    be removed the next time it's touched, as this functionality happens
    in cloud_client now and there is a similar test there.
    """
    lb_id = "12345"
    node_ids = [str(i) for i in range(5)]
    error_body = {
        "validationErrors": {
            "messages": [
                "Node ids 1,2,3 are not a part of your loadbalancer"
            ]
        },
        "message": "Validation Failure",
        "code": 400,
        "details": "The object is not valid"
    }
    expected_req = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'DELETE',
        'loadbalancers/12345/nodes',
        params={'id': transform_eq(sorted, node_ids)},
        success_pred=ANY,
        json_response=True).intent
    # The retry must only include the nodes CLB did not complain about.
    expected_req2 = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'DELETE',
        'loadbalancers/12345/nodes',
        params={'id': transform_eq(sorted, ['0', '4'])},
        success_pred=ANY,
        json_response=True).intent
    step = RemoveNodesFromCLB(lb_id=lb_id, node_ids=pset(node_ids))
    seq = [
        (expected_req,
         lambda i: raise_(APIError(400, json.dumps(error_body)))),
        (expected_req2, lambda i: stub_pure_response('', 202)),
    ]
    self.assertEqual(perform_sequence(seq, step.as_effect()),
                     (StepResult.SUCCESS, []))
def test_no_nodes_on_lbs_no_nodes(self):
    """
    If there are no nodes on each of the load balancer pools, there are
    no nodes returned overall.
    """
    dispatcher = self.get_dispatcher([
        (service_request(ServiceType.RACKCONNECT_V3, "GET",
                         "load_balancer_pools").intent,
         (None, [{"id": str(i)} for i in range(2)])),
        (service_request(ServiceType.RACKCONNECT_V3, "GET",
                         "load_balancer_pools/0/nodes").intent,
         (None, [])),
        (service_request(ServiceType.RACKCONNECT_V3, "GET",
                         "load_balancer_pools/1/nodes").intent,
         (None, [])),
    ])
    self.assertEqual(sync_perform(dispatcher, get_rcv3_contents()), [])
def _check_stack_intent(self, stack_name, stack_id):
    """Expected intent for a stack "check" action POST (200/201)."""
    return service_request(
        ServiceType.CLOUD_ORCHESTRATION, 'POST',
        'stacks/{0}/{1}/actions'.format(stack_name, stack_id),
        data={'check': None},
        success_pred=has_code(200, 201),
        reauth_codes=(401,),
        json_response=False).intent
def verify((_type, error, traceback)): if error.code != 204: raise _type, error, traceback ver_eff = service_request( ServiceType.CLOUD_SERVERS, 'GET', append_segments('servers', server_id), success_pred=has_code(200, 404)) return ver_eff.on(check_task_state)
def _update_stack_intent(self, stack_name, stack_id, stack_args):
    """Expected intent for a stack-update PUT (202, no JSON body)."""
    return service_request(
        ServiceType.CLOUD_ORCHESTRATION, 'PUT',
        'stacks/{0}/{1}'.format(stack_name, stack_id),
        data=stack_args,
        success_pred=has_code(202),
        reauth_codes=(401,),
        json_response=False).intent
def _update_stack_intent(self, stack_name, stack_id, stack_args):
    """Expected intent for a stack-update PUT (202, no JSON body)."""
    return service_request(ServiceType.CLOUD_ORCHESTRATION,
                           'PUT',
                           'stacks/{0}/{1}'.format(stack_name, stack_id),
                           data=stack_args,
                           success_pred=has_code(202),
                           reauth_codes=(401,),
                           json_response=False).intent
def test_default(self):
    """Passing no arguments causes all stacks to be requested."""
    svc_intent = service_request(
        ServiceType.CLOUD_ORCHESTRATION, 'GET', 'stacks',
        reauth_codes=(401,),
        params={}).intent
    self.assertEqual(get_all_stacks().intent, svc_intent)
def test_no_lb_pools_returns_no_nodes(self):
    """
    If there are no load balancer pools, there are no nodes.
    """
    dispatcher = self.get_dispatcher([
        (service_request(ServiceType.RACKCONNECT_V3, "GET",
                         "load_balancer_pools").intent,
         (None, []))
    ])
    self.assertEqual(sync_perform(dispatcher, get_rcv3_contents()), [])
def test_get_clb_nodes(self):
    """:func:`get_clb_nodes` returns all the nodes for a LB."""
    req = get_clb_nodes(self.lb_id)
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'GET',
        'loadbalancers/123456/nodes')
    body = {'nodes': 'nodes!'}
    seq = [
        (expected.intent, lambda i: stub_json_response(body)),
        (log_intent('request-list-clb-nodes', body), lambda _: None)]
    self.assertEqual(perform_sequence(seq, req), 'nodes!')
def test_added(self):
    """
    total desired, pending and actual are added to cloud metrics
    """
    metrics = [
        GroupMetrics('t1', 'g1', 3, 2, 0),
        GroupMetrics('t2', 'g1', 4, 4, 1),
        GroupMetrics('t2', 'g', 100, 20, 0),
        GroupMetrics('t3', 'g3', 5, 3, 0)
    ]
    # t1 is excluded from convergence, so the conv_* totals below omit it.
    config = {"non-convergence-tenants": ["t1"]}
    m = {'collectionTime': 100000, 'ttlInSeconds': 5 * 24 * 60 * 60}
    md = merge(m, {'metricValue': 112, 'metricName': 'ord.desired'})
    ma = merge(m, {'metricValue': 29, 'metricName': 'ord.actual'})
    mp = merge(m, {'metricValue': 1, 'metricName': 'ord.pending'})
    mt = merge(m, {'metricValue': 3, 'metricName': 'ord.tenants'})
    mg = merge(m, {'metricValue': 4, 'metricName': 'ord.groups'})
    mt1d = merge(m, {'metricValue': 3, 'metricName': 'ord.t1.desired'})
    mt1a = merge(m, {'metricValue': 2, 'metricName': 'ord.t1.actual'})
    mt1p = merge(m, {'metricValue': 0, 'metricName': 'ord.t1.pending'})
    mt2d = merge(m, {'metricValue': 104, 'metricName': 'ord.t2.desired'})
    mt2a = merge(m, {'metricValue': 24, 'metricName': 'ord.t2.actual'})
    mt2p = merge(m, {'metricValue': 1, 'metricName': 'ord.t2.pending'})
    mt3d = merge(m, {'metricValue': 5, 'metricName': 'ord.t3.desired'})
    mt3a = merge(m, {'metricValue': 3, 'metricName': 'ord.t3.actual'})
    mt3p = merge(m, {'metricValue': 0, 'metricName': 'ord.t3.pending'})
    cd = merge(m, {'metricValue': 109, 'metricName': 'ord.conv_desired'})
    ca = merge(m, {'metricValue': 27, 'metricName': 'ord.conv_actual'})
    cdiv = merge(m, {'metricValue': 82,
                     'metricName': 'ord.conv_divergence'})
    req_data = [md, ma, mp, mt, mg,
                mt1d, mt1a, mt1p, mt2d, mt2a, mt2p,
                mt3d, mt3a, mt3p, cd, ca, cdiv]
    log = mock_log()
    seq = [
        (Func(time.time), const(100)),
        (service_request(
            ServiceType.CLOUD_METRICS_INGEST, "POST", "ingest",
            data=req_data, log=log).intent,
         noop)]
    eff = add_to_cloud_metrics(
        m['ttlInSeconds'], 'ord', metrics,
        3,  # number of tenants
        config, log)
    self.assertIsNone(perform_sequence(seq, eff))
    # The log-format literal was broken across two source lines by the
    # original formatting; it is a single string.
    log.msg.assert_called_once_with(
        'total desired: {td}, total_actual: {ta}, total pending: {tp}',
        td=112, ta=29, tp=1)
def expected_node_removal_req(self, nodes=(1, 2)):
    """
    :return: Expected effect for a node removal request.
    """
    # NOTE: ``map`` returns a list under Python 2, matching the params
    # the implementation builds.
    return service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'DELETE',
        'loadbalancers/{}/nodes'.format(self.lb_id),
        params={'id': map(str, nodes)},
        success_pred=has_code(202))
def get_clbs(): """Fetch all LBs for a tenant. Returns list of loadbalancer JSON.""" return service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers', ).on(log_success_response( 'request-list-clbs', identity)).on(success=lambda (response, body): body['loadBalancers'])
def test_no_lb_pools_returns_no_nodes(self):
    """
    If there are no load balancer pools, there are no nodes.
    """
    pools_intent = service_request(
        ServiceType.RACKCONNECT_V3, 'GET', 'load_balancer_pools').intent
    dispatcher = self.get_dispatcher([(pools_intent, (None, []))])
    self.assertEqual(sync_perform(dispatcher, get_rcv3_contents()), [])
def test_get_clbs(self):
    """Returns all the load balancer details from the LBs endpoint."""
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers')
    req = get_clbs()
    body = {'loadBalancers': 'lbs!'}
    seq = [
        (expected.intent, lambda i: stub_json_response(body)),
        (log_intent('request-list-clbs', body), lambda _: None)]
    self.assertEqual(perform_sequence(seq, req), 'lbs!')
def test_get_clb_health_mon_error(self):
    """
    :func:`get_clb_health_monitor` parses the common CLB errors.
    """
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'GET',
        'loadbalancers/123456/healthmonitor')
    assert_parses_common_clb_errors(
        self, expected.intent, get_clb_health_monitor(self.lb_id),
        self.lb_id)
def test_returns_flat_list_of_rcv3nodes(self):
    """
    All the nodes returned are in a flat list.
    """
    pools = (service_request(ServiceType.RACKCONNECT_V3, "GET",
                             "load_balancer_pools").intent,
             (None, [{"id": str(i)} for i in range(2)]))
    pool0 = (service_request(ServiceType.RACKCONNECT_V3, "GET",
                             "load_balancer_pools/0/nodes").intent,
             (None, [{"id": "0node{0}".format(i),
                      "cloud_server": {"id": "0server{0}".format(i)}}
                     for i in range(2)]))
    pool1 = (service_request(ServiceType.RACKCONNECT_V3, "GET",
                             "load_balancer_pools/1/nodes").intent,
             (None, [{"id": "1node{0}".format(i),
                      "cloud_server": {"id": "1server{0}".format(i)}}
                     for i in range(2)]))
    dispatcher = self.get_dispatcher([pools, pool0, pool1])
    self.assertEqual(
        sorted(sync_perform(dispatcher, get_rcv3_contents())),
        sorted([
            RCv3Node(node_id="0node0", cloud_server_id="0server0",
                     description=RCv3Description(lb_id="0")),
            RCv3Node(node_id="0node1", cloud_server_id="0server1",
                     description=RCv3Description(lb_id="0")),
            RCv3Node(node_id="1node0", cloud_server_id="1server0",
                     description=RCv3Description(lb_id="1")),
            RCv3Node(node_id="1node1", cloud_server_id="1server1",
                     description=RCv3Description(lb_id="1")),
        ]))
def test_add_clb_nodes(self):
    """
    Produce a request for adding nodes to a load balancer, which returns
    a successful result on a 202.

    Parse the common CLB errors, and a :class:`CLBDuplicateNodesError`.
    """
    nodes = [{"address": "1.1.1.1", "port": 80, "condition": "ENABLED"},
             {"address": "1.1.1.2", "port": 80, "condition": "ENABLED"},
             {"address": "1.1.1.5", "port": 81, "condition": "ENABLED"}]
    eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes)
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'POST',
        'loadbalancers/{0}/nodes'.format(self.lb_id),
        data={'nodes': nodes},
        success_pred=has_code(202))

    # success
    seq = [
        (expected.intent, lambda i: stub_json_response({}, 202, {})),
        (log_intent('request-add-clb-nodes', {}), lambda _: None)]
    self.assertEqual(perform_sequence(seq, eff),
                     (StubResponse(202, {}), {}))

    # CLBDuplicateNodesError failure
    msg = ("Duplicate nodes detected. One or more nodes already "
           "configured on load balancer.")
    duplicate_nodes = stub_pure_response(
        json.dumps({'message': msg, 'code': 422}), 422)
    dispatcher = EQFDispatcher([(
        expected.intent, service_request_eqf(duplicate_nodes))])
    with self.assertRaises(CLBDuplicateNodesError) as cm:
        sync_perform(dispatcher, eff)
    self.assertEqual(
        cm.exception,
        CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id)))

    # CLBNodeLimitError failure
    msg = "Nodes must not exceed 25 per load balancer."
    limit = stub_pure_response(
        json.dumps({'message': msg, 'code': 413}), 413)
    dispatcher = EQFDispatcher([(
        expected.intent, service_request_eqf(limit))])
    with self.assertRaises(CLBNodeLimitError) as cm:
        sync_perform(dispatcher, eff)
    self.assertEqual(
        cm.exception,
        CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id),
                          node_limit=25))

    # all the common failures
    assert_parses_common_clb_errors(self, expected.intent, eff, "123456")
def expected_node_removal_req(self, nodes=(1, 2)):
    """
    :return: Expected effect for a node removal request.
    """
    url = 'loadbalancers/{}/nodes'.format(self.lb_id)
    # ``map`` yields a list under Python 2, matching the implementation.
    return service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'DELETE', url,
        params={'id': map(str, nodes)},
        success_pred=has_code(202))
def test_no_nodes_on_lbs_no_nodes(self):
    """
    If there are no nodes on each of the load balancer pools, there are
    no nodes returned overall.
    """
    pools_intent = service_request(
        ServiceType.RACKCONNECT_V3, 'GET', 'load_balancer_pools').intent
    nodes0_intent = service_request(
        ServiceType.RACKCONNECT_V3, 'GET',
        'load_balancer_pools/0/nodes').intent
    nodes1_intent = service_request(
        ServiceType.RACKCONNECT_V3, 'GET',
        'load_balancer_pools/1/nodes').intent
    dispatcher = self.get_dispatcher([
        (pools_intent, (None, [{'id': str(i)} for i in range(2)])),
        (nodes0_intent, (None, [])),
        (nodes1_intent, (None, []))
    ])
    self.assertEqual(sync_perform(dispatcher, get_rcv3_contents()), [])
def test_get_clbs(self):
    """Returns all the load balancer details from the LBs endpoint."""
    req = get_clbs()
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers')
    body = {'loadBalancers': 'lbs!'}
    seq = [(expected.intent, lambda i: stub_json_response(body)),
           (log_intent('request-list-clbs', body), lambda _: None)]
    self.assertEqual(perform_sequence(seq, req), 'lbs!')
def test_get_clb_health_mon_error(self):
    """
    :func:`get_clb_health_monitor` parses the common CLB errors.
    """
    expected = service_request(ServiceType.CLOUD_LOAD_BALANCERS, 'GET',
                               'loadbalancers/123456/healthmonitor')
    assert_parses_common_clb_errors(self, expected.intent,
                                    get_clb_health_monitor(self.lb_id),
                                    self.lb_id)
def on_listing_pools(lblist_result):
    _, body = lblist_result
    # Fan out one nodes-listing request per pool, performed in parallel.
    return parallel([
        service_request(
            ServiceType.RACKCONNECT_V3, 'GET',
            append_segments('load_balancer_pools',
                            lb_pool['id'], 'nodes')).on(
            partial(on_listing_nodes,
                    RCv3Description(lb_id=lb_pool['id'])))
        for lb_pool in body])
def test_error_handling(self):
    """
    Parses regular CLB errors and raises corresponding exceptions
    """
    svc_intent = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, "GET",
        "loadbalancers/12/nodes/13.atom",
        params={},
        json_response=False).intent
    assert_parses_common_clb_errors(
        self, svc_intent, get_clb_node_feed("12", "13"), "12")
def test_delete_and_verify_del_404(self):
    """
    :func:`delete_and_verify` invokes server delete and succeeds on 404
    """
    eff = delete_and_verify('sid')
    expected = service_request(
        ServiceType.CLOUD_SERVERS, 'DELETE', 'servers/sid',
        success_pred=has_code(404)).intent
    self.assertEqual(eff.intent, expected)
    # A 404 response resolves straight through without a verify step.
    self.assertEqual(resolve_effect(eff, (ANY, {})), (ANY, {}))
def add_to_cloud_metrics(ttl, region, group_metrics, num_tenants, config,
                         log=None, _print=False):
    """
    Add total number of desired, actual and pending servers of a region
    to Cloud metrics.

    :param ttl: TTL (in seconds) for the ingested metric points
    :param str region: which region's metric is collected
    :param group_metrics: List of :obj:`GroupMetric`
    :param int num_tenants: total number of tenants
    :param dict config: Config json dict containing convergence tenants info
    :param log: Optional logger
    :param bool _print: Should it print activity on stdout? Useful when
        running as a script

    :return: `Effect` with None
    """
    epoch = yield Effect(Func(time.time))
    metric_part = {'collectionTime': int(epoch * 1000),
                   'ttlInSeconds': ttl}

    tenanted_metrics, total = calc_total(group_metrics)
    if log is not None:
        log.msg(
            'total desired: {td}, total_actual: {ta}, total pending: {tp}',
            td=total.desired, ta=total.actual, tp=total.pending)
    if _print:
        print('total desired: {}, total actual: {}, total pending: {}'.format(
            total.desired, total.actual, total.pending))

    # Region-wide totals first, then a per-tenant breakdown.
    metrics = [('desired', total.desired), ('actual', total.actual),
               ('pending', total.pending), ('tenants', num_tenants),
               ('groups', len(group_metrics))]
    for tenant_id, metric in sorted(tenanted_metrics.items()):
        metrics.append(("{}.desired".format(tenant_id), metric.desired))
        metrics.append(("{}.actual".format(tenant_id), metric.actual))
        metrics.append(("{}.pending".format(tenant_id), metric.pending))

    # convergence tenants desired and actual
    conv_tenants = keyfilter(
        partial(tenant_is_enabled,
                get_config_value=lambda k: get_in([k], config)),
        tenanted_metrics)
    conv_desired = sum(m.desired for m in conv_tenants.itervalues())
    conv_actual = sum(m.actual for m in conv_tenants.itervalues())
    metrics.extend(
        [("conv_desired", conv_desired), ("conv_actual", conv_actual),
         ("conv_divergence", conv_desired - conv_actual)])

    data = [merge(metric_part,
                  {'metricValue': value,
                   'metricName': '{}.{}'.format(region, metric)})
            for metric, value in metrics]
    # NOTE: the original source was split mid-statement between the
    # trailing ``yield`` and this service_request call; they are one
    # statement.
    yield service_request(ServiceType.CLOUD_METRICS_INGEST, 'POST',
                          'ingest', data=data, log=log)