def test_add_nodes_to_clbs(self):
    """Logs :obj:`AddNodesToCLB`."""
    steps = pbag([
        AddNodesToCLB(
            lb_id='lbid1',
            address_configs=pset([('10.0.0.1', _clbd('lbid1', 1234))])),
        AddNodesToCLB(
            lb_id='lbid1',
            address_configs=pset([('10.0.0.2', _clbd('lbid1', 1235))])),
        AddNodesToCLB(
            lb_id='lbid2',
            address_configs=pset([('10.0.0.1', _clbd('lbid2', 4321))]))])
    # One log line per load balancer, with all of its addresses collected.
    expected = [
        Log('convergence-add-clb-nodes',
            fields={'lb_id': 'lbid1',
                    'addresses': ['10.0.0.1:1234', '10.0.0.2:1235'],
                    'cloud_feed': True}),
        Log('convergence-add-clb-nodes',
            fields={'lb_id': 'lbid2',
                    'addresses': ['10.0.0.1:4321'],
                    'cloud_feed': True})]
    self.assert_logs(steps, expected)
def test_change_clb_node(self):
    """Logs :obj:`ChangeCLBNode`."""
    steps = pbag([
        ChangeCLBNode(lb_id='lbid1', node_id='node1',
                      condition=CLBNodeCondition.DRAINING,
                      type=CLBNodeType.PRIMARY, weight=50),
        ChangeCLBNode(lb_id='lbid1', node_id='node2',
                      condition=CLBNodeCondition.DRAINING,
                      type=CLBNodeType.PRIMARY, weight=50),
        ChangeCLBNode(lb_id='lbid1', node_id='node3',
                      condition=CLBNodeCondition.ENABLED,
                      type=CLBNodeType.PRIMARY, weight=50),
        ChangeCLBNode(lb_id='lbid2', node_id='node4',
                      condition=CLBNodeCondition.ENABLED,
                      type=CLBNodeType.PRIMARY, weight=50)])
    # Steps sharing lb_id/type/condition/weight collapse into a single log
    # entry listing all affected nodes.
    expected = [
        Log('convergence-change-clb-nodes',
            fields={'lb_id': 'lbid1', 'nodes': ['node3'],
                    'type': 'PRIMARY', 'condition': 'ENABLED',
                    'weight': 50, 'cloud_feed': True}),
        Log('convergence-change-clb-nodes',
            fields={'lb_id': 'lbid1', 'nodes': ['node1', 'node2'],
                    'type': 'PRIMARY', 'condition': 'DRAINING',
                    'weight': 50, 'cloud_feed': True}),
        Log('convergence-change-clb-nodes',
            fields={'lb_id': 'lbid2', 'nodes': ['node4'],
                    'type': 'PRIMARY', 'condition': 'ENABLED',
                    'weight': 50, 'cloud_feed': True})]
    self.assert_logs(steps, expected)
def test_filters_on_user_criteria(self):
    """
    Considers user provided filter if provided
    """
    group_a = [{'metadata': {'rax:auto_scaling_group_id': 'a'}, 'id': i}
               for i in range(5)]
    group_b = [{'metadata': {'rax:auto_scaling_group_id': 'b'}, 'id': i}
               for i in range(5, 8)]
    as_servers = group_a + group_b
    # Servers with junk metadata must be ignored entirely.
    all_servers = as_servers + [{'metadata': 'junk'}] * 3
    eff = get_all_scaling_group_servers(
        server_predicate=lambda s: s['id'] % 3 == 0)
    sequence = [
        (service_request(*self.req).intent,
         lambda i: (StubResponse(200, None), {'servers': all_servers})),
        (Log(mock.ANY, mock.ANY), lambda i: None)]
    # Only ids 0, 3 and 6 pass the predicate.
    self.assertEqual(
        perform_sequence(sequence, eff),
        {'a': [as_servers[0], as_servers[3]], 'b': [as_servers[6]]})
def lb_req(url, json_response, response):
    """
    Return a SequenceDispatcher two-tuple that matches a service request
    to a particular load balancer endpoint (using GET), and returns the
    given ``response`` as the content in an HTTP 200 ``StubResponse``.
    """
    if isinstance(response, Exception):
        # Exceptional responses are raised by the handler, and no success
        # log entry is expected afterwards.
        def handler(i):
            raise response
        log_seq = []
    else:
        def handler(i):
            return (StubResponse(200, {}), response)
        log_seq = [(Log(mock.ANY, mock.ANY), lambda i: None)]
    request_intent = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'GET', url,
        json_response=json_response).intent
    retry_intent = Retry(
        effect=mock.ANY,
        should_retry=ShouldDelayAndRetry(
            can_retry=retry_times(5),
            next_interval=exponential_backoff_interval(2)))
    return (retry_intent,
            nested_sequence([(request_intent, handler)] + log_seq))
def test_returns_as_servers(self):
    """
    Returns servers with AS metadata in it grouped by scaling group ID
    """
    group_a = [{'metadata': {'rax:auto_scaling_group_id': 'a'}, 'id': i}
               for i in range(5)]
    group_b = [{'metadata': {'rax:auto_scaling_group_id': 'b'}, 'id': i}
               for i in range(5, 8)]
    # An out-of-order member of group 'a' to prove grouping is by
    # metadata, not adjacency.
    straggler = {'metadata': {'rax:auto_scaling_group_id': 'a'}, 'id': 10}
    as_servers = group_a + group_b + [straggler]
    all_servers = as_servers + [{'metadata': 'junk'}] * 3
    eff = get_all_scaling_group_servers()
    sequence = [
        (service_request(*self.req).intent,
         lambda i: (StubResponse(200, None), {'servers': all_servers})),
        (Log(mock.ANY, mock.ANY), lambda i: None)]
    self.assertEqual(
        perform_sequence(sequence, eff),
        {'a': group_a + [straggler], 'b': group_b})
def test_set_metadata_item(self):
    """
    :obj:`SetMetadataItemOnServer.as_effect` produces a request for
    setting a metadata item on a particular server.  It succeeds if
    successful, but does not fail for any errors.
    """
    server_id = u'abc123'
    step = SetMetadataItemOnServer(server_id=server_id,
                                   key='metadata_key', value='teapot')
    eff = step.as_effect()
    # Happy path: a 202 plus the request log yields SUCCESS.
    success_seq = [
        (eff.intent, lambda i: (StubResponse(202, {}), {})),
        (Log(ANY, ANY), lambda _: None)]
    self.assertEqual(perform_sequence(success_seq, eff),
                     (StepResult.SUCCESS, []))
    # Each of these errors propagates out of the performed effect.
    for error in [NoSuchServerError("msg", server_id=server_id),
                  ServerMetadataOverLimitError("msg", server_id=server_id),
                  NovaRateLimitError("msg"),
                  APIError(code=500, body="", headers={})]:
        self.assertRaises(
            type(error),
            perform_sequence,
            [(eff.intent, lambda i: raise_(error))],
            eff)
def test_cf_msg(self):
    """
    `cf_msg` returns Effect with `Log` intent with cloud_feed=True
    """
    expected_log = Log('message', dict(cloud_feed=True, a=2, b=3))
    sequence = [(expected_log, lambda _: 'logged')]
    self.assertEqual(
        perform_sequence(sequence, cf_msg('message', a=2, b=3)),
        'logged')
def test_retry_false(self):
    """Tests correct behavior when retry is passed as false."""
    step = UpdateStack(stack=self.stack, stack_config=self.config,
                       retry=False)
    sequence = [
        (self.update_call.intent,
         lambda _: (StubResponse(202, ''), None)),
        (Log('request-update-stack', ANY), lambda _: None)]
    # With retry disabled the step finishes immediately with SUCCESS.
    self.assertEqual(perform_sequence(sequence, step.as_effect()),
                     (StepResult.SUCCESS, []))
def test_group_deleted(self):
    """
    Does nothing if group has been deleted
    """
    sequence = [
        (GetScalingGroupInfo(tenant_id="tid", group_id="gid"),
         lambda i: raise_(NoSuchScalingGroupError("tid", "gid"))),
        (Log("selfheal-group-deleted",
             dict(tenant_id="tid", scaling_group_id="gid")),
         noop)]
    result = perform_sequence(sequence, sh.check_and_trigger("tid", "gid"))
    self.assertIsNone(result)
def test_ensure_retry(self):
    """Tests that retry will be returned."""
    sequence = [
        (self.check_call.intent,
         lambda _: (StubResponse(204, ''), None)),
        (Log('request-check-stack', ANY), lambda _: None)]
    result = perform_sequence(sequence,
                              CheckStack(self.stack).as_effect())
    expected_reason = 'Waiting for stack check to complete'
    self.assertEqual(
        result,
        (StepResult.RETRY, [ErrorReason.String(expected_reason)]))
def test_retry_default(self):
    """Tests correct behavior when retry is not specified."""
    step = UpdateStack(stack=self.stack, stack_config=self.config)
    sequence = [
        (self.update_call.intent,
         lambda _: (StubResponse(202, ''), None)),
        (Log('request-update-stack', ANY), lambda _: None)]
    result = perform_sequence(sequence, step.as_effect())
    # The default is to retry while waiting for the update to finish.
    expected_reason = 'Waiting for stack to update'
    self.assertEqual(
        result,
        (StepResult.RETRY, [ErrorReason.String(expected_reason)]))
def test_remove_nodes_from_clbs(self):
    """Logs :obj:`RemoveNodesFromCLB`."""
    steps = pbag([
        RemoveNodesFromCLB(lb_id='lbid1', node_ids=pset(['a', 'b', 'c'])),
        RemoveNodesFromCLB(lb_id='lbid2', node_ids=pset(['d', 'e', 'f']))])
    expected = [
        Log('convergence-remove-clb-nodes',
            fields={'lb_id': 'lbid1', 'nodes': ['a', 'b', 'c'],
                    'cloud_feed': True}),
        Log('convergence-remove-clb-nodes',
            fields={'lb_id': 'lbid2', 'nodes': ['d', 'e', 'f'],
                    'cloud_feed': True})]
    self.assert_logs(steps, expected)
def test_filters_no_metadata(self):
    """
    Servers without metadata are not included in the result.
    """
    all_servers = [{'id': i} for i in range(10)]
    eff = get_all_scaling_group_servers()
    sequence = [
        (service_request(*self.req).intent,
         lambda i: (StubResponse(200, None), {'servers': all_servers})),
        (Log(mock.ANY, mock.ANY), lambda i: None)]
    self.assertEqual(perform_sequence(sequence, eff), {})
def test_bulk_remove_from_rcv3(self):
    """Logs :obj:`BulkRemoveFromRCv3`."""
    steps = pbag([
        BulkRemoveFromRCv3(lb_node_pairs=pset(
            [('lb1', 'node1'), ('lb1', 'node2'),
             ('lb2', 'node2'), ('lb2', 'node3'),
             ('lb3', 'node4')])),
        BulkRemoveFromRCv3(lb_node_pairs=pset(
            [('lba', 'nodea'), ('lba', 'nodeb'),
             ('lb1', 'nodea')]))])
    # Pairs across both steps are regrouped per load balancer; one log
    # line per lb_id with all of its servers.
    expected = [
        Log('convergence-remove-rcv3-nodes',
            fields={'lb_id': lb_id, 'servers': servers,
                    'cloud_feed': True})
        for lb_id, servers in [
            ('lb1', ['node1', 'node2', 'nodea']),
            ('lb2', ['node2', 'node3']),
            ('lb3', ['node4']),
            ('lba', ['nodea', 'nodeb'])]]
    self.assert_logs(steps, expected)
def test_ensure_retry(self):
    """Tests that retry will be returned."""
    foo_stack = stack(id='foo', name='bar')
    sequence = [
        (delete_stack(stack_id='foo', stack_name='bar').intent,
         lambda _: (StubResponse(204, ''), None)),
        (Log('request-delete-stack', ANY), lambda _: None)]
    result = perform_sequence(sequence,
                              DeleteStack(foo_stack).as_effect())
    self.assertEqual(
        result,
        (StepResult.RETRY,
         [ErrorReason.String('Waiting for stack to delete')]))
def test_filters_no_as_metadata(self):
    """
    Does not include servers which have metadata but does not have AS
    info in it
    """
    all_servers = [{'id': i, 'metadata': {}} for i in range(10)]
    eff = get_all_scaling_group_servers()
    sequence = [
        (service_request(*self.req).intent,
         lambda i: (StubResponse(200, None), {'servers': all_servers})),
        (Log(mock.ANY, mock.ANY), lambda i: None)]
    self.assertEqual(perform_sequence(sequence, eff), {})
def log_intent(msg_type, body, log_as_json=True, req_body=''):
    """
    Return a :obj:`Log` intent for the given message type and body.
    """
    if log_as_json:
        body = json.dumps(body, sort_keys=True)
    fields = {'url': "original/request/URL",
              'method': 'method',
              'request_id': "original-request-id",
              'response_body': body,
              'request_body': req_body}
    return Log(msg_type, fields)
def test_create_servers(self):
    """Logs :obj:`CreateServer`."""
    config_a = {'configgy': 'configged', 'nested': {'a': 'b'}}
    config_b = {'configgy': 'configged', 'nested': {'a': 'c'}}
    steps = pbag([
        CreateServer(server_config=freeze(config_a)),
        CreateServer(server_config=freeze(config_a)),
        CreateServer(server_config=freeze(config_b))])
    # Identical configs are counted together in one log line.
    expected = [
        Log('convergence-create-servers',
            fields={'num_servers': 2, 'server_config': config_a,
                    'cloud_feed': True}),
        Log('convergence-create-servers',
            fields={'num_servers': 1, 'server_config': config_b,
                    'cloud_feed': True})]
    self.assert_logs(steps, expected)
def test_with_changes_since(self):
    """
    If given, servers are fetched based on changes_since
    """
    since = datetime(2010, 10, 10, 10, 10, 0)
    eff = get_all_scaling_group_servers(changes_since=since)
    request_intent = service_request(
        **svc_request_args(changes_since=since, limit=100)).intent
    sequence = [
        (request_intent,
         lambda i: (StubResponse(200, None), {'servers': []})),
        (Log(mock.ANY, mock.ANY), lambda i: None)]
    self.assertEqual(perform_sequence(sequence, eff), {})
def test_delete_servers(self):
    """Logs :obj:`DeleteServer`."""
    steps = pbag([DeleteServer(server_id=sid) for sid in ['1', '2', '3']])
    # All deletions are reported in a single log line.
    self.assert_logs(steps, [
        Log('convergence-delete-servers',
            fields={'servers': ['1', '2', '3'], 'cloud_feed': True})])
def full_intents():
    """Return the legacy intents plus every other intent sample."""
    extra = [
        CreateOrSet(path='foo', content='bar'),
        GetScalingGroupInfo(tenant_id='foo', group_id='bar'),
        EvictServerFromScalingGroup(log='log',
                                    transaction_id='transaction_id',
                                    scaling_group='scaling_group',
                                    server_id='server_id'),
        Log('msg', {}),
        LogErr('f', 'msg', {}),
        BoundFields(Effect(None), {}),
        MsgWithTime('msg', Effect(None)),
        CQLQueryExecute(query='q', params={}, consistency_level=7)]
    return legacy_intents() + extra
def test_set_metadata_item_on_server(self):
    """Logs :obj:`SetMetadataItemOnServer`."""
    steps = pbag([
        SetMetadataItemOnServer(server_id='s1', key='k1', value='v1'),
        SetMetadataItemOnServer(server_id='s2', key='k1', value='v1'),
        SetMetadataItemOnServer(server_id='s3', key='k2', value='v2')])
    # Steps sharing the same key/value pair are merged into one log line.
    expected = [
        Log('convergence-set-server-metadata',
            fields={'servers': ['s1', 's2'], 'key': 'k1', 'value': 'v1',
                    'cloud_feed': True}),
        Log('convergence-set-server-metadata',
            fields={'servers': ['s3'], 'key': 'k2', 'value': 'v2',
                    'cloud_feed': True})]
    self.assert_logs(steps, expected)
def test_add_nodes_to_clb_success_response_codes(self):
    """
    :obj:`AddNodesToCLB` succeeds on 202.
    """
    eff = self._add_one_node_to_clb()
    dispatcher = SequenceDispatcher([
        (eff.intent, lambda i: (StubResponse(202, {}), '')),
        (Log(ANY, ANY), lambda _: None)])
    expected_result = (
        StepResult.RETRY,
        [ErrorReason.String('must re-gather after adding to CLB in order '
                            'to update the active cache')])
    with dispatcher.consume():
        self.assertEqual(sync_perform(dispatcher, eff), expected_result)
def test_normal_use(self):
    """Tests normal usage."""
    stack_config = pmap({'stack_name': 'baz', 'foo': 'bar'})
    new_stack_config = pmap({'stack_name': 'baz_foo', 'foo': 'bar'})
    # Kept on self so other assertions/helpers may inspect them.
    self.create = CreateStack(stack_config)
    self.seq = [
        (Func(uuid4), lambda _: 'foo'),
        (create_stack(thaw(new_stack_config)).intent,
         lambda _: (StubResponse(200, {}), {'stack': {}})),
        (Log('request-create-stack', ANY), lambda _: None)]
    result = perform_sequence(self.seq, self.create.as_effect())
    self.assertEqual(
        result,
        (StepResult.RETRY,
         [ErrorReason.String('Waiting for stack to create')]))
def test_create_server_success(self):
    """
    Creating a server, when Nova responds with a 202, returns Nova's
    response with the body as a JSON dictionary.  It logs this response
    minus the administrative password.
    """
    server_body = {'server': {'id': 'server_id', 'adminPass': "******"}}
    # The logged response body must omit the adminPass key.
    expected_log = Log(
        'request-create-server',
        {'url': "original/request/URL",
         'method': 'method',
         'request_id': "original-request-id",
         'response_body': '{"server": {"id": "server_id"}}',
         'request_body': ''})
    expected, real = self._setup_for_create_server()
    resp, body = _perform_one_request(
        expected.intent, real, 202, json.dumps(server_body), expected_log)
    self.assertEqual(body, server_body)
def test_create_server_success_case(self):
    """
    :obj:`CreateServer.as_effect`, when it results in a successful create,
    returns with :obj:`StepResult.RETRY`.
    """
    eff = CreateServer(
        server_config=freeze({'server': {'flavorRef': '1'}})).as_effect()
    create_intent = service_request(
        ServiceType.CLOUD_SERVERS, 'POST', 'servers',
        data={'server': {'name': 'random-name', 'flavorRef': '1'}},
        success_pred=has_code(202),
        reauth_codes=(401,)).intent
    sequence = [
        (Func(generate_server_name), lambda _: 'random-name'),
        (create_intent,
         lambda _: (StubResponse(202, {}), {"server": {}})),
        (Log('request-create-server', ANY), lambda _: None)]
    self.assertEqual(
        perform_sequence(sequence, eff),
        (StepResult.RETRY,
         [ErrorReason.String('waiting for server to become active')]))