def _check_calls(self, replace):
    """
    Verify that ``evict_before_auth`` and ``remove_server_from_group``
    were invoked once each, with the expected request and bound log.
    """
    self.mock_eba.assert_called_once_with(
        matches(IsInstance(Request)), 'replace', True)
    request = self.mock_eba.call_args[0][0]
    self.assertEqual(request.uri, self.endpoint + 's1')
    self.mock_rsfg.assert_called_once_with(
        matches(IsBoundWith(system='otter.rest.groups.delete_server',
                            tenant_id='11111',
                            scaling_group_id='one',
                            server_id='s1',
                            transaction_id='transaction-id')),
        'transaction-id', 's1', replace, self.mock_group, self.mock_state)
def test_invalidate(self):
    """
    The invalidate method causes the next authenticate_tenant call to
    re-authenticate.
    """
    self.ca.authenticate_tenant(1)
    self.ca.invalidate(1)
    self.ca.authenticate_tenant(1)
    # Both authenticate_tenant calls should have hit the auth function.
    expected_call = mock.call(
        1, log=matches(IsInstance(default_log.__class__)))
    self.auth_function.assert_has_calls([expected_call, expected_call])
def test_collect_called_again(self):
    """
    `self.collect` is called again based on interval given in config
    """
    svc = self._service()
    svc.startService()
    self.assertTrue(svc.running)
    expected_args = ("r", self.config, self.log)
    expected_kwargs = dict(
        client=self.client,
        authenticator=matches(Provides(IAuthenticator)))
    self.collect.assert_called_once_with(*expected_args, **expected_kwargs)
    self.clock.advance(20)
    self.assertEqual(len(self.collect.mock_calls), 2)
    self.collect.assert_called_with(*expected_args, **expected_kwargs)
def test_generates_new_audit_log(self):
    """
    The observer generates two logs for an audit-loggable eventDict -
    the audit log dictionary and a regular log (passes timestamp and
    hostname too)
    """
    self.wrapper({'message': 'meh', 'audit_log': True, 'time': 1234.0})
    # BUG FIX: this previously called ``self.observer.has_calls(...)``.
    # ``has_calls`` is not a Mock assertion method -- attribute access on
    # a Mock just creates a child mock, so the check silently passed no
    # matter what.  ``assert_has_calls`` performs the real verification.
    self.observer.assert_has_calls([
        mock.call(matches(ContainsDict({
            'message': Equals('meh'),
            'audit_log': Equals(True),
            '@timestamp': Equals(1234),
            'host': Equals('hostname')}))),
        mock.call(matches(ContainsDict({
            'short_message': Equals('meh'),
            'audit_log_event_source': Equals(True)})))
    ])
def test_merge_effectful_fields_no_log_with_context(self):
    """
    A log is returned with fields from the default otter log and the
    context when no log is passed.
    """
    merged = merge_effectful_fields(self.disp, None)
    self.assertEqual(merged, matches(IsBoundWith(f1='v', system='otter')))
def test_contains_exception_type(self):
    """
    The observer includes "exception_type" if event contains error
    """
    self.wrapper({'failure': Failure(ValueError()), 'isError': True})
    expected = ContainsDict({'exception_type': Equals("ValueError")})
    self.observer.assert_called_once_with(matches(expected))
def test_isError_sets_level_error(self):
    """
    The observer sets the level to LogLevel.ERROR when isError is true.
    """
    self.wrapper({'failure': Failure(ValueError()), 'isError': True})
    expected = ContainsDict({'level': Equals(LogLevel.ERROR)})
    self.observer.assert_called_once_with(matches(expected))
def test_failure_repr_in_short_message(self):
    """
    The observer includes the repr of failure.value in the ``message``
    key.

    NOTE(review): the previous docstring claimed the repr went into
    ``short_message``, but the assertion below checks ``message`` --
    the docstring was stale.
    """
    self.wrapper({'failure': Failure(ValueError()), 'isError': True})
    self.observer.assert_called_once_with(
        matches(ContainsDict({'message': Equals((repr(ValueError()),))})))
def test_cloudfeeds_setup(self):
    """
    Cloud feeds observer is setup if it is there in config
    """
    self.addCleanup(set_fanout, None)
    self.assertEqual(get_fanout(), None)

    cf_conf = deepcopy(test_config)
    cf_conf['cloudfeeds'] = {'service': 'cloudFeeds',
                             'tenant_id': 'tid',
                             'url': 'url'}
    makeService(cf_conf)

    serv_confs = get_service_configs(cf_conf)
    serv_confs[ServiceType.CLOUD_FEEDS] = {'url': 'url'}

    self.assertEqual(len(get_fanout().subobservers), 1)
    observer = get_fanout().subobservers[0]
    self.assertEqual(
        observer,
        CloudFeedsObserver(
            reactor=self.reactor,
            authenticator=matches(IsInstance(CachingAuthenticator)),
            tenant_id='tid',
            region='ord',
            service_configs=serv_confs))

    # single tenant authenticator is created
    authenticator = observer.authenticator
    self.assertIsInstance(
        authenticator._authenticator._authenticator._authenticator,
        SingleTenantAuthenticator)
def test_execute_webhook(self):
    """
    Execute a webhook by hash returns a 202
    """
    self.mock_store.webhook_info_by_hash.return_value = defer.succeed(
        (self.tenant_id, self.group_id, self.policy_id))
    self.mock_controller.maybe_execute_scaling_policy.return_value = \
        defer.succeed(None)

    body = self.assert_status_code(202, '/v1.0/execute/1/11111/', 'POST')

    self.mock_store.get_scaling_group.assert_called_once_with(
        mock.ANY, self.tenant_id, self.group_id)

    bound_fields = dict(
        tenant_id=self.tenant_id,
        scaling_group_id=self.group_id,
        policy_id=self.policy_id,
        transaction_id='transaction-id',
        capability_hash='11111',
        capability_version='1',
        system='otter.rest.webhooks.execute_webhook')
    self.mock_controller.modify_and_trigger.assert_called_once_with(
        "disp", self.mock_group, bound_fields, mock.ANY,
        modify_state_reason="execute_webhook")
    exec_pol = self.mock_controller.maybe_execute_scaling_policy
    exec_pol.assert_called_once_with(
        matches(IsBoundWith(**bound_fields)),
        'transaction-id',
        self.mock_group,
        self.mock_state,
        policy_id=self.policy_id)
    self.assertEqual(body, '')
def test_cloudfeeds_setup(self):
    """
    Cloud feeds observer is setup if it is there in config
    """
    self.addCleanup(set_fanout, None)
    self.assertEqual(get_fanout(), None)

    conf = deepcopy(test_config)
    conf['cloudfeeds'] = {'service': 'cloudFeeds',
                          'tenant_id': 'tid',
                          'url': 'url'}
    makeService(conf)

    service_configs = get_service_configs(conf)
    service_configs[ServiceType.CLOUD_FEEDS] = {'url': 'url'}

    subobservers = get_fanout().subobservers
    self.assertEqual(len(subobservers), 1)
    cf_observer = subobservers[0]
    self.assertEqual(
        cf_observer,
        CloudFeedsObserver(
            reactor=self.reactor,
            authenticator=matches(IsInstance(CachingAuthenticator)),
            tenant_id='tid',
            region='ord',
            service_configs=service_configs))

    # single tenant authenticator is created
    inner = cf_observer.authenticator
    self.assertIsInstance(
        inner._authenticator._authenticator._authenticator,
        SingleTenantAuthenticator)
def _assert_delete_scheduled(self):
    """
    Assert that the server was scheduled for deletion.
    """
    expected = (
        matches(IsBoundWith(server_id='s0', system='otter.job.delete')),
        self.tid, self.group, {'id': 's0'})
    self.assertEqual(self.supervisor.del_calls[-1], expected)
def test_log_none_effectful_fields(self):
    """
    When log is not passed, but there are log fields from BoundFields,
    the log passed to treq has those fields.
    """
    log = mock_log()
    # we have to include system='otter' in the expected log here because
    # the code falls back to otter.log.log, which has the system key bound.
    expected_log = matches(IsBoundWith(bound='stuff', system='otter'))
    stub_req = ('GET', 'http://google.com/', None, None, None,
                {'log': expected_log})
    response = StubResponse(200, {})
    treq = StubTreq(reqs=[(stub_req, response)],
                    contents=[(response, "content")])
    request = Request(method="get", url="http://google.com/")
    request.treq = treq
    bound_log_eff = with_log(Effect(request), bound='stuff')
    dispatcher = ComposedDispatcher([
        get_simple_dispatcher(None),
        get_log_dispatcher(log, {})])
    self.assertEqual(
        self.successResultOf(perform(dispatcher, bound_log_eff)),
        (response, "content"))
def test_filters_clb_types(self):
    """
    Only one CLB step is returned per CLB
    """
    add_step = AddNodesToCLB(
        lb_id='5',
        address_configs=s(
            ('1.1.1.1', CLBDescription(lb_id='5', port=80))))
    remove_step = RemoveNodesFromCLB(lb_id='5', node_ids=s('1'))
    # Unoptimizable step
    create_step = CreateServer(server_config=pmap({}))

    steps = pbag([add_step, remove_step, create_step])

    # returned steps could be pbag of any of the 2 lists below depending
    # on how `one_clb_step` iterates over the steps. Since it is pbag the
    # order of elements is not guaranteed
    possibility1 = pbag([add_step, create_step])
    possibility2 = pbag([remove_step, create_step])
    self.assertEqual(
        matches(MatchesAny(Equals(possibility1), Equals(possibility2))),
        optimize_steps(steps))
def test_history_with_one_page_pagination(self):
    """
    The history api endpoint returns the items from the audit log, and
    paginates them if there are ``limit`` items in the collection, with
    the marker being the last timestamp
    """
    expected = {
        'events': [{
            'event_type': 'event-abc',
            'timestamp': 1234567890,
            'policy_id': 'policy-xyz',
            'scaling_group_id': 'scaling-group-uvw',
            'server_id': 'server-rst',
            'message': 'audit log event',
        }],
        'events_links': [
            {'href': 'http://localhost/v1.0/101010/history?limit=1',
             'rel': 'self'},
            {'href': ('http://localhost/v1.0/101010/history'
                      '?limit=1&marker=1234567890'),
             'rel': 'next'}]
    }

    result = self.successResultOf(
        request(self.root, "GET", self.endpoint + "?limit=1"))
    self.assertEqual(200, result.response.code)
    self.assertEqual(expected, json.loads(result.content))

    self.treq.get.assert_called_once_with(
        'http://dummy/_search',
        data='{"tenant_id": 101010}',
        log=matches(IsInstance(self.log.__class__)))
    self.assertTrue(self.treq.json_content.called)
    self.make_auditlog_query.assert_called_once_with(
        '101010', 'ord', limit=1)
def test_pending_server_delete(self):
    """
    When a pending job is cancelled, it is deleted from the job list. When
    the server finishes building, then ``execute_launch_config`` is called
    to remove the job from pending job list. It then notices that pending
    job_id is not there in job list and calls ``execute_delete_server`` to
    delete the server.
    """
    self.supervisor.execute_delete_server.return_value = succeed(None)
    state = GroupState('tenant', 'group', 'name', {}, {'1': {}}, None,
                       {}, False)

    def fake_modify_state(callback, *args, **kwargs):
        callback(self.group, state, *args, **kwargs)

    self.group.modify_state.side_effect = fake_modify_state
    supervisor.execute_launch_config(self.log, '1', self.fake_state,
                                     'launch', self.group, 1)

    state.remove_job('1')
    self.execute_config_deferreds[0].callback({'id': 's1'})

    # first bind is system='otter.job.launch', second is job_id='1'
    self.del_job.assert_called_once_with(
        matches(IsInstance(self.log.__class__)), '1', self.group,
        {'id': 's1'}, self.supervisor)
    self.del_job.return_value.start.assert_called_once_with()
def test_filters_clb_types(self):
    """
    Only one CLB step is returned per CLB
    """
    steps = pbag([
        AddNodesToCLB(
            lb_id='5',
            address_configs=s(
                ('1.1.1.1', CLBDescription(lb_id='5', port=80)))),
        RemoveNodesFromCLB(lb_id='5', node_ids=s('1')),
        # Unoptimizable step
        CreateServer(server_config=pmap({})),
    ])

    # returned steps could be pbag of any of the 2 lists below depending
    # on how `one_clb_step` iterates over the steps. Since it is pbag the
    # order of elements is not guaranteed
    with_add = pbag([
        AddNodesToCLB(
            lb_id='5',
            address_configs=s(
                ('1.1.1.1', CLBDescription(lb_id='5', port=80)))),
        CreateServer(server_config=pmap({}))])
    with_remove = pbag([
        RemoveNodesFromCLB(lb_id='5', node_ids=s('1')),
        CreateServer(server_config=pmap({}))])

    self.assertEqual(
        matches(MatchesAny(Equals(with_add), Equals(with_remove))),
        optimize_steps(steps))
def test_failure_repr_in_short_message(self):
    """
    The observer includes the repr of failure.value in the ``message``
    key.

    NOTE(review): the previous docstring claimed the repr went into
    ``short_message``, but the assertion below checks ``message`` --
    the docstring was stale.
    """
    self.wrapper({'failure': Failure(ValueError()), 'isError': True})
    self.observer.assert_called_once_with(
        matches(ContainsDict({'message': Equals((repr(ValueError()),))})))
def test_empty_message(self):
    """
    Empty message in event is overwritten with failure message
    """
    event = {'message': (), 'isError': True,
             'failure': Failure(ValueError())}
    self.wrapper(event)
    self.observer.assert_called_once_with(
        matches(ContainsDict({'message': Equals(('ValueError()',))})))
def test_isError_with_message_instead_of_failure(self):
    """
    The observer should use message when there is no failure.
    """
    self.wrapper({'message': ('uh oh',), 'isError': True})
    expected = ContainsDict({'message': Equals(('uh oh',))})
    self.observer.assert_called_once_with(matches(expected))
def test_failure_include_traceback_in_event_dict(self):
    """
    The observer puts the traceback in the ``traceback`` key.
    """
    self.wrapper({'failure': Failure(ValueError()), 'isError': True})
    expected = ContainsDict({'traceback': Contains('Traceback')})
    self.observer.assert_called_once_with(matches(expected))
def test_isError_with_message_instead_of_failure(self):
    """
    The observer should use message when there is no failure.
    """
    event = {'message': ('uh oh', ), 'isError': True}
    self.wrapper(event)
    self.observer.assert_called_once_with(
        matches(ContainsDict({'message': Equals(('uh oh', ))})))
def test_includes_line(self):
    """
    The observer includes line if it is specified.
    """
    self.gelf({'line': 10, 'message': ''})
    expected = ContainsDict({'line': Equals(10)})
    self.observer.assert_called_once_with(matches(expected))
def test_merge_effectful_fields_log_and_context(self):
    """
    A log is returned with fields from both the passed-in log and the
    effectful context, with the latter taking precedence.
    """
    passed_log = self.log.bind(f1='v2', passed_log=True)
    merged = merge_effectful_fields(self.disp, passed_log)
    self.assertEqual(merged,
                     matches(IsBoundWith(passed_log=True, f1='v')))
def test_includes_structured_data(self):
    """
    The observer includes arbitrary structured data prefixed with an _.
    """
    self.gelf({'uri': 'http://example.com', 'message': 'hooray'})
    expected = ContainsDict({'_uri': Equals('http://example.com')})
    self.observer.assert_called_once_with(matches(expected))
def test_includes_file(self):
    """
    The observer includes file if it is specified.
    """
    self.gelf({'message': 'hello', 'file': 'test.py'})
    expected = ContainsDict({'file': Equals('test.py')})
    self.observer.assert_called_once_with(matches(expected))
def test_failure_include_traceback_in_full_message(self):
    """
    The observer puts the traceback in the full_message key.
    """
    self.gelf({'failure': Failure(ValueError()), 'isError': True})
    expected = ContainsDict({'full_message': Contains('Traceback')})
    self.observer.assert_called_once_with(matches(expected))
def test_wrapper_listens(self):
    """
    GraylogUDPPublisher does a listenUDP to hook up the publishing
    protocol.
    """
    protocol_matcher = matches(IsInstance(_GraylogProtocol))
    self.reactor.listenUDP.assert_called_once_with(0, protocol_matcher)
def _assert_delete_scheduled(self):
    """
    Assert that the server was scheduled for deletion.
    """
    bound_log = matches(
        IsBoundWith(server_id="s0", system="otter.job.delete"))
    self.assertEqual(
        self.supervisor.del_calls[-1],
        (bound_log, self.tid, self.group, {"id": "s0"}))
def test_execute_delete_auths(self):
    """
    ``execute_delete_server`` asks the provided authentication function
    for credentials for the tenant_id that owns the group.
    """
    self.supervisor.execute_delete_server(
        self.log, "transaction-id", self.group, self.fake_server)
    expected_log = matches(
        IsBoundWith(tenant_id=11111, server_id="server_id"))
    self.auth_function.assert_called_once_with(11111, log=expected_log)
def test_isError_sets_level_3(self):
    """
    The observer sets the level to 3 (syslog ERROR) when isError is true.
    """
    self.gelf({'failure': Failure(ValueError()), 'isError': True})
    expected = ContainsDict({'level': Equals(3)})
    self.observer.assert_called_once_with(matches(expected))
def test_calls_auth_function_with_empty_cache(self):
    """
    authenticate_tenant with no items in the cache returns the result of
    the auth_function passed to the authenticator.
    """
    auth_result = self.successResultOf(
        self.ca.authenticate_tenant(1, mock.Mock()))
    self.assertEqual(auth_result, ('auth-token', 'catalog'))
    self.auth_function.assert_called_once_with(
        1, log=matches(IsInstance(mock.Mock)))
def test_start_callbacks_with_job_id(self):
    """
    The deferred returned by start callbacks immediately with just the job
    ID, without waiting for the `completion_deferred` to fire, and the log
    is bound
    """
    started = self.job.start('launch')
    self.assertEqual(self.successResultOf(started), self.job_id)
    self.assertEqual(self.job.log,
                     matches(IsInstance(self.log.__class__)))
def test_execute_config_auths(self):
    """
    execute_config asks the provided authentication function for
    credentials for the tenant_id that owns the group.
    """
    self.supervisor.execute_config(
        self.log, "transaction-id", self.group, self.launch_config)
    expected_log = matches(
        IsBoundWith(tenant_id=11111, worker="launch_server"))
    self.auth_function.assert_called_once_with(11111, log=expected_log)
def test_make_service(self, mock_ms):
    """
    MetricsService is returned with config
    """
    config = {"a": "v"}
    service = makeService(config)
    self.assertIs(service, mock_ms.return_value)
    from otter.metrics import metrics_log
    mock_ms.assert_called_once_with(
        matches(IsInstance(ReactorBase)), config, metrics_log)
def test_make_service(self, mock_ms):
    """
    MetricsService is returned with config
    """
    conf = {'a': 'v'}
    svc = makeService(conf)
    self.assertIs(svc, mock_ms.return_value)
    from otter.metrics import metrics_log
    mock_ms.assert_called_once_with(
        matches(IsInstance(ReactorBase)), conf, metrics_log)
def test_empty_message(self):
    """
    Empty message in event is overwritten with failure message
    """
    self.wrapper({'message': (),
                  'isError': True,
                  'failure': Failure(ValueError())})
    expected = ContainsDict({'message': Equals(('ValueError()', ))})
    self.observer.assert_called_once_with(matches(expected))
def test_collect_called_again(self):
    """
    `self.collect` is called again based on interval given in config
    """
    service = self._service()
    service.startService()
    self.assertTrue(service.running)
    self.collect.assert_called_once_with(
        'r', self.config, self.log, client=self.client,
        authenticator=matches(Provides(IAuthenticator)))
    self.clock.advance(20)
    self.assertEqual(len(self.collect.mock_calls), 2)
    self.collect.assert_called_with(
        'r', self.config, self.log, client=self.client,
        authenticator=matches(Provides(IAuthenticator)))
def test_formatting_failure(self):
    """
    PEP3101FormattingWrapper should fall back to using the unformatted
    message and include an 'exception_formatting_message' key.
    """
    self.wrapper({'message': ('{u"Hello": "There"}', )})
    expected_event = {
        'message': '{u"Hello": "There"}',
        'message_formatting_error': matches(Contains('KeyError')),
    }
    self.observer.assert_called_once_with(expected_event)
def test_uses_step_request(self):
    """Steps are converted to requests."""
    steps = [
        TestStep(Effect(Constant((StepResult.SUCCESS, 'foo')))),
        TestStep(Effect(Error(RuntimeError('uh oh'))))]
    effect = steps_to_effect(steps)
    self.assertIs(type(effect.intent), ParallelEffects)
    expected_exc_info = matches(MatchesException(RuntimeError('uh oh')))
    expected_results = [
        (StepResult.SUCCESS, 'foo'),
        (StepResult.RETRY,
         [ErrorReason.Exception(expected_exc_info)])]
    self.assertEqual(sync_perform(test_dispatcher(), effect),
                     expected_results)
def test_delete_group_log_context(self):
    """
    When run in an effectful log context, the fields are bound to the log
    passed to get_scaling_group.
    """
    self.group.delete_group.return_value = succeed('del')
    lookup = (matches(IsBoundWith(base_log=True, effectful=True)),
              '00', 'g1')
    result = self.perform_with_group(
        Effect(DeleteGroup(tenant_id='00', group_id='g1')),
        lookup, self.group,
        fallback_dispatcher=get_log_dispatcher(self.log,
                                               {'effectful': True}))
    self.assertEqual(result, 'del')
def test_isError_includes_why_in_short_message(self):
    """
    The observer includes 'why' in the short_message when isError is true.
    """
    self.gelf({'failure': Failure(ValueError()),
               'isError': True,
               'why': 'Everything is terrible.'})
    expected = ContainsDict(
        {'short_message': Contains('Everything is terrible.')})
    self.observer.assert_called_once_with(matches(expected))
def test_details(self):
    """
    If exception is serializable, then it is serialized and logged as
    "error_details"
    """
    class MyException(Exception):
        pass

    @serialize_to_jsonable.register(MyException)
    def _(excp):
        return 'mine'

    error = MyException('heh')
    self.wrapper(
        {'message': (), 'isError': True, 'failure': Failure(error)})
    self.observer.assert_called_once_with(
        matches(ContainsDict({'error_details': Equals('mine')})))
def _assert_create_server_with_errs_has_status(self, exceptions, status):
    """
    Helper function to make a :class:`CreateServer` effect, and resolve
    it with the provided exceptions, asserting that the result is the
    provided status, with the reason being the exception.
    """
    eff = CreateServer(
        server_config=freeze({'server': {'flavorRef': '1'}})).as_effect()
    eff = resolve_effect(eff, 'random-name')
    for exc in exceptions:
        expected_reason = ErrorReason.Exception(
            matches(ContainsAll([type(exc), exc])))
        self.assertEqual(
            resolve_effect(eff, service_request_error_response(exc),
                           is_error=True),
            (status, [expected_reason]))
def test_check_failure(self):
    """
    If a check raises an exception, its health is unhealthy
    """
    checker = HealthChecker(
        self.clock, {'fail': mock.Mock(side_effect=Exception)})
    result = checker.health_check()
    expected = {
        'healthy': False,
        'fail': {
            'healthy': False,
            'details': {'reason': matches(Contains('Exception'))},
        },
    }
    self.assertEqual(self.successResultOf(result), expected)
def test_remove_nodes_from_clb_terminal_failures(self):
    """
    :obj:`RemoveNodesFromCLB` fails if there are any 4xx errors, then
    the error is propagated up and the result is a failure.

    (Docstring previously named :obj:`AddNodesToCLB` -- a copy-paste
    error; the step under test here is :obj:`RemoveNodesFromCLB`.)
    """
    terminals = (APIError(code=403, body="You're out of luck."),
                 APIError(code=422, body="Oh look another 422."))
    eff = RemoveNodesFromCLB(lb_id='12345',
                             node_ids=pset(['1', '2'])).as_effect()
    for exc in terminals:
        seq = SequenceDispatcher([(eff.intent, lambda i: raise_(exc))])
        with seq.consume():
            self.assertEquals(
                sync_perform(seq, eff),
                (StepResult.FAILURE, [ErrorReason.Exception(
                    matches(ContainsAll([type(exc), exc])))]))
def test_get_scaling_group_info_log_context(self):
    """
    When run in an effectful log context, the fields are bound to the log
    passed to delete_group.
    """
    manifest = {}

    def view_manifest(with_policies, with_webhooks, get_deleting):
        return manifest

    self.group.view_manifest.side_effect = view_manifest
    info_eff = Effect(GetScalingGroupInfo(tenant_id='00', group_id='g1'))
    lookup = (matches(IsBoundWith(base_log=True, effectful=True)),
              '00', 'g1')
    result = self.perform_with_group(
        info_eff, lookup, self.group,
        fallback_dispatcher=get_log_dispatcher(self.log,
                                               {'effectful': True}))
    self.assertEqual(result, (self.group, manifest))
def test_remove_nodes_from_clb_non_terminal_failures_to_retry(self):
    """
    :obj:`RemoveNodesFromCLB` retries if the CLB is temporarily locked,
    or if the request was rate-limited, or if there was an API error and
    the error is unknown but not a 4xx.
    """
    non_terminals = (CLBImmutableError(lb_id=u"12345"),
                     CLBRateLimitError(lb_id=u"12345"),
                     APIError(code=500, body="oops!"),
                     TypeError("You did something wrong in your code."))
    eff = RemoveNodesFromCLB(lb_id='12345',
                             node_ids=pset(['1', '2'])).as_effect()
    for exc in non_terminals:
        dispatcher = SequenceDispatcher(
            [(eff.intent, lambda i: raise_(exc))])
        expected_reason = ErrorReason.Exception(
            matches(ContainsAll([type(exc), exc])))
        with dispatcher.consume():
            self.assertEquals(sync_perform(dispatcher, eff),
                              (StepResult.RETRY, [expected_reason]))
def test_add_nodes_to_clb_terminal_failures(self):
    """
    :obj:`AddNodesToCLB` fails if the CLB is not found or deleted, or if
    there is any other 4xx error, then the error is propagated up and the
    result is a failure.
    """
    terminals = (CLBNotFoundError(lb_id=u"12345"),
                 CLBDeletedError(lb_id=u"12345"),
                 NoSuchCLBError(lb_id=u"12345"),
                 CLBNodeLimitError(lb_id=u"12345", node_limit=25),
                 APIError(code=403, body="You're out of luck."),
                 APIError(code=422, body="Oh look another 422."))
    eff = self._add_one_node_to_clb()
    for exc in terminals:
        dispatcher = SequenceDispatcher(
            [(eff.intent, lambda i: raise_(exc))])
        expected_reason = ErrorReason.Exception(
            matches(ContainsAll([type(exc), exc])))
        with dispatcher.consume():
            self.assertEquals(sync_perform(dispatcher, eff),
                              (StepResult.FAILURE, [expected_reason]))
def test_setup_still_active(self):
    """
    If there are scheduled calls when perform is called, they are
    cancelled and err is logged. Future calls are scheduled as usual
    """
    self.clock.advance(-0.6)
    call1 = self.clock.callLater(1, noop, None)
    call2 = self.clock.callLater(0, noop, None)
    call3 = self.clock.callLater(2, noop, None)
    self.clock.advance(0.6)
    self.s._calls = [call1, call2, call3]
    self.s.dispatcher = SequenceDispatcher(
        [(("ggtc", "cf"), const(self.groups))])
    self.successResultOf(self.s.setup())
    self.log.err.assert_called_once_with(
        matches(IsInstance(RuntimeError)), "selfheal-calls-err",
        active=2, otter_service="selfheal")
    self.assertFalse(call1.active())
    self.assertFalse(call2.active())