def test_set_logger(self):
    """
    `set_logger` swaps in a replacement logger: subsequent audit
    messages go only to the new logger, never the original.
    """
    log_a, log_b = mock_log(), mock_log()
    auditor = AuditLogger(log_a)
    auditor.set_logger(log_b)
    auditor.audit('this is the audit log message')
    log_b.msg.assert_called_once_with('this is the audit log message',
                                      audit_log=True)
    self.assertFalse(log_a.msg.called)
def setUp(self):
    """
    Mock every dependency of SchedulingService: logging, the store's
    fetch_and_delete, the TxKazooClient machinery and
    check_events_in_bucket.
    """
    super(SchedulerServiceTests, self).setUp()
    otter_log = patch(self, "otter.scheduler.otter_log")
    self.log = mock_log()
    otter_log.bind.return_value = self.log

    def pfactory(plog, cb):
        # Capture the partitioner built by the service for assertions.
        self.fake_partitioner = FakePartitioner(plog, cb)
        return self.fake_partitioner

    self.scheduler_service = SchedulerService(
        "disp", 100, self.mock_store, pfactory, threshold=600)
    otter_log.bind.assert_called_once_with(system="otter.scheduler")
    self.scheduler_service.running = True
    self.assertIdentical(self.fake_partitioner,
                         self.scheduler_service.partitioner)
    self.check_events_in_bucket = patch(
        self, "otter.scheduler.check_events_in_bucket")
    self.returns = []
    self.setup_func(self.mock_store.get_oldest_event)
def setUp(self):
    """
    Mock SchedulingService's dependencies — logging, the store's
    fetch_and_delete, TxKazooClient, TimerService and
    check_events_in_bucket — and use twisted.internet.task.Clock to
    simulate time.
    """
    super(SchedulerServiceTests, self).setUp()

    otter_log = patch(self, 'otter.scheduler.otter_log')
    self.log = mock_log()
    otter_log.bind.return_value = self.log

    self.kz_client = mock.Mock(spec=['SetPartitioner'])
    self.kz_partition = mock.MagicMock(
        allocating=False, release=False, failed=False, acquired=False)
    self.kz_client.SetPartitioner.return_value = self.kz_partition
    self.zk_partition_path = '/part_path'
    self.time_boundary = 15
    self.buckets = range(1, 10)

    self.clock = Clock()
    self.scheduler_service = SchedulerService(
        100, 1, self.mock_store, self.kz_client, self.zk_partition_path,
        self.time_boundary, self.buckets, self.clock, threshold=600)
    otter_log.bind.assert_called_once_with(system='otter.scheduler')

    self.timer_service = patch(self, 'otter.scheduler.TimerService')
    self.check_events_in_bucket = patch(
        self, 'otter.scheduler.check_events_in_bucket')
    self.returns = []
    self.setup_func(self.mock_store.get_oldest_event)
def get_groups(parsed, store, conf):
    """
    Return groups based on argument provided

    :param Namespace parsed: arguments parsed
    :param store: Otter scaling group collection
    :param dict conf: config

    :return: Deferred fired with list of {"tenantId": .., "groupId": ..} dict
    """
    log = mock_log()
    if parsed.group:
        groups = [g.split(":") for g in parsed.group]
        return succeed([{
            "tenantId": tid,
            "groupId": gid
        } for tid, gid in groups])
    elif parsed.all:
        d = store.get_all_valid_groups()
    elif parsed.tenant_id:
        d = get_groups_of_tenants(log, store, parsed.tenant_id)
    elif parsed.disabled_tenants:
        non_conv_tenants = set(conf["non-convergence-tenants"])
        d = store.get_all_valid_groups()
        # BUG FIX: `filter` was previously called with only the
        # predicate (filter(pred) is a TypeError); curry the predicate
        # over the deferred's result instead.
        d.addCallback(
            lambda groups: filter(
                lambda g: g["tenantId"] not in non_conv_tenants, groups))
        d.addCallback(list)
    elif parsed.conf_conv_tenants:
        d = get_groups_of_tenants(log, store, conf["convergence-tenants"])
    else:
        raise SystemExit("Unexpected group selection")
    return d
def setUp(self):
    """
    Configure test resources: group, authenticator, fake nova response,
    cooperator and the SupervisorService under test.
    """
    self.log = mock_log()
    self.group = iMock(IScalingGroup)
    self.group.tenant_id = 11111
    self.group.uuid = "group-id"
    self.region = "ORD"

    self.auth_tokens = ("auth-token1", "auth-token2", "auth-token3")
    self._auth_token_queue = list(self.auth_tokens)
    self.service_catalog = {}
    self.authenticator = iMock(IAuthenticator)
    self.auth_function = self.authenticator.authenticate_tenant
    # Each authentication pops the next token off the queue.
    self.auth_function.side_effect = (
        lambda *a, **kw: succeed((self._auth_token_queue.pop(0),
                                  self.service_catalog)))

    self.fake_server_details = {
        "server": {"id": "server_id", "links": ["links"], "name": "meh",
                   "metadata": {}}}

    self.cooperator = mock.Mock(spec=Cooperator)
    self.service_mapping = {
        ServiceType.CLOUD_SERVERS: "SUPERVISOR_CS",
        ServiceType.CLOUD_LOAD_BALANCERS: "SUPERVISOR_CLB",
        ServiceType.RACKCONNECT_V3: "SUPERVISOR_RCV3",
    }

    self.supervisor = SupervisorService(
        self.authenticator, self.region, self.cooperator.coiterate,
        self.service_mapping)

    self.InMemoryUndoStack = patch(self,
                                   "otter.supervisor.InMemoryUndoStack")
    self.undo = self.InMemoryUndoStack.return_value
    self.undo.rewind.return_value = succeed(None)
def test_error_per_tenant(self):
    """
    A failed servers request makes the corresponding effect resolve to
    None and logs the error; other tenants are unaffected.
    """
    log = mock_log()
    log.err.return_value = None
    groups = {
        "t1": [{'tenantId': 't1', 'groupId': 'g1', 'desired': 0}],
        "t2": [{'tenantId': 't2', 'groupId': 'g2', 'desired': 0}]}
    effs = get_all_metrics_effects(groups, log)

    results = []
    for eff in effs:
        if eff.intent.tenant_id == 't1':
            results.append(resolve_effect(eff, {}))
        elif eff.intent.tenant_id == 't2':
            exc_info = (ZeroDivisionError, ZeroDivisionError('foo bar'),
                        None)
            results.append(resolve_effect(eff, exc_info, is_error=True))

    self.assertEqual(
        results,
        [None, [GroupMetrics('t1', 'g1', desired=0, actual=0, pending=0)]])
    log.err.assert_called_once_with(
        CheckFailureValue(ZeroDivisionError('foo bar')))
def setUp(self):
    """
    Mock a fake supervisor, and also a fake log and group.
    """
    self.transaction_id = 'transaction_id'
    self.job_id = 'job_id'
    # NOTE: a dead `self.log = mock.MagicMock()` assignment was removed
    # here — it was unconditionally overwritten by mock_log() below
    # before any use.
    self.group = iMock(IScalingGroup, tenant_id='tenant', uuid='group')
    self.state = None
    self.supervisor = iMock(ISupervisor)

    self.completion_deferred = Deferred()
    self.supervisor.execute_config.return_value = succeed(
        (self.job_id, self.completion_deferred))

    def fake_modify_state(f, *args, **kwargs):
        return maybeDeferred(
            f, self.group, self.state, *args, **kwargs)

    self.group.modify_state.side_effect = fake_modify_state

    self.log = mock_log()
    self.job = supervisor._Job(self.log, self.transaction_id, self.group,
                               self.supervisor)

    self.del_job = patch(self, 'otter.supervisor._DeleteJob')
    self.mock_launch = {'type': 'launch_server',
                        'args': {'server': {'imageRef': 'imageID',
                                            'flavorRef': '1'}}}
def setUp(self):
    """
    Mock a fake supervisor plus a fake log and group.
    """
    self.transaction_id = 'transaction_id'
    self.job_id = 'job_id'
    patch(self, 'otter.supervisor.generate_job_id',
          return_value=self.job_id)
    self.state = GroupState('tenant', 'group', 'name', {}, {}, None, {},
                            False, ScalingGroupStatus.ACTIVE)
    self.group = mock_group(self.state, 'tenant', 'group')

    self.supervisor = iMock(ISupervisor)
    self.supervisor.deferred_pool = DeferredPool()
    self.completion_deferred = Deferred()
    self.supervisor.execute_config.return_value = self.completion_deferred

    self.log = mock_log()
    self.job = supervisor._Job(self.log, self.transaction_id, self.group,
                               self.supervisor)
    self.del_job = patch(self, 'otter.supervisor._DeleteJob')
    self.mock_launch = {'type': 'launch_server',
                        'args': {'server': {'imageRef': 'imageID',
                                            'flavorRef': '1'}}}
def _test_setup(self, config, interval):
    """
    setup_selfheal_service wraps the SelfHeal function with locking and
    logging and reschedules it via TimerService at the given interval,
    derived from config.
    """
    clock = Clock()
    log = mock_log()
    health_checker = HealthChecker("clock", {})
    from otter.tap.api import zk
    from otter.util.config import config_value
    selfheal = SelfHeal(clock, base_dispatcher, config_value, interval,
                        log)
    self.patch(
        zk, "locked_logged_func",
        exp_func(self, ("func", "lock"), base_dispatcher,
                 "/selfheallock", log, "selfheal-lock-acquired",
                 selfheal.setup))
    self.patch(zk, "create_health_check",
               exp_func(self, "hc_func", "lock"))

    svc = setup_selfheal_service(clock, config, base_dispatcher,
                                 health_checker, log)

    self.assertIsInstance(svc, TimerService)
    self.assertEqual(svc.call, ("func", (), {}))
    self.assertIs(svc.clock, clock)
    self.assertIs(health_checker.checks["selfheal"], "hc_func")
def test_get_all_metrics(self):
    """
    Metrics are computed from the server-info requests made per tenant.
    """
    # Could use a parameterized "get_all_scaling_group_servers" call to
    # avoid stubbing the nova responses, but this works fine.
    servers_t1 = {
        'g1': ([_server('g1', 'ACTIVE')] * 3 +
               [_server('g1', 'BUILD')] * 2),
        'g2': [_server('g2', 'ACTIVE')]}
    servers_t2 = {'g4': [_server('g4', 'ACTIVE'),
                         _server('g4', 'BUILD')]}
    groups = {
        "t1": [{'tenantId': 't1', 'groupId': 'g1', 'desired': 3},
               {'tenantId': 't1', 'groupId': 'g2', 'desired': 4}],
        "t2": [{'tenantId': 't2', 'groupId': 'g4', 'desired': 2}]}
    tenant_servers = {'t1': servers_t1, 't2': servers_t2}

    effs = get_all_metrics_effects(groups, mock_log())

    # Every effect is wrapped in a TenantScope indicating the tenant of
    # the ServiceRequests made under it; that tenant selects the
    # stubbed get_all_scaling_group_servers result.
    results = [resolve_effect(eff, tenant_servers[eff.intent.tenant_id])
               for eff in effs]

    self.assertEqual(
        set(reduce(operator.add, results)),
        set([GroupMetrics('t1', 'g1', desired=3, actual=3, pending=2),
             GroupMetrics('t1', 'g2', desired=4, actual=1, pending=0),
             GroupMetrics('t2', 'g4', desired=2, actual=1, pending=1)]))
def _test_setup(self, config, interval):
    """
    The SelfHeal function, wrapped with locking and logging, is
    scheduled via TimerService on the interval derived from config.
    """
    clock = Clock()
    log = mock_log()
    health_checker = HealthChecker("clock", {})
    from otter.tap.api import zk
    from otter.util.config import config_value
    selfheal = SelfHeal(clock, base_dispatcher, config_value, interval,
                        log)
    self.patch(
        zk, "locked_logged_func",
        exp_func(self, ("func", "lock"), base_dispatcher,
                 "/selfheallock", log, "selfheal-lock-acquired",
                 selfheal.setup))
    self.patch(zk, "create_health_check",
               exp_func(self, "hc_func", "lock"))

    service = setup_selfheal_service(
        clock, config, base_dispatcher, health_checker, log)

    self.assertIsInstance(service, TimerService)
    self.assertEqual(service.call, ("func", (), {}))
    self.assertIs(service.clock, clock)
    self.assertIs(health_checker.checks["selfheal"], "hc_func")
def setUp(self):
    """
    Build a clock, a mock log, and a dispatcher combining msg-time and
    log handling.
    """
    self.clock = Clock()
    self.log = mock_log()
    self.disp = ComposedDispatcher([
        get_msg_time_dispatcher(self.clock),
        get_log_dispatcher(self.log, {}),
    ])
def test_merge_effectful_fields_no_context(self):
    """
    Without an effectful context, the given log comes back unmodified.
    """
    log = mock_log()
    merged = merge_effectful_fields(base_dispatcher, log)
    self.assertIs(merged, log)
def test_added(self):
    """
    Total desired, pending and actual are pushed to cloud metrics.
    """
    metrics = [GroupMetrics('t1', 'g1', 3, 2, 0),
               GroupMetrics('t2', 'g1', 4, 4, 1),
               GroupMetrics('t2', 'g', 100, 20, 0)]
    base = {'collectionTime': 100000, 'ttlInSeconds': 5 * 24 * 60 * 60}
    md = merge(base, {'metricValue': 107, 'metricName': 'ord.desired'})
    ma = merge(base, {'metricValue': 26, 'metricName': 'ord.actual'})
    mp = merge(base, {'metricValue': 1, 'metricName': 'ord.pending'})
    mt = merge(base, {'metricValue': 2, 'metricName': 'ord.tenants'})
    mg = merge(base, {'metricValue': 3, 'metricName': 'ord.groups'})
    mt1d = merge(base, {'metricValue': 3, 'metricName': 'ord.t1.desired'})
    mt1a = merge(base, {'metricValue': 2, 'metricName': 'ord.t1.actual'})
    mt1p = merge(base, {'metricValue': 0, 'metricName': 'ord.t1.pending'})
    mt2d = merge(base,
                 {'metricValue': 104, 'metricName': 'ord.t2.desired'})
    mt2a = merge(base, {'metricValue': 24, 'metricName': 'ord.t2.actual'})
    mt2p = merge(base, {'metricValue': 1, 'metricName': 'ord.t2.pending'})
    req_data = [md, ma, mp, mt, mg, mt1d, mt1a, mt1p, mt2d, mt2a, mt2p]
    log = mock_log()
    seq = [
        (Func(time.time), const(100)),
        (service_request(
            ServiceType.CLOUD_METRICS_INGEST, "POST", "ingest",
            data=req_data, log=log).intent, noop)
    ]
    eff = add_to_cloud_metrics(base['ttlInSeconds'], 'ord', metrics, 2,
                               log)
    self.assertIsNone(perform_sequence(seq, eff))
    log.msg.assert_called_once_with(
        'total desired: {td}, total_actual: {ta}, total pending: {tp}',
        td=107, ta=26, tp=1)
def test_log_none_effectful_fields(self):
    """
    When no log is passed but BoundFields supply log fields, the log
    given to treq carries those fields.
    """
    log = mock_log()
    # system='otter' is expected because the code falls back to
    # otter.log.log, which has the system key bound.
    expected_log = matches(IsBoundWith(bound='stuff', system='otter'))
    expected_req = ('GET', 'http://google.com/', None, None, None,
                    {'log': expected_log})
    response = StubResponse(200, {})
    treq = StubTreq(reqs=[(expected_req, response)],
                    contents=[(response, "content")])
    req = Request(method="get", url="http://google.com/")
    req.treq = treq
    bound_log_eff = with_log(Effect(req), bound='stuff')
    dispatcher = ComposedDispatcher(
        [get_simple_dispatcher(None), get_log_dispatcher(log, {})])
    self.assertEqual(
        self.successResultOf(perform(dispatcher, bound_log_eff)),
        (response, "content"))
def setUp(self):
    """
    Set up a fake supervisor and a fake job class.
    """
    self.supervisor = FakeSupervisor()
    set_supervisor(self.supervisor)
    self.addCleanup(set_supervisor, None)
    self.log = mock_log()
    self.jobs = []

    class FakeJob(object):
        # `jself` is the job instance; `self` is the enclosing test.
        def __init__(jself, *args):
            jself.args = args
            jself.job_id = len(self.jobs) + 10
            self.jobs.append(jself)

        def start(jself, launch):
            jself.launch = launch
            jself.d = Deferred()
            return jself.d

    patch(self, 'otter.supervisor._Job', new=FakeJob)
    self.state = GroupState('t', 'g', 'n', {}, {}, *range(3))
def setUp(self):
    """Set an elastic search config var and stub rest dependencies."""
    super(OtterHistoryTestCase, self).setUp()
    self.root = Otter(None, 'ord', es_host='http://dummy').app.resource()

    set_config_data({
        'limits': {'pagination': 20}, 'url_root': 'http://localhost'})
    self.addCleanup(set_config_data, {})

    self.log = patch(self, 'otter.rest.history.log', new=mock_log())
    self.make_auditlog_query = patch(
        self, 'otter.rest.history.make_auditlog_query',
        return_value={'tenant_id': 101010})
    self.treq = patch(self, 'otter.rest.history.treq', new=mock_treq(
        code=200, method='get', json_content={
            'hits': {
                'hits': [{
                    '_source': {
                        'message': 'audit log event',
                        'event_type': 'event-abc',
                        '@timestamp': 1234567890,
                        'policy_id': 'policy-xyz',
                        'scaling_group_id': 'scaling-group-uvw',
                        'server_id': 'server-rst',
                        'throwaway_key': 'ignore me!!!!'}}]}}))
def get_groups(parsed, store, conf):
    """
    Return groups based on argument provided

    :param Namespace parsed: arguments parsed
    :param store: Otter scaling group collection
    :param dict conf: config

    :return: Deferred fired with list of {"tenantId": .., "groupId": ..} dict
    """
    log = mock_log()
    if parsed.group:
        groups = [g.split(":") for g in parsed.group]
        return succeed(
            [{"tenantId": tid, "groupId": gid} for tid, gid in groups])
    elif parsed.all:
        d = store.get_all_valid_groups()
    elif parsed.tenant_id:
        d = get_groups_of_tenants(log, store, parsed.tenant_id)
    elif parsed.disabled_tenants:
        non_conv_tenants = set(conf["non-convergence-tenants"])
        d = store.get_all_valid_groups()
        # BUG FIX: `filter` was previously called with only the
        # predicate (filter(pred) is a TypeError); curry the predicate
        # over the deferred's result instead.
        d.addCallback(
            lambda groups: filter(
                lambda g: g["tenantId"] not in non_conv_tenants, groups))
        d.addCallback(list)
    elif parsed.conf_conv_tenants:
        d = get_groups_of_tenants(log, store, conf["convergence-tenants"])
    else:
        raise SystemExit("Unexpected group selection")
    return d
def test_update_stack(self):
    """
    update_stack PUTs data to the stack endpoint and returns the parsed
    JSON result.
    """
    log = mock_log()
    treq = self._treq(code=202, method='put',
                      json_content={'hello': 'world'})
    client = HeatClient('my-auth-token', log, treq)
    result = client.update_stack('http://heat-url/my-stack',
                                 {'p1': 'v1'}, 60, 'my template')
    treq.put.assert_called_once_with(
        'http://heat-url/my-stack',
        headers={'x-auth-token': ['my-auth-token'],
                 'content-type': ['application/json'],
                 'accept': ['application/json'],
                 'User-Agent': ['OtterScale/0.0']},
        data=json.dumps({'parameters': {'p1': 'v1'},
                         'timeout_mins': 60,
                         'template': 'my template'}),
        log=mock.ANY)
    self.assertEqual(self.successResultOf(result), {'hello': 'world'})
    self._assert_bound(log, treq.put.mock_calls[-1][2]['log'],
                       system='heatclient', event='update-stack')
def setUp(self):
    """
    Set up a fake supervisor and a fake job class.
    """
    self.supervisor = FakeSupervisor()
    set_supervisor(self.supervisor)
    self.addCleanup(set_supervisor, None)
    self.log = mock_log()
    self.jobs = []

    class FakeJob(object):
        # `jself` is the job instance; `self` is the enclosing test.
        def __init__(jself, *args):
            jself.args = args
            jself.job_id = len(self.jobs) + 10
            self.jobs.append(jself)

        def start(jself, launch):
            jself.launch = launch
            jself.d = Deferred()
            return jself.d

    patch(self, "otter.supervisor._Job", new=FakeJob)
    self.state = GroupState("t", "g", "n", {}, {}, 0, 1, 2,
                            ScalingGroupStatus.ACTIVE)
def test_job_completed(self):
    """
    `_job_completed` audit-logs a successful deletion.
    """
    log = self.job.log = mock_log()
    self.job._job_completed("ignore")
    log.msg.assert_called_with("Server deleted.", audit_log=True,
                               event_type="server.delete")
def setUp(self):
    """
    Mock every dependency of SchedulingService: logging, the store's
    fetch_and_delete, the TxKazooClient machinery and
    check_events_in_bucket.
    """
    super(SchedulerServiceTests, self).setUp()
    otter_log = patch(self, 'otter.scheduler.otter_log')
    self.log = mock_log()
    otter_log.bind.return_value = self.log

    def pfactory(plog, cb):
        # Capture the partitioner built by the service for assertions.
        self.fake_partitioner = FakePartitioner(plog, cb)
        return self.fake_partitioner

    self.scheduler_service = SchedulerService(
        "disp", 100, self.mock_store, pfactory, threshold=600)
    otter_log.bind.assert_called_once_with(system='otter.scheduler')
    self.scheduler_service.running = True
    self.assertIdentical(self.fake_partitioner,
                         self.scheduler_service.partitioner)
    self.check_events_in_bucket = patch(
        self, 'otter.scheduler.check_events_in_bucket')
    self.returns = []
    self.setup_func(self.mock_store.get_oldest_event)
def setUp(self):
    """
    Build a sample group, state and dispatcher base log.
    """
    self.log = mock_log().bind(base_log=True)
    self.state = GroupState('tid', 'gid', 'g', {}, {}, None, {}, True,
                            ScalingGroupStatus.ACTIVE)
    self.group = mock_group(self.state)
def test_get_stack(self):
    """get_stack performs a GET on the given stack URL."""
    log = mock_log()
    treq = self._treq(code=200, method='get',
                      json_content={'hello': 'world'})
    client = HeatClient('my-auth-token', log, treq)
    result = client.get_stack('http://heat-url/my-stack')
    self.assertEqual(self.successResultOf(result), {'hello': 'world'})
def setUp(self):
    """
    Mock the reactor, log and wrapped method.
    """
    self.lock = LockMixin().mock_lock()
    self.method = mock.Mock(return_value=succeed('result'))
    self.reactor = Clock()
    self.log = mock_log()
def set_desired_to_actual(groups, reactor, store, cass_client,
                          authenticator, conf):
    """Set desired to actual for every group, gathered in parallel."""
    dispatcher = get_full_dispatcher(
        reactor, authenticator, mock_log(), get_service_configs(conf),
        "kzclient", store, "supervisor", cass_client)
    return gatherResults(
        map(partial(set_desired_to_actual_group, dispatcher, cass_client),
            groups))
def setUp(self):
    """
    Mock treq for both validate_config and otter.util.http.
    """
    self.log = mock_log()
    self.treq = patch(self, "otter.worker.validate_config.treq",
                      new=mock_treq(code=200, method="get"))
    patch(self, "otter.util.http.treq", new=self.treq)
    self.headers = {"content-type": ["application/json"],
                    "accept": ["application/json"]}
def setUp(self):
    """
    Mock store.add_cron_events and next_cron_occurrence.
    """
    super(AddCronEventsTests, self).setUp()
    self.mock_store.add_cron_events.return_value = defer.succeed(None)
    self.next_cron_occurrence = patch(
        self, "otter.scheduler.next_cron_occurrence",
        return_value="next")
    self.log = mock_log()
def setUp(self):
    """
    Mock SchedulingService's dependencies: the cass store (its
    fetch/delete event methods) and the scaling group that the
    controller executes policies against
    (controller.maybe_execute_scaling_policy);
    twisted.internet.task.Clock simulates time.
    """
    self.mock_store = iMock(IScalingGroupCollection,
                            IScalingScheduleCollection)
    self.mock_group = iMock(IScalingGroup)
    self.mock_store.get_scaling_group.return_value = self.mock_group

    self.returns = [None]

    def _responses(*args):
        result = self.returns.pop(0)
        if isinstance(result, Exception):
            return defer.fail(result)
        return defer.succeed(result)

    self.mock_store.fetch_batch_of_events.side_effect = _responses
    self.mock_store.update_delete_events.return_value = (
        defer.succeed(None))

    self.mock_generate_transaction_id = patch(
        self, "otter.scheduler.generate_transaction_id",
        return_value="transaction-id")

    # Mock out modify state; spec=[] so nothing can call the state.
    self.mock_state = mock.MagicMock(spec=[])

    def _mock_modify_state(modifier, *args, **kwargs):
        modifier(self.mock_group, self.mock_state, *args, **kwargs)
        return defer.succeed(None)

    self.mock_group.modify_state.side_effect = _mock_modify_state

    self.maybe_exec_policy = patch(
        self, "otter.scheduler.maybe_execute_scaling_policy")

    def _mock_with_lock(lock, func, *args, **kwargs):
        return defer.maybeDeferred(func, *args, **kwargs)

    self.mock_lock = patch(self, "otter.scheduler.BasicLock")
    self.mock_with_lock = patch(self, "otter.scheduler.with_lock")
    self.mock_with_lock.side_effect = _mock_with_lock
    self.slv_client = mock.MagicMock()

    otter_log = patch(self, "otter.scheduler.otter_log")
    self.log = mock_log()
    otter_log.bind.return_value = self.log

    self.clock = Clock()
    self.scheduler_service = SchedulerService(
        100, 1, self.slv_client, self.mock_store, self.clock)
    otter_log.bind.assert_called_once_with(system="otter.scheduler")

    self.next_cron_occurrence = patch(
        self, "otter.scheduler.next_cron_occurrence")
    self.next_cron_occurrence.return_value = "newtrigger"
def test_logs_the_message(self):
    """
    Whatever message is passed to 'audit' is logged as the message.
    """
    log = mock_log()
    auditor = AuditLogger(log)
    auditor.audit('this is the audit log message')
    log.msg.assert_called_once_with('this is the audit log message',
                                    audit_log=True)
def test_get_stack_error(self):
    """Non-200 codes from getting a stack are treated as an APIError."""
    log = mock_log()
    treq = self._treq(code=201, method='get',
                      json_content={'hello': 'world'})
    client = HeatClient('my-auth-token', log, treq)
    result = client.get_stack('http://heat-url/my-stack')
    self.failureResultOf(result).trap(APIError)
def groups_steps(groups, reactor, store, cass_client, authenticator,
                 conf):
    """
    Return [(group, steps)] list
    """
    eff = parallel(map(group_steps, groups))
    dispatcher = get_full_dispatcher(
        reactor, authenticator, mock_log(), get_service_configs(conf),
        "kzclient", store, "supervisor", cass_client)
    return perform(dispatcher, eff).addCallback(
        lambda steps: zip(groups, steps))
def setUp(self):
    """
    Mock store.add_cron_events and next_cron_occurrence.
    """
    super(AddCronEventsTests, self).setUp()
    self.mock_store.add_cron_events.return_value = defer.succeed(None)
    self.next_cron_occurrence = patch(
        self, 'otter.scheduler.next_cron_occurrence',
        return_value='next')
    self.log = mock_log()
def test_added(self):
    """
    Total desired, pending and actual are added to cloud metrics, with
    per-tenant breakdowns and convergence-only aggregates.
    """
    metrics = [
        GroupMetrics('t1', 'g1', 3, 2, 0),
        GroupMetrics('t2', 'g1', 4, 4, 1),
        GroupMetrics('t2', 'g', 100, 20, 0),
        GroupMetrics('t3', 'g3', 5, 3, 0)
    ]
    config = {"non-convergence-tenants": ["t1"]}
    m = {'collectionTime': 100000, 'ttlInSeconds': 5 * 24 * 60 * 60}
    md = merge(m, {'metricValue': 112, 'metricName': 'ord.desired'})
    ma = merge(m, {'metricValue': 29, 'metricName': 'ord.actual'})
    mp = merge(m, {'metricValue': 1, 'metricName': 'ord.pending'})
    mt = merge(m, {'metricValue': 3, 'metricName': 'ord.tenants'})
    mg = merge(m, {'metricValue': 4, 'metricName': 'ord.groups'})
    mt1d = merge(m, {'metricValue': 3, 'metricName': 'ord.t1.desired'})
    mt1a = merge(m, {'metricValue': 2, 'metricName': 'ord.t1.actual'})
    mt1p = merge(m, {'metricValue': 0, 'metricName': 'ord.t1.pending'})
    mt2d = merge(m, {'metricValue': 104, 'metricName': 'ord.t2.desired'})
    mt2a = merge(m, {'metricValue': 24, 'metricName': 'ord.t2.actual'})
    mt2p = merge(m, {'metricValue': 1, 'metricName': 'ord.t2.pending'})
    mt3d = merge(m, {'metricValue': 5, 'metricName': 'ord.t3.desired'})
    mt3a = merge(m, {'metricValue': 3, 'metricName': 'ord.t3.actual'})
    mt3p = merge(m, {'metricValue': 0, 'metricName': 'ord.t3.pending'})
    cd = merge(m, {'metricValue': 109, 'metricName': 'ord.conv_desired'})
    ca = merge(m, {'metricValue': 27, 'metricName': 'ord.conv_actual'})
    cdiv = merge(m, {
        'metricValue': 82, 'metricName': 'ord.conv_divergence'
    })
    req_data = [
        md, ma, mp, mt, mg, mt1d, mt1a, mt1p, mt2d, mt2a, mt2p,
        mt3d, mt3a, mt3p, cd, ca, cdiv
    ]
    log = mock_log()
    seq = [(Func(time.time), const(100)),
           (service_request(ServiceType.CLOUD_METRICS_INGEST,
                            "POST", "ingest", data=req_data,
                            log=log).intent, noop)]
    eff = add_to_cloud_metrics(
        m['ttlInSeconds'], 'ord', metrics,
        3,  # number of tenants
        config, log)
    self.assertIsNone(perform_sequence(seq, eff))
    # BUG FIX: the log-format literal was broken across two source
    # lines (an unterminated string); restored to the single message
    # used elsewhere in this suite.
    log.msg.assert_called_once_with(
        'total desired: {td}, total_actual: {ta}, total pending: {tp}',
        td=112, ta=29, tp=1)
def groups_steps(groups, reactor, store, cass_client, authenticator,
                 conf):
    """
    Return [(group, steps)] list
    """
    eff = parallel(map(group_steps, groups))
    dispatcher = get_full_dispatcher(
        reactor, authenticator, mock_log(), get_service_configs(conf),
        "kzclient", store, "supervisor", cass_client)
    return perform(dispatcher, eff).addCallback(
        lambda steps: zip(groups, steps))
def setUp(self):
    """
    Set up fake config data and a mocked treq.
    """
    set_config_data(fake_config)
    self.addCleanup(set_config_data, {})

    self.log = mock_log()
    self.treq = patch(self, 'otter.worker.launch_server_v1.treq')
    patch(self, 'otter.util.http.treq', new=self.treq)
def setUp(self):
    """Patch selfheal helpers and build the SelfHeal under test."""
    self.clock = Clock()
    self.log = mock_log()
    self.patch(sh, "get_groups_to_converge", intent_func("ggtc"))
    self.patch(sh, "check_and_trigger", lambda t, g: t + g)
    self.s = sh.SelfHeal(self.clock, base_dispatcher, "cf", 300.0,
                         self.log)
    self.groups = [{"tenantId": "t{}".format(i),
                    "groupId": "g{}".format(i)}
                   for i in range(5)]
def setUp(self):
    """Patch selfheal helpers and construct the service under test."""
    self.clock = Clock()
    self.log = mock_log()
    self.patch(sh, "get_groups_to_converge", intent_func("ggtc"))
    self.patch(sh, "check_and_trigger", lambda t, g: t + g)
    self.s = sh.SelfHeal(self.clock, base_dispatcher, "cf", 300.0,
                         self.log)
    self.groups = []
    for i in range(5):
        self.groups.append({"tenantId": "t{}".format(i),
                            "groupId": "g{}".format(i)})
def setUp(self):
    """
    Mock `execute_event` and `add_cron_events`.
    """
    super(ProcessEventsTests, self).setUp()
    self.execute_event = patch(self, 'otter.scheduler.execute_event',
                               return_value=defer.succeed(None))
    self.add_cron_events = patch(
        self, 'otter.scheduler.add_cron_events',
        side_effect=lambda store, log, events, deleted_policy_ids:
            defer.succeed(events))
    self.log = mock_log()
def setUp(self):  # noqa
    """
    Build a sample observer factory.
    """
    self.reactor = object()
    self.authenticator = object()
    self.service_configs = {'service': 'configs'}
    self.log = mock_log()
    self.make_cf = partial(
        CloudFeedsObserver, reactor=self.reactor,
        authenticator=self.authenticator, tenant_id='tid',
        region='ord', service_configs=self.service_configs,
        log=self.log)
def test_get_all_metrics(self):
    """
    Metrics are computed from the server-info requests made per tenant.
    """
    # Could use a parameterized "get_all_scaling_group_servers" call to
    # avoid stubbing the nova responses, but this works fine.
    servers_t1 = {
        'g1': ([_server('g1', 'ACTIVE')] * 3 +
               [_server('g1', 'BUILD')] * 2),
        'g2': [_server('g2', 'ACTIVE')]
    }
    servers_t2 = {'g4': [_server('g4', 'ACTIVE'),
                         _server('g4', 'BUILD')]}
    groups = {
        "t1": [{'tenantId': 't1', 'groupId': 'g1', 'desired': 3},
               {'tenantId': 't1', 'groupId': 'g2', 'desired': 4}],
        "t2": [{'tenantId': 't2', 'groupId': 'g4', 'desired': 2}]
    }
    tenant_servers = {'t1': servers_t1, 't2': servers_t2}

    effs = get_all_metrics_effects(groups, mock_log())

    # Every effect is wrapped in a TenantScope indicating the tenant of
    # the ServiceRequests made under it; that tenant selects the
    # stubbed get_all_scaling_group_servers result.
    results = []
    for eff in effs:
        results.append(
            resolve_effect(eff, tenant_servers[eff.intent.tenant_id]))

    self.assertEqual(
        set(reduce(operator.add, results)),
        set([GroupMetrics('t1', 'g1', desired=3, actual=3, pending=2),
             GroupMetrics('t1', 'g2', desired=4, actual=1, pending=0),
             GroupMetrics('t2', 'g4', desired=2, actual=1, pending=1)]))
def test_add_new_params(self):
    """
    `add` merges new parameters into the existing ones rather than
    replacing them wholesale.
    """
    log = mock_log()
    auditor = AuditLogger(log)
    auditor.add(**{'gangnam': 'style', '_with': 'hammer'})
    auditor.add(**{'_with': 'psy', 'extra': 'keyword'})
    auditor.audit('')
    log.msg.assert_called_once_with('', gangnam='style', _with='psy',
                                    extra='keyword', audit_log=True)
def setUp(self):
    """
    Mock execution of scaling policy.
    """
    super(ExecuteEventTests, self).setUp()
    self.mock_group = iMock(IScalingGroup)
    self.mock_store.get_scaling_group.return_value = self.mock_group

    # Mock out modify_and_trigger, recording the state it produces.
    self.mock_mt = patch(self, "otter.scheduler.modify_and_trigger")
    self.new_state = None

    def _record_new_state(new_state):
        self.new_state = new_state

    def _mock_modify_trigger(disp, group, logargs, modifier,
                             modify_state_reason=None, *args, **kwargs):
        self.assertEqual(disp, "disp")
        d = modifier(group, "state", *args, **kwargs)
        return d.addCallback(_record_new_state)

    self.mock_mt.side_effect = _mock_modify_trigger

    self.maybe_exec_policy = patch(
        self, 'otter.scheduler.maybe_execute_scaling_policy',
        return_value=defer.succeed('newstate'))
    self.log = mock_log()
    self.log_args = {'tenant_id': '1234',
                     'scaling_group_id': 'scal44',
                     'policy_id': 'pol44',
                     "scheduled_time": "1970-01-01T00:00:00Z"}
    self.event = {'tenantId': '1234', 'groupId': 'scal44',
                  'policyId': 'pol44',
                  'trigger': datetime(1970, 1, 1), 'cron': '*',
                  'bucket': 1, 'version': 'v2'}
def setUp(self):
    """
    Mock `execute_event` and `add_cron_events`.
    """
    super(ProcessEventsTests, self).setUp()
    self.execute_event = patch(self, 'otter.scheduler.execute_event',
                               return_value=defer.succeed(None))

    def fake_add_cron_events(store, log, events, deleted_policy_ids):
        return defer.succeed(events)

    self.add_cron_events = patch(self,
                                 'otter.scheduler.add_cron_events',
                                 side_effect=fake_add_cron_events)
    self.log = mock_log()
def test_auth_me_waits(self):
    """
    A second `_auth_me` call made while the first is still outstanding
    does not trigger another authentication.
    """
    auth_d = Deferred()
    self.authenticate_user.side_effect = lambda *a, **k: auth_d
    log = mock_log()

    self.ia._auth_me(log=log)
    self.ia._auth_me(log=log)

    self.assertEqual(len(self.authenticate_user.mock_calls), 1)
    log.msg.assert_called_once_with('Getting new identity admin token')
    auth_d.callback({'access': {'token': {'id': 'auth-token'}}})
    self.assertEqual(len(self.authenticate_user.mock_calls), 1)
    self.assertEqual(self.ia._token, 'auth-token')
def setUp(self):
    """
    Set up test dependencies.
    """
    self.log = mock_log()
    set_config_data(fake_config)
    self.addCleanup(set_config_data, {})

    self.treq = patch(self, 'otter.worker.launch_server_v1.treq')
    patch(self, 'otter.util.http.treq', new=self.treq)

    self.generate_server_name = patch(
        self, 'otter.worker.launch_server_v1.generate_server_name')
    self.generate_server_name.return_value = 'as000000'

    self.scaling_group_uuid = '1111111-11111-11111-11111111'
    self.scaling_group = mock.Mock(uuid=self.scaling_group_uuid)
    self.undo = iMock(IUndoStack)
def setUp(self):
    """
    Shortcut by mocking every helper function that does IO.
    """
    self.authenticate_user = patch(self, 'otter.auth.authenticate_user')
    self.authenticate_user.side_effect = lambda *a, **kw: succeed(
        {'access': {'token': {'id': 'auth-token'},
                    'serviceCatalog': fake_service_catalog}})
    self.url = 'http://identity/v2.0'
    self.user = '******'
    self.password = '******'
    self.st = SingleTenantAuthenticator(self.user, self.password,
                                        self.url)
    self.log = mock_log()
def setUp(self):
    """
    Mock the reactor, log and locked method, with controllable
    acquire/release/method deferreds.
    """
    self.lock = LockMixin().mock_lock()
    self.acquire_d, self.release_d = Deferred(), Deferred()
    self.lock.acquire.side_effect = lambda: self.acquire_d
    self.lock.release.side_effect = lambda: self.release_d
    self.method_d = Deferred()
    self.method = mock.Mock(return_value=self.method_d)
    self.reactor = Clock()
    self.log = mock_log()
    self.log_fields = {'lock': self.lock, 'locked_func': self.method}
    # Shared between multiple tests and used in negative assertions;
    # centralizing the definition keeps those assertions in sync if
    # the message ever changes.
    self.too_long_message = "Lock held for more than 120 seconds!"
def setUp(self):
    """Build a Partitioner around a mocked kazoo client."""
    self.clock = Clock()
    self.kz_client = mock.Mock(spec=['SetPartitioner'])
    self.kz_partitioner = mock.MagicMock(
        allocating=False, release=False, failed=False, acquired=False)
    self.kz_client.SetPartitioner.return_value = self.kz_partitioner
    self.path = '/the-part-path'
    self.buckets = range(5)
    self.log = mock_log()
    self.time_boundary = 30
    self.buckets_received = []
    self.partitioner = Partitioner(
        self.kz_client, 10, self.path, self.buckets,
        self.time_boundary, self.log, self.buckets_received.append,
        clock=self.clock)
def setUp(self):
    """
    Mock the cass connection and authenticator.
    """
    self.client = mock.Mock(spec=['disconnect'])
    self.client.disconnect.return_value = succeed('disconnected')
    self.mock_ccs = patch(self, 'otter.metrics.connect_cass_servers',
                          return_value=self.client)
    self.mock_cm = patch(self, 'otter.metrics.collect_metrics',
                         return_value=succeed(None))
    self.config = {'cassandra': 'c', 'identity': identity_config,
                   'metrics': {'interval': 20}}
    self.log = mock_log()
    self.clock = Clock()
def setUp(self):
    """
    Set up a mocked treq whose HTTP verbs all return pending Deferreds.
    """
    self.log = mock_log()
    self.clock = Clock()

    configuration = {}
    for verb in ('request', 'head', 'get', 'put', 'patch', 'post',
                 'delete'):
        configuration['{0}.__name__'.format(verb)] = verb
        configuration['{0}.return_value'.format(verb)] = Deferred()
    self.treq = mock.MagicMock(spec=treq, **configuration)

    self.response = mock.MagicMock(code=204, headers={'1': '2'})

    patch(self, 'otter.util.logging_treq.treq', self.treq)
    patch(self, 'otter.util.logging_treq.uuid4',
          mock.MagicMock(spec=[], return_value='uuid'))
    self.url = 'myurl'
def test_log_effectful_fields(self):
    """
    The log passed to treq is bound with the fields from BoundFields.
    """
    log = mock_log().bind(duplicate='should be overridden')
    expected_log = matches(
        IsBoundWith(duplicate='effectful', bound='stuff'))
    expected_req = ('GET', 'http://google.com/', None, None, None,
                    {'log': expected_log})
    response = StubResponse(200, {})
    treq = StubTreq(reqs=[(expected_req, response)],
                    contents=[(response, "content")])
    req = Request(method="get", url="http://google.com/", log=log)
    req.treq = treq
    bound_log_eff = with_log(Effect(req), bound='stuff',
                             duplicate='effectful')
    dispatcher = ComposedDispatcher(
        [get_simple_dispatcher(None), get_log_dispatcher(log, {})])
    self.assertEqual(
        self.successResultOf(perform(dispatcher, bound_log_eff)),
        (response, "content"))
class FakeApp(object):
    """Minimal app whose audited handler succeeds."""

    log = mock_log()

    @auditable('event_type', 'my message')
    def handler(inner_self, request, audit_logger):
        # `inner_self` avoids shadowing the enclosing test's `self`.
        return 'yay'
class FakeApp(object):
    """Minimal app whose audited handler raises."""

    log = mock_log()

    @auditable('event_type', 'my message')
    def handler(inner_self, request, audit_logger):
        # `inner_self` avoids shadowing the enclosing test's `self`.
        raise ValueError('no logs!')