def setUp(self):
    """
    Set up absolute-limit config data, a mock group collection, and a
    deterministic replacement for ``uuid4``.
    """
    set_config_data({'limits': {'absolute': {'maxGroups': 10,
                                             'maxWebhooksPerPolicy': 10,
                                             'maxPoliciesPerGroup': 10}}})
    self.addCleanup(set_config_data, {})

    self.collection = MockScalingGroupCollection()
    self.tenant_id = 'goo1234'
    self.config = {
        'name': 'blah',
        'cooldown': 600,
        'minEntities': 0,
        'maxEntities': 10,
        'metadata': {}
    }
    self.launch = group_examples.launch_server_config()[1]
    self.mock_log = mock.MagicMock()

    # Deterministic uuid4: returns 1, 2, 3, ... on successive calls.
    self.counter = 0

    def generate_uuid():
        self.counter += 1
        return self.counter

    self.mock_uuid = patch(self, 'otter.models.mock.uuid4',
                           side_effect=generate_uuid)
def setUp(self):
    """
    Build the sample collection and configure ``url_root``.
    """
    self.coll = [{'id': '23'}, {'id': '567'}, {'id': '3444'}]
    set_config_data({'url_root': 'http://localhost'})
    self.addCleanup(set_config_data, {})
def setUp(self):
    """
    Configure the service catalog name and mock ``public_endpoint_url``.
    """
    set_config_data({'cloudServersOpenStack': 'cloud'})
    self.public_endpoint_url = patch(
        self, 'otter.worker.validate_config.public_endpoint_url',
        return_value='http://service')
def setUp(self):
    """
    Set the Cassandra store, and also patch the controller
    """
    keyspace.resume()
    self.root = Otter(store, 'ord').app.resource()
    set_config_data(limits)
    self.addCleanup(set_config_data, {})

    self.config = config()[0]
    self.config['minEntities'] = 0
    self.active_pending_etc = (self.config['name'], {}, {}, 'date', {},
                               False)

    # patch both the config and the groups
    self.mock_controller = patch(self, 'otter.rest.configs.controller',
                                 spec=['obey_config_change'])
    patch(self, 'otter.rest.groups.controller', new=self.mock_controller)

    # Patch supervisor
    supervisor = mock.Mock(spec=['validate_launch_config'])
    supervisor.validate_launch_config.return_value = defer.succeed(None)
    set_supervisor(supervisor)

    def _mock_obey_config_change(log, trans, config, group, state,
                                 launch_config):
        return defer.succeed(GroupState(
            state.tenant_id, state.group_id, *self.active_pending_etc))

    self.mock_controller.obey_config_change.side_effect = \
        _mock_obey_config_change

    store.kz_client = mock.Mock(Lock=self.mock_lock())
def setUp(self):
    """
    Mock the interface

    :return: None
    """
    self.mock_store = iMock(IScalingGroupCollection)
    self.mock_group = iMock(IScalingGroup)
    self.mock_store.get_scaling_group.return_value = self.mock_group

    self.mock_generate_transaction_id = patch(
        self, "otter.rest.decorators.generate_transaction_id",
        return_value="transaction-id")

    # mock out modify state
    self.mock_state = mock.MagicMock(spec=[])  # so nothing can call it

    def _mock_modify_state(modifier, modify_state_reason=None,
                           *args, **kwargs):
        return defer.maybeDeferred(modifier, self.mock_group,
                                   self.mock_state, *args, **kwargs)

    self.mock_group.modify_state.side_effect = _mock_modify_state

    self.otter = Otter(self.mock_store, "ord")
    self.root = self.otter.app.resource()

    # set pagination limits as it'll be used by all rest interfaces
    set_config_data({"limits": {"pagination": 100}, "url_root": ""})
    self.addCleanup(set_config_data, {})
def setUp(self):
    """
    Replace the store every time with a clean one.
    """
    store = MockScalingGroupCollection()
    self.root = Otter(store).app.resource()
    set_config_data({'url_root': 'http://127.0.0.1',
                     'limits': {'pagination': 5}})
    self.addCleanup(set_config_data, {})

    self.config = config()[1]
    self.config['minEntities'] = 0
    self.active_pending_etc = ({}, {}, 'date', {}, False)

    # patch both the config and the groups
    self.mock_controller = patch(self, 'otter.rest.configs.controller',
                                 spec=['obey_config_change'])
    patch(self, 'otter.rest.groups.controller', new=self.mock_controller)

    # Patch supervisor
    supervisor = mock.Mock(spec=['validate_launch_config'])
    supervisor.validate_launch_config.return_value = defer.succeed(None)
    set_supervisor(supervisor)

    def _mock_obey_config_change(log, trans, config, group, state):
        return defer.succeed(GroupState(
            state.tenant_id, state.group_id, state.group_name,
            *self.active_pending_etc))

    self.mock_controller.obey_config_change.side_effect = \
        _mock_obey_config_change
def _test_throttle(self, cfg_name, stype, method):
    """Test a specific throttling configuration."""
    locks = WeakLocks()
    set_config_data({'cloud_client': {'throttling': {cfg_name: 500}}})
    self.addCleanup(set_config_data, {})
    clock = Clock()

    bracket = _default_throttler(locks, clock, stype, method, 'tenant1')
    if bracket is None:
        self.fail("No throttler for %s and %s" % (stype, method))

    d = bracket(lambda: 'foo')
    clock.advance(499)
    self.assertNoResult(d)
    clock.advance(500)
    self.assertEqual(self.successResultOf(d), 'foo')

    # also make sure that the lock is shared between different calls to the
    # throttler.
    bracket1 = _default_throttler(locks, clock, stype, method, 'tenant1')
    result1 = bracket1(lambda: 'bar1')
    bracket2 = _default_throttler(locks, clock, stype, method, 'tenant1')
    result2 = bracket2(lambda: 'bar2')
    clock.advance(499)
    self.assertNoResult(result1)
    self.assertNoResult(result2)
    clock.advance(1)
    self.assertEqual(self.successResultOf(result1), 'bar1')
    self.assertNoResult(result2)
    clock.advance(500)
    self.assertEqual(self.successResultOf(result2), 'bar2')
def _test_throttle(self, cfg_name, stype, method):
    """Test a specific throttling configuration."""
    locks = WeakLocks()
    set_config_data({'cloud_client': {'throttling': {cfg_name: 500}}})
    self.addCleanup(set_config_data, {})
    clock = Clock()

    bracket = _default_throttler(locks, clock, stype, method, 'tenant1')
    if bracket is None:
        self.fail("No throttler for %s and %s" % (stype, method))

    d = bracket(lambda: 'foo')
    clock.advance(499)
    self.assertNoResult(d)
    clock.advance(500)
    self.assertEqual(self.successResultOf(d), 'foo')

    # also make sure that the lock is shared between different calls to the
    # throttler.
    bracket1 = _default_throttler(locks, clock, stype, method, 'tenant1')
    result1 = bracket1(lambda: 'bar1')
    bracket2 = _default_throttler(locks, clock, stype, method, 'tenant1')
    result2 = bracket2(lambda: 'bar2')
    clock.advance(499)
    self.assertNoResult(result1)
    self.assertNoResult(result2)
    clock.advance(1)
    self.assertEqual(self.successResultOf(result1), 'bar1')
    self.assertNoResult(result2)
    clock.advance(500)
    self.assertEqual(self.successResultOf(result2), 'bar2')
def setUp(self):
    """
    Replace the store every time with a clean one.
    """
    store = MockScalingGroupCollection()
    self.mock_log = mock.MagicMock()
    manifest = self.successResultOf(
        store.create_scaling_group(self.mock_log, self.tenant_id,
                                   config()[0],
                                   launch_server_config()[0]))
    self.group_id = manifest['id']
    set_store(store)

    self.policies_url = '/v1.0/{tenant}/groups/{group}/policies/'.format(
        tenant=self.tenant_id, group=self.group_id)

    controller_patcher = mock.patch('otter.rest.policies.controller')
    self.mock_controller = controller_patcher.start()
    self.mock_controller.maybe_execute_scaling_policy.return_value = \
        defer.succeed(GroupState(self.tenant_id, self.group_id,
                                 {}, {}, 'date', {}, False))
    self.addCleanup(controller_patcher.stop)

    set_config_data({'url_root': 'http://127.0.0.1'})
    self.addCleanup(set_config_data, {})
def setUp(self):
    """
    Mock the interface

    :return: None
    """
    self.mock_store = iMock(IScalingGroupCollection)
    self.mock_group = iMock(IScalingGroup)
    self.mock_store.get_scaling_group.return_value = self.mock_group

    self.mock_generate_transaction_id = patch(
        self, 'otter.rest.decorators.generate_transaction_id',
        return_value='transaction-id')

    # mock out modify state
    self.mock_state = mock.MagicMock(spec=[])  # so nothing can call it

    def _mock_modify_state(modifier, *args, **kwargs):
        modifier(self.mock_group, self.mock_state, *args, **kwargs)
        return defer.succeed(None)

    self.mock_group.modify_state.side_effect = _mock_modify_state

    self.root = Otter(self.mock_store).app.resource()
    self.get_url_root = patch(self, 'otter.util.http.get_url_root',
                              return_value="")

    # set pagination limits as it'll be used by all rest interfaces
    set_config_data({'limits': {'pagination': 100}})
    # Reset the global config after the test so the pagination limit does
    # not leak into unrelated test cases (matches the other fixtures in
    # this suite that pair set_config_data with addCleanup).
    self.addCleanup(set_config_data, {})
def setUp(self):
    """
    Set up a silverberg client
    """
    keyspace.resume()
    self.root = Otter(store, 'ord').app.resource()
    store.kz_client = mock.Mock(Lock=self.mock_lock())
    set_config_data(limits)
    self.addCleanup(set_config_data, {})

    self._config = config()[0]
    self._launch = launch_server_config()[0]
    self.mock_controller = patch(self, 'otter.rest.policies.controller')

    def _set_group_id(manifest):
        self.group_id = manifest['state'].group_id
        self.policies_url = (
            '/v1.0/{tenant}/groups/{group}/policies/'.format(
                tenant=self.tenant_id, group=self.group_id))
        self.mock_controller.maybe_execute_scaling_policy.return_value = \
            defer.succeed(GroupState(self.tenant_id, self.group_id,
                                     self._config['name'],
                                     {}, {}, 'date', {}, False))

    mock_log = mock.MagicMock()
    d = store.create_scaling_group(mock_log, self.tenant_id,
                                   self._config, self._launch)
    d.addCallback(_set_group_id)
    return d
def setUp(self):
    """
    Configure test resources.
    """
    self.log = mock.Mock()

    self.group = iMock(IScalingGroup)
    self.group.tenant_id = 11111
    self.group.uuid = 'group-id'

    self.auth_token = 'auth-token'
    self.service_catalog = {}
    self.auth_function = mock.Mock(
        return_value=succeed((self.auth_token, self.service_catalog)))

    self.fake_server_details = {
        'server': {'id': 'server_id', 'links': ['links'], 'name': 'meh',
                   'metadata': {}}
    }

    set_config_data({'region': 'ORD'})
    self.addCleanup(set_config_data, {})

    self.cooperator = mock.Mock(spec=Cooperator)

    self.supervisor = SupervisorService(
        self.auth_function, self.cooperator.coiterate)

    self.InMemoryUndoStack = patch(self,
                                   'otter.supervisor.InMemoryUndoStack')
    self.undo = self.InMemoryUndoStack.return_value
    self.undo.rewind.return_value = succeed(None)
def setUp(self):
    """
    Set up a silverberg client
    """
    keyspace.resume()
    set_store(store)  # ensure it's the cassandra store

    set_config_data({'url_root': 'http://127.0.0.1'})
    self.addCleanup(set_config_data, {})

    self._config = config()[0]
    self._launch = launch_server_config()[0]
    self.mock_controller = patch(self, 'otter.rest.policies.controller')

    self.lock = self.mock_lock()
    patch(self, 'otter.models.cass.BasicLock', return_value=self.lock)

    def _set_group_id(manifest):
        self.group_id = manifest['id']
        self.policies_url = (
            '/v1.0/{tenant}/groups/{group}/policies/'.format(
                tenant=self.tenant_id, group=self.group_id))
        self.mock_controller.maybe_execute_scaling_policy.return_value = \
            defer.succeed(GroupState(self.tenant_id, self.group_id,
                                     {}, {}, 'date', {}, False))

    mock_log = mock.MagicMock()
    d = store.create_scaling_group(mock_log, self.tenant_id,
                                   self._config, self._launch)
    d.addCallback(_set_group_id)
    return d
def setUp(self):
    """
    Set the Cassandra store, and also patch the controller
    """
    keyspace.resume()
    set_store(store)
    set_config_data({'url_root': 'http://127.0.0.1'})
    self.addCleanup(set_config_data, {})

    self.config = config()[0]
    self.config['minEntities'] = 0
    self.active_pending_etc = ({}, {}, 'date', {}, False)

    # patch both the config and the groups
    self.mock_controller = patch(self, 'otter.rest.configs.controller',
                                 spec=['obey_config_change'])
    patch(self, 'otter.rest.groups.controller', new=self.mock_controller)

    def _mock_obey_config_change(log, trans, config, group, state):
        return defer.succeed(
            GroupState(state.tenant_id, state.group_id,
                       *self.active_pending_etc))

    self.mock_controller.obey_config_change.side_effect = \
        _mock_obey_config_change

    self.lock = self.mock_lock()
    patch(self, 'otter.models.cass.BasicLock', return_value=self.lock)
def setUp(self):
    """
    Replace the store every time with a clean one.
    """
    store = MockScalingGroupCollection()
    set_store(store)
    set_config_data({'url_root': 'http://127.0.0.1'})
    self.addCleanup(set_config_data, {})

    self.config = config()[1]
    self.config['minEntities'] = 0
    self.active_pending_etc = ({}, {}, 'date', {}, False)

    # patch both the config and the groups
    self.mock_controller = patch(self, 'otter.rest.configs.controller',
                                 spec=['obey_config_change'])
    patch(self, 'otter.rest.groups.controller', new=self.mock_controller)

    def _mock_obey_config_change(log, trans, config, group, state):
        return defer.succeed(
            GroupState(state.tenant_id, state.group_id,
                       *self.active_pending_etc))

    self.mock_controller.obey_config_change.side_effect = \
        _mock_obey_config_change
def setUp(self):
    """
    Set the Cassandra store, and also patch the controller
    """
    keyspace.resume()
    set_store(store)
    set_config_data({'url_root': 'http://127.0.0.1'})
    self.addCleanup(set_config_data, {})

    self.config = config()[0]
    self.config['minEntities'] = 0
    self.active_pending_etc = ({}, {}, 'date', {}, False)

    # patch both the config and the groups
    self.mock_controller = patch(self, 'otter.rest.configs.controller',
                                 spec=['obey_config_change'])
    patch(self, 'otter.rest.groups.controller', new=self.mock_controller)

    def _mock_obey_config_change(log, trans, config, group, state):
        return defer.succeed(GroupState(
            state.tenant_id, state.group_id, *self.active_pending_etc))

    self.mock_controller.obey_config_change.side_effect = \
        _mock_obey_config_change

    self.lock = self.mock_lock()
    patch(self, 'otter.models.cass.BasicLock', return_value=self.lock)
def setUp(self):
    """
    Configure test resources.
    """
    self.log = mock.Mock()

    self.group = iMock(IScalingGroup)
    self.group.tenant_id = 11111
    self.group.uuid = 'group-id'

    self.auth_token = 'auth-token'
    self.service_catalog = {}
    self.auth_function = mock.Mock(
        return_value=succeed((self.auth_token, self.service_catalog)))

    self.fake_server_details = {
        'server': {
            'id': 'server_id',
            'links': ['links'],
            'name': 'meh',
            'metadata': {}
        }
    }

    set_config_data({'region': 'ORD'})
    self.addCleanup(set_config_data, {})

    self.cooperator = mock.Mock(spec=Cooperator)

    self.supervisor = Supervisor(self.auth_function,
                                 self.cooperator.coiterate)

    self.InMemoryUndoStack = patch(self,
                                   'otter.supervisor.InMemoryUndoStack')
    self.undo = self.InMemoryUndoStack.return_value
    self.undo.rewind.return_value = succeed(None)
def setUp(self):
    """Set an elastic search config var."""
    super(OtterHistoryTestCase, self).setUp()
    self.root = Otter(None, 'ord',
                      es_host='http://dummy').app.resource()

    set_config_data({
        'limits': {'pagination': 20},
        'url_root': 'http://localhost'})
    self.addCleanup(set_config_data, {})

    self.log = patch(self, 'otter.rest.history.log', new=mock_log())

    self.make_auditlog_query = patch(
        self, 'otter.rest.history.make_auditlog_query',
        return_value={'tenant_id': 101010})

    # Canned elasticsearch response containing a single audit-log hit.
    self.treq = patch(self, 'otter.rest.history.treq', new=mock_treq(
        code=200, method='get', json_content={
            'hits': {
                'hits': [{
                    '_source': {
                        'message': 'audit log event',
                        'event_type': 'event-abc',
                        '@timestamp': 1234567890,
                        'policy_id': 'policy-xyz',
                        'scaling_group_id': 'scaling-group-uvw',
                        'server_id': 'server-rst',
                        'throwaway_key': 'ignore me!!!!'
                    }
                }]
            }
        }))
def setUp(self):
    """
    Replace the store every time with a clean one.
    """
    store = MockScalingGroupCollection()
    self.mock_log = mock.MagicMock()
    manifest = self.successResultOf(
        store.create_scaling_group(self.mock_log, self.tenant_id,
                                   config()[0],
                                   launch_server_config()[0]))
    self.group_id = manifest['state'].group_id
    self.group_name = 'name'

    self.policies_url = '/v1.0/{tenant}/groups/{group}/policies/'.format(
        tenant=self.tenant_id, group=self.group_id)

    controller_patcher = mock.patch('otter.rest.policies.controller')
    self.mock_controller = controller_patcher.start()
    self.mock_controller.maybe_execute_scaling_policy.return_value = \
        defer.succeed(GroupState(self.tenant_id, self.group_id,
                                 self.group_name, {}, {}, 'date', {},
                                 False))
    self.addCleanup(controller_patcher.stop)

    self.root = Otter(store).app.resource()
    set_config_data({'url_root': 'http://127.0.0.1'})
    self.addCleanup(set_config_data, {})
def test_max_groups_overlimit(self):
    """
    test scaling group creation when at maxGroups limit
    """
    set_config_data({'limits': {'absolute': {'maxGroups': 0}}})
    d = self.collection.create_scaling_group(mock.Mock(), '1234',
                                             self.config, self.launch)
    self.failureResultOf(d, ScalingGroupOverLimitError)
def setUp(self):
    """
    Set up a basic configuration dictionary.
    """
    config.set_config_data({
        'foo': 'bar',
        'baz': {'bax': 'quux'}
    })
    # Reset the global config after the test so these values do not leak
    # into other test cases.
    self.addCleanup(config.set_config_data, {})
def test_set_config_None(self):
    """
    Setting `None` via :func:`config.set_config_data` also works and does
    not raise exceptions on subsequent update or get
    """
    config.set_config_data(None)
    self.assertIsNone(config.config_value("a"))
    config.update_config_data("a.b", 2)
def setUp(self):
    """
    Configure the service catalog name and mock ``public_endpoint_url``.
    """
    set_config_data({"cloudServersOpenStack": "cloud"})
    self.public_endpoint_url = patch(
        self,
        "otter.worker.validate_config.public_endpoint_url",
        return_value="http://service")
def test_sets_contents(self):
    """
    Returns the configured response body
    """
    set_config_data({'root': {'body': 'happyhappyhappy'}})
    response_wrapper = self.request()
    self.assertEqual(response_wrapper.response.code, 200)
    self.assertEqual(self.get_non_standard_headers(response_wrapper), {})
    self.assertEqual(response_wrapper.content, 'happyhappyhappy')
def makeService(config):
    """
    Set up the otter-api service.
    """
    set_config_data(dict(config))

    if not config_value('mock'):
        seed_endpoints = [
            clientFromString(reactor, str(host))
            for host in config_value('cassandra.seed_hosts')
        ]

        cassandra_cluster = LoggingCQLClient(
            RoundRobinCassandraCluster(
                seed_endpoints, config_value('cassandra.keyspace')),
            log.bind(system='otter.silverberg'))

        set_store(CassScalingGroupCollection(cassandra_cluster))

    bobby_url = config_value('bobby_url')
    if bobby_url is not None:
        set_bobby(BobbyClient(bobby_url))

    cache_ttl = config_value('identity.cache_ttl')
    if cache_ttl is None:
        # FIXME: Pick an arbitrary cache ttl value based on absolutely no
        # science.
        cache_ttl = 300

    authenticator = CachingAuthenticator(
        reactor,
        ImpersonatingAuthenticator(config_value('identity.username'),
                                   config_value('identity.password'),
                                   config_value('identity.url'),
                                   config_value('identity.admin_url')),
        cache_ttl)

    supervisor = Supervisor(authenticator.authenticate_tenant, coiterate)
    set_supervisor(supervisor)

    s = MultiService()

    site = Site(root)
    site.displayTracebacks = False

    api_service = service(str(config_value('port')), site)
    api_service.setServiceParent(s)

    # Scheduler needs the cassandra cluster, so it is only started when
    # not running against the mock store.
    if config_value('scheduler') and not config_value('mock'):
        scheduler_service = SchedulerService(
            int(config_value('scheduler.batchsize')),
            int(config_value('scheduler.interval')),
            cassandra_cluster)
        scheduler_service.setServiceParent(s)

    return s
def test_sets_status_code(self):
    """
    Returns the configured status code
    """
    set_config_data({'root': {'code': 204}})
    response_wrapper = self.request()
    self.assertEqual(response_wrapper.response.code, 204)
    self.assertEqual(self.get_non_standard_headers(response_wrapper), {})
    self.assertEqual(response_wrapper.content, '')
def test_no_limit(self):
    """
    Defaults to config limit if not given, leaving it off the self URL,
    and calculates the next marker by id by default
    """
    set_config_data({'limits': {'pagination': 3},
                     'url_root': 'http://localhost'})
    links = get_collection_links(self.coll, 'url', 'self')
    self.assertEqual(
        links,
        [{'href': 'http://localhost/url', 'rel': 'self'},
         {'href': 'http://localhost/url?limit=3&marker=3444',
          'rel': 'next'}])
def setUp(self):
    """
    Set up a basic configuration dictionary.
    """
    config.set_config_data({
        'foo': 'bar',
        'baz': {'bax': 'quux'}
    })
    self.addCleanup(config.set_config_data, {})
def makeService(config):
    """
    Set up the otter-api service.
    """
    set_config_data(dict(config))

    # Try to configure graylog and airbrake.
    if config_value('graylog'):
        if GraylogUDPPublisher is not None:
            log.addObserver(
                make_observer_chain(
                    GraylogUDPPublisher(**config_value('graylog')),
                    False))
        else:
            warnings.warn("There is a configuration option for Graylog, "
                          "but txgraylog is not installed.")

    if config_value('airbrake'):
        if AirbrakeLogObserver is not None:
            airbrake = AirbrakeLogObserver(
                config_value('airbrake.api_key'),
                config_value('environment'),
                use_ssl=True
            )
            airbrake.start()
        else:
            warnings.warn("There is a configuration option for Airbrake, "
                          "but txairbrake is not installed.")

    if not config_value('mock'):
        seed_endpoints = [
            clientFromString(reactor, str(host))
            for host in config_value('cassandra.seed_hosts')]

        cassandra_cluster = RoundRobinCassandraCluster(
            seed_endpoints, config_value('cassandra.keyspace'))

        set_store(CassScalingGroupCollection(cassandra_cluster))

    s = MultiService()

    site = Site(root)
    site.displayTracebacks = False

    api_service = service(str(config_value('port')), site)
    api_service.setServiceParent(s)

    # The scheduler requires the cassandra cluster, which is only created
    # when not in mock mode; without the extra guard, enabling the
    # scheduler with the mock store raised a NameError on the undefined
    # `cassandra_cluster`.
    if config_value('scheduler') and not config_value('mock'):
        scheduler_service = SchedulerService(
            int(config_value('scheduler.batchsize')),
            int(config_value('scheduler.interval')),
            cassandra_cluster)
        scheduler_service.setServiceParent(s)

    return s
def makeService(config):
    """
    Set up the otter-api service.
    """
    set_config_data(dict(config))

    if not config_value('mock'):
        seed_endpoints = [
            clientFromString(reactor, str(host))
            for host in config_value('cassandra.seed_hosts')]

        cassandra_cluster = LoggingCQLClient(
            RoundRobinCassandraCluster(
                seed_endpoints, config_value('cassandra.keyspace')),
            log.bind(system='otter.silverberg'))

        set_store(CassScalingGroupCollection(cassandra_cluster))

    bobby_url = config_value('bobby_url')
    if bobby_url is not None:
        set_bobby(BobbyClient(bobby_url))

    cache_ttl = config_value('identity.cache_ttl')
    if cache_ttl is None:
        # FIXME: Pick an arbitrary cache ttl value based on absolutely no
        # science.
        cache_ttl = 300

    authenticator = CachingAuthenticator(
        reactor,
        ImpersonatingAuthenticator(
            config_value('identity.username'),
            config_value('identity.password'),
            config_value('identity.url'),
            config_value('identity.admin_url')),
        cache_ttl)

    supervisor = Supervisor(authenticator.authenticate_tenant, coiterate)
    set_supervisor(supervisor)

    s = MultiService()

    site = Site(root)
    site.displayTracebacks = False

    api_service = service(str(config_value('port')), site)
    api_service.setServiceParent(s)

    if config_value('scheduler') and not config_value('mock'):
        scheduler_service = SchedulerService(
            int(config_value('scheduler.batchsize')),
            int(config_value('scheduler.interval')),
            cassandra_cluster)
        scheduler_service.setServiceParent(s)

    return s
def test_mock_store_with_scheduler(self):
    """
    SchedulerService is not created with mock store
    """
    self.config['mock'] = True
    set_config_data(self.config)
    self.assertIs(
        setup_scheduler(self.parent, "disp", self.store, self.kz_client),
        None)
    self.assertFalse(self.store.set_scheduler_buckets.called)
def setUp(self):
    """
    Set up some mocks.
    """
    set_config_data(fake_config)
    self.addCleanup(set_config_data, {})

    self.log = mock_log()
    self.treq = patch(self, 'otter.worker.launch_server_v1.treq')
    patch(self, 'otter.util.http.treq', new=self.treq)
def test_sets_unicode_headers(self):
    """
    Returns the configured response headers, even if they were provided
    in unicode
    """
    set_config_data({'root': {'headers': {u'someheader': [u'value']}}})
    response_wrapper = self.request()
    self.assertEqual(response_wrapper.response.code, 200)
    self.assertEqual(self.get_non_standard_headers(response_wrapper),
                     {'someheader': ['value']})
    self.assertEqual(response_wrapper.content, '')
def test_sets_headers(self):
    """
    Returns the configured response headers
    """
    headers = {'someheader1': ['value1', 'value2'],
               'someheader2': ['value1']}
    set_config_data({'root': {'headers': headers}})
    response_wrapper = self.request()
    self.assertEqual(response_wrapper.response.code, 200)
    self.assertEqual(self.get_non_standard_headers(response_wrapper),
                     headers)
    self.assertEqual(response_wrapper.content, '')
def test_performs_tenant_scope(self, deferred_lock_run):
    """
    :func:`perform_tenant_scope` performs :obj:`TenantScope`, and uses the
    default throttler
    """
    # We want to ensure
    # 1. the TenantScope can be performed
    # 2. the ServiceRequest is run within a lock, since it matches the
    #    default throttling policy
    set_config_data({
        "cloud_client": {
            "throttling": {
                "create_server_delay": 1,
                "delete_server_delay": 0.4
            }
        }
    })
    self.addCleanup(set_config_data, {})

    clock = Clock()
    authenticator = object()
    log = object()
    dispatcher = get_cloud_client_dispatcher(clock, authenticator, log,
                                             make_service_configs())
    svcreq = service_request(ServiceType.CLOUD_SERVERS, 'POST', 'servers')
    tscope = TenantScope(tenant_id='111', effect=svcreq)

    def run(f, *args, **kwargs):
        result = f(*args, **kwargs)
        # Tag the result so the assertion below can tell the request ran
        # under the lock.
        result.addCallback(
            lambda x: (x[0], assoc(x[1], 'locked', True)))
        return result
    deferred_lock_run.side_effect = run

    response = stub_pure_response({}, 200)
    seq = SequenceDispatcher([
        (Authenticate(authenticator=authenticator, tenant_id='111',
                      log=log),
         lambda i: ('token', fake_service_catalog)),
        (Request(method='POST', url='http://dfw.openstack/servers',
                 headers=headers('token'), log=log),
         lambda i: response),
    ])
    disp = ComposedDispatcher([seq, dispatcher])

    with seq.consume():
        result = perform(disp, Effect(tscope))
        self.assertNoResult(result)
        clock.advance(1)
        self.assertEqual(self.successResultOf(result),
                         (response[0], {'locked': True}))
def setUp(self):
    """
    Set up a mock Authenticator for authenticate_tenant.
    """
    set_config_data({'identity': {
        'username': '******',
        'password': '******',
        'url': 'http://identity/v2.0',
        'admin_url': 'http://identity_admin'
    }})
    self.ia = patch(self, 'otter.auth._ImpersonatingAuthenticator')
def setUp(self):
    """
    Config with identity settings
    """
    self.config = test_config.copy()
    self.config['identity'] = {
        'username': '******',
        'password': '******',
        'url': 'htp',
        'admin_url': 'ad',
        'max_retries': 3,
        'retry_interval': 5,
        'wait': 4,
        'cache_ttl': 50
    }
    set_config_data(self.config)
    self.addCleanup(set_config_data, None)
def test_too_many_policies_fail(self):
    """
    If the number of policies is over the configured limit, fail to
    validate
    """
    def cleanup():
        # Restore the config and regenerate the schemas from it.
        set_config_data({})
        reload(rest_schemas)

    set_config_data({"limits": {"pagination": 5}})
    self.addCleanup(cleanup)
    # The schemas bake the limit in at import time, so reload after
    # changing the config.
    reload(rest_schemas)
    self.assertRaises(ValidationError, validate,
                      [self.one_policy] * 6,
                      rest_schemas.create_policies_request)
def test_sets_headers(self):
    """
    Returns the configured response headers
    """
    headers = {
        'someheader1': ['value1', 'value2'],
        'someheader2': ['value1']
    }
    set_config_data({'root': {'headers': headers}})
    response_wrapper = self.request()
    self.assertEqual(response_wrapper.response.code, 200)
    self.assertEqual(self.get_non_standard_headers(response_wrapper),
                     headers)
    self.assertEqual(response_wrapper.content, '')
def setUp(self):
    """
    SetUp a mock request with query args for testing `paginatable`.
    """
    self.mockRequest = mock.MagicMock()
    self.mockRequest.args = {}

    class FakeApp(object):
        @paginatable
        def paginate_me(self, request, paginate):
            return defer.succeed(paginate)

    self.app = FakeApp()
    set_config_data({'limits': {'pagination': 10}})
    self.addCleanup(set_config_data, {})
def setUp(self):
    """
    setup fake config data
    """
    data = {
        "limits": {
            "pagination": 500,
            "absolute": {
                "maxGroups": 2,
                "maxPoliciesPerGroup": 3,
                "maxWebhooksPerPolicy": 4
            }
        }
    }
    super(OtterLimitsTestCase, self).setUp()
    set_config_data(data)
def test_no_limit(self):
    """
    Defaults to config limit if not given, leaving it off the self URL,
    and calculates the next marker by id by default
    """
    set_config_data({'limits': {'pagination': 3},
                     'url_root': 'http://localhost'})
    links = get_collection_links(self.coll, 'url', 'self')
    expected = [
        {'href': 'http://localhost/url', 'rel': 'self'},
        {'href': 'http://localhost/url?limit=3&marker=3444',
         'rel': 'next'}
    ]
    self.assertEqual(links, expected)
def test_post_and_delete_not_the_same(self):
    """
    The throttlers for POST and DELETE to cloud servers are different.
    """
    set_config_data({
        "cloud_client": {
            "throttling": {
                "create_server_delay": 1,
                "delete_server_delay": 0.4
            }
        }
    })
    # Reset the global config after the test so the throttling settings
    # do not leak into other tests (the sibling tests in this suite all
    # register this cleanup).
    self.addCleanup(set_config_data, {})
    clock = Clock()
    locks = WeakLocks()
    deleter = _default_throttler(locks, clock, ServiceType.CLOUD_SERVERS,
                                 'delete', 'any-tenant')
    poster = _default_throttler(locks, clock, ServiceType.CLOUD_SERVERS,
                                'post', 'any-tenant')
    self.assertIsNot(deleter, poster)
def setUp(self):
    """
    Mock args
    """
    self.config = {
        'scheduler': {
            'buckets': 10,
            'partition': {
                'path': '/part_path',
                'time_boundary': 15
            },
            'batchsize': 100,
            'interval': 10
        }
    }
    set_config_data(self.config)
    self.parent = MultiService()
    self.store = mock.Mock()
    self.kz_client = mock.Mock()
def _test_tenant(self, cfg_name, stype, method):
    """
    Test a specific throttling configuration, and ensure that locks are
    per-tenant.
    """
    locks = WeakLocks()
    set_config_data({'cloud_client': {'throttling': {cfg_name: 500}}})
    self.addCleanup(set_config_data, {})
    clock = Clock()

    bracket1 = _default_throttler(locks, clock, stype, method, 'tenant1')
    if bracket1 is None:
        self.fail("No throttler for %s and %s" % (stype, method))
    result1 = bracket1(lambda: 'bar1')

    bracket2 = _default_throttler(locks, clock, stype, method, 'tenant2')
    result2 = bracket2(lambda: 'bar2')

    # Different tenants use different locks, so both fire after one delay.
    self.assertNoResult(result1)
    self.assertNoResult(result2)
    clock.advance(500)
    self.assertEqual(self.successResultOf(result1), 'bar1')
    self.assertEqual(self.successResultOf(result2), 'bar2')
def setUp(self):
    """
    Set up test dependencies.
    """
    self.log = mock_log()
    set_config_data(fake_config)
    self.addCleanup(set_config_data, {})

    self.treq = patch(self, 'otter.worker.launch_server_v1.treq')
    patch(self, 'otter.util.http.treq', new=self.treq)

    self.generate_server_name = patch(
        self, 'otter.worker.launch_server_v1.generate_server_name')
    self.generate_server_name.return_value = 'as000000'

    self.scaling_group_uuid = '1111111-11111-11111-11111111'
    self.scaling_group = mock.Mock(uuid=self.scaling_group_uuid)
    self.undo = iMock(IUndoStack)
def setUp(self):
    """
    Mock the interface

    :return: None
    """
    self.mock_store = iMock(IScalingGroupCollection)
    # mock out modify state
    self.mock_state = mock.MagicMock(spec=[])  # so nothing can call it
    self.mock_group = mock_group(self.mock_state, '11111', 'one')
    self.mock_store.get_scaling_group.return_value = self.mock_group

    self.mock_generate_transaction_id = patch(
        self, 'otter.rest.decorators.generate_transaction_id',
        return_value='transaction-id')

    self.otter = Otter(self.mock_store, 'ord')
    self.root = self.otter.app.resource()

    # set pagination limits as it'll be used by all rest interfaces
    set_config_data({'limits': {'pagination': 100}, 'url_root': ''})
    self.addCleanup(set_config_data, {})
def setUp(self):
    """
    Replace the store every time with a clean one.
    """
    self.mock_log = mock.MagicMock()
    store = MockScalingGroupCollection()
    manifest = self.successResultOf(
        store.create_scaling_group(self.mock_log, self.tenant_id,
                                   config()[0],
                                   launch_server_config()[0]))
    self.group_id = manifest['id']
    group = store.get_scaling_group(self.mock_log, self.tenant_id,
                                    self.group_id)
    self.policy_id = self.successResultOf(
        group.create_policies([{
            "name": 'set number of servers to 10',
            "change": 10,
            "cooldown": 3,
            "type": "webhook"
        }])).keys()[0]
    set_store(store)

    self.webhooks_url = (
        '/v1.0/{tenant}/groups/{group}/policies/{policy}/webhooks/'.format(
            tenant=self.tenant_id, group=self.group_id,
            policy=self.policy_id))

    self.mock_controller = patch(self, 'otter.rest.webhooks.controller')

    def _mock_maybe_execute(log, trans, group, state, policy_id):
        return defer.succeed(state)

    self.mock_controller.maybe_execute_scaling_policy.side_effect = \
        _mock_maybe_execute

    set_config_data({'url_root': 'http://127.0.0.1'})
    self.addCleanup(set_config_data, {})
def makeService(config):
    """
    Set up the otter-api service.
    """
    config = dict(config)
    set_config_data(config)

    parent = MultiService()

    region = config_value('region')

    seed_endpoints = [
        clientFromString(reactor, str(host))
        for host in config_value('cassandra.seed_hosts')]

    cassandra_cluster = LoggingCQLClient(
        TimingOutCQLClient(
            reactor,
            RoundRobinCassandraCluster(
                seed_endpoints,
                config_value('cassandra.keyspace'),
                disconnect_on_cancel=True),
            config_value('cassandra.timeout') or 30),
        log.bind(system='otter.silverberg'))

    store = CassScalingGroupCollection(
        cassandra_cluster, reactor,
        config_value('limits.absolute.maxGroups'))
    admin_store = CassAdmin(cassandra_cluster)

    bobby_url = config_value('bobby_url')
    if bobby_url is not None:
        set_bobby(BobbyClient(bobby_url))

    service_configs = get_service_configs(config)

    authenticator = generate_authenticator(reactor, config['identity'])
    supervisor = SupervisorService(authenticator, region, coiterate,
                                   service_configs)
    supervisor.setServiceParent(parent)
    set_supervisor(supervisor)

    health_checker = HealthChecker(reactor, {
        'store': getattr(store, 'health_check', None),
        'kazoo': store.kazoo_health_check,
        'supervisor': supervisor.health_check
    })

    # Setup cassandra cluster to disconnect when otter shuts down
    if 'cassandra_cluster' in locals():
        parent.addService(FunctionalService(stop=partial(
            call_after_supervisor, cassandra_cluster.disconnect,
            supervisor)))

    otter = Otter(store, region, health_checker.health_check)
    site = Site(otter.app.resource())
    site.displayTracebacks = False

    api_service = service(str(config_value('port')), site)
    api_service.setServiceParent(parent)

    # Setup admin service
    admin_port = config_value('admin')
    if admin_port:
        admin = OtterAdmin(admin_store)
        admin_site = Site(admin.app.resource())
        admin_site.displayTracebacks = False
        admin_service = service(str(admin_port), admin_site)
        admin_service.setServiceParent(parent)

    # setup cloud feed
    cf_conf = config.get('cloudfeeds', None)
    if cf_conf is not None:
        id_conf = deepcopy(config['identity'])
        id_conf['strategy'] = 'single_tenant'
        add_to_fanout(CloudFeedsObserver(
            reactor=reactor,
            authenticator=generate_authenticator(reactor, id_conf),
            tenant_id=cf_conf['tenant_id'],
            region=region,
            service_configs=service_configs))

    # Setup Kazoo client
    if config_value('zookeeper'):
        threads = config_value('zookeeper.threads') or 10
        disable_logs = config_value('zookeeper.no_logs')
        threadpool = ThreadPool(maxthreads=threads)
        sync_kz_client = KazooClient(
            hosts=config_value('zookeeper.hosts'),
            # Keep trying to connect until the end of time with
            # max interval of 10 minutes
            connection_retry=dict(max_tries=-1, max_delay=600),
            logger=(None if disable_logs
                    else TxLogger(log.bind(system='kazoo')))
        )
        kz_client = TxKazooClient(reactor, threadpool, sync_kz_client)
        # Don't timeout. Keep trying to connect forever
        d = kz_client.start(timeout=None)

        def on_client_ready(_):
            dispatcher = get_full_dispatcher(reactor, authenticator, log,
                                             get_service_configs(config),
                                             kz_client, store, supervisor,
                                             cassandra_cluster)
            # Setup scheduler service after starting
            scheduler = setup_scheduler(parent, dispatcher, store,
                                        kz_client)
            health_checker.checks['scheduler'] = scheduler.health_check
            otter.scheduler = scheduler
            # Give dispatcher to Otter REST object
            otter.dispatcher = dispatcher
            # Set the client after starting
            # NOTE: There is small amount of time when the start is
            # not finished and the kz_client is not set in which case
            # policy execution and group delete will fail
            store.kz_client = kz_client
            # Setup kazoo to stop when shutting down
            parent.addService(FunctionalService(
                stop=partial(call_after_supervisor, kz_client.stop,
                             supervisor)))
            setup_converger(
                parent, kz_client, dispatcher,
                config_value('converger.interval') or 10,
                config_value('converger.build_timeout') or 3600,
                config_value('converger.limited_retry_iterations') or 10,
                config_value('converger.step_limits') or {})

        d.addCallback(on_client_ready)
        d.addErrback(log.err, 'Could not start TxKazooClient')

    return parent
def set_config_for_test(testcase, data):
    """
    Set config data for test. Will reset to {} after test is run
    """
    set_config_data(data)
    testcase.addCleanup(set_config_data, {})
def tearDown(self):
    """Clear the global config so it does not leak into other tests."""
    set_config_data(None)
def tearDown(self):
    """
    Reset config data
    """
    set_config_data({})