Code example #1
File: test_scheduler.py  Project: stephamon/otter
    def setUp(self):
        """
        Mock all the dependencies of SchedulingService.

        This includes logging, store's fetch_and_delete, TxKazooClient stuff,
        check_events_in_bucket.
        """
        super(SchedulerServiceTests, self).setUp()

        otter_log = patch(self, 'otter.scheduler.otter_log')
        self.log = mock_log()
        otter_log.bind.return_value = self.log

        def pfactory(log, callable):
            self.fake_partitioner = FakePartitioner(log, callable)
            return self.fake_partitioner

        self.scheduler_service = SchedulerService("disp",
                                                  100,
                                                  self.mock_store,
                                                  pfactory,
                                                  threshold=600)
        otter_log.bind.assert_called_once_with(system='otter.scheduler')
        self.scheduler_service.running = True
        self.assertIdentical(self.fake_partitioner,
                             self.scheduler_service.partitioner)

        self.check_events_in_bucket = patch(
            self, 'otter.scheduler.check_events_in_bucket')

        self.returns = []
        self.setup_func(self.mock_store.get_oldest_event)
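A note on the factory injection shown above: SchedulerService is handed a partitioner factory and builds its own partitioner with its bound log and its got_buckets callback, which is what lets the test capture the FakePartitioner instance it creates. The following is a minimal, hypothetical sketch of that pattern; StubPartitioner and build_service are invented for illustration and are not otter code.

class StubPartitioner(object):
    """Hypothetical stand-in: remembers the log and got_buckets callback."""
    def __init__(self, log, got_buckets):
        self.log = log
        self.got_buckets = got_buckets

def build_service(partitioner_factory):
    # Rough shape of what a service does with the factory: bind a log and
    # pass in its own bucket-checking callback.
    return partitioner_factory(
        'bound-log', lambda buckets: ['checked %d' % b for b in buckets])

captured = {}

def pfactory(log, callable):
    captured['partitioner'] = StubPartitioner(log, callable)
    return captured['partitioner']

partitioner = build_service(pfactory)
assert partitioner is captured['partitioner']
print(partitioner.got_buckets([2, 3]))   # ['checked 2', 'checked 3']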
Code example #2
def makeService(config):
    """
    Set up the otter-api service.
    """
    set_config_data(dict(config))

    if not config_value('mock'):
        seed_endpoints = [
            clientFromString(reactor, str(host))
            for host in config_value('cassandra.seed_hosts')
        ]

        cassandra_cluster = LoggingCQLClient(
            RoundRobinCassandraCluster(seed_endpoints,
                                       config_value('cassandra.keyspace')),
            log.bind(system='otter.silverberg'))

        set_store(CassScalingGroupCollection(cassandra_cluster))

    bobby_url = config_value('bobby_url')
    if bobby_url is not None:
        set_bobby(BobbyClient(bobby_url))

    cache_ttl = config_value('identity.cache_ttl')

    if cache_ttl is None:
        # FIXME: Pick an arbitrary cache ttl value based on absolutely no
        # science.
        cache_ttl = 300

    authenticator = CachingAuthenticator(
        reactor,
        ImpersonatingAuthenticator(config_value('identity.username'),
                                   config_value('identity.password'),
                                   config_value('identity.url'),
                                   config_value('identity.admin_url')),
        cache_ttl)

    supervisor = Supervisor(authenticator.authenticate_tenant, coiterate)

    set_supervisor(supervisor)

    s = MultiService()

    site = Site(root)
    site.displayTracebacks = False

    api_service = service(str(config_value('port')), site)
    api_service.setServiceParent(s)

    if config_value('scheduler') and not config_value('mock'):
        scheduler_service = SchedulerService(
            int(config_value('scheduler.batchsize')),
            int(config_value('scheduler.interval')), cassandra_cluster)
        scheduler_service.setServiceParent(s)

    return s
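For reference, a hypothetical config that would satisfy the config_value() lookups in the makeService above. The values are placeholders, and the nested layout assumes config_value resolves dotted names such as 'cassandra.seed_hosts' against nested dictionaries; neither is taken from otter's real configuration.

example_config = {
    'port': 'tcp:9000',
    'mock': False,
    'cassandra': {
        'seed_hosts': ['tcp:127.0.0.1:9160'],
        'keyspace': 'otter',
    },
    'identity': {
        'username': 'autoscale-user',
        'password': 'secret',
        'url': 'https://identity.example.com/v2.0',
        'admin_url': 'https://identity-admin.example.com/v2.0',
        'cache_ttl': 300,   # omit to fall back to the 300s default above
    },
    'scheduler': {
        'batchsize': 100,
        'interval': 10,
    },
    'bobby_url': None,      # leave None to skip set_bobby()
}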
Code example #3
def makeService(config):
    """
    Set up the otter-api service.
    """
    set_config_data(dict(config))

    if not config_value('mock'):
        seed_endpoints = [
            clientFromString(reactor, str(host))
            for host in config_value('cassandra.seed_hosts')]

        cassandra_cluster = LoggingCQLClient(RoundRobinCassandraCluster(
            seed_endpoints,
            config_value('cassandra.keyspace')), log.bind(system='otter.silverberg'))

        set_store(CassScalingGroupCollection(cassandra_cluster))

    bobby_url = config_value('bobby_url')
    if bobby_url is not None:
        set_bobby(BobbyClient(bobby_url))

    cache_ttl = config_value('identity.cache_ttl')

    if cache_ttl is None:
        # FIXME: Pick an arbitrary cache ttl value based on absolutely no
        # science.
        cache_ttl = 300

    authenticator = CachingAuthenticator(
        reactor,
        ImpersonatingAuthenticator(
            config_value('identity.username'),
            config_value('identity.password'),
            config_value('identity.url'),
            config_value('identity.admin_url')),
        cache_ttl)

    supervisor = Supervisor(authenticator.authenticate_tenant, coiterate)

    set_supervisor(supervisor)

    s = MultiService()

    site = Site(root)
    site.displayTracebacks = False

    api_service = service(str(config_value('port')), site)
    api_service.setServiceParent(s)

    if config_value('scheduler') and not config_value('mock'):
        scheduler_service = SchedulerService(int(config_value('scheduler.batchsize')),
                                             int(config_value('scheduler.interval')),
                                             cassandra_cluster)
        scheduler_service.setServiceParent(s)

    return s
Code example #4
File: api.py  Project: apurvvkumaria/otter
def makeService(config):
    """
    Set up the otter-api service.
    """
    set_config_data(dict(config))

    # Try to configure graylog and airbrake.

    if config_value('graylog'):
        if GraylogUDPPublisher is not None:
            log.addObserver(
                make_observer_chain(
                    GraylogUDPPublisher(**config_value('graylog')), False))
        else:
            warnings.warn("There is a configuration option for Graylog, but "
                          "txgraylog is not installed.")

    if config_value('airbrake'):
        if AirbrakeLogObserver is not None:
            airbrake = AirbrakeLogObserver(
                config_value('airbrake.api_key'),
                config_value('environment'),
                use_ssl=True
            )

            airbrake.start()
        else:
            warnings.warn("There is a configuration option for Airbrake, but "
                          "txairbrake is not installed.")

    if not config_value('mock'):
        seed_endpoints = [
            clientFromString(reactor, str(host))
            for host in config_value('cassandra.seed_hosts')]

        cassandra_cluster = RoundRobinCassandraCluster(
            seed_endpoints,
            config_value('cassandra.keyspace'))

        set_store(CassScalingGroupCollection(cassandra_cluster))

    s = MultiService()

    site = Site(root)
    site.displayTracebacks = False

    api_service = service(str(config_value('port')), site)
    api_service.setServiceParent(s)

    if config_value('scheduler'):
        scheduler_service = SchedulerService(int(config_value('scheduler.batchsize')),
                                             int(config_value('scheduler.interval')),
                                             cassandra_cluster)
        scheduler_service.setServiceParent(s)

    return s
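The `is not None` checks on GraylogUDPPublisher and AirbrakeLogObserver above imply a guarded import at module level. A sketch of that pattern follows; the import paths are guesses and should not be read as the actual txgraylog/txairbrake module layout.

try:
    from txgraylog.observer import GraylogUDPPublisher   # path is a guess
except ImportError:
    GraylogUDPPublisher = None

try:
    from txairbrake.observers import AirbrakeLogObserver  # path is a guess
except ImportError:
    AirbrakeLogObserver = None

# makeService can then warn and continue when the optional packages are
# missing, instead of failing at import time.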
Code example #5
File: test_scheduler.py  Project: stanzikratel/otter
    def setUp(self):
        """
        Mock all the dependencies of SchedulingService.

        This includes logging, store's fetch_and_delete, TxKazooClient stuff,
        check_events_in_bucket.
        """
        super(SchedulerServiceTests, self).setUp()

        otter_log = patch(self, "otter.scheduler.otter_log")
        self.log = mock_log()
        otter_log.bind.return_value = self.log

        def pfactory(log, callable):
            self.fake_partitioner = FakePartitioner(log, callable)
            return self.fake_partitioner

        self.scheduler_service = SchedulerService("disp", 100, self.mock_store, pfactory, threshold=600)
        otter_log.bind.assert_called_once_with(system="otter.scheduler")
        self.scheduler_service.running = True
        self.assertIdentical(self.fake_partitioner, self.scheduler_service.partitioner)

        self.check_events_in_bucket = patch(self, "otter.scheduler.check_events_in_bucket")

        self.returns = []
        self.setup_func(self.mock_store.get_oldest_event)
Code example #6
File: test_scheduler.py  Project: MariaAbrahms/otter
    def setUp(self):
        """
        Mock all the dependencies of SchedulingService: logging, the store's
        fetch_and_delete, the TxKazooClient machinery, TimerService and
        check_events_in_bucket. twisted.internet.task.Clock is used to
        simulate time.
        """
        super(SchedulerServiceTests, self).setUp()

        otter_log = patch(self, 'otter.scheduler.otter_log')
        self.log = mock_log()
        otter_log.bind.return_value = self.log

        self.kz_client = mock.Mock(spec=['SetPartitioner'])
        self.kz_partition = mock.MagicMock(allocating=False, release=False, failed=False,
                                           acquired=False)
        self.kz_client.SetPartitioner.return_value = self.kz_partition
        self.zk_partition_path = '/part_path'
        self.time_boundary = 15
        self.buckets = range(1, 10)

        self.clock = Clock()
        self.scheduler_service = SchedulerService(
            100, 1, self.mock_store, self.kz_client, self.zk_partition_path,
            self.time_boundary, self.buckets, self.clock, threshold=600)
        otter_log.bind.assert_called_once_with(system='otter.scheduler')
        self.timer_service = patch(self, 'otter.scheduler.TimerService')

        self.check_events_in_bucket = patch(self, 'otter.scheduler.check_events_in_bucket')

        self.returns = []
        self.setup_func(self.mock_store.get_oldest_event)
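The `spec=['SetPartitioner']` above restricts the kz_client mock to that single attribute, so a typo or an unexpected Kazoo call fails loudly instead of silently returning another auto-created mock. A small standalone illustration (not otter code):

import mock

kz_client = mock.Mock(spec=['SetPartitioner'])
kz_client.SetPartitioner('/part_path', set(range(1, 10)))   # allowed

try:
    kz_client.get_children('/part_path')   # not in the spec list
except AttributeError as e:
    print('rejected: %s' % (e,))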
Code example #7
File: api.py  Project: dwcramer/otter
def setup_scheduler(parent, store, kz_client):
    """
    Setup scheduler service
    """
    # Setup scheduler service
    if not config_value('scheduler') or config_value('mock'):
        return
    buckets = range(1, int(config_value('scheduler.buckets')) + 1)
    store.set_scheduler_buckets(buckets)
    partition_path = config_value('scheduler.partition.path') or '/scheduler_partition'
    time_boundary = config_value('scheduler.partition.time_boundary') or 15
    scheduler_service = SchedulerService(int(config_value('scheduler.batchsize')),
                                         int(config_value('scheduler.interval')),
                                         store, kz_client, partition_path, time_boundary,
                                         buckets)
    scheduler_service.setServiceParent(parent)
    return scheduler_service
Code example #8
File: test_scheduler.py  Project: sharwell/otter
    def setUp(self):
        """
        Mock all the dependencies of SchedulingService: the Cassandra store,
        its fetch and delete event methods, and the scaling group on which
        the controller will execute scaling policies (hence
        controller.maybe_execute_scaling_policy is also mocked).
        twisted.internet.task.Clock is used to simulate time.
        """

        self.mock_store = iMock(IScalingGroupCollection, IScalingScheduleCollection)
        self.mock_group = iMock(IScalingGroup)
        self.mock_store.get_scaling_group.return_value = self.mock_group

        self.returns = [None]

        def _responses(*args):
            result = self.returns.pop(0)
            if isinstance(result, Exception):
                return defer.fail(result)
            return defer.succeed(result)

        self.mock_store.fetch_batch_of_events.side_effect = _responses

        self.mock_store.update_delete_events.return_value = defer.succeed(None)

        self.mock_generate_transaction_id = patch(
            self, 'otter.scheduler.generate_transaction_id',
            return_value='transaction-id')
        set_store(self.mock_store)
        self.addCleanup(set_store, None)

        # mock out modify state
        self.mock_state = mock.MagicMock(spec=[])  # so nothing can call it

        def _mock_modify_state(modifier, *args, **kwargs):
            modifier(self.mock_group, self.mock_state, *args, **kwargs)
            return defer.succeed(None)

        self.mock_group.modify_state.side_effect = _mock_modify_state

        self.maybe_exec_policy = patch(self, 'otter.scheduler.maybe_execute_scaling_policy')

        def _mock_with_lock(lock, func, *args, **kwargs):
            return defer.maybeDeferred(func, *args, **kwargs)

        self.mock_lock = patch(self, 'otter.scheduler.BasicLock')
        self.mock_with_lock = patch(self, 'otter.scheduler.with_lock')
        self.mock_with_lock.side_effect = _mock_with_lock
        self.slv_client = mock.MagicMock()
        self.otter_log = patch(self, 'otter.scheduler.otter_log')

        self.clock = Clock()
        self.scheduler_service = SchedulerService(100, 1, self.slv_client, self.clock)

        self.otter_log.bind.assert_called_once_with(system='otter.scheduler')
        self.log = self.otter_log.bind.return_value

        self.next_cron_occurrence = patch(self, 'otter.scheduler.next_cron_occurrence')
        self.next_cron_occurrence.return_value = 'newtrigger'
Code example #9
def setup_scheduler(parent, dispatcher, store, kz_client):
    """
    Setup scheduler service
    """
    # Setup scheduler service
    if not config_value('scheduler') or config_value('mock'):
        return
    buckets = range(1, int(config_value('scheduler.buckets')) + 1)
    store.set_scheduler_buckets(buckets)
    partition_path = (config_value('scheduler.partition.path') or
                      '/scheduler_partition')
    time_boundary = config_value('scheduler.partition.time_boundary') or 15
    partitioner_factory = partial(
        Partitioner,
        kz_client, int(config_value('scheduler.interval')), partition_path,
        buckets, time_boundary)
    scheduler_service = SchedulerService(
        dispatcher, int(config_value('scheduler.batchsize')),
        store, partitioner_factory)
    scheduler_service.setServiceParent(parent)
    return scheduler_service
Code example #10
File: api.py  Project: rackerlabs/otter
def setup_scheduler(parent, dispatcher, store, kz_client):
    """
    Setup scheduler service based on the configuration and return service
    object. If "scheduler" config is not found then return `None`.
    """
    # Setup scheduler service
    if not config_value('scheduler') or config_value('mock'):
        return None
    buckets = range(1, int(config_value('scheduler.buckets')) + 1)
    store.set_scheduler_buckets(buckets)
    partition_path = (config_value('scheduler.partition.path') or
                      '/scheduler_partition')
    time_boundary = config_value('scheduler.partition.time_boundary') or 15
    partitioner_factory = partial(
        Partitioner,
        kz_client, int(config_value('scheduler.interval')), partition_path,
        buckets, time_boundary)
    scheduler_service = SchedulerService(
        dispatcher, int(config_value('scheduler.batchsize')),
        store, partitioner_factory)
    scheduler_service.setServiceParent(parent)
    return scheduler_service
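The functools.partial call above pre-binds the Kazoo client, interval, partition path, buckets, and time boundary, leaving a two-argument factory that matches the pfactory(log, callable) signature seen in the tests. A hypothetical sketch of that mechanism follows; make_partitioner is a stand-in, not otter's Partitioner, and the trailing argument order is an assumption.

from functools import partial

def make_partitioner(kz_client, interval, path, buckets, time_boundary,
                     log, got_buckets):
    # Stand-in: the real Partitioner is a Twisted service.
    return {'path': path, 'buckets': list(buckets), 'got_buckets': got_buckets}

partitioner_factory = partial(make_partitioner, 'kz-client', 10,
                              '/scheduler_partition', range(1, 11), 15)

# SchedulerService can later complete the call with just (log, callback):
partitioner = partitioner_factory('bound-log', lambda buckets: buckets)
print(partitioner['path'])   # /scheduler_partition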
Code example #11
File: test_scheduler.py  Project: stanzikratel/otter
class SchedulerServiceTests(SchedulerTests, DeferredFunctionMixin):
    """
    Tests for `SchedulerService`.
    """

    def setUp(self):
        """
        Mock all the dependencies of SchedulingService.

        This includes logging, store's fetch_and_delete, TxKazooClient stuff,
        check_events_in_bucket.
        """
        super(SchedulerServiceTests, self).setUp()

        otter_log = patch(self, "otter.scheduler.otter_log")
        self.log = mock_log()
        otter_log.bind.return_value = self.log

        def pfactory(log, callable):
            self.fake_partitioner = FakePartitioner(log, callable)
            return self.fake_partitioner

        self.scheduler_service = SchedulerService("disp", 100, self.mock_store, pfactory, threshold=600)
        otter_log.bind.assert_called_once_with(system="otter.scheduler")
        self.scheduler_service.running = True
        self.assertIdentical(self.fake_partitioner, self.scheduler_service.partitioner)

        self.check_events_in_bucket = patch(self, "otter.scheduler.check_events_in_bucket")

        self.returns = []
        self.setup_func(self.mock_store.get_oldest_event)

    def test_partitioner_child(self):
        """
        The Partitioner service is registered as a child of the
        SchedulerService.
        """
        self.assertEqual(self.scheduler_service.services, [self.fake_partitioner])

    def test_health_check_after_threshold(self):
        """
        `service.health_check` returns False when an event's trigger time is
        older than the threshold.
        """
        self.fake_partitioner.health = (True, {"buckets": [2, 3]})
        now = datetime.utcnow()
        returns = [
            {"trigger": now - timedelta(hours=1), "version": "v1"},
            {"trigger": now - timedelta(seconds=2), "version": "v1"},
        ]
        self.returns = returns[:]

        d = self.scheduler_service.health_check()

        self.assertEqual(self.successResultOf(d), (False, {"old_events": [returns[0]], "buckets": [2, 3]}))
        self.mock_store.get_oldest_event.assert_has_calls([mock.call(2), mock.call(3)])

    def test_health_check_before_threshold(self):
        """
        `service.health_check` returns True when no event's trigger time is
        older than the threshold.
        """
        self.fake_partitioner.health = (True, {"buckets": [2, 3]})
        now = datetime.utcnow()
        self.returns = [
            {"trigger": now + timedelta(hours=1), "version": "v1"},
            {"trigger": now + timedelta(seconds=2), "version": "v1"},
        ]

        d = self.scheduler_service.health_check()

        self.assertEqual(self.successResultOf(d), (True, {"old_events": [], "buckets": [2, 3]}))
        self.mock_store.get_oldest_event.assert_has_calls([mock.call(2), mock.call(3)])

    def test_health_check_None(self):
        """
        `service.health_check` returns True when there are no triggers.
        """
        self.fake_partitioner.health = (True, {"buckets": [2, 3]})
        self.returns = [None, None]

        d = self.scheduler_service.health_check()

        self.assertEqual(self.successResultOf(d), (True, {"old_events": [], "buckets": [2, 3]}))
        self.mock_store.get_oldest_event.assert_has_calls([mock.call(2), mock.call(3)])

    def test_health_check_unhealthy_partitioner(self):
        """
        When the partitioner service is unhealthy, the scheduler service passes
        its health message through.
        """
        self.fake_partitioner.health = (False, {"foo": "bar"})
        d = self.scheduler_service.health_check()
        self.assertEqual(self.successResultOf(d), (False, {"foo": "bar"}))

    def test_health_check_not_running(self):
        """
        `service.health_check` returns False when scheduler is stopped.
        """
        self.scheduler_service.running = False
        d = self.scheduler_service.health_check()

        self.assertEqual(self.successResultOf(d), (False, {"reason": "Not running"}))
        self.assertFalse(self.mock_store.get_oldest_event.called)

    def test_reset(self):
        """
        reset() starts a new partition based on the new path.
        """
        self.assertEqual(self.scheduler_service.reset("/new_path"), "partitioner reset to /new_path")

    @mock.patch("otter.scheduler.datetime")
    def test_check_events_acquired(self, mock_datetime):
        """
        The got_buckets callback checks events in each bucket once the buckets
        have been partitioned.
        """
        self.scheduler_service.log = mock.Mock()
        mock_datetime.utcnow.return_value = "utcnow"

        responses = [4, 5]
        self.check_events_in_bucket.side_effect = lambda *_: defer.succeed(responses.pop(0))

        d = self.fake_partitioner.got_buckets([2, 3])

        self.assertEqual(self.successResultOf(d), [4, 5])
        self.scheduler_service.log.bind.assert_called_once_with(scheduler_run_id="transaction-id", utcnow="utcnow")
        log = self.scheduler_service.log.bind.return_value
        self.assertEqual(
            self.check_events_in_bucket.mock_calls,
            [
                mock.call(log, "disp", self.mock_store, 2, "utcnow", 100),
                mock.call(log, "disp", self.mock_store, 3, "utcnow", 100),
            ],
        )
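Read together, the health-check tests above pin down the threshold rule: an event is reported in old_events only when its trigger is more than `threshold` seconds in the past. The helper below distils that reading into a standalone function; it is an interpretation of the expected behaviour, not otter's implementation.

from datetime import datetime, timedelta

def old_events(events, threshold, now=None):
    """Return the events whose trigger is more than `threshold` seconds old."""
    now = now or datetime.utcnow()
    limit = now - timedelta(seconds=threshold)
    return [e for e in events if e is not None and e['trigger'] < limit]

now = datetime.utcnow()
events = [{'trigger': now - timedelta(hours=1), 'version': 'v1'},
          {'trigger': now - timedelta(seconds=2), 'version': 'v1'}]
print(old_events(events, 600, now=now))   # only the hour-old event qualifies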
Code example #12
File: test_scheduler.py  Project: sharwell/otter
    def setUp(self):
        """
        Mock all the dependencies of SchedulingService: the Cassandra store,
        its fetch and delete event methods, and the scaling group on which
        the controller will execute scaling policies (hence
        controller.maybe_execute_scaling_policy is also mocked).
        twisted.internet.task.Clock is used to simulate time.
        """

        self.mock_store = iMock(IScalingGroupCollection,
                                IScalingScheduleCollection)
        self.mock_group = iMock(IScalingGroup)
        self.mock_store.get_scaling_group.return_value = self.mock_group

        self.returns = [None]

        def _responses(*args):
            result = self.returns.pop(0)
            if isinstance(result, Exception):
                return defer.fail(result)
            return defer.succeed(result)

        self.mock_store.fetch_batch_of_events.side_effect = _responses

        self.mock_store.update_delete_events.return_value = defer.succeed(None)

        self.mock_generate_transaction_id = patch(
            self,
            'otter.scheduler.generate_transaction_id',
            return_value='transaction-id')
        set_store(self.mock_store)
        self.addCleanup(set_store, None)

        # mock out modify state
        self.mock_state = mock.MagicMock(spec=[])  # so nothing can call it

        def _mock_modify_state(modifier, *args, **kwargs):
            modifier(self.mock_group, self.mock_state, *args, **kwargs)
            return defer.succeed(None)

        self.mock_group.modify_state.side_effect = _mock_modify_state

        self.maybe_exec_policy = patch(
            self, 'otter.scheduler.maybe_execute_scaling_policy')

        def _mock_with_lock(lock, func, *args, **kwargs):
            return defer.maybeDeferred(func, *args, **kwargs)

        self.mock_lock = patch(self, 'otter.scheduler.BasicLock')
        self.mock_with_lock = patch(self, 'otter.scheduler.with_lock')
        self.mock_with_lock.side_effect = _mock_with_lock
        self.slv_client = mock.MagicMock()
        self.otter_log = patch(self, 'otter.scheduler.otter_log')

        self.clock = Clock()
        self.scheduler_service = SchedulerService(100, 1, self.slv_client,
                                                  self.clock)

        self.otter_log.bind.assert_called_once_with(system='otter.scheduler')
        self.log = self.otter_log.bind.return_value

        self.next_cron_occurrence = patch(
            self, 'otter.scheduler.next_cron_occurrence')
        self.next_cron_occurrence.return_value = 'newtrigger'
Code example #13
File: test_scheduler.py  Project: sharwell/otter
class SchedulerTestCase(TestCase):
    """
    Tests for :class:`SchedulerService`
    """
    def setUp(self):
        """
        Mock all the dependencies of SchedulingService: the Cassandra store,
        its fetch and delete event methods, and the scaling group on which
        the controller will execute scaling policies (hence
        controller.maybe_execute_scaling_policy is also mocked).
        twisted.internet.task.Clock is used to simulate time.
        """

        self.mock_store = iMock(IScalingGroupCollection,
                                IScalingScheduleCollection)
        self.mock_group = iMock(IScalingGroup)
        self.mock_store.get_scaling_group.return_value = self.mock_group

        self.returns = [None]

        def _responses(*args):
            result = self.returns.pop(0)
            if isinstance(result, Exception):
                return defer.fail(result)
            return defer.succeed(result)

        self.mock_store.fetch_batch_of_events.side_effect = _responses

        self.mock_store.update_delete_events.return_value = defer.succeed(None)

        self.mock_generate_transaction_id = patch(
            self,
            'otter.scheduler.generate_transaction_id',
            return_value='transaction-id')
        set_store(self.mock_store)
        self.addCleanup(set_store, None)

        # mock out modify state
        self.mock_state = mock.MagicMock(spec=[])  # so nothing can call it

        def _mock_modify_state(modifier, *args, **kwargs):
            modifier(self.mock_group, self.mock_state, *args, **kwargs)
            return defer.succeed(None)

        self.mock_group.modify_state.side_effect = _mock_modify_state

        self.maybe_exec_policy = patch(
            self, 'otter.scheduler.maybe_execute_scaling_policy')

        def _mock_with_lock(lock, func, *args, **kwargs):
            return defer.maybeDeferred(func, *args, **kwargs)

        self.mock_lock = patch(self, 'otter.scheduler.BasicLock')
        self.mock_with_lock = patch(self, 'otter.scheduler.with_lock')
        self.mock_with_lock.side_effect = _mock_with_lock
        self.slv_client = mock.MagicMock()
        self.otter_log = patch(self, 'otter.scheduler.otter_log')

        self.clock = Clock()
        self.scheduler_service = SchedulerService(100, 1, self.slv_client,
                                                  self.clock)

        self.otter_log.bind.assert_called_once_with(system='otter.scheduler')
        self.log = self.otter_log.bind.return_value

        self.next_cron_occurrence = patch(
            self, 'otter.scheduler.next_cron_occurrence')
        self.next_cron_occurrence.return_value = 'newtrigger'

    def validate_calls(self, d, fetch_returns, update_delete_args):
        """
        Validate all the calls made by the service w.r.t. the events
        """
        fetch_call_count = len(fetch_returns)
        events = [
            event for fetch_return in fetch_returns for event in fetch_return
        ]
        num_events = len(events)
        self.assertIsNone(self.successResultOf(d))
        self.assertEqual(self.mock_store.fetch_batch_of_events.call_count,
                         fetch_call_count)
        if update_delete_args:
            self.assertEqual(
                self.mock_store.update_delete_events.call_args_list, [
                    mock.call(delete_events, update_events)
                    for delete_events, update_events in update_delete_args
                ])
        self.assertEqual(self.mock_group.modify_state.call_count, num_events)
        self.assertEqual(
            self.mock_store.get_scaling_group.call_args_list,
            [mock.call(mock.ANY, e['tenantId'], e['groupId']) for e in events])
        self.assertEqual(self.maybe_exec_policy.mock_calls, [
            mock.call(mock.ANY,
                      'transaction-id',
                      self.mock_group,
                      self.mock_state,
                      policy_id=event['policyId']) for event in events
        ])

    def test_empty(self):
        """
        No policies are executed when ``fetch_batch_of_events`` returns an
        empty list, i.e. there are no events due before now.
        """
        self.returns = [[]]
        d = self.scheduler_service.check_for_events(100)
        self.validate_calls(d, [[]], None)
        self.assertFalse(self.mock_store.update_delete_events.called)
        self.log.bind.assert_called_once_with(scheduler_run_id=mock.ANY,
                                              utcnow=mock.ANY)
        self.log.bind.return_value.msg.assert_called_once_with(
            'Checking for events')

    def test_one(self):
        """
        A policy is executed when its corresponding event is due before now.
        """
        events = [{
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol44',
            'trigger': 'now',
            'cron': None
        }]
        self.returns = [events]

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, [events], [(['pol44'], [])])

    def test_policy_exec_logs(self):
        """
        The scheduler logs `CannotExecutePolicyError` as msg instead of err
        """
        events = [{
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol44',
            'trigger': 'now',
            'cron': 'c1'
        }]
        self.returns = [events]
        self.mock_group.modify_state.side_effect = (lambda *_: defer.fail(
            CannotExecutePolicyError('t', 'g', 'p', 'w')))

        d = self.scheduler_service.check_for_events(100)

        self.assertIsNone(self.successResultOf(d))
        self.log.bind.return_value.bind(tenant_id='1234',
                                        scaling_group_id='scal44',
                                        policy_id='pol44')
        self.log.bind.return_value.bind.return_value.msg.assert_has_calls([
            mock.call('Executing policy'),
            mock.call('Cannot execute policy',
                      reason=CheckFailure(CannotExecutePolicyError))
        ])
        self.assertFalse(
            self.log.bind.return_value.bind.return_value.err.called)

    def test_many(self):
        """
        Events are fetched and processed in batches of 100, and their
        corresponding policies are executed.
        """
        events1 = [{
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol44',
            'trigger': 'now',
            'cron': None
        } for i in range(100)]
        events2 = [{
            'tenantId': '1234',
            'groupId': 'scal45',
            'policyId': 'pol45',
            'trigger': 'now',
            'cron': None
        } for i in range(100)]
        self.returns = [events1, events2, []]
        fetch_returns = self.returns[:]

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, fetch_returns, [(['pol44'] * 100, []),
                                               (['pol45'] * 100, [])])

    def test_timer_works(self):
        """
        The scheduler executes every x seconds
        """
        events1 = [{
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol44',
            'trigger': 'now',
            'cron': None
        } for i in range(30)]
        events2 = [{
            'tenantId': '1234',
            'groupId': 'scal45',
            'policyId': 'pol45',
            'trigger': 'now',
            'cron': None
        } for i in range(20)]
        self.returns = [events1, events2]

        # events not fetched before startService
        self.validate_calls(defer.succeed(None), [], None)

        # events fetched after calling startService
        self.scheduler_service.startService()
        self.validate_calls(defer.succeed(None), [events1],
                            [(['pol44'] * 30, [])])

        # events are fetched again after timer expires
        self.clock.advance(1)
        self.validate_calls(defer.succeed(None), [events1, events2],
                            [(['pol44'] * 30, []), (['pol45'] * 20, [])])

    def test_timer_works_on_error(self):
        """
        The scheduler executes every x seconds even if an error occurs while
        fetching events.
        """
        # Copy fetch function from setUp and set it to fail
        fetch_func = self.mock_store.fetch_batch_of_events.side_effect
        self.mock_store.fetch_batch_of_events.side_effect = None
        self.mock_store.fetch_batch_of_events.return_value = defer.fail(
            TimedOutException())

        # Start service and see if update_delete_events got called
        self.scheduler_service.startService()
        self.assertFalse(self.mock_store.update_delete_events.called)

        # fix fetch function and advance clock to see if works next time
        self.mock_store.fetch_batch_of_events.side_effect = fetch_func
        events = [{
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol44',
            'trigger': 'now',
            'cron': None
        } for i in range(30)]
        self.returns = [events]
        self.clock.advance(1)
        self.validate_calls(
            defer.succeed(None),
            [[], events],  # first [] to account for failed fetch call
            [(['pol44'] * 30, [])])

    def test_called_with_lock(self):
        """
        ``fetch_and_process`` is called with a lock
        """
        events1 = [{
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol44',
            'trigger': 'now',
            'cron': None
        } for i in range(100)]
        events2 = [{
            'tenantId': '1234',
            'groupId': 'scal45',
            'policyId': 'pol45',
            'trigger': 'now',
            'cron': None
        } for i in range(20)]
        self.returns = [events1, events2]

        self.mock_lock.assert_called_once_with(self.slv_client,
                                               LOCK_TABLE_NAME,
                                               'schedule',
                                               max_retry=0)

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, [events1, events2], [(['pol44'] * 100, []),
                                                    (['pol45'] * 20, [])])

        lock = self.mock_lock.return_value
        self.assertEqual(self.mock_with_lock.call_count, 2)
        self.assertEqual(
            self.mock_with_lock.mock_calls,
            [mock.call(lock, self.scheduler_service.fetch_and_process, 100)] *
            2)

    def test_does_nothing_on_no_lock(self):
        """
        ``check_for_events`` gracefully does nothing when it does not get a lock. It
        does not call ``fetch_and_process``
        """
        events1 = [{
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol44',
            'trigger': 'now',
            'cron': None
        } for i in range(100)]
        events2 = [{
            'tenantId': '1234',
            'groupId': 'scal45',
            'policyId': 'pol45',
            'trigger': 'now',
            'cron': None
        } for i in range(20)]
        self.returns = [events1, events2]

        self.mock_lock.assert_called_once_with(self.slv_client,
                                               LOCK_TABLE_NAME,
                                               'schedule',
                                               max_retry=0)
        with_lock_impl = lambda *args: defer.fail(
            BusyLockError(LOCK_TABLE_NAME, 'schedule'))
        self.mock_with_lock.side_effect = with_lock_impl

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, [], None)
        lock = self.mock_lock.return_value
        self.assertEqual(
            self.mock_with_lock.mock_calls,
            [mock.call(lock, self.scheduler_service.fetch_and_process, 100)])
        self.log.msg.assert_called_once_with(
            "Couldn't get lock to process events",
            reason=CheckFailure(BusyLockError))

    def test_does_nothing_on_no_lock_second_time(self):
        """
        ``check_for_events`` gracefully does nothing when it does not get a
        lock after finishing the first batch of 100 events. It does not call
        ``fetch_and_process`` a second time.
        """
        events1 = [{
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol44',
            'trigger': 'now',
            'cron': None
        } for i in range(100)]
        events2 = [{
            'tenantId': '1234',
            'groupId': 'scal45',
            'policyId': 'pol45',
            'trigger': 'now',
            'cron': None
        } for i in range(20)]
        self.returns = [events1, events2]

        self.mock_lock.assert_called_once_with(self.slv_client,
                                               LOCK_TABLE_NAME,
                                               'schedule',
                                               max_retry=0)

        _with_lock_first_time = [True]

        def _with_lock(lock, func, *args, **kwargs):
            if _with_lock_first_time[0]:
                _with_lock_first_time[0] = False
                return defer.maybeDeferred(func, *args, **kwargs)
            return defer.fail(BusyLockError(LOCK_TABLE_NAME, 'schedule'))

        self.mock_with_lock.side_effect = _with_lock

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, [events1], [(['pol44'] * 100, [])])
        lock = self.mock_lock.return_value
        self.assertEqual(
            self.mock_with_lock.mock_calls,
            [mock.call(lock, self.scheduler_service.fetch_and_process, 100)] *
            2)
        self.log.msg.assert_called_once_with(
            "Couldn't get lock to process events",
            reason=CheckFailure(BusyLockError))

    def test_cron_updates(self):
        """
        The scheduler updates cron events
        """
        events = [{
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol44',
            'trigger': 'now',
            'cron': 'c1'
        } for i in range(30)]
        self.returns = [events]

        d = self.scheduler_service.check_for_events(100)

        exp_updated_events = []
        for event in events:
            event['trigger'] = 'newtrigger'
            exp_updated_events.append(event)
        self.validate_calls(d, [events], [([], exp_updated_events)])

    def test_cron_updates_and_deletes(self):
        """
        The scheduler updates cron events and deletes at-style events
        """
        events = [{
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol44',
            'trigger': 'now',
            'cron': 'c1'
        }, {
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol45',
            'trigger': 'now',
            'cron': None
        }, {
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol46',
            'trigger': 'now',
            'cron': 'c2'
        }]
        self.returns = [events]

        d = self.scheduler_service.check_for_events(100)

        exp_deleted_events = ['pol45']
        exp_updated_events = []
        for i in [0, 2]:
            event = events[i]
            event['trigger'] = 'newtrigger'
            exp_updated_events.append(event)
        self.validate_calls(d, [events],
                            [(exp_deleted_events, exp_updated_events)])

    def test_nopolicy_or_group_events_deleted(self):
        """
        The scheduler does not update cron-style events whose policy or group
        has been deleted (those that raise NoSuchPolicyError or
        NoSuchScalingGroupError); it deletes those events instead.
        """
        events = [{
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol44',
            'trigger': 'now',
            'cron': 'c1'
        }, {
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol45',
            'trigger': 'now',
            'cron': 'c2'
        }, {
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol46',
            'trigger': 'now',
            'cron': 'c3'
        }, {
            'tenantId': '1234',
            'groupId': 'scal44',
            'policyId': 'pol47',
            'trigger': 'now',
            'cron': None
        }]
        self.returns = [events]

        events_indexes = range(len(events))

        def _mock_modify_state(modifier, *args, **kwargs):
            index = events_indexes.pop(0)
            if index == 0:
                return defer.fail(NoSuchPolicyError('1234', 'scal44', 'pol44'))
            if index == 1:
                return defer.fail(NoSuchScalingGroupError('1234', 'scal44'))
            modifier(self.mock_group, self.mock_state, *args, **kwargs)
            return defer.succeed(None)

        self.mock_group.modify_state.side_effect = _mock_modify_state

        d = self.scheduler_service.check_for_events(100)

        exp_delete_events = ['pol44', 'pol45', 'pol47']
        events[2]['trigger'] = 'newtrigger'
        exp_update_events = [events[2]]

        # Not using validate_calls since maybe_execute_scaling_policy calls do not match
        self.assertIsNone(self.successResultOf(d))
        self.assertEqual(self.mock_store.fetch_batch_of_events.call_count, 1)
        self.mock_store.update_delete_events.assert_called_once_with(
            exp_delete_events, exp_update_events)
        self.assertEqual(self.mock_group.modify_state.call_count, len(events))
        self.assertEqual(
            self.mock_store.get_scaling_group.call_args_list,
            [mock.call(mock.ANY, e['tenantId'], e['groupId']) for e in events])
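The _responses side effect used throughout these setUp methods is a small, reusable trick: pop the next canned value and hand it back as an already-fired Deferred, failing when the value is an exception. Factored out as a helper it looks roughly like this (a sketch, not part of otter):

from twisted.internet import defer

def canned_deferreds(returns):
    """Build a mock side_effect that replays `returns` as fired Deferreds."""
    def _responses(*args, **kwargs):
        result = returns.pop(0)
        if isinstance(result, Exception):
            return defer.fail(result)
        return defer.succeed(result)
    return _responses

# usage:
#   mock_store.fetch_batch_of_events.side_effect = canned_deferreds([[], events])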
Code example #14
File: test_scheduler.py  Project: sharwell/otter
class SchedulerTestCase(TestCase):
    """
    Tests for :class:`SchedulerService`
    """

    def setUp(self):
        """
        Mock all the dependencies of SchedulingService: the Cassandra store,
        its fetch and delete event methods, and the scaling group on which
        the controller will execute scaling policies (hence
        controller.maybe_execute_scaling_policy is also mocked).
        twisted.internet.task.Clock is used to simulate time.
        """

        self.mock_store = iMock(IScalingGroupCollection, IScalingScheduleCollection)
        self.mock_group = iMock(IScalingGroup)
        self.mock_store.get_scaling_group.return_value = self.mock_group

        self.returns = [None]

        def _responses(*args):
            result = self.returns.pop(0)
            if isinstance(result, Exception):
                return defer.fail(result)
            return defer.succeed(result)

        self.mock_store.fetch_batch_of_events.side_effect = _responses

        self.mock_store.update_delete_events.return_value = defer.succeed(None)

        self.mock_generate_transaction_id = patch(
            self, 'otter.scheduler.generate_transaction_id',
            return_value='transaction-id')
        set_store(self.mock_store)
        self.addCleanup(set_store, None)

        # mock out modify state
        self.mock_state = mock.MagicMock(spec=[])  # so nothing can call it

        def _mock_modify_state(modifier, *args, **kwargs):
            modifier(self.mock_group, self.mock_state, *args, **kwargs)
            return defer.succeed(None)

        self.mock_group.modify_state.side_effect = _mock_modify_state

        self.maybe_exec_policy = patch(self, 'otter.scheduler.maybe_execute_scaling_policy')

        def _mock_with_lock(lock, func, *args, **kwargs):
            return defer.maybeDeferred(func, *args, **kwargs)

        self.mock_lock = patch(self, 'otter.scheduler.BasicLock')
        self.mock_with_lock = patch(self, 'otter.scheduler.with_lock')
        self.mock_with_lock.side_effect = _mock_with_lock
        self.slv_client = mock.MagicMock()
        self.otter_log = patch(self, 'otter.scheduler.otter_log')

        self.clock = Clock()
        self.scheduler_service = SchedulerService(100, 1, self.slv_client, self.clock)

        self.otter_log.bind.assert_called_once_with(system='otter.scheduler')
        self.log = self.otter_log.bind.return_value

        self.next_cron_occurrence = patch(self, 'otter.scheduler.next_cron_occurrence')
        self.next_cron_occurrence.return_value = 'newtrigger'

    def validate_calls(self, d, fetch_returns, update_delete_args):
        """
        Validate all the calls made by the service w.r.t. the events
        """
        fetch_call_count = len(fetch_returns)
        events = [event for fetch_return in fetch_returns for event in fetch_return]
        num_events = len(events)
        self.assertIsNone(self.successResultOf(d))
        self.assertEqual(self.mock_store.fetch_batch_of_events.call_count, fetch_call_count)
        if update_delete_args:
            self.assertEqual(self.mock_store.update_delete_events.call_args_list,
                             [mock.call(delete_events, update_events)
                              for delete_events, update_events in update_delete_args])
        self.assertEqual(self.mock_group.modify_state.call_count, num_events)
        self.assertEqual(self.mock_store.get_scaling_group.call_args_list,
                         [mock.call(mock.ANY, e['tenantId'], e['groupId']) for e in events])
        self.assertEqual(self.maybe_exec_policy.mock_calls,
                         [mock.call(mock.ANY, 'transaction-id', self.mock_group,
                          self.mock_state, policy_id=event['policyId']) for event in events])

    def test_empty(self):
        """
        No policies are executed when ``fetch_batch_of_events`` returns an
        empty list, i.e. there are no events due before now.
        """
        self.returns = [[]]
        d = self.scheduler_service.check_for_events(100)
        self.validate_calls(d, [[]], None)
        self.assertFalse(self.mock_store.update_delete_events.called)
        self.log.bind.assert_called_once_with(scheduler_run_id=mock.ANY, utcnow=mock.ANY)
        self.log.bind.return_value.msg.assert_called_once_with('Checking for events')

    def test_one(self):
        """
        A policy is executed when its corresponding event is due before now.
        """
        events = [{'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol44',
                   'trigger': 'now', 'cron': None}]
        self.returns = [events]

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, [events], [(['pol44'], [])])

    def test_policy_exec_logs(self):
        """
        The scheduler logs `CannotExecutePolicyError` as msg instead of err
        """
        events = [{'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol44',
                   'trigger': 'now', 'cron': 'c1'}]
        self.returns = [events]
        self.mock_group.modify_state.side_effect = (
            lambda *_: defer.fail(CannotExecutePolicyError('t', 'g', 'p', 'w')))

        d = self.scheduler_service.check_for_events(100)

        self.assertIsNone(self.successResultOf(d))
        self.log.bind.return_value.bind(tenant_id='1234', scaling_group_id='scal44',
                                        policy_id='pol44')
        self.log.bind.return_value.bind.return_value.msg.assert_has_calls(
            [mock.call('Executing policy'),
             mock.call('Cannot execute policy',
                       reason=CheckFailure(CannotExecutePolicyError))])
        self.assertFalse(self.log.bind.return_value.bind.return_value.err.called)

    def test_many(self):
        """
        Events are fetched and processed in batches of 100, and their
        corresponding policies are executed.
        """
        events1 = [{'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol44',
                    'trigger': 'now', 'cron': None} for i in range(100)]
        events2 = [{'tenantId': '1234', 'groupId': 'scal45', 'policyId': 'pol45',
                    'trigger': 'now', 'cron': None} for i in range(100)]
        self.returns = [events1, events2, []]
        fetch_returns = self.returns[:]

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, fetch_returns, [(['pol44'] * 100, []), (['pol45'] * 100, [])])

    def test_timer_works(self):
        """
        The scheduler executes every x seconds
        """
        events1 = [{'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol44',
                    'trigger': 'now', 'cron': None} for i in range(30)]
        events2 = [{'tenantId': '1234', 'groupId': 'scal45', 'policyId': 'pol45',
                    'trigger': 'now', 'cron': None} for i in range(20)]
        self.returns = [events1, events2]

        # events not fetched before startService
        self.validate_calls(defer.succeed(None), [], None)

        # events fetched after calling startService
        self.scheduler_service.startService()
        self.validate_calls(defer.succeed(None), [events1], [(['pol44'] * 30, [])])

        # events are fetched again after timer expires
        self.clock.advance(1)
        self.validate_calls(defer.succeed(None), [events1, events2],
                            [(['pol44'] * 30, []), (['pol45'] * 20, [])])

    def test_timer_works_on_error(self):
        """
        The scheduler executes every x seconds even if an error occurs while
        fetching events.
        """
        # Copy fetch function from setUp and set it to fail
        fetch_func = self.mock_store.fetch_batch_of_events.side_effect
        self.mock_store.fetch_batch_of_events.side_effect = None
        self.mock_store.fetch_batch_of_events.return_value = defer.fail(TimedOutException())

        # Start service and see if update_delete_events got called
        self.scheduler_service.startService()
        self.assertFalse(self.mock_store.update_delete_events.called)

        # fix fetch function and advance clock to see if works next time
        self.mock_store.fetch_batch_of_events.side_effect = fetch_func
        events = [{'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol44',
                   'trigger': 'now', 'cron': None} for i in range(30)]
        self.returns = [events]
        self.clock.advance(1)
        self.validate_calls(defer.succeed(None),
                            [[], events],  # first [] to account for failed fetch call
                            [(['pol44'] * 30, [])])

    def test_called_with_lock(self):
        """
        ``fetch_and_process`` is called with a lock
        """
        events1 = [{'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol44',
                    'trigger': 'now', 'cron': None} for i in range(100)]
        events2 = [{'tenantId': '1234', 'groupId': 'scal45', 'policyId': 'pol45',
                    'trigger': 'now', 'cron': None} for i in range(20)]
        self.returns = [events1, events2]

        self.mock_lock.assert_called_once_with(self.slv_client, LOCK_TABLE_NAME, 'schedule',
                                               max_retry=0)

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, [events1, events2],
                            [(['pol44'] * 100, []), (['pol45'] * 20, [])])

        lock = self.mock_lock.return_value
        self.assertEqual(self.mock_with_lock.call_count, 2)
        self.assertEqual(self.mock_with_lock.mock_calls,
                         [mock.call(lock, self.scheduler_service.fetch_and_process, 100)] * 2)

    def test_does_nothing_on_no_lock(self):
        """
        ``check_for_events`` gracefully does nothing when it does not get a lock. It
        does not call ``fetch_and_process``
        """
        events1 = [{'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol44',
                    'trigger': 'now', 'cron': None} for i in range(100)]
        events2 = [{'tenantId': '1234', 'groupId': 'scal45', 'policyId': 'pol45',
                    'trigger': 'now', 'cron': None} for i in range(20)]
        self.returns = [events1, events2]

        self.mock_lock.assert_called_once_with(self.slv_client, LOCK_TABLE_NAME, 'schedule',
                                               max_retry=0)
        with_lock_impl = lambda *args: defer.fail(BusyLockError(LOCK_TABLE_NAME, 'schedule'))
        self.mock_with_lock.side_effect = with_lock_impl

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, [], None)
        lock = self.mock_lock.return_value
        self.assertEqual(self.mock_with_lock.mock_calls,
                         [mock.call(lock, self.scheduler_service.fetch_and_process, 100)])
        self.log.msg.assert_called_once_with("Couldn't get lock to process events",
                                             reason=CheckFailure(BusyLockError))

    def test_does_nothing_on_no_lock_second_time(self):
        """
        ``check_for_events`` gracefully does nothing when it does not get a
        lock after finishing the first batch of 100 events. It does not call
        ``fetch_and_process`` a second time.
        """
        events1 = [{'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol44',
                    'trigger': 'now', 'cron': None} for i in range(100)]
        events2 = [{'tenantId': '1234', 'groupId': 'scal45', 'policyId': 'pol45',
                    'trigger': 'now', 'cron': None} for i in range(20)]
        self.returns = [events1, events2]

        self.mock_lock.assert_called_once_with(self.slv_client, LOCK_TABLE_NAME, 'schedule',
                                               max_retry=0)

        _with_lock_first_time = [True]

        def _with_lock(lock, func, *args, **kwargs):
            if _with_lock_first_time[0]:
                _with_lock_first_time[0] = False
                return defer.maybeDeferred(func, *args, **kwargs)
            return defer.fail(BusyLockError(LOCK_TABLE_NAME, 'schedule'))

        self.mock_with_lock.side_effect = _with_lock

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, [events1], [(['pol44'] * 100, [])])
        lock = self.mock_lock.return_value
        self.assertEqual(self.mock_with_lock.mock_calls,
                         [mock.call(lock, self.scheduler_service.fetch_and_process, 100)] * 2)
        self.log.msg.assert_called_once_with("Couldn't get lock to process events",
                                             reason=CheckFailure(BusyLockError))

    def test_cron_updates(self):
        """
        The scheduler updates cron events
        """
        events = [{'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol44',
                   'trigger': 'now', 'cron': 'c1'} for i in range(30)]
        self.returns = [events]

        d = self.scheduler_service.check_for_events(100)

        exp_updated_events = []
        for event in events:
            event['trigger'] = 'newtrigger'
            exp_updated_events.append(event)
        self.validate_calls(d, [events], [([], exp_updated_events)])

    def test_cron_updates_and_deletes(self):
        """
        The scheduler updates cron events and deletes at-style events
        """
        events = [{'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol44',
                   'trigger': 'now', 'cron': 'c1'},
                  {'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol45',
                   'trigger': 'now', 'cron': None},
                  {'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol46',
                   'trigger': 'now', 'cron': 'c2'}]
        self.returns = [events]

        d = self.scheduler_service.check_for_events(100)

        exp_deleted_events = ['pol45']
        exp_updated_events = []
        for i in [0, 2]:
            event = events[i]
            event['trigger'] = 'newtrigger'
            exp_updated_events.append(event)
        self.validate_calls(d, [events], [(exp_deleted_events, exp_updated_events)])

    def test_nopolicy_or_group_events_deleted(self):
        """
        The scheduler does not update cron-style events whose policy or group
        has been deleted (those that raise NoSuchPolicyError or
        NoSuchScalingGroupError); it deletes those events instead.
        """
        events = [{'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol44',
                   'trigger': 'now', 'cron': 'c1'},
                  {'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol45',
                   'trigger': 'now', 'cron': 'c2'},
                  {'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol46',
                   'trigger': 'now', 'cron': 'c3'},
                  {'tenantId': '1234', 'groupId': 'scal44', 'policyId': 'pol47',
                   'trigger': 'now', 'cron': None}]
        self.returns = [events]

        events_indexes = range(len(events))

        def _mock_modify_state(modifier, *args, **kwargs):
            index = events_indexes.pop(0)
            if index == 0:
                return defer.fail(NoSuchPolicyError('1234', 'scal44', 'pol44'))
            if index == 1:
                return defer.fail(NoSuchScalingGroupError('1234', 'scal44'))
            modifier(self.mock_group, self.mock_state, *args, **kwargs)
            return defer.succeed(None)

        self.mock_group.modify_state.side_effect = _mock_modify_state

        d = self.scheduler_service.check_for_events(100)

        exp_delete_events = ['pol44', 'pol45', 'pol47']
        events[2]['trigger'] = 'newtrigger'
        exp_update_events = [events[2]]

        # Not using validate_calls since maybe_execute_scaling_policy calls do not match
        self.assertIsNone(self.successResultOf(d))
        self.assertEqual(self.mock_store.fetch_batch_of_events.call_count, 1)
        self.mock_store.update_delete_events.assert_called_once_with(exp_delete_events,
                                                                     exp_update_events)
        self.assertEqual(self.mock_group.modify_state.call_count, len(events))
        self.assertEqual(self.mock_store.get_scaling_group.call_args_list,
                         [mock.call(mock.ANY, e['tenantId'], e['groupId']) for e in events])
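
These cron tests encode a simple rule: events with a cron entry get their trigger recomputed and are updated, while at-style events (cron is None) are deleted after execution. Below is a minimal sketch of that classification; the helper name is hypothetical and a fixed string stands in for next_cron_occurrence, so this is illustrative only, not otter's actual code.

def partition_processed_events(events, next_trigger='newtrigger'):
    """
    Split processed events into (policy ids to delete, events to update),
    mirroring the expectations asserted above. Hypothetical helper;
    `next_trigger` stands in for next_cron_occurrence(event['cron']).
    """
    to_delete, to_update = [], []
    for event in events:
        if event['cron'] is None:
            # at-style event: executed once, then removed
            to_delete.append(event['policyId'])
        else:
            # cron-style event: kept, with a freshly computed trigger
            to_update.append(dict(event, trigger=next_trigger))
    return to_delete, to_update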
Code example #15
0
File: api.py Project: alex/otter
def makeService(config):
    """
    Set up the otter-api service.
    """
    set_config_data(dict(config))

    # Try to configure graylog and airbrake.

    if config_value('graylog'):
        if GraylogUDPPublisher is not None:
            log.addObserver(
                make_observer_chain(
                    GraylogUDPPublisher(**config_value('graylog')), False))
        else:
            warnings.warn("There is a configuration option for Graylog, but "
                          "txgraylog is not installed.")

    if config_value('airbrake'):
        if AirbrakeLogObserver is not None:
            airbrake = AirbrakeLogObserver(
                config_value('airbrake.api_key'),
                config_value('environment'),
                use_ssl=True
            )

            airbrake.start()
        else:
            warnings.warn("There is a configuration option for Airbrake, but "
                          "txairbrake is not installed.")

    if not config_value('mock'):
        seed_endpoints = [
            clientFromString(reactor, str(host))
            for host in config_value('cassandra.seed_hosts')]

        cassandra_cluster = RoundRobinCassandraCluster(
            seed_endpoints,
            config_value('cassandra.keyspace'))

        set_store(CassScalingGroupCollection(cassandra_cluster))

    cache_ttl = config_value('identity.cache_ttl')

    if cache_ttl is None:
        # FIXME: Pick an arbitrary cache ttl value based on absolutely no
        # science.
        cache_ttl = 300

    authenticator = CachingAuthenticator(
        reactor,
        ImpersonatingAuthenticator(
            config_value('identity.username'),
            config_value('identity.password'),
            config_value('identity.url'),
            config_value('identity.admin_url')),
        cache_ttl)

    supervisor = Supervisor(authenticator.authenticate_tenant)

    set_supervisor(supervisor)

    s = MultiService()

    site = Site(root)
    site.displayTracebacks = False

    api_service = service(str(config_value('port')), site)
    api_service.setServiceParent(s)

    if config_value('scheduler'):
        scheduler_service = SchedulerService(int(config_value('scheduler.batchsize')),
                                             int(config_value('scheduler.interval')),
                                             cassandra_cluster)
        scheduler_service.setServiceParent(s)

    return s
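
For reference, a rough sketch of the kind of configuration dictionary this makeService reads. The keys mirror the config_value() lookups above; the nested layout is an assumption (config_value is assumed to resolve dotted paths against nested dicts), and every value is a placeholder.

# Hypothetical config for makeService(); values are placeholders.
config = {
    'port': 'tcp:9000',
    'mock': False,
    'environment': 'staging',
    'graylog': None,    # or a dict of GraylogUDPPublisher kwargs
    'airbrake': None,   # or a dict including 'api_key'
    'cassandra': {
        'seed_hosts': ['tcp:127.0.0.1:9160'],
        'keyspace': 'otter',
    },
    'identity': {
        'username': 'otter-svc',
        'password': 'not-a-real-password',
        'url': 'https://identity.example.com/v2.0',
        'admin_url': 'https://identity-admin.example.com/v2.0',
        'cache_ttl': 300,
    },
    'scheduler': {
        'batchsize': 100,
        'interval': 10,
    },
}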
Code example #16
0
File: test_scheduler.py Project: MariaAbrahms/otter
class SchedulerServiceTests(SchedulerTests, DeferredFunctionMixin):
    """
    Tests for `SchedulerService`
    """

    def setUp(self):
        """
        Mock all the dependencies of SchedulingService: logging, the store's
        fetch_and_delete, the TxKazooClient machinery, TimerService and
        check_events_in_bucket. twisted.internet.task.Clock is used to simulate time.
        """
        super(SchedulerServiceTests, self).setUp()

        otter_log = patch(self, 'otter.scheduler.otter_log')
        self.log = mock_log()
        otter_log.bind.return_value = self.log

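        # Kazoo client/partitioner test doubles; the partitioner starts out in
        # no state (not allocating, releasing, failed or acquired).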
        self.kz_client = mock.Mock(spec=['SetPartitioner'])
        self.kz_partition = mock.MagicMock(allocating=False, release=False, failed=False,
                                           acquired=False)
        self.kz_client.SetPartitioner.return_value = self.kz_partition
        self.zk_partition_path = '/part_path'
        self.time_boundary = 15
        self.buckets = range(1, 10)

        self.clock = Clock()
        self.scheduler_service = SchedulerService(
            100, 1, self.mock_store, self.kz_client, self.zk_partition_path,
            self.time_boundary, self.buckets, self.clock, threshold=600)
        otter_log.bind.assert_called_once_with(system='otter.scheduler')
        self.timer_service = patch(self, 'otter.scheduler.TimerService')

        self.check_events_in_bucket = patch(self, 'otter.scheduler.check_events_in_bucket')

        self.returns = []
        self.setup_func(self.mock_store.get_oldest_event)

    def test_start_service(self):
        """
        startService() calls super's startService() and creates a SetPartitioner object
        """
        self.scheduler_service.startService()
        self.kz_client.SetPartitioner.assert_called_once_with(
            self.zk_partition_path, set=set(self.buckets), time_boundary=self.time_boundary)
        self.assertEqual(self.scheduler_service.kz_partition, self.kz_partition)
        self.timer_service.startService.assert_called_once_with(self.scheduler_service)

    def test_stop_service(self):
        """
        stopService() calls super's stopService() and stops the allocation if it
        is already acquired
        """
        self.scheduler_service.startService()
        self.kz_partition.acquired = True
        d = self.scheduler_service.stopService()
        self.timer_service.stopService.assert_called_once_with(self.scheduler_service)
        self.kz_partition.finish.assert_called_once_with()
        self.assertEqual(self.kz_partition.finish.return_value, d)

    def test_health_check_after_threshold(self):
        """
        `service.health_check` returns False when trigger time is above threshold
        """
        self.kz_partition.acquired = True
        self.scheduler_service.startService()
        self.kz_partition.__iter__.return_value = [2, 3]
        now = datetime.utcnow()
        returns = [{'trigger': now - timedelta(hours=1), 'version': 'v1'},
                   {'trigger': now - timedelta(seconds=2), 'version': 'v1'}]
        self.returns = returns[:]

        d = self.scheduler_service.health_check()

        self.assertEqual(self.successResultOf(d), (False, {'old_events': [returns[0]],
                                                           'buckets': [2, 3]}))
        self.mock_store.get_oldest_event.assert_has_calls([mock.call(2), mock.call(3)])

    def test_health_check_before_threshold(self):
        """
        `service.health_check` returns True when trigger time is below threshold
        """
        self.kz_partition.acquired = True
        self.scheduler_service.startService()
        self.kz_partition.__iter__.return_value = [2, 3]
        now = datetime.utcnow()
        self.returns = [{'trigger': now + timedelta(hours=1), 'version': 'v1'},
                        {'trigger': now + timedelta(seconds=2), 'version': 'v1'}]

        d = self.scheduler_service.health_check()

        self.assertEqual(self.successResultOf(d), (True, {'old_events': [],
                                                          'buckets': [2, 3]}))
        self.mock_store.get_oldest_event.assert_has_calls([mock.call(2), mock.call(3)])

    def test_health_check_None(self):
        """
        `service.health_check` returns True when there are no triggers
        """
        self.kz_partition.acquired = True
        self.scheduler_service.startService()
        self.kz_partition.__iter__.return_value = [2, 3]
        self.returns = [None, None]

        d = self.scheduler_service.health_check()

        self.assertEqual(self.successResultOf(d), (True, {'old_events': [],
                                                          'buckets': [2, 3]}))
        self.mock_store.get_oldest_event.assert_has_calls([mock.call(2), mock.call(3)])

    def test_health_check_not_acquired(self):
        """
        `service.health_check` returns False when partition is not acquired
        """
        self.kz_partition.acquired = False
        self.scheduler_service.startService()
        self.kz_partition.__iter__.return_value = [2, 3]

        d = self.scheduler_service.health_check()

        self.assertEqual(self.successResultOf(d), (False, {'reason': 'Not acquired'}))
        self.assertFalse(self.mock_store.get_oldest_event.called)

    def test_stop_service_allocating(self):
        """
        stopService() does not stop the allocation (i.e. call finish) if it is not acquired
        """
        self.scheduler_service.startService()
        d = self.scheduler_service.stopService()
        self.assertFalse(self.kz_partition.finish.called)
        self.assertIsNone(d)

    def test_reset(self):
        """
        reset() starts new partition based on new path
        """
        self.scheduler_service.reset('/new_path')
        self.assertEqual(self.scheduler_service.zk_partition_path, '/new_path')
        self.kz_client.SetPartitioner.assert_called_once_with(
            '/new_path', set=set(self.buckets), time_boundary=self.time_boundary)
        self.assertEqual(self.scheduler_service.kz_partition,
                         self.kz_client.SetPartitioner.return_value)

    def test_reset_same_path(self):
        """
        reset() raises error on same path
        """
        self.assertRaises(ValueError, self.scheduler_service.reset, '/part_path')
        self.assertFalse(self.kz_client.SetPartitioner.called)

    def test_check_events_allocating(self):
        """
        `check_events` logs a message and does not check events in buckets when
        buckets are still allocating
        """
        self.kz_partition.allocating = True
        self.scheduler_service.startService()
        self.scheduler_service.check_events(100)
        self.log.msg.assert_called_with('Partition allocating')

        # Ensure others are not called
        self.assertFalse(self.kz_partition.__iter__.called)
        self.assertFalse(self.check_events_in_bucket.called)

    def test_check_events_release(self):
        """
        `check_events` logs a message and does not check events in buckets when
        partitioning has changed. It calls release_set() to re-partition
        """
        self.kz_partition.release = True
        self.scheduler_service.startService()
        self.scheduler_service.check_events(100)
        self.log.msg.assert_called_with('Partition changed. Repartitioning')
        self.kz_partition.release_set.assert_called_once_with()

        # Ensure others are not called
        self.assertFalse(self.kz_partition.__iter__.called)
        self.assertFalse(self.check_events_in_bucket.called)

    def test_check_events_failed(self):
        """
        `check_events` logs a message and does not check events in buckets when
        partitioning has failed. It creates a new partition
        """
        self.kz_partition.failed = True
        self.scheduler_service.startService()

        # after starting, change the SetPartitioner return value to check that
        # the new value is set in self.scheduler_service.kz_partition
        new_kz_partition = mock.MagicMock()
        self.kz_client.SetPartitioner.return_value = new_kz_partition

        self.scheduler_service.check_events(100)
        self.log.msg.assert_called_with('Partition failed. Starting new')

        # Called once when starting and now again when partition failed
        self.assertEqual(self.kz_client.SetPartitioner.call_args_list,
                         [mock.call(self.zk_partition_path, set=set(self.buckets),
                                    time_boundary=self.time_boundary)] * 2)
        self.assertEqual(self.scheduler_service.kz_partition, new_kz_partition)

        # Ensure others are not called
        self.assertFalse(self.kz_partition.__iter__.called)
        self.assertFalse(new_kz_partition.__iter__.called)
        self.assertFalse(self.check_events_in_bucket.called)

    def test_check_events_bad_state(self):
        """
        `self.kz_partition.state` is none of the expected values. `check_events`
        logs it as an error and starts a new partition
        """
        self.kz_partition.state = 'bad'
        self.scheduler_service.startService()

        # after starting, change the SetPartitioner return value to check that
        # the new value is set in self.scheduler_service.kz_partition
        new_kz_partition = mock.MagicMock()
        self.kz_client.SetPartitioner.return_value = new_kz_partition

        self.scheduler_service.check_events(100)

        self.log.err.assert_called_with('Unknown state bad. This cannot happen. Starting new')
        self.kz_partition.finish.assert_called_once_with()

        # Called once when starting and now again when got bad state
        self.assertEqual(self.kz_client.SetPartitioner.call_args_list,
                         [mock.call(self.zk_partition_path, set=set(self.buckets),
                                    time_boundary=self.time_boundary)] * 2)
        self.assertEqual(self.scheduler_service.kz_partition, new_kz_partition)

        # Ensure others are not called
        self.assertFalse(self.kz_partition.__iter__.called)
        self.assertFalse(new_kz_partition.__iter__.called)
        self.assertFalse(self.check_events_in_bucket.called)

    @mock.patch('otter.scheduler.datetime')
    def test_check_events_acquired(self, mock_datetime):
        """
        `check_events` checks events in each bucket when they are partitioned.
        """
        self.kz_partition.acquired = True
        self.scheduler_service.startService()
        self.kz_partition.__iter__.return_value = [2, 3]
        self.scheduler_service.log = mock.Mock()
        mock_datetime.utcnow.return_value = 'utcnow'

        responses = [4, 5]
        self.check_events_in_bucket.side_effect = lambda *_: defer.succeed(responses.pop(0))

        d = self.scheduler_service.check_events(100)

        self.assertEqual(self.successResultOf(d), [4, 5])
        self.assertEqual(self.kz_partition.__iter__.call_count, 1)
        self.scheduler_service.log.bind.assert_called_once_with(
            scheduler_run_id='transaction-id', utcnow='utcnow')
        log = self.scheduler_service.log.bind.return_value
        log.msg.assert_called_once_with('Got buckets {buckets}',
                                        buckets=[2, 3], path='/part_path')
        self.assertEqual(self.check_events_in_bucket.mock_calls,
                         [mock.call(log, self.mock_store, 2, 'utcnow', 100),
                          mock.call(log, self.mock_store, 3, 'utcnow', 100)])
Code example #17
0
File: test_scheduler.py Project: alex/otter
class SchedulerTestCase(DeferredTestMixin, TestCase):
    """
    Tests for :mod:`SchedulerService`
    """

    def setUp(self):
        """
        Mock all the dependencies of SchedulingService: the Cassandra store, its
        fetch and delete events methods, and the scaling group on which the
        controller executes scaling policies (hence controller.maybe_execute_scaling_policy).
        twisted.internet.task.Clock is used to simulate time.
        """

        self.mock_store = iMock(IScalingGroupCollection, IScalingScheduleCollection)
        self.mock_group = iMock(IScalingGroup)
        self.mock_store.get_scaling_group.return_value = self.mock_group

        self.returns = [None]

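        # Pop canned results in order; an Exception becomes a failed Deferred,
        # anything else fires as a successful Deferred.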
        def _responses(*args):
            result = self.returns.pop(0)
            if isinstance(result, Exception):
                return defer.fail(result)
            return defer.succeed(result)

        self.mock_store.fetch_batch_of_events.side_effect = _responses

        self.mock_store.delete_events.return_value = defer.succeed(None)

        self.mock_generate_transaction_id = patch(
            self, 'otter.scheduler.generate_transaction_id',
            return_value='transaction-id')
        set_store(self.mock_store)
        self.addCleanup(set_store, None)

        # mock out modify state
        self.mock_state = mock.MagicMock(spec=[])  # so nothing can call it

        def _mock_modify_state(modifier, *args, **kwargs):
            modifier(self.mock_group, self.mock_state, *args, **kwargs)
            return defer.succeed(None)

        self.mock_group.modify_state.side_effect = _mock_modify_state

        self.mock_log = mock.MagicMock()

        self.mock_controller = patch(self, 'otter.scheduler.controller')

        def _mock_with_lock(lock, func, *args, **kwargs):
            return defer.maybeDeferred(func, *args, **kwargs)

        self.mock_lock = patch(self, 'otter.scheduler.BasicLock')
        self.mock_with_lock = patch(self, 'otter.scheduler.with_lock')
        self.mock_with_lock.side_effect = _mock_with_lock
        self.slv_client = mock.MagicMock()

        self.clock = Clock()
        self.scheduler_service = SchedulerService(100, 1, self.slv_client, self.clock)

    def validate_calls(self, d, fetch_call_count, events):
        """
        Validate all the calls made in the service w.r.t to the events
        """
        num_events = len(events)
        self.assertIsNone(self.successResultOf(d))
        self.assertEqual(self.mock_store.fetch_batch_of_events.call_count, fetch_call_count)
        self.assertEqual(self.mock_group.modify_state.call_count, num_events)
        self.assertEqual(self.mock_store.get_scaling_group.call_args_list,
                         [mock.call(mock.ANY, tid, gid) for tid, gid, pid, t in events])
        self.assertEqual(self.mock_controller.maybe_execute_scaling_policy.mock_calls,
                         [mock.call(mock.ANY, 'transaction-id', self.mock_group,
                          self.mock_state, policy_id=policy_id)
                         for tid, gid, policy_id, t in events])

    def test_empty(self):
        """
        No policies are executed when ``fetch_batch_of_events`` returns an empty
        list, i.e. there are no events due before now
        """
        self.returns = [[]]
        d = self.scheduler_service.check_for_events(100)
        self.validate_calls(d, 1, [])

    def test_one(self):
        """
        A policy is executed when its corresponding event is scheduled before now
        """
        events = [('1234', 'scal44', 'pol44', 'now')]
        self.returns = [events]

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, 1, events)

    def test_many(self):
        """
        Events are fetched and processed in batches of 100, and their corresponding
        policies are executed.
        """
        events1 = [('1234', 'scal44', 'pol44', 'now') for i in range(100)]
        events2 = [('1234', 'scal44', 'pol45', 'now') for i in range(100)]
        self.returns = [events1, events2, []]

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, 3, events1 + events2)

    def test_timer_works(self):
        """
        The scheduler executes every x seconds
        """
        events1 = [('1234', 'scal44', 'pol44', 'now') for i in range(10)]
        events2 = [('1234', 'scal44', 'pol45', 'now') for i in range(20)]
        self.returns = [events1, events2]

        # events not fetched before startService
        self.validate_calls(defer.succeed(None), 0, [])

        # events fetched after calling startService
        self.scheduler_service.startService()
        self.validate_calls(defer.succeed(None), 1, events1)

        # events are fetched again after timer expires
        self.clock.advance(1)
        self.validate_calls(defer.succeed(None), 2, events1 + events2)

    def test_called_with_lock(self):
        """
        ``fetch_and_process`` is called with a lock
        """
        events1 = [('1234', 'scal44', 'pol44', 'now') for i in range(100)]
        events2 = [('1234', 'scal44', 'pol45', 'now') for i in range(20)]
        self.returns = [events1, events2]

        self.mock_lock.assert_called_once_with(self.slv_client, LOCK_TABLE_NAME, 'schedule',
                                               max_retry=0)

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, 2, events1 + events2)

        lock = self.mock_lock.return_value
        self.assertEqual(self.mock_with_lock.call_count, 2)
        self.assertEqual(self.mock_with_lock.mock_calls,
                         [mock.call(lock, self.scheduler_service.fetch_and_process, 100)] * 2)

    def test_does_nothing_on_no_lock(self):
        """
        ``check_for_events`` gracefully does nothing when it does not get a lock. It
        does not call ``fetch_and_process``
        """
        events1 = [('1234', 'scal44', 'pol44', 'now') for i in range(100)]
        events2 = [('1234', 'scal44', 'pol45', 'now') for i in range(20)]
        self.returns = [events1, events2]

        self.mock_lock.assert_called_once_with(self.slv_client, LOCK_TABLE_NAME, 'schedule',
                                               max_retry=0)

        with_lock_impl = lambda *args: defer.fail(BusyLockError(LOCK_TABLE_NAME, 'schedule'))
        self.mock_with_lock.side_effect = with_lock_impl

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, 0, [])
        lock = self.mock_lock.return_value
        self.assertEqual(self.mock_with_lock.mock_calls,
                         [mock.call(lock, self.scheduler_service.fetch_and_process, 100)])

    def test_does_nothing_on_no_lock_second_time(self):
        """
        ``check_for_events`` gracefully does nothing when it does not get a lock after
        finishing the first batch of 100 events. It does not call ``fetch_and_process`` a second time
        """
        events1 = [('1234', 'scal44', 'pol44', 'now') for i in range(100)]
        events2 = [('1234', 'scal44', 'pol45', 'now') for i in range(20)]
        self.returns = [events1, events2]

        self.mock_lock.assert_called_once_with(self.slv_client, LOCK_TABLE_NAME, 'schedule',
                                               max_retry=0)

        _with_lock_first_time = [True]

        def _with_lock(lock, func, *args, **kwargs):
            if _with_lock_first_time[0]:
                _with_lock_first_time[0] = False
                return defer.maybeDeferred(func, *args, **kwargs)
            raise BusyLockError(LOCK_TABLE_NAME, 'schedule')

        self.mock_with_lock.side_effect = _with_lock

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, 1, events1)
        lock = self.mock_lock.return_value
        self.assertEqual(self.mock_with_lock.mock_calls,
                         [mock.call(lock, self.scheduler_service.fetch_and_process, 100)] * 2)
Code example #18
0
File: test_scheduler.py Project: stephamon/otter
class SchedulerServiceTests(SchedulerTests, DeferredFunctionMixin):
    """
    Tests for `SchedulerService`.
    """
    def setUp(self):
        """
        Mock all the dependencies of SchedulingService.

        This includes logging, store's fetch_and_delete, TxKazooClient stuff,
        check_events_in_bucket.
        """
        super(SchedulerServiceTests, self).setUp()

        otter_log = patch(self, 'otter.scheduler.otter_log')
        self.log = mock_log()
        otter_log.bind.return_value = self.log

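        # Partitioner factory handed to SchedulerService; it captures the
        # created FakePartitioner so tests can drive its callbacks directly.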
        def pfactory(log, callable):
            self.fake_partitioner = FakePartitioner(log, callable)
            return self.fake_partitioner

        self.scheduler_service = SchedulerService("disp",
                                                  100,
                                                  self.mock_store,
                                                  pfactory,
                                                  threshold=600)
        otter_log.bind.assert_called_once_with(system='otter.scheduler')
        self.scheduler_service.running = True
        self.assertIdentical(self.fake_partitioner,
                             self.scheduler_service.partitioner)

        self.check_events_in_bucket = patch(
            self, 'otter.scheduler.check_events_in_bucket')

        self.returns = []
        self.setup_func(self.mock_store.get_oldest_event)

    def test_partitioner_child(self):
        """
        The Partitioner service is registered as a child of the
        SchedulerService.
        """
        self.assertEqual(self.scheduler_service.services,
                         [self.fake_partitioner])

    def test_health_check_after_threshold(self):
        """
        `service.health_check` returns False when trigger time is above
        threshold.
        """
        self.fake_partitioner.health = (True, {'buckets': [2, 3]})
        now = datetime.utcnow()
        returns = [{
            'trigger': now - timedelta(hours=1),
            'version': 'v1'
        }, {
            'trigger': now - timedelta(seconds=2),
            'version': 'v1'
        }]
        self.returns = returns[:]

        d = self.scheduler_service.health_check()

        self.assertEqual(self.successResultOf(d), (False, {
            'old_events': [returns[0]],
            'buckets': [2, 3]
        }))
        self.mock_store.get_oldest_event.assert_has_calls(
            [mock.call(2), mock.call(3)])

    def test_health_check_before_threshold(self):
        """
        `service.health_check` returns True when trigger time is below
        threshold.
        """
        self.fake_partitioner.health = (True, {'buckets': [2, 3]})
        now = datetime.utcnow()
        self.returns = [{
            'trigger': now + timedelta(hours=1),
            'version': 'v1'
        }, {
            'trigger': now + timedelta(seconds=2),
            'version': 'v1'
        }]

        d = self.scheduler_service.health_check()

        self.assertEqual(self.successResultOf(d), (True, {
            'old_events': [],
            'buckets': [2, 3]
        }))
        self.mock_store.get_oldest_event.assert_has_calls(
            [mock.call(2), mock.call(3)])

    def test_health_check_None(self):
        """
        `service.health_check` returns True when there are no triggers.
        """
        self.fake_partitioner.health = (True, {'buckets': [2, 3]})
        self.returns = [None, None]

        d = self.scheduler_service.health_check()

        self.assertEqual(self.successResultOf(d), (True, {
            'old_events': [],
            'buckets': [2, 3]
        }))
        self.mock_store.get_oldest_event.assert_has_calls(
            [mock.call(2), mock.call(3)])

    def test_health_check_unhealthy_partitioner(self):
        """
        When the partitioner service is unhealthy, the scheduler service passes
        its health message through.
        """
        self.fake_partitioner.health = (False, {'foo': 'bar'})
        d = self.scheduler_service.health_check()
        self.assertEqual(self.successResultOf(d), (False, {'foo': 'bar'}))

    def test_health_check_not_running(self):
        """
        `service.health_check` returns False when scheduler is stopped.
        """
        self.scheduler_service.running = False
        d = self.scheduler_service.health_check()

        self.assertEqual(self.successResultOf(d), (False, {
            'reason': 'Not running'
        }))
        self.assertFalse(self.mock_store.get_oldest_event.called)

    def test_reset(self):
        """
        reset() starts new partition based on new path.
        """
        self.assertEqual(self.scheduler_service.reset('/new_path'),
                         'partitioner reset to /new_path')

    @mock.patch('otter.scheduler.datetime')
    def test_check_events_acquired(self, mock_datetime):
        """
        The got_buckets callback checks events in each bucket when they are
        partitioned.
        """
        self.scheduler_service.log = mock.Mock()
        mock_datetime.utcnow.return_value = 'utcnow'

        responses = [4, 5]
        self.check_events_in_bucket.side_effect = \
            lambda *_: defer.succeed(responses.pop(0))

        d = self.fake_partitioner.got_buckets([2, 3])

        self.assertEqual(self.successResultOf(d), [4, 5])
        self.scheduler_service.log.bind.assert_called_once_with(
            scheduler_run_id='transaction-id', utcnow='utcnow')
        log = self.scheduler_service.log.bind.return_value
        self.assertEqual(self.check_events_in_bucket.mock_calls, [
            mock.call(log, "disp", self.mock_store, 2, 'utcnow', 100),
            mock.call(log, "disp", self.mock_store, 3, 'utcnow', 100)
        ])
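
The FakePartitioner used above is not shown in this listing; from how these tests drive it, an illustrative stand-in could look roughly like the following. This is a guess at the interface (a child Service with a settable health tuple and a stored got_buckets callback), not otter's actual test helper.

from twisted.application.service import Service


class FakePartitioner(Service):
    """Hypothetical test double matching the usage in the tests above."""

    def __init__(self, log, got_buckets):
        self.log = log
        # Callback invoked with the list of acquired buckets.
        self.got_buckets = got_buckets
        # Health tuple read by SchedulerService.health_check(); the tests
        # set this attribute directly.
        self.health = (True, {'buckets': []})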
Code example #19
0
class SchedulerTestCase(TestCase):
    """
    Tests for :mod:`SchedulerService`
    """

    def setUp(self):
        """
        Mock all the dependencies of SchedulingService: the Cassandra store, its
        fetch and delete events methods, and the scaling group on which the
        controller executes scaling policies (hence controller.maybe_execute_scaling_policy).
        twisted.internet.task.Clock is used to simulate time.
        """

        self.mock_store = iMock(IScalingGroupCollection, IScalingScheduleCollection)
        self.mock_group = iMock(IScalingGroup)
        self.mock_store.get_scaling_group.return_value = self.mock_group

        self.returns = [None]

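        # Pop canned results in order; an Exception becomes a failed Deferred,
        # anything else fires as a successful Deferred.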
        def _responses(*args):
            result = self.returns.pop(0)
            if isinstance(result, Exception):
                return defer.fail(result)
            return defer.succeed(result)

        self.mock_store.fetch_batch_of_events.side_effect = _responses

        self.mock_store.update_delete_events.return_value = defer.succeed(None)

        self.mock_generate_transaction_id = patch(
            self, "otter.scheduler.generate_transaction_id", return_value="transaction-id"
        )

        # mock out modify state
        self.mock_state = mock.MagicMock(spec=[])  # so nothing can call it

        def _mock_modify_state(modifier, *args, **kwargs):
            modifier(self.mock_group, self.mock_state, *args, **kwargs)
            return defer.succeed(None)

        self.mock_group.modify_state.side_effect = _mock_modify_state

        self.maybe_exec_policy = patch(self, "otter.scheduler.maybe_execute_scaling_policy")

        def _mock_with_lock(lock, func, *args, **kwargs):
            return defer.maybeDeferred(func, *args, **kwargs)

        self.mock_lock = patch(self, "otter.scheduler.BasicLock")
        self.mock_with_lock = patch(self, "otter.scheduler.with_lock")
        self.mock_with_lock.side_effect = _mock_with_lock
        self.slv_client = mock.MagicMock()

        otter_log = patch(self, "otter.scheduler.otter_log")
        self.log = mock_log()
        otter_log.bind.return_value = self.log

        self.clock = Clock()
        self.scheduler_service = SchedulerService(100, 1, self.slv_client, self.mock_store, self.clock)

        otter_log.bind.assert_called_once_with(system="otter.scheduler")

        self.next_cron_occurrence = patch(self, "otter.scheduler.next_cron_occurrence")
        self.next_cron_occurrence.return_value = "newtrigger"

    def validate_calls(self, d, fetch_returns, update_delete_args):
        """
        Validate all the calls made in the service w.r.t to the events
        """
        fetch_call_count = len(fetch_returns)
        events = [event for fetch_return in fetch_returns for event in fetch_return]
        num_events = len(events)
        self.assertIsNone(self.successResultOf(d))
        self.assertEqual(self.mock_store.fetch_batch_of_events.call_count, fetch_call_count)
        if update_delete_args:
            self.assertEqual(
                self.mock_store.update_delete_events.call_args_list,
                [mock.call(delete_events, update_events) for delete_events, update_events in update_delete_args],
            )
        self.assertEqual(self.mock_group.modify_state.call_count, num_events)
        self.assertEqual(
            self.mock_store.get_scaling_group.call_args_list,
            [mock.call(mock.ANY, e["tenantId"], e["groupId"]) for e in events],
        )
        self.assertEqual(
            self.maybe_exec_policy.mock_calls,
            [
                mock.call(mock.ANY, "transaction-id", self.mock_group, self.mock_state, policy_id=event["policyId"])
                for event in events
            ],
        )

    @mock.patch("otter.scheduler.generate_transaction_id", return_value="transid")
    @mock.patch("otter.scheduler.datetime", spec=["utcnow"])
    def test_empty(self, mock_datetime, mock_gentransid):
        """
        No policies are executed when ``fetch_batch_of_events`` returns an empty
        list, i.e. there are no events due before now
        """
        mock_datetime.utcnow.return_value = datetime(2012, 10, 10, 3, 20, 30, 0, None)
        self.returns = [[]]

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, [[]], None)
        self.assertFalse(self.mock_store.update_delete_events.called)
        self.assertFalse(self.log.msg.called)

    def test_one(self):
        """
        A policy is executed when its corresponding event is scheduled before now
        """
        events = [{"tenantId": "1234", "groupId": "scal44", "policyId": "pol44", "trigger": "now", "cron": None}]
        self.returns = [events]

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, [events], [(["pol44"], [])])

    def test_logging(self):
        """
        All the necessary messages are logged
        """
        events = [{"tenantId": "1234", "groupId": "scal44", "policyId": "pol44", "trigger": "now", "cron": None}]
        self.returns = [events]

        self.scheduler_service.check_for_events(100)

        calls = [
            ("Processing {num_events} events", dict(num_events=1)),
            ("Executing policy", dict(tenant_id="1234", policy_id="pol44", scaling_group_id="scal44")),
            ("Deleting {policy_ids_deleting} events", dict(policy_ids_deleting=1)),
            ("Updating {policy_ids_updating} events", dict(policy_ids_updating=0)),
        ]

        self.log.msg.assert_has_calls(
            [mock.call(msg, scheduler_run_id="transaction-id", utcnow=mock.ANY, **kwargs) for msg, kwargs in calls]
        )

    def test_policy_exec_err_logs(self):
        """
        The scheduler logs `CannotExecutePolicyError` as a msg instead of an err
        """
        events = [{"tenantId": "1234", "groupId": "scal44", "policyId": "pol44", "trigger": "now", "cron": "c1"}]
        self.returns = [events]
        self.mock_group.modify_state.side_effect = lambda *_: defer.fail(CannotExecutePolicyError("t", "g", "p", "w"))

        d = self.scheduler_service.check_for_events(100)

        self.assertIsNone(self.successResultOf(d))
        kwargs = dict(
            scheduler_run_id="transaction-id",
            utcnow=mock.ANY,
            tenant_id="1234",
            scaling_group_id="scal44",
            policy_id="pol44",
        )
        self.assertEqual(
            self.log.msg.mock_calls[2],
            mock.call("Cannot execute policy", reason=CheckFailure(CannotExecutePolicyError), **kwargs),
        )
        self.assertFalse(self.log.err.called)

    def test_many(self):
        """
        Events are fetched and processed in batches of 100, and their corresponding
        policies are executed.
        """
        events1 = [
            {"tenantId": "1234", "groupId": "scal44", "policyId": "pol44", "trigger": "now", "cron": None}
            for i in range(100)
        ]
        events2 = [
            {"tenantId": "1234", "groupId": "scal45", "policyId": "pol45", "trigger": "now", "cron": None}
            for i in range(100)
        ]
        self.returns = [events1, events2, []]
        fetch_returns = self.returns[:]

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, fetch_returns, [(["pol44"] * 100, []), (["pol45"] * 100, [])])

    def test_timer_works(self):
        """
        The scheduler executes every x seconds
        """
        events1 = [
            {"tenantId": "1234", "groupId": "scal44", "policyId": "pol44", "trigger": "now", "cron": None}
            for i in range(30)
        ]
        events2 = [
            {"tenantId": "1234", "groupId": "scal45", "policyId": "pol45", "trigger": "now", "cron": None}
            for i in range(20)
        ]
        self.returns = [events1, events2]

        # events not fetched before startService
        self.validate_calls(defer.succeed(None), [], None)

        # events fetched after calling startService
        self.scheduler_service.startService()
        self.validate_calls(defer.succeed(None), [events1], [(["pol44"] * 30, [])])

        # events are fetched again after timer expires
        self.clock.advance(1)
        self.validate_calls(defer.succeed(None), [events1, events2], [(["pol44"] * 30, []), (["pol45"] * 20, [])])

    def test_timer_works_on_error(self):
        """
        The scheduler executes every x seconds even if an error occurs while fetching events
        """
        # Copy fetch function from setUp and set it to fail
        fetch_func = self.mock_store.fetch_batch_of_events.side_effect
        self.mock_store.fetch_batch_of_events.side_effect = None
        self.mock_store.fetch_batch_of_events.return_value = defer.fail(TimedOutException())

        # Start service and see if update_delete_events got called
        self.scheduler_service.startService()
        self.assertFalse(self.mock_store.update_delete_events.called)

        # fix the fetch function and advance the clock to see if it works next time
        self.mock_store.fetch_batch_of_events.side_effect = fetch_func
        events = [
            {"tenantId": "1234", "groupId": "scal44", "policyId": "pol44", "trigger": "now", "cron": None}
            for i in range(30)
        ]
        self.returns = [events]
        self.clock.advance(1)
        self.validate_calls(
            defer.succeed(None),
            [[], events],  # first [] accounts for the failed fetch call
            [(["pol44"] * 30, [])])

    def test_called_with_lock(self):
        """
        ``fetch_and_process`` is called with a lock
        """
        events1 = [
            {"tenantId": "1234", "groupId": "scal44", "policyId": "pol44", "trigger": "now", "cron": None}
            for i in range(100)
        ]
        events2 = [
            {"tenantId": "1234", "groupId": "scal45", "policyId": "pol45", "trigger": "now", "cron": None}
            for i in range(20)
        ]
        self.returns = [events1, events2]

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, [events1, events2], [(["pol44"] * 100, []), (["pol45"] * 20, [])])

        self.assertEqual(
            self.mock_lock.mock_calls,
            [mock.call(self.slv_client, LOCK_TABLE_NAME, "schedule", max_retry=0, log=mock.ANY)] * 2,
        )
        lock = self.mock_lock.return_value
        self.assertEqual(
            self.mock_with_lock.mock_calls, [mock.call(lock, self.scheduler_service.fetch_and_process, 100)] * 2
        )

    def test_does_nothing_on_no_lock(self):
        """
        ``check_for_events`` gracefully does nothing when it does not get a lock. It
        does not call ``fetch_and_process``
        """
        events1 = [
            {"tenantId": "1234", "groupId": "scal44", "policyId": "pol44", "trigger": "now", "cron": None}
            for i in range(100)
        ]
        events2 = [
            {"tenantId": "1234", "groupId": "scal45", "policyId": "pol45", "trigger": "now", "cron": None}
            for i in range(20)
        ]
        self.returns = [events1, events2]

        with_lock_impl = lambda *args: defer.fail(BusyLockError(LOCK_TABLE_NAME, "schedule"))
        self.mock_with_lock.side_effect = with_lock_impl

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, [], None)
        self.mock_lock.assert_called_once_with(self.slv_client, LOCK_TABLE_NAME, "schedule", max_retry=0, log=mock.ANY)
        lock = self.mock_lock.return_value
        self.mock_with_lock.assert_called_once_with(lock, self.scheduler_service.fetch_and_process, 100)
        self.log.msg.assert_called_once_with(
            "Couldn't get lock to process events", reason=CheckFailure(BusyLockError), category="locking"
        )

    def test_does_nothing_on_no_lock_second_time(self):
        """
        ``check_for_events`` gracefully does nothing when it does not get a lock after
        finishing the first batch of 100 events. It does not call ``fetch_and_process`` a second time
        """
        events1 = [
            {"tenantId": "1234", "groupId": "scal44", "policyId": "pol44", "trigger": "now", "cron": None}
            for i in range(100)
        ]
        events2 = [
            {"tenantId": "1234", "groupId": "scal45", "policyId": "pol45", "trigger": "now", "cron": None}
            for i in range(20)
        ]
        self.returns = [events1, events2]

        _with_lock_first_time = [True]

        def _with_lock(lock, func, *args, **kwargs):
            if _with_lock_first_time[0]:
                _with_lock_first_time[0] = False
                return defer.maybeDeferred(func, *args, **kwargs)
            return defer.fail(BusyLockError(LOCK_TABLE_NAME, "schedule"))

        self.mock_with_lock.side_effect = _with_lock

        d = self.scheduler_service.check_for_events(100)

        self.validate_calls(d, [events1], [(["pol44"] * 100, [])])
        self.assertEqual(
            self.mock_lock.mock_calls,
            [mock.call(self.slv_client, LOCK_TABLE_NAME, "schedule", max_retry=0, log=mock.ANY)] * 2,
        )
        lock = self.mock_lock.return_value
        self.assertEqual(
            self.mock_with_lock.mock_calls, [mock.call(lock, self.scheduler_service.fetch_and_process, 100)] * 2
        )
        self.log.msg.assert_called_with(
            "Couldn't get lock to process events", reason=CheckFailure(BusyLockError), category="locking"
        )

    def test_cron_updates(self):
        """
        The scheduler updates cron events
        """
        events = [
            {"tenantId": "1234", "groupId": "scal44", "policyId": "pol44", "trigger": "now", "cron": "c1"}
            for i in range(30)
        ]
        self.returns = [events]

        d = self.scheduler_service.check_for_events(100)

        exp_updated_events = []
        for event in events:
            event["trigger"] = "newtrigger"
            exp_updated_events.append(event)
        self.validate_calls(d, [events], [([], exp_updated_events)])

    def test_cron_updates_and_deletes(self):
        """
        The scheduler updates cron events and deletes at-style events
        """
        events = [
            {"tenantId": "1234", "groupId": "scal44", "policyId": "pol44", "trigger": "now", "cron": "c1"},
            {"tenantId": "1234", "groupId": "scal44", "policyId": "pol45", "trigger": "now", "cron": None},
            {"tenantId": "1234", "groupId": "scal44", "policyId": "pol46", "trigger": "now", "cron": "c2"},
        ]
        self.returns = [events]

        d = self.scheduler_service.check_for_events(100)

        exp_deleted_events = ["pol45"]
        exp_updated_events = []
        for i in [0, 2]:
            event = events[i]
            event["trigger"] = "newtrigger"
            exp_updated_events.append(event)
        self.validate_calls(d, [events], [(exp_deleted_events, exp_updated_events)])

    def test_nopolicy_or_group_events_deleted(self):
        """
        The scheduler does not update cron-style events whose policy or group has been
        deleted (i.e. gives NoSuchPolicyError or NoSuchScalingGroupError); it deletes them instead
        """
        events = [
            {"tenantId": "1234", "groupId": "scal44", "policyId": "pol44", "trigger": "now", "cron": "c1"},
            {"tenantId": "1234", "groupId": "scal44", "policyId": "pol45", "trigger": "now", "cron": "c2"},
            {"tenantId": "1234", "groupId": "scal44", "policyId": "pol46", "trigger": "now", "cron": "c3"},
            {"tenantId": "1234", "groupId": "scal44", "policyId": "pol47", "trigger": "now", "cron": None},
        ]
        self.returns = [events]

        events_indexes = range(len(events))

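        # The first event's policy is missing and the second event's group is
        # missing; the remaining events run the modifier normally.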
        def _mock_modify_state(modifier, *args, **kwargs):
            index = events_indexes.pop(0)
            if index == 0:
                return defer.fail(NoSuchPolicyError("1234", "scal44", "pol44"))
            if index == 1:
                return defer.fail(NoSuchScalingGroupError("1234", "scal44"))
            modifier(self.mock_group, self.mock_state, *args, **kwargs)
            return defer.succeed(None)

        self.mock_group.modify_state.side_effect = _mock_modify_state

        d = self.scheduler_service.check_for_events(100)

        exp_delete_events = ["pol44", "pol45", "pol47"]
        events[2]["trigger"] = "newtrigger"
        exp_update_events = [events[2]]

        # Not using validate_calls since maybe_execute_scaling_policy calls do not match
        self.assertIsNone(self.successResultOf(d))
        self.assertEqual(self.mock_store.fetch_batch_of_events.call_count, 1)
        self.mock_store.update_delete_events.assert_called_once_with(exp_delete_events, exp_update_events)
        self.assertEqual(self.mock_group.modify_state.call_count, len(events))
        self.assertEqual(
            self.mock_store.get_scaling_group.call_args_list,
            [mock.call(mock.ANY, e["tenantId"], e["groupId"]) for e in events],
        )

    def test_exec_event_logs(self):
        """
        `execute_event` logs an error with all the relevant ids bound
        """
        log = mock_log()
        log.err.return_value = None
        event = {"tenantId": "1234", "groupId": "scal44", "policyId": "pol44", "trigger": "now", "cron": "c1"}
        self.mock_group.modify_state.side_effect = lambda *_: defer.fail(ValueError("meh"))

        d = self.scheduler_service.execute_event(log, event, mock.Mock())

        self.assertIsNone(self.successResultOf(d))
        log.err.assert_called_once_with(
            CheckFailure(ValueError),
            "Scheduler failed to execute policy",
            tenant_id="1234",
            scaling_group_id="scal44",
            policy_id="pol44",
        )