Example no. 1
0
def execute_event(store, log, event, deleted_policy_ids):
    """
    Execute a single scheduled event.

    :param store: `IScalingGroupCollection` provider
    :param log: A bound log for logging
    :param event: event dict to execute
    :param deleted_policy_ids: Set of policy ids that are deleted. Policy id
        will be added to this if its scaling group or policy has been deleted
    :return: a deferred with None. Any error occurred during execution is logged
    """
    tenant_id = event['tenantId']
    group_id = event['groupId']
    policy_id = event['policyId']
    log = log.bind(tenant_id=tenant_id, scaling_group_id=group_id, policy_id=policy_id)
    log.msg('Scheduler executing policy {policy_id}')
    scaling_group = store.get_scaling_group(log, tenant_id, group_id)
    modifier = partial(maybe_execute_scaling_policy,
                       log, generate_transaction_id(),
                       policy_id=policy_id, version=event['version'])
    deferred = scaling_group.modify_state(modifier)
    # A group that cannot currently execute the policy is expected; log and move on.
    deferred.addErrback(ignore_and_log, CannotExecutePolicyError,
                        log, 'Scheduler cannot execute policy {policy_id}')

    def record_deleted_policy(failure):
        # Group or policy no longer exists: remember the policy id so the
        # caller can clean up its scheduled events.
        failure.trap(NoSuchScalingGroupError, NoSuchPolicyError)
        deleted_policy_ids.add(policy_id)

    deferred.addErrback(record_deleted_policy)
    deferred.addErrback(log.err, 'Scheduler failed to execute policy {policy_id}')
    return deferred
    def execute_event(self, log, event, deleted_policy_ids):
        """
        Execute a single event.

        :param log: A bound log for logging
        :param event: event dict to execute
        :param deleted_policy_ids: Set of policy ids that are deleted. Policy id
            will be added to this if its scaling group or policy has been deleted
        :return: a deferred with the results of execution
        """
        tenant_id = event['tenantId']
        group_id = event['groupId']
        policy_id = event['policyId']
        log = log.bind(tenant_id=tenant_id, scaling_group_id=group_id,
                       policy_id=policy_id)
        log.msg('Executing policy')
        scaling_group = get_store().get_scaling_group(log, tenant_id, group_id)
        deferred = scaling_group.modify_state(
            partial(maybe_execute_scaling_policy, log,
                    generate_transaction_id(), policy_id=policy_id))
        # Policies that currently cannot run are an expected condition.
        deferred.addErrback(ignore_and_log, CannotExecutePolicyError, log, 'Cannot execute policy')

        def record_deleted_policy(failure):
            # Remember groups/policies that vanished so the caller can purge
            # their scheduled events.
            failure.trap(NoSuchScalingGroupError, NoSuchPolicyError)
            deleted_policy_ids.add(policy_id)

        deferred.addErrback(record_deleted_policy)
        deferred.addErrback(log.err, 'Scheduler failed to execute policy')
        return deferred
Example no. 3
0
    def check_events(self, batchsize):
        """
        Check for events occurring now and earlier.

        Only proceeds when this node's ZooKeeper partition of scheduler
        buckets is acquired; otherwise it handles the partitioner state
        (allocating / release / failed / unknown) and returns early.
        """
        def fresh_partitioner():
            # Build a brand-new partitioner over the full bucket set.
            return self.kz_client.SetPartitioner(
                self.zk_partition_path, set=set(self.buckets),
                time_boundary=self.time_boundary)

        if self.kz_partition.allocating:
            self.log.msg('Partition allocating')
            return
        if self.kz_partition.release:
            self.log.msg('Partition changed. Repartitioning')
            return self.kz_partition.release_set()
        if self.kz_partition.failed:
            self.log.msg('Partition failed. Starting new')
            self.kz_partition = fresh_partitioner()
            return
        if not self.kz_partition.acquired:
            # Defensive: none of the documented partitioner states matched.
            self.log.err('Unknown state {}. This cannot happen. Starting new'.format(
                self.kz_partition.state))
            self.kz_partition.finish()
            self.kz_partition = fresh_partitioner()
            return

        owned_buckets = list(self.kz_partition)
        utcnow = datetime.utcnow()
        log = self.log.bind(scheduler_run_id=generate_transaction_id(), utcnow=utcnow)
        # TODO: This log might feel like spam since it'll occur on every tick. But
        # it'll be useful to debug partitioning problems (at least in initial deployment)
        log.msg('Got buckets {buckets}', buckets=owned_buckets, path=self.zk_partition_path)

        return defer.gatherResults(
            [check_events_in_bucket(log, self.store, bucket, utcnow, batchsize)
             for bucket in owned_buckets])
Example no. 4
0
    def fetch_and_process(self, batchsize):
        """
        Fetch the events to be processed and process them.
        Also delete/update after processing them

        :param batchsize: maximum number of events to fetch in this batch
        :return: a deferred that fires with list of events processed
        """
        def process_events(events):
            """
            Execute each event, collecting ids of policies whose group or
            policy turned out to be deleted. Fires with
            ``(events, deleted_policy_ids)``.
            """
            if not events:
                return events, set()

            log.msg('Processing events', num_events=len(events))

            deleted_policy_ids = set()

            def eb(failure, policy_id):
                # Group/policy is gone; remember it so its event is deleted below.
                failure.trap(NoSuchPolicyError, NoSuchScalingGroupError)
                deleted_policy_ids.add(policy_id)

            deferreds = [
                self.execute_event(log, event).addErrback(eb, event['policyId']).addErrback(log.err)
                for event in events
            ]
            d = defer.gatherResults(deferreds, consumeErrors=True)
            return d.addCallback(lambda _: (events, deleted_policy_ids))

        def update_delete_events(result):
            """
            Update events with cron entry with next trigger time
            Delete other events
            """
            # Single-argument signature unpacked here: the original used the
            # Python-2-only tuple-parameter syntax ``def f((a, b))``, which is
            # a SyntaxError on Python 3 (removed by PEP 3113).
            events, deleted_policy_ids = result
            if not events:
                return events

            events_to_delete, events_to_update = [], []
            for event in events:
                if event['cron'] and event['policyId'] not in deleted_policy_ids:
                    event['trigger'] = next_cron_occurrence(event['cron'])
                    events_to_update.append(event)
                else:
                    events_to_delete.append(event['policyId'])

            log.msg('Deleting events', num_policy_ids_deleting=len(events_to_delete))
            log.msg('Updating events', num_policy_ids_updating=len(events_to_update))
            d = get_store().update_delete_events(events_to_delete, events_to_update)

            return d.addCallback(lambda _: events)

        # utcnow because of cass serialization issues
        utcnow = datetime.utcnow()
        log = self.log.bind(scheduler_run_id=generate_transaction_id(), utcnow=utcnow)
        log.msg('Checking for events')
        deferred = get_store().fetch_batch_of_events(utcnow, batchsize)
        deferred.addCallback(process_events)
        deferred.addCallback(update_delete_events)
        deferred.addErrback(log.err)
        return deferred
Example no. 5
0
    def _check_events(self, batchsize, buckets):
        """
        Check for events occurring now and earlier.

        :param batchsize: number of events to fetch per bucket
        :param buckets: iterable of buckets owned by this node
        :return: `Deferred` gathering the per-bucket checks
        """
        utcnow = datetime.utcnow()
        log = self.log.bind(scheduler_run_id=generate_transaction_id(), utcnow=utcnow)

        checks = [check_events_in_bucket(log, self.dispatcher, self.store,
                                         bucket, utcnow, batchsize)
                  for bucket in buckets]
        return defer.gatherResults(checks)
Example no. 6
0
    def _check_events(self, batchsize, buckets):
        """
        Check for events occurring now and earlier.

        :param batchsize: number of events to fetch per bucket
        :param buckets: iterable of buckets owned by this node
        :return: `Deferred` that gathers the results of checking each bucket
        """
        now = datetime.utcnow()
        bound_log = self.log.bind(scheduler_run_id=generate_transaction_id(),
                                  utcnow=now)
        deferreds = []
        for bucket in buckets:
            deferreds.append(
                check_events_in_bucket(bound_log, self.dispatcher, self.store,
                                       bucket, now, batchsize))
        return defer.gatherResults(deferreds)
Example no. 7
0
 def _(self, request, *args, **kwargs):
     """
     Wrapper: tag the request with a fresh transaction id, expose it in a
     response header, log the incoming request on a bound log, then call
     the wrapped handler ``f`` with that bound log.
     """
     transaction_id = generate_transaction_id()
     # Fixed header name: was 'X-Response_Id' (underscore), inconsistent
     # with the 'X-Response-Id' header set by the sibling wrappers.
     request.setHeader('X-Response-Id', transaction_id)
     bound_log = log.bind(system=reflect.fullyQualifiedName(f),
                          transaction_id=transaction_id)
     bound_log.bind(method=request.method,
                    uri=request.uri,
                    clientproto=request.clientproto,
                    referer=request.getHeader('referer'),
                    useragent=request.getHeader('user-agent')).msg(
                        'Received request')  # fixed typo: was 'Recieved'
     return f(self, request, bound_log, *args, **kwargs)
Example no. 8
0
 def _(request, *args, **kwargs):
     """
     Wrapper: assign a transaction id to the request, advertise it via the
     'X-Response-Id' header, log the incoming request, then invoke the
     wrapped handler ``f`` (through ``bind_log``) with a bound log.
     """
     transaction_id = generate_transaction_id()
     request.setHeader('X-Response-Id', transaction_id)
     bound_log = log.bind(system=reflect.fullyQualifiedName(f),
                          transaction_id=transaction_id)
     request_log = bound_log.bind(method=request.method,
                                  uri=request.uri,
                                  clientproto=request.clientproto,
                                  referer=request.getHeader("referer"),
                                  useragent=request.getHeader("user-agent"))
     request_log.msg("Received request")
     return bind_log(f)(request, bound_log, *args, **kwargs)
Example no. 9
0
 def _(self, request, *args, **kwargs):
     """
     Wrapper: bind a fresh transaction id into ``self.log``, expose it in
     the 'X-Response-Id' response header, log the request details, then
     delegate to the wrapped handler ``f``.
     """
     transaction_id = generate_transaction_id()
     request.setHeader('X-Response-Id', transaction_id)
     self.log = self.log.bind(system=reflect.fullyQualifiedName(f),
                              transaction_id=transaction_id)
     details = dict(method=request.method,
                    uri=request.uri,
                    clientproto=request.clientproto,
                    referer=request.getHeader("referer"),
                    useragent=request.getHeader("user-agent"),
                    request_status="received")
     self.log.msg("Received request", **details)
     return f(self, request, *args, **kwargs)
Example no. 10
0
 def _(self, request, *args, **kwargs):
     """
     Wrapper: attach a transaction id to the response, log the incoming
     request on a bound log, and call the wrapped handler ``f`` (through
     ``bind_log``) with that log.
     """
     transaction_id = generate_transaction_id()
     request.setHeader('X-Response-Id', transaction_id)
     bound_log = log.bind(
         system=reflect.fullyQualifiedName(f),
         transaction_id=transaction_id)
     request_log = bound_log.bind(
         method=request.method,
         uri=request.uri,
         clientproto=request.clientproto,
         referer=request.getHeader("referer"),
         useragent=request.getHeader("user-agent"))
     request_log.msg("Received request")
     return bind_log(f)(self, request, bound_log, *args, **kwargs)
Example no. 11
0
    def execute_event(self, log, event):
        """
        Execute a single event.

        :param log: A bound log for logging
        :param event: event dict to execute
        :return: a deferred with the results of execution
        """
        tenant_id = event['tenantId']
        group_id = event['groupId']
        policy_id = event['policyId']
        log = log.bind(tenant_id=tenant_id, scaling_group_id=group_id,
                       policy_id=policy_id)
        log.msg('Executing policy')
        scaling_group = get_store().get_scaling_group(log, tenant_id, group_id)
        deferred = scaling_group.modify_state(
            partial(maybe_execute_scaling_policy, log,
                    generate_transaction_id(), policy_id=policy_id))
        # A policy that currently cannot execute is expected; log and continue.
        deferred.addErrback(ignore_and_log, CannotExecutePolicyError, log,
                            'Cannot execute policy')
        return deferred
Example no. 12
0
def execute_event(dispatcher, store, log, event, deleted_policy_ids):
    """
    Execute a single event

    :param dispatcher: Effect dispatcher
    :param store: `IScalingGroupCollection` provider
    :param log: A bound log for logging
    :param event: event dict to execute
    :param deleted_policy_ids: Set of policy ids that are deleted. Policy id
        will be added to this if its scaling group or policy has been deleted
    :return: a deferred with None. Any error occurred during execution is
        logged
    """
    tenant_id, group_id, policy_id = (
        event['tenantId'], event['groupId'], event['policyId'])
    log = log.bind(tenant_id=tenant_id,
                   scaling_group_id=group_id,
                   policy_id=policy_id,
                   scheduled_time=event["trigger"].isoformat() + "Z")
    log.msg('sch-exec-pol', cloud_feed=True)
    scaling_group = store.get_scaling_group(log, tenant_id, group_id)
    modifier = partial(maybe_execute_scaling_policy,
                       log,
                       generate_transaction_id(),
                       policy_id=policy_id,
                       version=event['version'])
    deferred = modify_and_trigger(dispatcher,
                                  scaling_group,
                                  bound_log_kwargs(log),
                                  modifier,
                                  modify_state_reason='scheduler.execute_event')
    # A policy that currently cannot execute is an expected condition.
    deferred.addErrback(ignore_and_log,
                        CannotExecutePolicyError,
                        log,
                        "sch-cannot-exec",
                        cloud_feed=True)

    def record_deleted_policy(failure):
        # Group or policy was deleted out from under us; note the policy id
        # so the caller can remove its scheduled events.
        failure.trap(NoSuchScalingGroupError, NoSuchPolicyError)
        deleted_policy_ids.add(policy_id)

    deferred.addErrback(record_deleted_policy)
    deferred.addErrback(log.err, "sch-exec-pol-err", cloud_feed=True)
    return deferred
Example no. 13
0
def execute_event(dispatcher, store, log, event, deleted_policy_ids):
    """
    Execute a single event

    :param dispatcher: Effect dispatcher
    :param store: `IScalingGroupCollection` provider
    :param log: A bound log for logging
    :param event: event dict to execute
    :param deleted_policy_ids: Set of policy ids that are deleted. Policy id
        will be added to this if its scaling group or policy has been deleted
    :return: a deferred with None. Any error occurred during execution is
        logged
    """
    tenant_id, group_id, policy_id = (
        event["tenantId"],
        event["groupId"],
        event["policyId"],
    )
    log = log.bind(
        tenant_id=tenant_id,
        scaling_group_id=group_id,
        policy_id=policy_id,
        scheduled_time=event["trigger"].isoformat() + "Z",
    )
    log.msg("sch-exec-pol", cloud_feed=True)
    group = store.get_scaling_group(log, tenant_id, group_id)
    execute_policy = partial(
        maybe_execute_scaling_policy,
        log,
        generate_transaction_id(),
        policy_id=policy_id,
        version=event["version"],
    )
    d = modify_and_trigger(
        dispatcher,
        group,
        bound_log_kwargs(log),
        execute_policy,
        modify_state_reason="scheduler.execute_event",
    )
    # A policy that currently cannot execute is expected; just log it.
    d.addErrback(
        ignore_and_log, CannotExecutePolicyError, log, "sch-cannot-exec", cloud_feed=True
    )

    def note_deleted_policy(failure):
        # The scaling group or policy no longer exists; record it so the
        # caller can clean up the event.
        failure.trap(NoSuchScalingGroupError, NoSuchPolicyError)
        deleted_policy_ids.add(policy_id)

    d.addErrback(note_deleted_policy)
    d.addErrback(log.err, "sch-exec-pol-err", cloud_feed=True)
    return d