Example #1
def modify_and_trigger(dispatcher, group, logargs, modifier, *args, **kwargs):
    """
    Modify the group state and then trigger convergence, provided the group
    is not suspended. Otherwise fail with :obj:`TenantSuspendedError`.

    :param IScalingGroup group: Scaling group whose state is getting modified
    :param log: Bound logger
    :param modifier: Callable as described in IScalingGroup.modify_state

    :return: Deferred with None if modification and convergence succeeded.
        Fails with :obj:`TenantSuspendedError` if group is suspended.
    """
    def modifier_wrapper(_group, state, *_args, **_kwargs):
        # Ideally this will not be allowed by the repose middleware, but we
        # add the check anyway for mimic-based integration tests.
        if state.suspended:
            raise TenantSuspendedError(_group.tenant_id)
        return modifier(_group, state, *_args, **_kwargs)

    cannot_exec_pol_err = None
    try:
        yield group.modify_state(modifier_wrapper, *args, **kwargs)
    except CannotExecutePolicyError as ce:
        cannot_exec_pol_err = ce
    if tenant_is_enabled(group.tenant_id, config_value):
        eff = Effect(
            BoundFields(
                trigger_convergence(group.tenant_id, group.uuid), logargs))
        yield perform(dispatcher, eff)
    if cannot_exec_pol_err is not None:
        raise cannot_exec_pol_err
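The generator body above is presumably wrapped with a Deferred-coroutine decorator (such as Twisted's ``inlineCallbacks``) that is not shown in this excerpt. A minimal usage sketch, assuming such a decorator is applied and that ``dispatcher``, ``group`` and ``trans_id`` are already in hand; ``set_desired`` is a hypothetical modifier:

def set_desired(group, state):
    # hypothetical modifier matching the IScalingGroup.modify_state contract
    state.desired = 5
    return state

def on_suspended(f):
    f.trap(TenantSuspendedError)
    # the tenant is suspended; map this to whatever the caller needs

d = modify_and_trigger(dispatcher, group, {"transaction_id": trans_id},
                       set_desired)
d.addErrback(on_suspended)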
Example #2
    def converge_scaling_group(self, request):
        """
        Trigger convergence on the given scaling group
        """

        class ConvergeErrorGroup(Exception):
            pass

        def can_converge(group, state):
            if state.paused:
                raise GroupPausedError(group.tenant_id, group.uuid, "converge")
            conv_on_error = extract_bool_arg(request, 'on_error', True)
            if not conv_on_error and state.status == ScalingGroupStatus.ERROR:
                raise ConvergeErrorGroup()
            return state

        def converge_error_group_header(f):
            f.trap(ConvergeErrorGroup)
            request.setHeader("x-not-converging", "true")

        if tenant_is_enabled(self.tenant_id, config_value):
            group = self.store.get_scaling_group(
                self.log, self.tenant_id, self.group_id)
            return controller.modify_and_trigger(
                self.dispatcher,
                group,
                bound_log_kwargs(self.log),
                can_converge).addErrback(converge_error_group_header)
        else:
            request.setResponseCode(404)
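For illustration, here is a hedged client-side sketch of how the ``on_error`` query argument is expected to behave; the URL layout, the auth header, and the use of the ``requests`` library are assumptions, not taken from the source above.

import requests  # assumption: any HTTP client would do

# Hypothetical endpoint for triggering convergence on a group.
url = "https://autoscale.example.com/v1.0/123456/groups/abcd-ef/converge"
auth_headers = {"X-Auth-Token": "an-auth-token"}

resp = requests.post(url, params={"on_error": "false"}, headers=auth_headers)
if resp.headers.get("x-not-converging") == "true":
    # the group is in ERROR status and on_error=false suppressed convergence
    print("convergence was skipped for the errored group")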
Example #3
def modify_and_trigger(dispatcher, group, logargs, modifier, *args, **kwargs):
    """
    Modify the group state and then trigger convergence, provided the group
    is not suspended. Otherwise fail with :obj:`TenantSuspendedError`.

    :param IScalingGroup group: Scaling group whose state is getting modified
    :param log: Bound logger
    :param modifier: Callable as described in IScalingGroup.modify_state

    :return: Deferred with None if modification and convergence succeeded.
        Fails with :obj:`TenantSuspendedError` if group is suspended.
    """
    def modifier_wrapper(_group, state, *_args, **_kwargs):
        # Ideally this will not be allowed by the repose middleware, but we
        # add the check anyway for mimic-based integration tests.
        if state.suspended:
            raise TenantSuspendedError(_group.tenant_id)
        return modifier(_group, state, *_args, **_kwargs)

    cannot_exec_pol_err = None
    try:
        yield group.modify_state(modifier_wrapper, *args, **kwargs)
    except CannotExecutePolicyError as ce:
        cannot_exec_pol_err = ce
    if tenant_is_enabled(group.tenant_id, config_value):
        eff = Effect(
            BoundFields(trigger_convergence(group.tenant_id, group.uuid),
                        logargs))
        yield perform(dispatcher, eff)
    if cannot_exec_pol_err is not None:
        raise cannot_exec_pol_err
Example #4
def converge(log,
             transaction_id,
             config,
             scaling_group,
             state,
             launch_config,
             policy,
             config_value=config_value):
    """
    Apply a policy's change to a scaling group, and attempt to make the
    resulting state a reality. This does no cooldown checking.

    This is done by dispatching to the appropriate orchestration backend for
    the scaling group; currently only direct nova interaction is supported.

    :param log: A bound log for logging
    :param str transaction_id: the transaction id
    :param dict config: the scaling group config
    :param otter.models.interface.IScalingGroup scaling_group: the scaling
        group object
    :param otter.models.interface.GroupState state: the group state
    :param dict launch_config: the scaling group launch config
    :param dict policy: the policy configuration dictionary

    :return: a ``Deferred`` that fires with the updated
        :class:`otter.models.interface.GroupState` if successful. If no changes
        are to be made to the group, None will synchronously be returned.
    """
    if tenant_is_enabled(scaling_group.tenant_id, config_value):
        # For convergence tenants, find delta based on group's desired
        # capacity
        delta = apply_delta(log, state.desired, state, config, policy)
        if delta == 0:
            # No change in servers. Return None synchronously
            return None
        else:
            return defer.succeed(state)

    # For non-convergence tenants, the value used for desired-capacity is
    # the sum of active+pending, which is 0, so the delta ends up being
    # the min entities due to constraint calculation.
    delta = calculate_delta(log, state, config, policy)
    execute_log = log.bind(server_delta=delta)

    if delta == 0:
        execute_log.msg("no change in servers")
        return None
    elif delta > 0:
        execute_log.msg("executing launch configs")
        deferred = execute_launch_config(execute_log, transaction_id, state,
                                         launch_config, scaling_group, delta)
    else:
        # delta < 0 (scale down)
        execute_log.msg("scaling down")
        deferred = exec_scale_down(execute_log, transaction_id, state,
                                   scaling_group, -delta)

    deferred.addCallback(_do_convergence_audit_log, log, delta, state)
    return deferred
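Because ``converge`` can return either ``None`` synchronously or a ``Deferred``, callers have to branch on the result; a minimal sketch, assuming the surrounding arguments are already available:

# Hedged usage sketch: handle both return shapes of converge().
result = converge(log, transaction_id, config, scaling_group, state,
                  launch_config, policy)
if result is None:
    log.msg("policy execution resulted in no change")
else:
    result.addCallback(
        lambda new_state: log.msg("convergence started", state=new_state))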
Example #5
def get_groups_to_converge(config_func):
    """
    Get all groups, across all tenants, that need convergence triggering
    """
    eff = Effect(GetAllValidGroups())
    eff = eff.on(
        filter(lambda g: tenant_is_enabled(g["tenantId"], config_func)))
    return eff.on(list)
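Note that ``filter`` is called with only the predicate, so it is presumably a curried variant (for example ``toolz.curried.filter``) imported elsewhere rather than the builtin. A hedged usage sketch, assuming an Effect dispatcher that can perform ``GetAllValidGroups``:

from effect import sync_perform  # sketch; real callers may use perform

# Fires with a list of group dicts whose tenants have convergence enabled.
groups = sync_perform(dispatcher, get_groups_to_converge(config_value))
for group in groups:
    print(group["tenantId"])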
Example #6
def get_groups_to_converge(config_func):
    """
    Get all groups, across all tenants, that need convergence triggering
    """
    eff = Effect(GetAllValidGroups())
    eff = eff.on(
        filter(lambda g: tenant_is_enabled(g["tenantId"], config_func)))
    return eff.on(list)
Example #7
def fetch_active_caches(group_states):
    if not tenant_is_enabled(self.tenant_id, config_value):
        return group_states, [None] * len(group_states)
    d = gatherResults(
        [get_active_cache(
            self.store.reactor, self.store.connection, self.tenant_id,
            state.group_id)
         for state in group_states])
    return d.addCallback(lambda cache: (group_states, cache))
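Since this helper returns a plain tuple for worker tenants but a ``Deferred`` for convergence tenants, callers can normalize the two shapes with ``maybeDeferred``; a minimal sketch:

from twisted.internet.defer import maybeDeferred

# Hedged usage sketch: always end up with a Deferred firing with
# (group_states, caches); caches is a list of None for worker tenants.
d = maybeDeferred(fetch_active_caches, group_states)
d.addCallback(lambda result: list(zip(result[0], result[1])))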
Example #8
def fetch_active_caches(group_states):
    if not tenant_is_enabled(self.tenant_id, config_value):
        return group_states, [None] * len(group_states)
    d = gatherResults(
        [get_active_cache(
            self.store.reactor, self.store.connection, self.tenant_id,
            state.group_id)
         for state in group_states])
    return d.addCallback(lambda cache: (group_states, cache))
Example #9
def converge(log, transaction_id, config, scaling_group, state, launch_config,
             policy, config_value=config_value):
    """
    Apply a policy's change to a scaling group, and attempt to make the
    resulting state a reality. This does no cooldown checking.

    This is done by dispatching to the appropriate orchestration backend for
    the scaling group; currently only direct nova interaction is supported.

    :param log: A bound log for logging
    :param str transaction_id: the transaction id
    :param dict config: the scaling group config
    :param otter.models.interface.IScalingGroup scaling_group: the scaling
        group object
    :param otter.models.interface.GroupState state: the group state
    :param dict launch_config: the scaling group launch config
    :param dict policy: the policy configuration dictionary

    :return: a ``Deferred`` that fires with the updated
        :class:`otter.models.interface.GroupState` if successful. If no changes
        are to be made to the group, None will synchronously be returned.
    """
    if tenant_is_enabled(scaling_group.tenant_id, config_value):
        # For convergence tenants, find delta based on group's desired
        # capacity
        delta = apply_delta(log, state.desired, state, config, policy)
        if delta == 0:
            # No change in servers. Return None synchronously
            return None
        else:
            return defer.succeed(state)

    # For non-convergence tenants, the value used for desired-capacity is
    # the sum of active+pending, which is 0, so the delta ends up being
    # the min entities due to constraint calculation.
    delta = calculate_delta(log, state, config, policy)
    execute_log = log.bind(server_delta=delta)

    if delta == 0:
        execute_log.msg("no change in servers")
        return None
    elif delta > 0:
        execute_log.msg("executing launch configs")
        deferred = execute_launch_config(
            execute_log, transaction_id, state, launch_config,
            scaling_group, delta)
    else:
        # delta < 0 (scale down)
        execute_log.msg("scaling down")
        deferred = exec_scale_down(execute_log, transaction_id, state,
                                   scaling_group, -delta)

    deferred.addCallback(_do_convergence_audit_log, log, delta, state)
    return deferred
Example #10
def pause_scaling_group(log, transaction_id, scaling_group, dispatcher):
    """
    Pauses the scaling group, causing all scaling policy executions to be
    rejected until unpaused.  This is an idempotent change; if it's already
    paused, this does not raise an error.

    :raises: :class:`NoSuchScalingGroup` if the scaling group does not exist.

    :return: None
    """
    if not tenant_is_enabled(scaling_group.tenant_id, config_value):
        raise NotImplementedError("Pause is not implemented for legay groups")
    return perform(dispatcher, conv_pause_group_eff(scaling_group, transaction_id))
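A hedged usage sketch from a web handler's point of view; ``request`` and the other surrounding names are assumptions:

# Hedged sketch: pause a convergence group, mapping the legacy case to an
# explicit error response.
try:
    d = pause_scaling_group(log, transaction_id, scaling_group, dispatcher)
except NotImplementedError:
    request.setResponseCode(404)
else:
    d.addCallback(lambda _: request.setResponseCode(204))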
Example #11
def with_active_cache(self, get_func, *args, **kwargs):
    """
    Return the result of `get_func` along with the active-server cache from
    the servers table, if this is a convergence-enabled tenant.
    """
    if tenant_is_enabled(self.tenant_id, config_value):
        cache_d = get_active_cache(
            self.store.reactor, self.store.connection, self.tenant_id,
            self.group_id)
    else:
        cache_d = succeed(None)
    return gatherResults([get_func(*args, **kwargs), cache_d],
                         consumeErrors=True)
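The returned ``Deferred`` fires with a two-element list: the ``get_func`` result and the active-server cache (``None`` for non-convergence tenants). A hedged sketch of unpacking it, with ``view_state`` standing in for whatever ``get_func`` the caller passes:

# Hedged usage sketch: unpack [state, cache]; cache is None when the tenant
# is not convergence-enabled.
d = self.with_active_cache(self.view_state)

def unpack(result):
    state, cache = result
    return state, (cache or [])

d.addCallback(unpack)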
Example #12
    def test_tenant_is_not_enabled(self):
        """
        :obj:`convergence.tenant_is_enabled` should return ``False`` when a
        given tenant ID has convergence behavior turned off.
        """
        enabled_tenant_id = "some-tenant"

        def get_config_value(config_key):
            self.assertEqual(config_key, "convergence-tenants")
            return [enabled_tenant_id + "-nope"]
        self.assertEqual(tenant_is_enabled(enabled_tenant_id,
                                           get_config_value),
                         False)
Example #13
    def test_tenant_is_not_enabled(self):
        """
        :obj:`convergence.tenant_is_enabled` should return ``False`` when a
        given tenant ID has convergence behavior turned off.
        """
        disabled_tenant_id = "some-tenant"

        def get_config_value(config_key):
            self.assertEqual(config_key, "non-convergence-tenants")
            return [disabled_tenant_id]
        self.assertEqual(tenant_is_enabled(disabled_tenant_id,
                                           get_config_value),
                         False)
Example #14
    def test_tenant_is_enabled(self):
        """
        :obj:`convergence.tenant_is_enabled` should return ``True`` when a
        given tenant ID has convergence behavior turned on.
        """
        disabled_tenant_id = "some-tenant"

        def get_config_value(config_key):
            self.assertEqual(config_key, "non-convergence-tenants")
            return [disabled_tenant_id]
        self.assertEqual(tenant_is_enabled("some-other-tenant",
                                           get_config_value),
                         True)
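The two tests above (and Examples #27-#28 further down) exercise opt-out semantics keyed on ``non-convergence-tenants``, while Examples #12 and #29 appear to use an older opt-in ``convergence-tenants`` key. A minimal sketch of an implementation consistent with the opt-out tests only; the real function may differ:

def tenant_is_enabled(tenant_id, get_config_value):
    """Sketch: convergence is on unless the tenant is explicitly opted out."""
    non_convergence_tenants = get_config_value("non-convergence-tenants")
    if non_convergence_tenants == 'none':   # nobody is opted out
        return True
    if non_convergence_tenants is None:     # key not configured at all
        return True
    return tenant_id not in non_convergence_tenants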
Example #15
def with_active_cache(self, get_func, *args, **kwargs):
    """
    Return the result of `get_func` along with the active-server cache from
    the servers table, if this is a convergence-enabled tenant.
    """
    if tenant_is_enabled(self.tenant_id, config_value):
        cache_d = get_active_cache(
            self.store.reactor, self.store.connection, self.tenant_id,
            self.group_id)
    else:
        cache_d = succeed(None)
    return gatherResults([get_func(*args, **kwargs), cache_d],
                         consumeErrors=True)
Example #16
def resume_scaling_group(log, transaction_id, scaling_group, dispatcher):
    """
    Resumes the scaling group, causing all scaling policy executions to be
    evaluated as normal again.  This is an idempotent change; if it's already
    resumed, this does not raise an error.

    :raises: :class:`NoSuchScalingGroup` if the scaling group does not exist.

    :return: None
    """
    if not tenant_is_enabled(scaling_group.tenant_id, config_value):
        raise NotImplementedError("Resume is not implemented for legacy groups")
    return perform(dispatcher, conv_resume_group_eff(transaction_id, scaling_group))
Example #17
def remove_server_from_group(dispatcher,
                             log,
                             trans_id,
                             server_id,
                             replace,
                             purge,
                             group,
                             state,
                             config_value=config_value):
    """
    Remove a specific server from the group, optionally replacing it
    with a new one, and optionally deleting the old one from Nova.

    If the old server is not deleted from Nova, otter-specific metadata
    is removed: otherwise, a different part of otter may later mistake
    the server as one that *should* still be in the group.

    :param log: A bound logger
    :param bytes trans_id: The transaction id for this operation.
    :param bytes server_id: The id of the server to be removed.
    :param bool replace: Should the server be replaced?
    :param bool purge: Should the server be deleted from Nova?
    :param group: The scaling group to remove a server from.
    :type group: :class:`~otter.models.interface.IScalingGroup`
    :param state: The current state of the group.
    :type state: :class:`~otter.models.interface.GroupState`

    :return: The updated state.
    :rtype: deferred :class:`~otter.models.interface.GroupState`
    """
    # worker case
    if not tenant_is_enabled(group.tenant_id, config_value):
        return worker_remove_server_from_group(log, trans_id, server_id,
                                               replace, purge, group, state)

    # convergence case - requires that the convergence dispatcher handles
    # EvictServerFromScalingGroup
    eff = convergence_remove_server_from_group(log, trans_id, server_id,
                                               replace, purge, group, state)

    def kick_off_convergence(new_state):
        ceff = trigger_convergence(group.tenant_id, group.uuid)
        return ceff.on(lambda _: new_state)

    return perform(
        dispatcher,
        with_log(eff.on(kick_off_convergence),
                 tenant_id=group.tenant_id,
                 scaling_group_id=group.uuid,
                 server_id=server_id,
                 transaction_id=trans_id))
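A hedged usage sketch from an HTTP handler; ``request`` and the surrounding identifiers are assumptions:

# Hedged sketch: replace-and-purge a server, then acknowledge once the
# updated state (or the triggered convergence) comes back.
d = remove_server_from_group(dispatcher, log, trans_id, server_id,
                             replace=True, purge=True, group=group,
                             state=state)
d.addCallback(lambda new_state: request.setResponseCode(202))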
Example #18
def pause_scaling_group(log, transaction_id, scaling_group, dispatcher):
    """
    Pauses the scaling group, causing all scaling policy executions to be
    rejected until unpaused.  This is an idempotent change; if it's already
    paused, this does not raise an error.

    :raises: :class:`NoSuchScalingGroup` if the scaling group does not exist.

    :return: None
    """
    if not tenant_is_enabled(scaling_group.tenant_id, config_value):
        raise NotImplementedError("Pause is not implemented for legay groups")
    return perform(dispatcher,
                   conv_pause_group_eff(scaling_group, transaction_id))
Example #19
def resume_scaling_group(log, transaction_id, scaling_group, dispatcher):
    """
    Resumes the scaling group, causing all scaling policy executions to be
    evaluated as normal again.  This is an idempotent change; if it's already
    resumed, this does not raise an error.

    :raises: :class:`NoSuchScalingGroup` if the scaling group does not exist.

    :return: None
    """
    if not tenant_is_enabled(scaling_group.tenant_id, config_value):
        raise NotImplementedError(
            'Resume is not implemented for legacy groups')
    return perform(dispatcher,
                   conv_resume_group_eff(transaction_id, scaling_group))
Example #20
def remove_server_from_group(
    dispatcher, log, trans_id, server_id, replace, purge, group, state, config_value=config_value
):
    """
    Remove a specific server from the group, optionally replacing it
    with a new one, and optionally deleting the old one from Nova.

    If the old server is not deleted from Nova, otter-specific metadata
    is removed: otherwise, a different part of otter may later mistake
    the server as one that *should* still be in the group.

    :param log: A bound logger
    :param bytes trans_id: The transaction id for this operation.
    :param bytes server_id: The id of the server to be removed.
    :param bool replace: Should the server be replaced?
    :param bool purge: Should the server be deleted from Nova?
    :param group: The scaling group to remove a server from.
    :type group: :class:`~otter.models.interface.IScalingGroup`
    :param state: The current state of the group.
    :type state: :class:`~otter.models.interface.GroupState`

    :return: The updated state.
    :rtype: deferred :class:`~otter.models.interface.GroupState`
    """
    # worker case
    if not tenant_is_enabled(group.tenant_id, config_value):
        return worker_remove_server_from_group(log, trans_id, server_id, replace, purge, group, state)

    # convergence case - requires that the convergence dispatcher handles
    # EvictServerFromScalingGroup
    eff = convergence_remove_server_from_group(log, trans_id, server_id, replace, purge, group, state)

    def kick_off_convergence(new_state):
        ceff = trigger_convergence(group.tenant_id, group.uuid)
        return ceff.on(lambda _: new_state)

    return perform(
        dispatcher,
        with_log(
            eff.on(kick_off_convergence),
            tenant_id=group.tenant_id,
            scaling_group_id=group.uuid,
            server_id=server_id,
            transaction_id=trans_id,
        ),
    )
Example #21
    def converge_scaling_group(self, request):
        """
        Trigger convergence on the given scaling group
        """
        def can_converge(group, state):
            if state.paused:
                raise GroupPausedError(group.tenant_id, group.uuid, "converge")
            return state

        if tenant_is_enabled(self.tenant_id, config_value):
            group = self.store.get_scaling_group(self.log, self.tenant_id,
                                                 self.group_id)
            return controller.modify_and_trigger(self.dispatcher, group,
                                                 bound_log_kwargs(self.log),
                                                 can_converge)
        else:
            request.setResponseCode(404)
Example #22
    def converge_scaling_group(self, request):
        """
        Trigger convergence on the given scaling group
        """

        def is_group_paused(group, state):
            if state.paused:
                raise GroupPausedError(group.tenant_id, group.uuid, "converge")
            return state

        if tenant_is_enabled(self.tenant_id, config_value):
            group = self.store.get_scaling_group(
                self.log, self.tenant_id, self.group_id)
            return controller.modify_and_trigger(
                self.dispatcher,
                group,
                bound_log_kwargs(self.log),
                is_group_paused)
        else:
            request.setResponseCode(404)
Example #23
def modify_and_trigger(dispatcher, group, logargs, modifier, *args, **kwargs):
    """
    Modify the group state and then trigger convergence

    :param IScalingGroup group: Scaling group whose state is getting modified
    :param log: Bound logger
    :param modifier: Callable as described in IScalingGroup.modify_state

    :return: Deferred with None
    """
    cannot_exec_pol_err = None
    try:
        yield group.modify_state(modifier, *args, **kwargs)
    except CannotExecutePolicyError as ce:
        cannot_exec_pol_err = ce
    if tenant_is_enabled(group.tenant_id, config_value):
        eff = Effect(BoundFields(trigger_convergence(group.tenant_id, group.uuid), logargs))
        yield perform(dispatcher, eff)
    if cannot_exec_pol_err is not None:
        raise cannot_exec_pol_err
Example #24
def delete_group(dispatcher, log, trans_id, group, force):
    """
    Delete group based on the kind of tenant

    :param log: Bound logger
    :param str trans_id: Transaction ID of request doing this
    :param otter.models.interface.IScalingGroup scaling_group: the scaling
        group object
    :param bool force: Should group be deleted even if it has servers?

    :return: Deferred that fires with None
    :raise: `GroupNotEmptyError` if group is not empty and force=False
    """

    def check_and_delete(_group, state):
        if state.desired == 0:
            d = trigger_convergence_deletion(dispatcher, group, trans_id)
            return d.addCallback(lambda _: state)
        else:
            raise GroupNotEmptyError(group.tenant_id, group.uuid)

    if tenant_is_enabled(group.tenant_id, config_value):
        if force:
            # We don't care about the servers in the group, so trigger
            # deletion, since it takes precedence over any other status.
            d = trigger_convergence_deletion(dispatcher, group, trans_id)
        else:
            # Delete only if desired is 0. This must be done under a lock so
            # that desired is not modified by another thread/node that is
            # executing a policy at the same time.
            d = group.modify_state(
                check_and_delete,
                modify_state_reason='delete_group')
    else:
        if force:
            d = empty_group(log, trans_id, group)
            d.addCallback(lambda _: group.delete_group())
        else:
            d = group.delete_group()
    return d
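A hedged usage sketch of the non-forced path, where ``GroupNotEmptyError`` is expected to surface through the returned Deferred; the response codes are assumptions:

def on_not_empty(f):
    f.trap(GroupNotEmptyError)
    request.setResponseCode(403)

d = delete_group(dispatcher, log, trans_id, group, force=False)
d.addCallbacks(lambda _: request.setResponseCode(204), on_not_empty)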
Example #25
def modify_and_trigger(dispatcher, group, logargs, modifier, *args, **kwargs):
    """
    Modify the group state and then trigger convergence

    :param IScalingGroup group: Scaling group whose state is getting modified
    :param log: Bound logger
    :param modifier: Callable as described in IScalingGroup.modify_state

    :return: Deferred with None
    """
    cannot_exec_pol_err = None
    try:
        yield group.modify_state(modifier, *args, **kwargs)
    except CannotExecutePolicyError as ce:
        cannot_exec_pol_err = ce
    if tenant_is_enabled(group.tenant_id, config_value):
        eff = Effect(
            BoundFields(trigger_convergence(group.tenant_id, group.uuid),
                        logargs))
        yield perform(dispatcher, eff)
    if cannot_exec_pol_err is not None:
        raise cannot_exec_pol_err
Example #26
def delete_group(dispatcher, log, trans_id, group, force):
    """
    Delete group based on the kind of tenant

    :param log: Bound logger
    :param str trans_id: Transaction ID of request doing this
    :param otter.models.interface.IScalingGroup scaling_group: the scaling
        group object
    :param bool force: Should group be deleted even if it has servers?

    :return: Deferred that fires with None
    :raise: `GroupNotEmptyError` if group is not empty and force=False
    """
    def check_and_delete(_group, state):
        if state.desired == 0:
            d = trigger_convergence_deletion(dispatcher, group, trans_id)
            return d.addCallback(lambda _: state)
        else:
            raise GroupNotEmptyError(group.tenant_id, group.uuid)

    if tenant_is_enabled(group.tenant_id, config_value):
        if force:
            # We don't care about the servers in the group, so trigger
            # deletion, since it takes precedence over any other status.
            d = trigger_convergence_deletion(dispatcher, group, trans_id)
        else:
            # Delete only if desired is 0. This must be done under a lock so
            # that desired is not modified by another thread/node that is
            # executing a policy at the same time.
            d = group.modify_state(check_and_delete,
                                   modify_state_reason='delete_group')
    else:
        if force:
            d = empty_group(log, trans_id, group)
            d.addCallback(lambda _: group.delete_group())
        else:
            d = group.delete_group()
    return d
Example #27
def test_all(self):
    """When the value is ``'none'``, True is returned for any tenant."""
    def get_config_value(config_key):
        self.assertEqual(config_key, "non-convergence-tenants")
        return 'none'
    self.assertEqual(tenant_is_enabled('foo', get_config_value), True)
Example #28
def test_unconfigured(self):
    """
    When no `non-convergence-tenants` key is available in the config,
    every tenant has convergence enabled.
    """
    self.assertEqual(tenant_is_enabled('foo', lambda x: None), True)
Example #29
def test_unconfigured(self):
    """
    When no `convergence-tenants` key is available in the config, False is
    returned.
    """
    self.assertEqual(tenant_is_enabled('foo', lambda x: None), False)