Example #1
def node_feed_req(lb_id, node_id, response):
    """
    Return an (intent, performer) sequence for getting a CLB node's feed,
    wrapped in a retry intent.

    :param lb_id: Load balancer ID
    :param node_id: LB node ID
    :param response: The response returned when getting the CLB node feed. It
        is either a string containing the feed or an Exception object that
        will be raised when getting the feed.

    :return: (intent, performer) tuple
    """
    if isinstance(response, Exception):
        def handler(i): raise response
    else:
        def handler(i): return response
    return (
        Retry(
            effect=mock.ANY,
            should_retry=ShouldDelayAndRetry(
                can_retry=retry_times(5),
                next_interval=exponential_backoff_interval(2))
        ),
        nested_sequence([(("gcnf", lb_id, node_id), handler)])
    )
Example #2
def lb_req(url, json_response, response):
    """
    Return a SequenceDispatcher two-tuple that matches a service request to a
    particular load balancer endpoint (using GET), and returns the given
    ``response`` as the content in an HTTP 200 ``StubResponse``. If
    ``response`` is an Exception, it is raised instead.
    """
    if isinstance(response, Exception):
        def handler(i): raise response
        log_seq = []
    else:
        def handler(i): return (StubResponse(200, {}), response)
        log_seq = [(Log(mock.ANY, mock.ANY), lambda i: None)]
    return (
        Retry(
            effect=mock.ANY,
            should_retry=ShouldDelayAndRetry(
                can_retry=retry_times(5),
                next_interval=exponential_backoff_interval(2))
        ),
        nested_sequence([
            (service_request(
                ServiceType.CLOUD_LOAD_BALANCERS,
                'GET', url, json_response=json_response).intent,
             handler)
        ] + log_seq)
    )
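A minimal usage sketch, assuming a hypothetical ``eff_under_test`` and an
illustrative URL (neither comes from the original code): the two-tuple
returned by ``lb_req`` slots directly into the sequence that
``perform_sequence`` (used in the ``_perform_add_event`` examples below)
dispatches against the effect being tested.

# Hypothetical sketch; 'eff_under_test' and the URL are placeholders.
seq = [lb_req('loadbalancers/12345/nodes', True, {'nodes': []})]
result = perform_sequence(seq, eff_under_test)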
Example #3
def node_feed_req(lb_id, node_id, response):
    """
    Return an (intent, performer) sequence for getting a CLB node's feed,
    wrapped in a retry intent.

    :param lb_id: Load balancer ID
    :param node_id: LB node ID
    :param response: The response returned when getting the CLB node feed. It
        is either a string containing the feed or an Exception object that
        will be raised when getting the feed.

    :return: (intent, performer) tuple
    """
    if isinstance(response, Exception):

        def handler(i):
            raise response
    else:

        def handler(i):
            return response

    return (Retry(effect=mock.ANY,
                  should_retry=ShouldDelayAndRetry(
                      can_retry=retry_times(5),
                      next_interval=exponential_backoff_interval(2))),
            nested_sequence([(("gcnf", lb_id, node_id), handler)]))
Example #4
def lb_req(url, json_response, response):
    """
    Return a SequenceDispatcher two-tuple that matches a service request to a
    particular load balancer endpoint (using GET), and returns the given
    ``response`` as the content in an HTTP 200 ``StubResponse``. If
    ``response`` is an Exception, it is raised instead.
    """
    if isinstance(response, Exception):
        def handler(i): raise response
        log_seq = []
    else:
        def handler(i): return (StubResponse(200, {}), response)
        log_seq = [(Log(mock.ANY, mock.ANY), lambda i: None)]
    return (
        Retry(
            effect=mock.ANY,
            should_retry=ShouldDelayAndRetry(
                can_retry=retry_times(5),
                next_interval=exponential_backoff_interval(2))
        ),
        nested_sequence([
            (service_request(
                ServiceType.CLOUD_LOAD_BALANCERS,
                'GET', url, json_response=json_response).intent,
             handler)
        ] + log_seq)
    )
Example #5
    def _perform_add_event(self, response_sequence):
        """
        Given a sequence of functions that take an intent and return a
        response (or raise an exception), perform :func:`add_event` and
        return the result.
        """
        log = object()
        eff = add_event(self.event, 'tid', 'ord', log)
        uid = '00000000-0000-0000-0000-000000000000'

        svrq = service_request(
            ServiceType.CLOUD_FEEDS,
            'POST',
            'autoscale/events',
            headers={'content-type': ['application/vnd.rackspace.atom+json']},
            data=self._get_request('INFO', uid, 'tid'),
            log=log,
            success_pred=has_code(201),
            json_response=False)

        seq = [
            (TenantScope(mock.ANY, 'tid'),
             nested_sequence([
                 retry_sequence(
                     Retry(effect=svrq,
                           should_retry=ShouldDelayAndRetry(
                               can_retry=mock.ANY,
                               next_interval=exponential_backoff_interval(2))),
                     response_sequence)
             ]))
        ]

        return perform_sequence(seq, eff)
Example #6
    def _perform_add_event(self, response_sequence):
        """
        Given a sequence of functions that take an intent and return a
        response (or raise an exception), perform :func:`add_event` and
        return the result.
        """
        log = object()
        eff = add_event(self.event, 'tid', 'ord', log)
        uid = '00000000-0000-0000-0000-000000000000'

        svrq = service_request(
            ServiceType.CLOUD_FEEDS, 'POST', 'autoscale/events',
            headers={
                'content-type': ['application/vnd.rackspace.atom+json']},
            data=self._get_request('INFO', uid, 'tid'), log=log,
            success_pred=has_code(201),
            json_response=False)

        seq = [
            (TenantScope(mock.ANY, 'tid'), nested_sequence([
                retry_sequence(
                    Retry(effect=svrq, should_retry=ShouldDelayAndRetry(
                        can_retry=mock.ANY,
                        next_interval=exponential_backoff_interval(2))),
                    response_sequence
                )
            ]))
        ]

        return perform_sequence(seq, eff)
Example #7
def convergence_remove_server_from_group(log, transaction_id, server_id,
                                         replace, purge, group, state):
    """
    Remove a specific server from the group, optionally decrementing the
    desired capacity.

    The server may just be scheduled for deletion, or it may be evicted from
    the group by removing otter-specific metadata from the server.

    :param log: A bound logger
    :param bytes transaction_id: The transaction id for this operation.
    :param bytes server_id: The id of the server to be removed.
    :param bool replace: Should the server be replaced?
    :param bool purge: Should the server be deleted from Nova?
    :param group: The scaling group to remove a server from.
    :type group: :class:`~otter.models.interface.IScalingGroup`
    :param state: The current state of the group.
    :type state: :class:`~otter.models.interface.GroupState`

    :return: The updated state.
    :rtype: Effect of :class:`~otter.models.interface.GroupState`

    :raise: :class:`CannotDeleteServerBelowMinError` if the server cannot
        be deleted without replacement, and :class:`ServerNotFoundError` if
        there is no such server to be deleted.
    """
    effects = [_is_server_in_group(group, server_id)]
    if not replace:
        effects.append(_can_scale_down(group, server_id))

    # the (possibly) two checks can happen in parallel, but we want
    # ServerNotFoundError to take precedence over
    # CannotDeleteServerBelowMinError
    both_checks = yield parallel_all_errors(effects)
    for is_error, result in both_checks:
        if is_error:
            reraise(*result)

    # Remove the server
    if purge:
        eff = set_nova_metadata_item(server_id, *DRAINING_METADATA)
    else:
        eff = Effect(
            EvictServerFromScalingGroup(log=log,
                                        transaction_id=transaction_id,
                                        scaling_group=group,
                                        server_id=server_id))
    yield Effect(
        TenantScope(
            retry_effect(eff, retry_times(3), exponential_backoff_interval(2)),
            group.tenant_id))

    if not replace:
        yield do_return(assoc_obj(state, desired=state.desired - 1))
    else:
        yield do_return(state)
Example #8
 def test_exp_backoff_interval(self):
     """
     ``exponential_backoff_interval`` returns the previous interval * 2 every
     time it is called.
     """
     err = DummyException()
     next_interval = exponential_backoff_interval(3)
     self.assertEqual(next_interval(err), 3)
     self.assertEqual(next_interval(err), 6)
     self.assertEqual(next_interval(err), 12)
Example #9
 def test_exp_backoff_interval(self):
     """
     ``exponential_backoff_interval`` returns the previous interval * 2 every
     time it is called.
     """
     err = DummyException()
     next_interval = exponential_backoff_interval(3)
     self.assertEqual(next_interval(err), 3)
     self.assertEqual(next_interval(err), 6)
     self.assertEqual(next_interval(err), 12)
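The two copies of ``test_exp_backoff_interval`` above pin down the observable
behaviour of ``exponential_backoff_interval``: the first call returns the
starting interval and every subsequent call doubles it. A minimal sketch
compatible with that behaviour (an assumption for illustration, not the
project's actual implementation) could look like this:

from itertools import count


def exponential_backoff_interval(start=2):
    """
    Minimal sketch, not necessarily the real implementation: return a
    ``next_interval`` callable that starts at ``start`` seconds and doubles
    on every call, matching ``test_exp_backoff_interval``.
    """
    doublings = count()

    def next_interval(failure):
        # The failure argument is accepted because the retry machinery passes
        # it, but the delay depends only on how many calls have been made.
        return start * 2 ** next(doublings)

    return next_interval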
Example #10
def convergence_remove_server_from_group(
        log, transaction_id, server_id, replace, purge, group, state):
    """
    Remove a specific server from the group, optionally decrementing the
    desired capacity.

    The server may just be scheduled for deletion, or it may be evicted from
    the group by removing otter-specific metadata from the server.

    :param log: A bound logger
    :param bytes transaction_id: The transaction id for this operation.
    :param bytes server_id: The id of the server to be removed.
    :param bool replace: Should the server be replaced?
    :param bool purge: Should the server be deleted from Nova?
    :param group: The scaling group to remove a server from.
    :type group: :class:`~otter.models.interface.IScalingGroup`
    :param state: The current state of the group.
    :type state: :class:`~otter.models.interface.GroupState`

    :return: The updated state.
    :rtype: Effect of :class:`~otter.models.interface.GroupState`

    :raise: :class:`CannotDeleteServerBelowMinError` if the server cannot
        be deleted without replacement, and :class:`ServerNotFoundError` if
        there is no such server to be deleted.
    """
    effects = [_is_server_in_group(group, server_id)]
    if not replace:
        effects.append(_can_scale_down(group, server_id))

    # the (possibly) two checks can happen in parallel, but we want
    # ServerNotFoundError to take precedence over
    # CannotDeleteServerBelowMinError
    both_checks = yield parallel_all_errors(effects)
    for is_error, result in both_checks:
        if is_error:
            reraise(*result)

    # Remove the server
    if purge:
        eff = set_nova_metadata_item(server_id, *DRAINING_METADATA)
    else:
        eff = Effect(
            EvictServerFromScalingGroup(log=log,
                                        transaction_id=transaction_id,
                                        scaling_group=group,
                                        server_id=server_id))
    yield Effect(TenantScope(
        retry_effect(eff, retry_times(3), exponential_backoff_interval(2)),
        group.tenant_id))

    if not replace:
        yield do_return(assoc_obj(state, desired=state.desired - 1))
    else:
        yield do_return(state)
Example #11
def add_event(event, admin_tenant_id, region, log):
    """
    Add event to cloud feeds
    """
    event, error, timestamp, event_tenant_id, event_id = sanitize_event(event)
    req = prepare_request(request_format, event, error, timestamp, region, event_tenant_id, event_id)

    eff = retry_effect(
        publish_autoscale_event(req, log=log),
        compose_retries(lambda f: (not f.check(APIError) or f.value.code < 400 or f.value.code >= 500), retry_times(5)),
        exponential_backoff_interval(2),
    )
    return Effect(TenantScope(tenant_id=admin_tenant_id, effect=eff))
Example #12
    def as_effect(self):
        """Produce a :obj:`Effect` to delete a server."""

        eff = retry_effect(
            delete_and_verify(self.server_id), can_retry=retry_times(3),
            next_interval=exponential_backoff_interval(2))

        def report_success(result):
            return StepResult.RETRY, [
                ErrorReason.String(
                    'must re-gather after deletion in order to update the '
                    'active cache')]

        return eff.on(success=report_success)
Example #13
    def as_effect(self):
        """Produce a :obj:`Effect` to delete a server."""

        eff = retry_effect(
            delete_and_verify(self.server_id), can_retry=retry_times(3),
            next_interval=exponential_backoff_interval(2))

        def report_success(result):
            return StepResult.RETRY, [
                ErrorReason.String(
                    'must re-gather after deletion in order to update the '
                    'active cache')]

        return eff.on(success=report_success)
Example #14
def add_event(event, admin_tenant_id, region, log):
    """
    Add event to cloud feeds
    """
    event, error, timestamp, event_tenant_id, event_id = sanitize_event(event)
    req = prepare_request(request_format, event, error, timestamp, region,
                          event_tenant_id, event_id)

    eff = retry_effect(
        publish_autoscale_event(req, log=log),
        compose_retries(
            lambda f: (not f.check(APIError) or
                       f.value.code < 400 or
                       f.value.code >= 500),
            retry_times(5)),
        exponential_backoff_interval(2))
    return Effect(TenantScope(tenant_id=admin_tenant_id, effect=eff))
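``add_event`` combines two retry predicates with ``compose_retries``: a lambda
that refuses to retry 4xx ``APIError`` responses, and ``retry_times(5)``.
Judging from that call site, ``compose_retries`` ANDs its arguments together;
a hypothetical sketch of such a combinator (not necessarily the project's
implementation) is:

def compose_retries(*can_retry_fns):
    """
    Illustrative sketch: combine several ``can_retry`` predicates into one
    that only allows a retry when every constituent predicate approves of
    the failure.
    """
    def can_retry(failure):
        return all(f(failure) for f in can_retry_fns)
    return can_retry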
Example #15
def verified_delete(log,
                    server_endpoint,
                    request_bag,
                    server_id,
                    exp_start=2,
                    max_retries=10,
                    clock=None):
    """
    Attempt to delete a server from the server endpoint, and ensure that it is
    deleted by trying again until deleting/getting the server results in a 404
    or until ``OS-EXT-STS:task_state`` in server details is 'deleting',
    indicating that Nova has acknowledged that the server is to be deleted
    as soon as possible.

    Time out attempting to verify deletes after a period of time and log an
    error.

    :param log: A bound logger.
    :param str server_endpoint: Server endpoint URI.
    :param request_bag: Request data, including the Keystone auth token.
    :param str server_id: Opaque nova server id.
    :param int exp_start: Exponential backoff interval start, in seconds.
        Defaults to 2.
    :param int max_retries: Maximum number of retry attempts.
    :param clock: Optional clock used to schedule retries; defaults to the
        global Twisted reactor.

    :return: Deferred that fires when the expected status has been seen.
    """
    serv_log = log.bind(server_id=server_id)
    serv_log.msg('Deleting server')

    if clock is None:  # pragma: no cover
        from twisted.internet import reactor
        clock = reactor

    d = retry(
        partial(delete_and_verify, serv_log, server_endpoint, request_bag,
                server_id, clock),
        can_retry=retry_times(max_retries),
        next_interval=exponential_backoff_interval(exp_start),
        clock=clock)

    d.addCallback(log_with_time, clock, serv_log, clock.seconds(),
                  ('Server deleted successfully (or acknowledged by Nova as '
                   'to-be-deleted) : {time_delete} seconds.'), 'time_delete')
    return d
Example #16
def _is_server_in_group(group, server_id):
    """
    Given a group and server ID, determines if the server is a member of
    the group.  If it isn't, it raises a :class:`ServerNotFoundError`.
    """
    try:
        response, server_info = yield Effect(
            TenantScope(
                retry_effect(get_server_details(server_id), retry_times(3),
                             exponential_backoff_interval(2)),
                group.tenant_id))
    except NoSuchServerError:
        raise ServerNotFoundError(group.tenant_id, group.uuid, server_id)

    group_id = group_id_from_metadata(
        get_in(('server', 'metadata'), server_info, {}))

    if group_id != group.uuid:
        raise ServerNotFoundError(group.tenant_id, group.uuid, server_id)
Example #17
def verified_delete(log,
                    server_endpoint,
                    request_bag,
                    server_id,
                    exp_start=2,
                    max_retries=10,
                    clock=None):
    """
    Attempt to delete a server from the server endpoint, and ensure that it is
    deleted by trying again until deleting/getting the server results in a 404
    or until ``OS-EXT-STS:task_state`` in server details is 'deleting',
    indicating that Nova has acknowledged that the server is to be deleted
    as soon as possible.

    Time out attempting to verify deletes after a period of time and log an
    error.

    :param log: A bound logger.
    :param str server_endpoint: Server endpoint URI.
    :param request_bag: Request data, including the Keystone auth token.
    :param str server_id: Opaque nova server id.
    :param int exp_start: Exponential backoff interval start, in seconds.
        Defaults to 2.
    :param int max_retries: Maximum number of retry attempts.
    :param clock: Optional clock used to schedule retries; defaults to the
        global Twisted reactor.

    :return: Deferred that fires when the expected status has been seen.
    """
    serv_log = log.bind(server_id=server_id)
    serv_log.msg('Deleting server')

    if clock is None:  # pragma: no cover
        from twisted.internet import reactor
        clock = reactor

    d = retry(partial(delete_and_verify, serv_log, server_endpoint,
                      request_bag, server_id, clock),
              can_retry=retry_times(max_retries),
              next_interval=exponential_backoff_interval(exp_start),
              clock=clock)

    d.addCallback(log_with_time, clock, serv_log, clock.seconds(),
                  ('Server deleted successfully (or acknowledged by Nova as '
                   'to-be-deleted) : {time_delete} seconds.'), 'time_delete')
    return d
Example #18
    def test_delete_server(self, mock_dav):
        """
        :obj:`DeleteServer.as_effect` calls `delete_and_verify` with
        retries. On success it returns RETRY so that a re-gather updates the
        active cache.
        """
        mock_dav.side_effect = lambda sid: Effect(sid)
        eff = DeleteServer(server_id='abc123').as_effect()
        self.assertIsInstance(eff.intent, Retry)
        self.assertEqual(
            eff.intent.should_retry,
            ShouldDelayAndRetry(can_retry=retry_times(3),
                                next_interval=exponential_backoff_interval(2)))
        self.assertEqual(eff.intent.effect.intent, 'abc123')

        self.assertEqual(
            resolve_effect(eff, (None, {})),
            (StepResult.RETRY,
             [ErrorReason.String('must re-gather after deletion in order to '
                                 'update the active cache')]))
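``test_delete_server`` compares ``ShouldDelayAndRetry`` instances directly, so
``retry_times(3)`` and ``exponential_backoff_interval(2)`` must produce values
that compare equal across separate calls. The sketch below only illustrates
the intended behaviour of ``retry_times`` (allow a bounded number of retries);
a bare closure like this would not, on its own, satisfy that equality
requirement.

from itertools import count


def retry_times(max_retries):
    """
    Illustrative sketch only, not the project's implementation: permit up to
    ``max_retries`` retries, then give up.
    """
    attempts = count(1)

    def can_retry(failure):
        return next(attempts) <= max_retries

    return can_retry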
Example #19
def _is_server_in_group(group, server_id):
    """
    Given a group and server ID, determines if the server is a member of
    the group.  If it isn't, it raises a :class:`ServerNotFoundError`.
    """
    try:
        response, server_info = yield Effect(
            TenantScope(
                retry_effect(get_server_details(server_id), retry_times(3), exponential_backoff_interval(2)),
                group.tenant_id,
            )
        )
    except NoSuchServerError:
        raise ServerNotFoundError(group.tenant_id, group.uuid, server_id)

    group_id = group_id_from_metadata(get_in(("server", "metadata"), server_info, {}))

    if group_id != group.uuid:
        raise ServerNotFoundError(group.tenant_id, group.uuid, server_id)
Example #20
def get_all_server_details(tenant_id, authenticator, service_name, region,
                           limit=100, clock=None, _treq=None):
    """
    Return all servers of a tenant.
    TODO: service_name is possibly internal to this function but I don't want to pass config here?
    NOTE: This really screams to be an independent txcloud-type API.
    """
    token, catalog = yield authenticator.authenticate_tenant(tenant_id, log=default_log)
    endpoint = public_endpoint_url(catalog, service_name, region)
    url = append_segments(endpoint, 'servers', 'detail')
    query = {'limit': limit}
    all_servers = []

    if clock is None:  # pragma: no cover
        from twisted.internet import reactor as clock

    if _treq is None:  # pragma: no cover
        _treq = treq

    def fetch(url, headers):
        d = _treq.get(url, headers=headers)
        d.addCallback(check_success, [200], _treq=_treq)
        d.addCallback(_treq.json_content)
        return d

    while True:
        # sort based on query name to make the tests predictable
        urlparams = sorted(query.items(), key=lambda e: e[0])
        d = retry(partial(fetch, '{}?{}'.format(url, urlencode(urlparams)), headers(token)),
                  can_retry=retry_times(5),
                  next_interval=exponential_backoff_interval(2), clock=clock)
        servers = (yield d)['servers']
        all_servers.extend(servers)
        if len(servers) < limit:
            break
        query.update({'marker': servers[-1]['id']})

    defer.returnValue(all_servers)
Example #21
def _retry(eff):
    """Retry an effect with a common policy."""
    return retry_effect(
        eff, retry_times(5), exponential_backoff_interval(2))
Example #22
def _retry(eff):
    """Retry an effect with a common policy."""
    return retry_effect(eff, retry_times(5), exponential_backoff_interval(2))
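A hypothetical usage note (the endpoint path below is a placeholder, not from
the original code): ``_retry`` is applied to an effect before it is performed,
for example around a ``service_request`` effect, so every caller gets the same
five-attempt exponential-backoff policy.

# Hypothetical sketch; the path 'loadbalancers' is only illustrative.
eff = _retry(
    service_request(ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers'))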