Example #1
 def test_nested_boundfields(self):
     """
     BoundFields effects can be nested, and any log effects inside them
     are performed with all of the bound fields.
     """
     eff = Effect(Constant("foo")).on(lambda _: msg("foo", m="d")).on(lambda _: Effect(Constant("goo")))
     e = Effect(Constant("abc")).on(lambda _: with_log(eff, i="a")).on(lambda _: Effect(Constant("def")))
     self.assertEqual(sync_perform(self.disp, with_log(e, o="f")), "def")
     self.log.msg.assert_called_once_with("foo", i="a", f1="v", m="d", o="f")
Example #2
 def test_nested_boundfields(self):
     """
     BoundFields effects can be nested, and any log effects inside them
     are performed with all of the bound fields.
     """
     eff = Effect(Constant("foo")).on(lambda _: msg("foo", m='d')).on(
         lambda _: Effect(Constant("goo")))
     e = Effect(Constant("abc")).on(lambda _: with_log(eff, i='a')).on(
         lambda _: Effect(Constant("def")))
     self.assertEqual(sync_perform(self.disp, with_log(e, o='f')), "def")
     self.log.msg.assert_called_once_with('foo',
                                          i='a',
                                          f1='v',
                                          m='d',
                                          o='f')
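
The behaviour these two variants test, nested BoundFields wrappers merging their fields into every log call made inside them, can be illustrated with a minimal, self-contained sketch built only on the public ``effect`` API. ``FieldsBound``, ``EmitMsg`` and ``make_dispatcher`` below are hypothetical stand-ins for otter's ``BoundFields``/log machinery, not its actual implementation.

from effect import (ComposedDispatcher, Constant, Effect, TypeDispatcher,
                    base_dispatcher, sync_perform, sync_performer)


class FieldsBound(object):
    """Wrap an effect so log intents performed inside it see extra fields."""
    def __init__(self, effect, fields):
        self.effect = effect
        self.fields = fields


class EmitMsg(object):
    """A toy log intent: a message plus its own fields."""
    def __init__(self, message, fields):
        self.message = message
        self.fields = fields


def make_dispatcher(emitted, bound=None):
    """Build a dispatcher that appends (message, fields) pairs to ``emitted``."""
    bound = bound or {}

    @sync_performer
    def perform_fields_bound(dispatcher, intent):
        # Perform the wrapped effect with the merged fields bound; inner
        # wrappers therefore accumulate on top of outer ones.
        merged = dict(bound, **intent.fields)
        return sync_perform(make_dispatcher(emitted, merged), intent.effect)

    @sync_performer
    def perform_emit_msg(dispatcher, intent):
        emitted.append((intent.message, dict(bound, **intent.fields)))

    return ComposedDispatcher([
        TypeDispatcher({FieldsBound: perform_fields_bound,
                        EmitMsg: perform_emit_msg}),
        base_dispatcher])


emitted = []
inner = Effect(Constant("foo")).on(
    lambda _: Effect(EmitMsg("foo", {"m": "d"})))
nested = Effect(FieldsBound(Effect(FieldsBound(inner, {"i": "a"})), {"o": "f"}))
sync_perform(make_dispatcher(emitted), nested)
assert emitted == [("foo", {"m": "d", "i": "a", "o": "f"})]

Performing ``nested`` emits a single message carrying its own field plus both wrappers' fields, which matches the ``assert_called_once_with`` expectation above apart from the ``f1='v'`` field that the tests' fixture dispatcher binds.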
Example #3
 def test_log_none_effectful_fields(self):
     """
     When log is not passed, but there are log fields from BoundFields,
     the log passed to treq has those fields.
     """
     log = mock_log()
     # we have to include system='otter' in the expected log here because
     # the code falls back to otter.log.log, which has the system key bound.
     expected_log = matches(IsBoundWith(bound='stuff', system='otter'))
     req = ('GET', 'http://google.com/', None, None, None, {
         'log': expected_log
     })
     response = StubResponse(200, {})
     treq = StubTreq(reqs=[(req, response)],
                     contents=[(response, "content")])
     req = Request(method="get", url="http://google.com/")
     req.treq = treq
     req_eff = Effect(req)
     bound_log_eff = with_log(req_eff, bound='stuff')
     dispatcher = ComposedDispatcher(
         [get_simple_dispatcher(None),
          get_log_dispatcher(log, {})])
     self.assertEqual(
         self.successResultOf(perform(dispatcher, bound_log_eff)),
         (response, "content"))
Example #4
 def _with_conv_runid(self, eff):
     """
     Return the given Effect wrapped with a converger_run_id log field.
     """
     return Effect(Func(uuid.uuid4)).on(str).on(
         lambda uid: with_log(eff, otter_service='converger',
                              converger_run_id=uid))
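
The same ``Effect(Func(...)).on(...)`` idiom can be exercised outside otter. Here is a small runnable sketch of the pattern, generating a value lazily at perform time and threading it into a wrapped effect, using only the public ``effect`` API; ``tag_with_run_id`` and the tuple-tagging are illustrative substitutes, since binding log fields with ``with_log`` would need otter's log dispatcher.

import uuid

from effect import Constant, Effect, Func, base_dispatcher, sync_perform


def tag_with_run_id(eff):
    # Func defers the uuid4() call until the effect is performed, so every
    # performance of the returned effect gets a fresh id.
    return Effect(Func(uuid.uuid4)).on(str).on(
        lambda run_id: eff.on(lambda result: (run_id, result)))


run_id, result = sync_perform(base_dispatcher, tag_with_run_id(
    Effect(Constant("work-result"))))
assert result == "work-result" and len(run_id) == 36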
Example #5
def conv_pause_group_eff(group, transaction_id):
    """
    Pause a scaling group belonging to a convergence-enabled tenant.
    """
    eff = parallel([Effect(ModifyGroupStatePaused(group, True)),
                    delete_divergent_flag(group.tenant_id, group.uuid, -1)])
    return with_log(eff, transaction_id=transaction_id,
                    tenant_id=group.tenant_id,
                    scaling_group_id=group.uuid).on(lambda _: None)
Example #6
def conv_resume_group_eff(trans_id, group):
    """
    Resume a scaling group belonging to a convergence-enabled tenant.
    """
    eff = parallel([
        Effect(ModifyGroupStatePaused(group, False)),
        mark_divergent(group.tenant_id, group.uuid).on(
            lambda _: msg("mark-dirty-success"))])
    return with_log(eff, transaction_id=trans_id, tenant_id=group.tenant_id,
                    scaling_group_id=group.uuid).on(lambda _: None)
Example #7
 def test_boundfields(self):
     """
     When an effect is wrapped with `BoundFields`, any logging effect
     inside it is performed with the fields set up in `BoundFields`.
     """
     f = object()
     eff = Effect(Constant("foo")).on(lambda _: err(f, "yo", a='b')).on(
         lambda _: msg("foo", m='d')).on(lambda _: Effect(Constant("goo")))
     eff = with_log(eff, bf='new')
     self.assertEqual(sync_perform(self.disp, eff), "goo")
     self.log.msg.assert_called_once_with("foo", f1='v', bf='new', m='d')
     self.log.err.assert_called_once_with(f, "yo", f1='v', bf='new', a='b')
Example #8
def conv_pause_group_eff(group, transaction_id):
    """
    Pause a scaling group belonging to a convergence-enabled tenant.
    """
    eff = parallel([
        Effect(ModifyGroupStatePaused(group, True)),
        delete_divergent_flag(group.tenant_id, group.uuid, -1)
    ])
    return with_log(eff,
                    transaction_id=transaction_id,
                    tenant_id=group.tenant_id,
                    scaling_group_id=group.uuid).on(lambda _: None)
Example #9
def conv_resume_group_eff(trans_id, group):
    """
    Resume a scaling group belonging to a convergence-enabled tenant.
    """
    eff = parallel([
        Effect(ModifyGroupStatePaused(group, False)),
        mark_divergent(group.tenant_id,
                       group.uuid).on(lambda _: msg("mark-dirty-success"))
    ])
    return with_log(eff,
                    transaction_id=trans_id,
                    tenant_id=group.tenant_id,
                    scaling_group_id=group.uuid).on(lambda _: None)
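
Both the pause and resume helpers end with ``.on(lambda _: None)``, which discards the results of the parallel branches so callers only observe success or failure. A tiny sketch of that idiom using only the public ``effect`` API (names are illustrative):

from effect import Constant, Effect, base_dispatcher, sync_perform

steps = Effect(Constant("pause-recorded")).on(
    lambda _: Effect(Constant("flag-deleted")))

# Without normalisation the caller sees whatever the last step returned;
# with it, the composite effect always resolves to None.
assert sync_perform(base_dispatcher, steps) == "flag-deleted"
assert sync_perform(base_dispatcher, steps.on(lambda _: None)) is None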
Example #10
def remove_server_from_group(dispatcher,
                             log,
                             trans_id,
                             server_id,
                             replace,
                             purge,
                             group,
                             state,
                             config_value=config_value):
    """
    Remove a specific server from the group, optionally replacing it
    with a new one, and optionally deleting the old one from Nova.

    If the old server is not deleted from Nova, its otter-specific metadata
    is removed; otherwise, a different part of otter may later mistake
    the server for one that *should* still be in the group.

    :param log: A bound logger
    :param bytes trans_id: The transaction id for this operation.
    :param bytes server_id: The id of the server to be removed.
    :param bool replace: Should the server be replaced?
    :param bool purge: Should the server be deleted from Nova?
    :param group: The scaling group to remove a server from.
    :type group: :class:`~otter.models.interface.IScalingGroup`
    :param state: The current state of the group.
    :type state: :class:`~otter.models.interface.GroupState`

    :return: The updated state.
    :rtype: deferred :class:`~otter.models.interface.GroupState`
    """
    # worker case
    if not tenant_is_enabled(group.tenant_id, config_value):
        return worker_remove_server_from_group(log, trans_id, server_id,
                                               replace, purge, group, state)

    # convergence case - requires that the convergence dispatcher handles
    # EvictServerFromScalingGroup
    eff = convergence_remove_server_from_group(log, trans_id, server_id,
                                               replace, purge, group, state)

    def kick_off_convergence(new_state):
        ceff = trigger_convergence(group.tenant_id, group.uuid)
        return ceff.on(lambda _: new_state)

    return perform(
        dispatcher,
        with_log(eff.on(kick_off_convergence),
                 tenant_id=group.tenant_id,
                 scaling_group_id=group.uuid,
                 server_id=server_id,
                 transaction_id=trans_id))
Example #11
 def test_boundfields(self):
     """
     When an effect is wrapped with `BoundFields`, any logging effect
     inside it is performed with the fields set up in `BoundFields`.
     """
     f = object()
     eff = Effect(Constant("foo")).on(
             lambda _: err(f, "yo", a='b')).on(
                 lambda _: msg("foo", m='d')).on(
                     lambda _: Effect(Constant("goo")))
     eff = with_log(eff, bf='new')
     self.assertEqual(sync_perform(self.disp, eff), "goo")
     self.log.msg.assert_called_once_with("foo", f1='v', bf='new', m='d')
     self.log.err.assert_called_once_with(f, "yo", f1='v', bf='new', a='b')
Example #12
def check_and_trigger(tenant_id, group_id):
    """
    Trigger convergence on the given group if it is ACTIVE and not paused.
    """
    try:
        group, info = yield Effect(
            GetScalingGroupInfo(tenant_id=tenant_id, group_id=group_id))
    except NoSuchScalingGroupError:
        # Nothing to do if group has been deleted
        yield msg("selfheal-group-deleted",
                  tenant_id=tenant_id, scaling_group_id=group_id)
    else:
        state = info["state"]
        if state.status == ScalingGroupStatus.ACTIVE and (not state.paused):
            yield with_log(
                trigger_convergence(tenant_id, group_id),
                tenant_id=tenant_id, scaling_group_id=group_id)
Example #13
def trigger_convergence_deletion(dispatcher, group, trans_id):
    """
    Trigger deletion of a group that belongs to a convergence tenant

    :param dispatcher: The effect dispatcher used to perform the
        convergence-triggering effect
    :param otter.models.interface.IScalingGroup group: the scaling
        group object
    :param trans_id: The transaction ID for this operation
    """
    # Update group status and trigger convergence
    # The DELETING status takes precedence over any other status
    d = group.update_status(ScalingGroupStatus.DELETING)
    eff = with_log(trigger_convergence(group.tenant_id, group.uuid),
                   tenant_id=group.tenant_id,
                   scaling_group_id=group.uuid,
                   transaction_id=trans_id)
    d.addCallback(lambda _: perform(dispatcher, eff))
    return d
Example #14
def remove_server_from_group(
    dispatcher, log, trans_id, server_id, replace, purge, group, state, config_value=config_value
):
    """
    Remove a specific server from the group, optionally replacing it
    with a new one, and optionally deleting the old one from Nova.

    If the old server is not deleted from Nova, its otter-specific metadata
    is removed; otherwise, a different part of otter may later mistake
    the server for one that *should* still be in the group.

    :param log: A bound logger
    :param bytes trans_id: The transaction id for this operation.
    :param bytes server_id: The id of the server to be removed.
    :param bool replace: Should the server be replaced?
    :param bool purge: Should the server be deleted from Nova?
    :param group: The scaling group to remove a server from.
    :type group: :class:`~otter.models.interface.IScalingGroup`
    :param state: The current state of the group.
    :type state: :class:`~otter.models.interface.GroupState`

    :return: The updated state.
    :rtype: deferred :class:`~otter.models.interface.GroupState`
    """
    # worker case
    if not tenant_is_enabled(group.tenant_id, config_value):
        return worker_remove_server_from_group(log, trans_id, server_id, replace, purge, group, state)

    # convergence case - requires that the convergence dispatcher handles
    # EvictServerFromScalingGroup
    eff = convergence_remove_server_from_group(log, trans_id, server_id, replace, purge, group, state)

    def kick_off_convergence(new_state):
        ceff = trigger_convergence(group.tenant_id, group.uuid)
        return ceff.on(lambda _: new_state)

    return perform(
        dispatcher,
        with_log(
            eff.on(kick_off_convergence),
            tenant_id=group.tenant_id,
            scaling_group_id=group.uuid,
            server_id=server_id,
            transaction_id=trans_id,
        ),
    )
Example #15
def trigger_convergence_deletion(dispatcher, group, trans_id):
    """
    Trigger deletion of a group that belongs to a convergence tenant

    :param dispatcher: The effect dispatcher used to perform the
        convergence-triggering effect
    :param otter.models.interface.IScalingGroup group: the scaling
        group object
    :param trans_id: The transaction ID for this operation
    """
    # Update group status and trigger convergence
    # The DELETING status takes precedence over any other status
    d = group.update_status(ScalingGroupStatus.DELETING)
    eff = with_log(trigger_convergence(group.tenant_id, group.uuid),
                   tenant_id=group.tenant_id,
                   scaling_group_id=group.uuid,
                   transaction_id=trans_id)
    d.addCallback(lambda _: perform(dispatcher, eff))
    return d
Example #16
def check_and_trigger(tenant_id, group_id):
    """
    Trigger convergence on the given group if it is ACTIVE and not paused
    or suspended.
    """
    try:
        group, info = yield Effect(
            GetScalingGroupInfo(tenant_id=tenant_id, group_id=group_id))
    except NoSuchScalingGroupError:
        # Nothing to do if group has been deleted
        yield msg("selfheal-group-deleted",
                  tenant_id=tenant_id,
                  scaling_group_id=group_id)
    else:
        state = info["state"]
        if (state.status == ScalingGroupStatus.ACTIVE
                and not (state.paused or state.suspended)):
            yield with_log(trigger_convergence(tenant_id, group_id),
                           tenant_id=tenant_id,
                           scaling_group_id=group_id)
Example #17
 def test_log_effectful_fields(self):
     """
     The log passed to treq is bound with the fields from BoundFields.
     """
     log = mock_log().bind(duplicate='should be overridden')
     expected_log = matches(IsBoundWith(duplicate='effectful',
                                        bound='stuff'))
     req = ('GET', 'http://google.com/', None, None, None,
            {'log': expected_log})
     response = StubResponse(200, {})
     treq = StubTreq(reqs=[(req, response)],
                     contents=[(response, "content")])
     req = Request(method="get", url="http://google.com/", log=log)
     req.treq = treq
     req_eff = Effect(req)
     bound_log_eff = with_log(req_eff, bound='stuff', duplicate='effectful')
     dispatcher = ComposedDispatcher([
         get_simple_dispatcher(None),
         get_log_dispatcher(log, {})])
     self.assertEqual(
         self.successResultOf(perform(dispatcher, bound_log_eff)),
         (response, "content"))
Example #18
 def test_log_effectful_fields(self):
     """
     The log passed to treq is bound with the fields from BoundFields.
     """
     log = mock_log().bind(duplicate='should be overridden')
     expected_log = matches(
         IsBoundWith(duplicate='effectful', bound='stuff'))
     req = ('GET', 'http://google.com/', None, None, None, {
         'log': expected_log
     })
     response = StubResponse(200, {})
     treq = StubTreq(reqs=[(req, response)],
                     contents=[(response, "content")])
     req = Request(method="get", url="http://google.com/", log=log)
     req.treq = treq
     req_eff = Effect(req)
     bound_log_eff = with_log(req_eff, bound='stuff', duplicate='effectful')
     dispatcher = ComposedDispatcher(
         [get_simple_dispatcher(None),
          get_log_dispatcher(log, {})])
     self.assertEqual(
         self.successResultOf(perform(dispatcher, bound_log_eff)),
         (response, "content"))
Example #19
 def test_log_none_effectful_fields(self):
     """
     When log is not passed, but there are log fields from BoundFields,
     the log passed to treq has those fields.
     """
     log = mock_log()
     # we have to include system='otter' in the expected log here because
     # the code falls back to otter.log.log, which has the system key bound.
     expected_log = matches(IsBoundWith(bound='stuff', system='otter'))
     req = ('GET', 'http://google.com/', None, None, None,
            {'log': expected_log})
     response = StubResponse(200, {})
     treq = StubTreq(reqs=[(req, response)],
                     contents=[(response, "content")])
     req = Request(method="get", url="http://google.com/")
     req.treq = treq
     req_eff = Effect(req)
     bound_log_eff = with_log(req_eff, bound='stuff')
     dispatcher = ComposedDispatcher([
         get_simple_dispatcher(None),
         get_log_dispatcher(log, {})])
     self.assertEqual(
         self.successResultOf(perform(dispatcher, bound_log_eff)),
         (response, "content"))
Example #20
def converge_all_groups(currently_converging,
                        recently_converged,
                        waiting,
                        my_buckets,
                        all_buckets,
                        divergent_flags,
                        build_timeout,
                        interval,
                        limited_retry_iterations,
                        step_limits,
                        converge_one_group=converge_one_group):
    """
    Check for groups that need convergence and which match up to the
    buckets we've been allocated.

    :param Reference currently_converging: pset of currently converging groups
    :param Reference recently_converged: pmap of group ID to time last
        convergence finished
    :param Reference waiting: pmap of group ID to number of iterations already
        waited
    :param my_buckets: The buckets that should be checked for group IDs to
        converge on.
    :param all_buckets: The set of all buckets that can be checked for group
        IDs to converge on.  ``my_buckets`` should be a subset of this.
    :param divergent_flags: divergent flags that were found in zookeeper.
    :param number build_timeout: number of seconds to wait for servers to be
        in building before they are timed out and deleted
    :param number interval: number of seconds between attempts at convergence.
        Groups will not be converged if less than this amount of time has
        passed since the end of their last convergence.
    :param int limited_retry_iterations: number of iterations to wait for
        LIMITED_RETRY steps
    :param dict step_limits: Mapping of step class to number of executions
        allowed in a convergence cycle
    :param callable converge_one_group: function to use to converge a single
        group - to be used for test injection only
    """
    group_infos = get_my_divergent_groups(my_buckets, all_buckets,
                                          divergent_flags)
    # filter out currently converging groups
    cc = yield currently_converging.read()
    group_infos = [info for info in group_infos if info['group_id'] not in cc]
    if not group_infos:
        return
    yield msg('converge-all-groups',
              group_infos=group_infos,
              currently_converging=list(cc))

    @do
    def converge(tenant_id, group_id, dirty_flag):
        stat = yield Effect(GetStat(dirty_flag))
        # If the node disappeared, ignore it. `stat` will be None here if the
        # divergent flag was discovered only after the group is removed from
        # currently_converging, but before the divergent flag is deleted, and
        # then the deletion happens, and then our GetStat happens. This
        # basically means it happens when one convergence is starting as
        # another one for the same group is ending.
        if stat is None:
            yield msg('converge-divergent-flag-disappeared', znode=dirty_flag)
        else:
            eff = converge_one_group(currently_converging, recently_converged,
                                     waiting, tenant_id, group_id,
                                     stat.version, build_timeout,
                                     limited_retry_iterations, step_limits)
            result = yield Effect(TenantScope(eff, tenant_id))
            yield do_return(result)

    recent_groups = yield get_recently_converged_groups(
        recently_converged, interval)
    effs = []
    for info in group_infos:
        tenant_id, group_id = info['tenant_id'], info['group_id']
        if group_id in recent_groups:
            # Don't converge a group if it has recently been converged.
            continue
        eff = converge(tenant_id, group_id, info['dirty-flag'])
        effs.append(
            with_log(eff, tenant_id=tenant_id, scaling_group_id=group_id))

    yield do_return(parallel(effs))
Example #21
 def test_get_fields(self):
     """GetFields results in the fields bound in the effectful context."""
     eff = with_log(get_fields(), ab=12, cd='foo')
     fields = sync_perform(self.disp, eff)
     self.assertEqual(fields, {'f1': 'v', 'ab': 12, 'cd': 'foo'})
Example #22
 def test_get_fields(self):
     """GetFields results in the fields bound in the effectful context."""
     eff = with_log(get_fields(), ab=12, cd='foo')
     fields = sync_perform(self.disp, eff)
     self.assertEqual(fields, {'f1': 'v', 'ab': 12, 'cd': 'foo'})
Example #23
def converge_all_groups(
        currently_converging, recently_converged, waiting,
        my_buckets, all_buckets,
        divergent_flags, build_timeout, interval,
        limited_retry_iterations, step_limits,
        converge_one_group=converge_one_group):
    """
    Check for groups that need convergence and which match up to the
    buckets we've been allocated.

    :param Reference currently_converging: pset of currently converging groups
    :param Reference recently_converged: pmap of group ID to time last
        convergence finished
    :param Reference waiting: pmap of group ID to number of iterations already
        waited
    :param my_buckets: The buckets that should be checked for group IDs to
        converge on.
    :param all_buckets: The set of all buckets that can be checked for group
        IDs to converge on.  ``my_buckets`` should be a subset of this.
    :param divergent_flags: divergent flags that were found in zookeeper.
    :param number build_timeout: number of seconds to wait for servers to be
        in building before they are timed out and deleted
    :param number interval: number of seconds between attempts at convergence.
        Groups will not be converged if less than this amount of time has
        passed since the end of their last convergence.
    :param int limited_retry_iterations: number of iterations to wait for
        LIMITED_RETRY steps
    :param dict step_limits: Mapping of step class to number of executions
        allowed in a convergence cycle
    :param callable converge_one_group: function to use to converge a single
        group - to be used for test injection only
    """
    group_infos = get_my_divergent_groups(
        my_buckets, all_buckets, divergent_flags)
    # filter out currently converging groups
    cc = yield currently_converging.read()
    group_infos = [info for info in group_infos if info['group_id'] not in cc]
    if not group_infos:
        return
    yield msg('converge-all-groups', group_infos=group_infos,
              currently_converging=list(cc))

    @do
    def converge(tenant_id, group_id, dirty_flag):
        stat = yield Effect(GetStat(dirty_flag))
        # If the node disappeared, ignore it. `stat` will be None here if the
        # divergent flag was discovered only after the group is removed from
        # currently_converging, but before the divergent flag is deleted, and
        # then the deletion happens, and then our GetStat happens. This
        # basically means it happens when one convergence is starting as
        # another one for the same group is ending.
        if stat is None:
            yield msg('converge-divergent-flag-disappeared', znode=dirty_flag)
        else:
            eff = converge_one_group(currently_converging, recently_converged,
                                     waiting,
                                     tenant_id, group_id,
                                     stat.version, build_timeout,
                                     limited_retry_iterations, step_limits)
            result = yield Effect(TenantScope(eff, tenant_id))
            yield do_return(result)

    recent_groups = yield get_recently_converged_groups(recently_converged,
                                                        interval)
    effs = []
    for info in group_infos:
        tenant_id, group_id = info['tenant_id'], info['group_id']
        if group_id in recent_groups:
            # Don't converge a group if it has recently been converged.
            continue
        eff = converge(tenant_id, group_id, info['dirty-flag'])
        effs.append(
            with_log(eff, tenant_id=tenant_id, scaling_group_id=group_id))

    yield do_return(parallel(effs))
Example #24
 def test_get_fields(self):
     """GetFields results in the fields bound in the effectful context."""
     eff = with_log(get_fields(), ab=12, cd="foo")
     fields = sync_perform(self.disp, eff)
     self.assertEqual(fields, {"f1": "v", "ab": 12, "cd": "foo"})
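
To close, the ``get_fields`` behaviour these last tests describe, reading back whatever fields the surrounding wrapper bound, can also be sketched with a hypothetical intent pair built only on the public ``effect`` API. ``ReadFields``, ``WithFields`` and ``fields_dispatcher`` are illustrative names, not otter's ``GetFields``/``BoundFields`` implementation.

from effect import (ComposedDispatcher, Effect, TypeDispatcher,
                    base_dispatcher, sync_perform, sync_performer)


class ReadFields(object):
    """Ask for the fields bound in the current effectful context."""


class WithFields(object):
    """Wrap an effect and bind extra fields around it."""
    def __init__(self, effect, fields):
        self.effect = effect
        self.fields = fields


def fields_dispatcher(bound):
    """Build a dispatcher that threads ``bound`` fields through nested wrappers."""
    @sync_performer
    def perform_with_fields(dispatcher, intent):
        # Re-dispatch the wrapped effect with the merged fields bound.
        return sync_perform(
            fields_dispatcher(dict(bound, **intent.fields)), intent.effect)

    @sync_performer
    def perform_read_fields(dispatcher, intent):
        return dict(bound)

    return ComposedDispatcher([
        TypeDispatcher({WithFields: perform_with_fields,
                        ReadFields: perform_read_fields}),
        base_dispatcher])


# Mirrors test_get_fields above: the fixture binds f1='v', the wrapper adds more.
eff = Effect(WithFields(Effect(ReadFields()), {"ab": 12, "cd": "foo"}))
assert sync_perform(fields_dispatcher({"f1": "v"}), eff) == {
    "f1": "v", "ab": 12, "cd": "foo"}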