Example #1
    def test_reactivate_group_on_success_after_steps(self):
        """
        When the group started in ERROR state, and convergence succeeds, the
        group is put back into ACTIVE.
        """
        self.manifest['state'].status = ScalingGroupStatus.ERROR

        def plan(*args, **kwargs):
            return pbag([TestStep(Effect("step"))])

        sequence = [
            parallel_sequence([]),
            (Log(msg='execute-convergence', fields=mock.ANY), noop),
            parallel_sequence([
                [("step", lambda i: (StepResult.SUCCESS, []))]
            ]),
            (Log(msg='execute-convergence-results', fields=mock.ANY), noop),
            (UpdateGroupStatus(scaling_group=self.group,
                               status=ScalingGroupStatus.ACTIVE),
             noop),
            (Log('group-status-active',
                 dict(cloud_feed=True, status='ACTIVE')),
             noop),
            (UpdateServersCache(
                "tenant-id", "group-id", self.now,
                [thaw(self.servers[0].json.set('_is_as_active', True)),
                 thaw(self.servers[1].json.set('_is_as_active', True))]),
             noop),
        ]
        self.assertEqual(
            perform_sequence(self.get_seq() + sequence, self._invoke(plan)),
            (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
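
These tests script effectful code against an ordered list of (intent, performer) pairs. A minimal, self-contained sketch of that pattern, assuming only the `effect` library (the `ask` intent and `get_answer` function are made up for illustration):

from effect import Effect
from effect.testing import perform_sequence

def get_answer():
    # Returns an Effect whose result is supplied by whatever performer
    # the test scripts for the "ask" intent.
    return Effect("ask").on(lambda n: n * 2)

# Each pair is (expected intent, performer); intents must be performed
# in exactly this order, and every pair must be consumed.
seq = [("ask", lambda intent: 21)]
assert perform_sequence(seq, get_answer()) == 42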
Example #2
    def test_log_steps(self):
        """The steps to be executed are logged to cloud feeds."""
        step = CreateServer(server_config=pmap({"foo": "bar"}))
        step.as_effect = lambda: Effect("create-server")

        def plan(*args, **kwargs):
            return pbag([step])

        sequence = [
            parallel_sequence([
                [parallel_sequence([
                    [(Log('convergence-create-servers',
                          {'num_servers': 1, 'server_config': {'foo': 'bar'},
                           'cloud_feed': True}),
                      noop)]
                ])]
            ]),
            (Log(msg='execute-convergence', fields=mock.ANY), noop),
            parallel_sequence([
                [("create-server", lambda i: (StepResult.RETRY, []))]
            ]),
            (Log(msg='execute-convergence-results', fields=mock.ANY), noop)
        ]

        self.assertEqual(
            perform_sequence(self.get_seq() + sequence, self._invoke(plan)),
            (StepResult.RETRY, ScalingGroupStatus.ACTIVE))
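
Several examples wrap a canned effect in `TestStep(...)`; given how this test stubs `as_effect`, the helper is presumably just a step whose `as_effect()` returns that effect. A sketch of that assumed shape (not otter's verbatim code):

import attr
from effect import Effect

@attr.s
class TestStep(object):
    # Assumed shape: a convergence step that simply hands back the
    # effect it was constructed with.
    effect = attr.ib()

    def as_effect(self):
        return self.effect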
Example #3
 def test_lb_disappeared_during_feed_fetch(self):
     """
     If a load balancer gets deleted while fetching feeds, no nodes will be
     returned for it.
     """
     node21 = node('21', 'a21', condition='DRAINING', weight=None)
     seq = [
         lb_req('loadbalancers', True,
                {'loadBalancers': [{'id': 1}, {'id': 2}]}),
         parallel_sequence([
             [nodes_req(1, [node('11', 'a11', condition='DRAINING'),
                            node('12', 'a12')])],
             [nodes_req(2, [node21])],
             [lb_hm_req(1, {"type": "CONNECT"})],
             [lb_hm_req(2, {"type": "CONNECT"})]
         ]),
         parallel_sequence([
             [node_feed_req('1', '11', CLBNotFoundError(lb_id=u'1'))],
             [node_feed_req('2', '21', '22feed')]]),
     ]
     eff = get_clb_contents()
     self.assertEqual(
         perform_sequence(seq, eff),
         ([attr.assoc(CLBNode.from_node_json(2, node21), _drained_at=2.0)],
          {'2': CLB(True)}))
Example #4
 def test_lb_disappeared_during_node_fetch(self):
     """
     If a load balancer gets deleted while fetching nodes, no nodes will be
     returned for it.
     """
     seq = [
         lb_req('loadbalancers', True,
                {'loadBalancers': [{'id': 1}, {'id': 2}]}),
         parallel_sequence([
             [nodes_req(1, [node('11', 'a11')])],
             [lb_req('loadbalancers/2/nodes', True,
                     CLBNotFoundError(lb_id=u'2'))],
             [lb_hm_req(1, {"type": "CONNECT"})],
             [lb_req('loadbalancers/2/healthmonitor', True,
                     CLBNotFoundError(lb_id=u'2'))]
         ]),
         parallel_sequence([])  # No node feeds to fetch
     ]
     make_desc = partial(CLBDescription, port=20, weight=2,
                         condition=CLBNodeCondition.ENABLED,
                         type=CLBNodeType.PRIMARY)
     eff = get_clb_contents()
     self.assertEqual(
         perform_sequence(seq, eff),
         ([CLBNode(node_id='11', address='a11',
                   description=make_desc(lb_id='1'))],
          {'1': CLB(True)}))
Example #5
    def _test_deleting_group(self, step_result, with_delete, group_status):

        def _plan(dsg, *a, **kwargs):
            self.dsg = dsg
            return [TestStep(Effect("step"))]

        self.state.status = ScalingGroupStatus.DELETING
        sequence = [
            parallel_sequence([]),
            (Log('execute-convergence', mock.ANY), noop),
            parallel_sequence([
                [("step", lambda i: (step_result, []))]
            ]),
            (Log('execute-convergence-results', mock.ANY), noop),
        ]
        if with_delete:
            sequence.append((DeleteGroup(tenant_id=self.tenant_id,
                                         group_id=self.group_id), noop))
        self.assertEqual(
            # skipping cache update intents returned in get_seq()
            perform_sequence(self.get_seq(False) + sequence,
                             self._invoke(_plan)),
            (step_result, group_status))
        # desired capacity was changed to 0
        self.assertEqual(self.dsg.capacity, 0)
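
The helper above is parametrized by the step result, by whether a DeleteGroup intent is expected, and by the final group status. A hypothetical driver (the real callers are not part of this listing, and the expected status is an assumption):

    def test_deleting_group_on_retry(self):
        # Hypothetical caller of the helper above: a RETRY result is
        # assumed to leave the group DELETING and issue no DeleteGroup.
        self._test_deleting_group(
            StepResult.RETRY, False, ScalingGroupStatus.DELETING)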
Example #6
 def test_lb_disappeared_during_node_fetch(self):
     """
     If a load balancer gets deleted while fetching nodes, no nodes will be
     returned for it.
     """
     seq = [
         lb_req('loadbalancers', True,
                {'loadBalancers': [{'id': 1}, {'id': 2}]}),
         parallel_sequence([
             [nodes_req(1, [node('11', 'a11')])],
             [lb_req('loadbalancers/2/nodes', True,
                     CLBNotFoundError(lb_id=u'2'))],
             [lb_hm_req(1, {"type": "CONNECT"})],
             [lb_req('loadbalancers/2/healthmonitor', True,
                     CLBNotFoundError(lb_id=u'2'))]
         ]),
         parallel_sequence([])  # No node feeds to fetch
     ]
     make_desc = partial(CLBDescription, port=20, weight=2,
                         condition=CLBNodeCondition.ENABLED,
                         type=CLBNodeType.PRIMARY)
     eff = get_clb_contents()
     self.assertEqual(
         perform_sequence(seq, eff),
         ([CLBNode(node_id='11', address='a11',
                   description=make_desc(lb_id='1'))],
          {'1': CLB(True)}))
Example #7
 def test_lb_disappeared_during_feed_fetch(self):
     """
     If a load balancer gets deleted while fetching feeds, no nodes will be
     returned for it.
     """
     node21 = node('21', 'a21', condition='DRAINING', weight=None)
     seq = [
         lb_req('loadbalancers', True,
                {'loadBalancers': [{'id': 1}, {'id': 2}]}),
         parallel_sequence([
             [nodes_req(1, [node('11', 'a11', condition='DRAINING'),
                            node('12', 'a12')])],
             [nodes_req(2, [node21])],
             [lb_hm_req(1, {"type": "CONNECT"})],
             [lb_hm_req(2, {"type": "CONNECT"})]
         ]),
         parallel_sequence([
             [node_feed_req('1', '11', CLBNotFoundError(lb_id=u'1'))],
             [node_feed_req('2', '21', '22feed')]]),
     ]
     eff = get_clb_contents()
     self.assertEqual(
         perform_sequence(seq, eff),
         ([attr.assoc(CLBNode.from_node_json(2, node21), _drained_at=2.0)],
          {'2': CLB(True)}))
Example #8
 def test_no_draining(self):
     """
      Doesn't fetch feeds if all nodes are ENABLED
     """
     seq = [
         lb_req('loadbalancers', True,
                {'loadBalancers': [{'id': 1}, {'id': 2}]}),
         parallel_sequence([[nodes_req(1, [node('11', 'a11')])],
                            [nodes_req(2, [node('21', 'a21')])],
                            [lb_hm_req(1, {})], [lb_hm_req(2, {})]]),
         parallel_sequence([])  # No nodes to fetch
     ]
     make_desc = partial(CLBDescription, port=20, weight=2,
                         condition=CLBNodeCondition.ENABLED,
                         type=CLBNodeType.PRIMARY)
     eff = get_clb_contents()
     self.assertEqual(
         perform_sequence(seq, eff),
         ([CLBNode(node_id='11', address='a11',
                   description=make_desc(lb_id='1')),
           CLBNode(node_id='21', address='a21',
                   description=make_desc(lb_id='2'))],
          {'1': CLB(False), '2': CLB(False)}))
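
Judging by the expectations in this and the neighboring examples, the mapping part of the result marks an LB as health-monitored exactly when the healthmonitor body is non-empty: {} yields CLB(False), {"type": ...} yields CLB(True). A standalone sketch of that inferred rule, with a namedtuple standing in for the real CLB class:

from collections import namedtuple

CLB = namedtuple('CLB', ['health_monitor'])  # stand-in for the real class

def clb_from_hm_body(hm_json):
    # Health monitoring counts as enabled iff the body is non-empty.
    return CLB(bool(hm_json))

assert clb_from_hm_body({"type": "CONNECT"}) == CLB(True)
assert clb_from_hm_body({}) == CLB(False)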
Example #9
 def test_success(self):
     """
      Gets LB contents with correct drained_at values
     """
     node11 = node('11', 'a11', condition='DRAINING')
     node12 = node('12', 'a12')
     node21 = node('21', 'a21', weight=3)
     node22 = node('22', 'a22', weight=None, condition='DRAINING')
     seq = [
         lb_req('loadbalancers', True,
                {'loadBalancers': [{'id': 1}, {'id': 2}]}),
         parallel_sequence([[nodes_req(1, [node11, node12])],
                            [nodes_req(2, [node21, node22])],
                            [lb_hm_req(1, {"type": "CONNECT"})],
                            [lb_hm_req(2, {})]]),
         parallel_sequence([[node_feed_req('1', '11', '11feed')],
                            [node_feed_req('2', '22', '22feed')]]),
     ]
     eff = get_clb_contents()
     self.assertEqual(
         perform_sequence(seq, eff),
         ([attr.assoc(CLBNode.from_node_json(1, node11), _drained_at=1.0),
           CLBNode.from_node_json(1, node12),
           CLBNode.from_node_json(2, node21),
           attr.assoc(CLBNode.from_node_json(2, node22), _drained_at=2.0)],
          {'1': CLB(True), '2': CLB(False)}))
Example #10
 def test_success(self):
     """
      Gets LB contents with correct drained_at values
     """
     node11 = node('11', 'a11', condition='DRAINING')
     node12 = node('12', 'a12')
     node21 = node('21', 'a21', weight=3)
     node22 = node('22', 'a22', weight=None, condition='DRAINING')
     seq = [
         lb_req('loadbalancers', True,
                {'loadBalancers': [{'id': 1}, {'id': 2}]}),
         parallel_sequence([[nodes_req(1, [node11, node12])],
                            [nodes_req(2, [node21, node22])],
                            [lb_hm_req(1, {"type": "CONNECT"})],
                            [lb_hm_req(2, {})]]),
         parallel_sequence([[node_feed_req('1', '11', '11feed')],
                            [node_feed_req('2', '22', '22feed')]]),
     ]
     eff = get_clb_contents()
     self.assertEqual(
         perform_sequence(seq, eff),
         ([attr.assoc(CLBNode.from_node_json(1, node11), _drained_at=1.0),
           CLBNode.from_node_json(1, node12),
           CLBNode.from_node_json(2, node21),
           attr.assoc(CLBNode.from_node_json(2, node22), _drained_at=2.0)],
          {'1': CLB(True), '2': CLB(False)}))
Example #11
    def test_failure_unknown_reasons(self):
        """
        The group is put into ERROR state if any step returns FAILURE, and
        an unknown error is reported with a fixed default reason
        """
        exc_info = raise_to_exc_info(ValueError('wat'))

        def plan(*args, **kwargs):
            return [TestStep(Effect("fail"))]

        sequence = [
            parallel_sequence([]),
            (Log(msg='execute-convergence', fields=mock.ANY), noop),
            parallel_sequence([
                [("fail", lambda i: (StepResult.FAILURE,
                                     [ErrorReason.Exception(exc_info)]))]
            ]),
            (Log(msg='execute-convergence-results', fields=mock.ANY), noop),
            (UpdateGroupStatus(scaling_group=self.group,
                               status=ScalingGroupStatus.ERROR),
             noop),
            (Log('group-status-error',
                 dict(isError=True, cloud_feed=True, status='ERROR',
                      reasons=['Unknown error occurred'])),
             noop),
            (UpdateGroupErrorReasons(self.group, ['Unknown error occurred']),
             noop)
        ]
        self.assertEqual(
            perform_sequence(self.get_seq() + sequence, self._invoke(plan)),
            (StepResult.FAILURE, ScalingGroupStatus.ERROR))
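
`raise_to_exc_info` is a test utility for turning an exception into an exc_info triple; a plausible re-implementation (a sketch mirroring how `test_log_reasons`, later in this listing, builds the same triple by hand):

import sys

def raise_to_exc_info(exception):
    # Raise the exception and capture the resulting
    # (type, value, traceback) triple.
    try:
        raise exception
    except type(exception):
        return sys.exc_info()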
Example #12
 def test_no_nodes(self):
     """
      Return an empty list if there are LBs but no nodes in them
     """
     seq = [
         lb_req("loadbalancers", True, {"loadBalancers": [{"id": 1}, {"id": 2}]}),
         parallel_sequence([[nodes_req(1, [])], [nodes_req(2, [])]]),
         parallel_sequence([]),  # No nodes to fetch
     ]
     self.assertEqual(perform_sequence(seq, get_clb_contents()), [])
Example #13
 def test_no_lb(self):
     """
      Return an empty list if there are no LBs
     """
     seq = [
         lb_req('loadbalancers', True, {'loadBalancers': []}),
         parallel_sequence([]),  # No LBs to fetch
         parallel_sequence([]),  # No nodes to fetch
     ]
     eff = get_clb_contents()
     self.assertEqual(perform_sequence(seq, eff), [])
Example #14
    def test_success(self):
        """
        Executes the plan and returns SUCCESS when that's the most severe
        result.
        """
        dgs = get_desired_group_state(self.group_id, self.lc, 2)
        deleted = server(
            'c', ServerState.DELETED, servicenet_address='10.0.0.3',
            desired_lbs=self.desired_lbs,
            links=freeze([{'href': 'link3', 'rel': 'self'}]))
        self.servers += (deleted,)

        steps = [
            TestStep(
                Effect(
                    {'dgs': dgs,
                     'servers': self.servers,
                     'lb_nodes': (),
                     'now': 0})
                .on(lambda _: (StepResult.SUCCESS, [])))]

        def plan(dgs, servers, lb_nodes, now, build_timeout):
            self.assertEqual(build_timeout, 3600)
            return steps

        sequence = [
            parallel_sequence([]),
            (Log('execute-convergence',
                 dict(servers=self.servers, lb_nodes=(), steps=steps,
                      now=self.now, desired=dgs)), noop),
            parallel_sequence([
                [({'dgs': dgs, 'servers': self.servers,
                   'lb_nodes': (), 'now': 0},
                  noop)]
            ]),
            (Log('execute-convergence-results',
                 {'results': [{'step': steps[0],
                               'result': StepResult.SUCCESS,
                               'reasons': []}],
                  'worst_status': 'SUCCESS'}), noop),
            # Note that the servers arg contains only non-deleted servers
            (UpdateServersCache(
                "tenant-id", "group-id", self.now,
                [thaw(self.servers[0].json.set("_is_as_active", True)),
                 thaw(self.servers[1].json.set("_is_as_active", True))]),
             noop)
        ]

        # the cache written at the beginning contains all servers,
        # including the deleted one
        self.cache.append(thaw(deleted.json))

        self.assertEqual(
            perform_sequence(self.get_seq() + sequence, self._invoke(plan)),
            (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Example #15
 def test_no_nodes(self):
     """
      Return an empty list if there are LBs but no nodes in them
     """
     seq = [
         lb_req('loadbalancers', True,
                {'loadBalancers': [{'id': 1}, {'id': 2}]}),
         parallel_sequence([[nodes_req(1, [])], [nodes_req(2, [])]]),
         parallel_sequence([]),  # No nodes to fetch
     ]
     self.assertEqual(perform_sequence(seq, get_clb_contents()), [])
Example #16
    def test_returns_failure_set_error_state(self):
        """
        The group is put into ERROR state if any step returns FAILURE, and
        FAILURE is the final result of convergence.
        """
        exc_info = raise_to_exc_info(NoSuchCLBError(lb_id=u'nolb1'))
        exc_info2 = raise_to_exc_info(NoSuchCLBError(lb_id=u'nolb2'))

        def plan(*args, **kwargs):
            return [
                TestStep(Effect("success1")),
                TestStep(Effect("retry")),
                TestStep(Effect("success2")),
                TestStep(Effect("fail1")),
                TestStep(Effect("fail2")),
                TestStep(Effect("success3"))]

        def success(i):
            return StepResult.SUCCESS, []

        sequence = [
            parallel_sequence([]),
            (Log(msg='execute-convergence', fields=mock.ANY), noop),
            parallel_sequence([
                [("success1", success)],
                [("retry", lambda i: (StepResult.RETRY, []))],
                [("success2", success)],
                [("fail1", lambda i: (StepResult.FAILURE,
                                      [ErrorReason.Exception(exc_info)]))],
                [("fail2", lambda i: (StepResult.FAILURE,
                                      [ErrorReason.Exception(exc_info2)]))],
                [("success3", success)],
            ]),
            (Log(msg='execute-convergence-results', fields=mock.ANY), noop),
            (UpdateGroupStatus(scaling_group=self.group,
                               status=ScalingGroupStatus.ERROR),
             noop),
            (Log('group-status-error',
                 dict(isError=True, cloud_feed=True, status='ERROR',
                      reasons=['Cloud Load Balancer does not exist: nolb1',
                               'Cloud Load Balancer does not exist: nolb2'])),
             noop),
            (UpdateGroupErrorReasons(
                self.group,
                ['Cloud Load Balancer does not exist: nolb1',
                 'Cloud Load Balancer does not exist: nolb2']), noop)
        ]
        self.assertEqual(
            perform_sequence(self.get_seq() + sequence, self._invoke(plan)),
            (StepResult.FAILURE, ScalingGroupStatus.ERROR))
Example #17
    def test_ignore_disappearing_divergent_flag(self):
        """
        When the divergent flag disappears just as we're starting to converge,
        the group does not get converged and None is returned as its result.

        This happens when a concurrent convergence iteration is just finishing
        up.
        """
        eff = self._converge_all_groups(['00_g1'])

        def get_bound_sequence(tid, gid):
            # since this GetStat is going to return None, no more effects will
            # be run. This is the crux of what we're testing.
            znode = '/groups/divergent/{}_{}'.format(tid, gid)
            return [
                (GetStat(path=znode), noop),
                (Log('converge-divergent-flag-disappeared',
                     fields={'znode': znode}),
                 noop)]

        sequence = [
            (ReadReference(ref=self.currently_converging), lambda i: pset()),
            (Log('converge-all-groups',
                 dict(group_infos=[self.group_infos[0]],
                      currently_converging=[])),
             noop),
            (ReadReference(ref=self.recently_converged), lambda i: pmap()),
            (Func(time.time), lambda i: 100),
            parallel_sequence([
                [(BoundFields(mock.ANY, fields={'tenant_id': '00',
                                                'scaling_group_id': 'g1'}),
                  nested_sequence(get_bound_sequence('00', 'g1')))],
             ]),
        ]
        self.assertEqual(perform_sequence(sequence, eff), [None])
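
`BoundFields` is a wrapper intent that carries an inner effect, and `nested_sequence` builds a performer that runs that inner effect against its own expected sub-sequence. A self-contained sketch of the idea, where the `Wrapper` intent and `nested` helper are illustrative stand-ins rather than otter's actual implementations:

from effect import Effect, sync_perform
from effect.testing import SequenceDispatcher, perform_sequence

class Wrapper(object):
    # Illustrative wrapper intent carrying an inner effect.
    def __init__(self, effect):
        self.effect = effect

    def __eq__(self, other):
        # Match any Wrapper, much like mock.ANY in the example above.
        return isinstance(other, Wrapper)

def nested(seq):
    # Perform the wrapped effect against its own expected sub-sequence.
    return lambda intent: sync_perform(SequenceDispatcher(seq),
                                       intent.effect)

eff = Effect(Wrapper(Effect("inner").on(lambda r: r + "!")))
assert perform_sequence(
    [(Wrapper(None), nested([("inner", lambda i: "ok")]))], eff) == "ok!"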
Example #18
 def test_no_nodes(self):
     """
      Return an empty node list if there are LBs but no nodes in them
     """
     seq = [
         lb_req('loadbalancers', True,
                {'loadBalancers': [{'id': 1}, {'id': 2}]}),
         parallel_sequence([
             [nodes_req(1, [])], [nodes_req(2, [])],
             [lb_hm_req(1, {})], [lb_hm_req(2, {"type": "a"})]
         ]),
         parallel_sequence([]),  # No nodes to fetch
     ]
     self.assertEqual(
         perform_sequence(seq, get_clb_contents()),
         ([], {'1': CLB(False), '2': CLB(True)}))
Example #19
 def test_no_steps(self):
     """
     If state of world matches desired, no steps are executed, but the
     `active` servers are still updated, and SUCCESS is the return value.
     """
     for serv in self.servers:
         serv.desired_lbs = pset()
     sequence = [
         parallel_sequence([]),
         (Log('execute-convergence', mock.ANY), noop),
         (Log('execute-convergence-results',
              {'results': [], 'worst_status': 'SUCCESS'}), noop),
         (UpdateServersCache(
             "tenant-id", "group-id", self.now,
             [thaw(self.servers[0].json.set('_is_as_active', True)),
              thaw(self.servers[1].json.set("_is_as_active", True))]),
          noop)
     ]
     self.state_active = {
         'a': {'id': 'a', 'links': [{'href': 'link1', 'rel': 'self'}]},
         'b': {'id': 'b', 'links': [{'href': 'link2', 'rel': 'self'}]}
     }
     self.cache[0]["_is_as_active"] = True
     self.cache[1]["_is_as_active"] = True
     self.assertEqual(
         perform_sequence(self.get_seq() + sequence, self._invoke()),
         (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Example #20
 def test_reactivate_group_on_success_with_no_steps(self):
     """
     When the group started in ERROR state, and convergence succeeds, the
     group is put back into ACTIVE, even if there were no steps to execute.
     """
     self.manifest['state'].status = ScalingGroupStatus.ERROR
     for serv in self.servers:
         serv.desired_lbs = pset()
     sequence = [
         parallel_sequence([]),
         (Log(msg='execute-convergence', fields=mock.ANY), noop),
         (Log(msg='execute-convergence-results', fields=mock.ANY), noop),
         (UpdateGroupStatus(scaling_group=self.group,
                            status=ScalingGroupStatus.ACTIVE),
          noop),
         (Log('group-status-active',
              dict(cloud_feed=True, status='ACTIVE')),
          noop),
         (UpdateServersCache(
             "tenant-id", "group-id", self.now,
             [thaw(self.servers[0].json.set("_is_as_active", True)),
              thaw(self.servers[1].json.set("_is_as_active", True))]),
          noop)
     ]
     self.state_active = {
         'a': {'id': 'a', 'links': [{'href': 'link1', 'rel': 'self'}]},
         'b': {'id': 'b', 'links': [{'href': 'link2', 'rel': 'self'}]}
     }
     self.cache[0]["_is_as_active"] = True
     self.cache[1]["_is_as_active"] = True
     self.assertEqual(
         perform_sequence(self.get_seq() + sequence, self._invoke()),
         (StepResult.SUCCESS, ScalingGroupStatus.ACTIVE))
Example #21
 def test_dont_filter_out_non_recently_converged(self):
     """
     If a group was converged in the past but not recently, it will be
     cleaned from the ``recently_converged`` map, and it will be converged.
     """
     # g1: converged a while ago; divergent -> removed and converged
     # g2: converged recently; not divergent -> not converged
     # g3: converged a while ago; not divergent -> removed and not converged
     eff = self._converge_all_groups(['00_g1'])
     sequence = [
         (ReadReference(ref=self.currently_converging), lambda i: pset([])),
         (Log('converge-all-groups',
              dict(group_infos=[self.group_infos[0]],
                   currently_converging=[])),
          noop),
         (ReadReference(ref=self.recently_converged),
          lambda i: pmap({'g1': 4, 'g2': 10, 'g3': 0})),
         (Func(time.time), lambda i: 20),
         (ModifyReference(self.recently_converged,
                          match_func("literally anything",
                                     pmap({'g2': 10}))),
          noop),
         parallel_sequence([[self._expect_group_converged('00', 'g1')]])
     ]
     self.assertEqual(perform_sequence(sequence, eff), ['converged g1!'])
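
The fixture data implies a freshness window: at time 20, entries from times 4 and 0 are pruned while the one from time 10 survives, and in `test_filter_out_recently_converged` (later in this listing) an entry from time 5 is still "recent" at time 14. A sketch of the inferred pruning rule; the exact 10-second interval is deduced from the test data, not from code shown here:

def prune_recently_converged(recent, now, interval=10):
    # Keep only groups whose last convergence is at most `interval`
    # seconds old; older entries are dropped from the map.
    return {g: t for g, t in recent.items() if now - t <= interval}

assert prune_recently_converged({'g1': 4, 'g2': 10, 'g3': 0}, 20) == \
    {'g2': 10}
assert prune_recently_converged({'g1': 5}, 14) == {'g1': 5}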
Example #22
 def test_lb_disappeared_during_feed_fetch(self):
     """
     If a load balancer gets deleted while fetching feeds, no nodes will be
     returned for it.
     """
     node21 = node("21", "a21", condition="DRAINING", weight=None)
     seq = [
         lb_req("loadbalancers", True, {"loadBalancers": [{"id": 1}, {"id": 2}]}),
         parallel_sequence(
             [[nodes_req(1, [node("11", "a11", condition="DRAINING"), node("12", "a12")])], [nodes_req(2, [node21])]]
         ),
         parallel_sequence(
             [[node_feed_req(1, "11", CLBNotFoundError(lb_id=u"1"))], [node_feed_req(2, "21", "22feed")]]
         ),
     ]
     eff = get_clb_contents()
     self.assertEqual(perform_sequence(seq, eff), [assoc_obj(CLBNode.from_node_json(2, node21), drained_at=2.0)])
Example #23
    def test_log_reasons(self):
        """When a step doesn't succeed, useful information is logged."""
        try:
            1 / 0
        except ZeroDivisionError:
            exc_info = sys.exc_info()

        step = TestStep(Effect("step_intent"))

        def plan(*args, **kwargs):
            return pbag([step])

        exc_msg = "ZeroDivisionError('integer division or modulo by zero',)"
        tb_msg = ''.join(traceback.format_exception(*exc_info))
        expected_fields = {
            'results': [
                {
                    'step': step,
                    'result': StepResult.RETRY,
                    'reasons': [
                        {'exception': exc_msg, 'traceback': tb_msg},
                        {'string': 'foo'},
                        {'foo': 'bar'}
                    ]
                }
            ],
            'worst_status': 'RETRY'}
        sequence = [
            parallel_sequence([]),
            (Log(msg='execute-convergence', fields=mock.ANY), noop),
            parallel_sequence([
                [("step_intent", lambda i: (
                     StepResult.RETRY, [
                         ErrorReason.Exception(exc_info),
                         ErrorReason.String('foo'),
                         ErrorReason.Structured({'foo': 'bar'})]))]
            ]),
            (Log(msg='execute-convergence-results', fields=expected_fields),
             noop)
        ]

        self.assertEqual(
            perform_sequence(self.get_seq() + sequence, self._invoke(plan)),
            (StepResult.RETRY, ScalingGroupStatus.ACTIVE))
Example #24
 def test_no_draining(self):
     """
      Doesn't fetch feeds if all nodes are ENABLED
     """
     seq = [
         lb_req("loadbalancers", True, {"loadBalancers": [{"id": 1}, {"id": 2}]}),
         parallel_sequence([[nodes_req(1, [node("11", "a11")])], [nodes_req(2, [node("21", "a21")])]]),
         parallel_sequence([]),  # No nodes to fetch
     ]
     make_desc = partial(
         CLBDescription, port=20, weight=2, condition=CLBNodeCondition.ENABLED, type=CLBNodeType.PRIMARY
     )
     eff = get_clb_contents()
     self.assertEqual(
         perform_sequence(seq, eff),
         [
             CLBNode(node_id="11", address="a11", description=make_desc(lb_id="1")),
             CLBNode(node_id="21", address="a21", description=make_desc(lb_id="2")),
         ],
     )
Example #25
 def test_no_draining(self):
     """
      Doesn't fetch feeds if all nodes are ENABLED
     """
     seq = [
         lb_req('loadbalancers', True,
                {'loadBalancers': [{'id': 1}, {'id': 2}]}),
         parallel_sequence([[nodes_req(1, [node('11', 'a11')])],
                            [nodes_req(2, [node('21', 'a21')])]]),
         parallel_sequence([])  # No nodes to fetch
     ]
     make_desc = partial(CLBDescription, port=20, weight=2,
                         condition=CLBNodeCondition.ENABLED,
                         type=CLBNodeType.PRIMARY)
     eff = get_clb_contents()
     self.assertEqual(
         perform_sequence(seq, eff),
         [CLBNode(node_id='11', address='a11',
                  description=make_desc(lb_id='1')),
          CLBNode(node_id='21', address='a21',
                  description=make_desc(lb_id='2'))])
Example #26
 def test_lb_disappeared_during_node_fetch(self):
     """
     If a load balancer gets deleted while fetching nodes, no nodes will be
     returned for it.
     """
     seq = [
         lb_req("loadbalancers", True, {"loadBalancers": [{"id": 1}, {"id": 2}]}),
         parallel_sequence(
             [
                 [nodes_req(1, [node("11", "a11")])],
                 [lb_req("loadbalancers/2/nodes", True, CLBNotFoundError(lb_id=u"2"))],
             ]
         ),
         parallel_sequence([]),  # No nodes to fetch
     ]
     make_desc = partial(
         CLBDescription, port=20, weight=2, condition=CLBNodeCondition.ENABLED, type=CLBNodeType.PRIMARY
     )
     eff = get_clb_contents()
     self.assertEqual(
         perform_sequence(seq, eff), [CLBNode(node_id="11", address="a11", description=make_desc(lb_id="1"))]
     )
Example #27
 def test_success(self):
     """
      Gets LB contents with correct drained_at values
     """
     node11 = node("11", "a11", condition="DRAINING")
     node12 = node("12", "a12")
     node21 = node("21", "a21", weight=3)
     node22 = node("22", "a22", weight=None, condition="DRAINING")
     seq = [
         lb_req("loadbalancers", True, {"loadBalancers": [{"id": 1}, {"id": 2}]}),
         parallel_sequence([[nodes_req(1, [node11, node12])], [nodes_req(2, [node21, node22])]]),
         parallel_sequence([[node_feed_req(1, "11", "11feed")], [node_feed_req(2, "22", "22feed")]]),
     ]
     eff = get_clb_contents()
     self.assertEqual(
         perform_sequence(seq, eff),
         [
             assoc_obj(CLBNode.from_node_json(1, node11), drained_at=1.0),
             CLBNode.from_node_json(1, node12),
             CLBNode.from_node_json(2, node21),
             assoc_obj(CLBNode.from_node_json(2, node22), drained_at=2.0),
         ],
     )
Example #28
    def test_returns_retry(self):
        """
        If a step that results in RETRY is returned, and there are no FAILUREs,
        then the ultimate result of executing convergence will be a RETRY.
        """
        def plan(*args, **kwargs):
            return [
                TestStep(Effect("step1")),
                TestStep(Effect("retry"))]

        sequence = [
            parallel_sequence([]),
            (Log('execute-convergence', mock.ANY), noop),
            parallel_sequence([
                [("step1", lambda i: (StepResult.SUCCESS, []))],
                [("retry", lambda i: (StepResult.RETRY,
                                      [ErrorReason.String('mywish')]))],
            ]),
            (Log('execute-convergence-results', mock.ANY), noop)
        ]
        self.assertEqual(
            perform_sequence(self.get_seq() + sequence, self._invoke(plan)),
            (StepResult.RETRY, ScalingGroupStatus.ACTIVE))
Example #29
 def test_filter_out_recently_converged(self):
     """
     If a group was recently converged, it will not be converged again.
     """
     eff = self._converge_all_groups(['00_g1'])
     sequence = [
         (ReadReference(ref=self.currently_converging), lambda i: pset([])),
         (Log('converge-all-groups',
              dict(group_infos=[self.group_infos[0]],
                   currently_converging=[])),
          noop),
         (ReadReference(ref=self.recently_converged),
          lambda i: pmap({'g1': 5})),
         (Func(time.time), lambda i: 14),
         parallel_sequence([])  # No groups to converge
     ]
     self.assertEqual(perform_sequence(sequence, eff), [])
Example #30
 def test_filter_out_currently_converging(self):
     """
     If a group is already being converged, its dirty flag is not statted
     and convergence is not run for it.
     """
     eff = self._converge_all_groups(['00_g1', '01_g2'])
     sequence = [
         (ReadReference(ref=self.currently_converging),
          lambda i: pset(['g1'])),
         (Log('converge-all-groups',
              dict(group_infos=[self.group_infos[1]],
                   currently_converging=['g1'])),
          noop),
         (ReadReference(ref=self.recently_converged), lambda i: pmap()),
         (Func(time.time), lambda i: 100),
         parallel_sequence([[self._expect_group_converged('01', 'g2')]])
     ]
     self.assertEqual(perform_sequence(sequence, eff), ['converged g2!'])
Example #31
 def test_converge_all_groups(self):
     """
     Fetches divergent groups and runs converge_one_group for each one
     needing convergence.
     """
     eff = self._converge_all_groups(['00_g1', '01_g2'])
     sequence = [
         (ReadReference(ref=self.currently_converging),
          lambda i: pset()),
         (Log('converge-all-groups',
              dict(group_infos=self.group_infos, currently_converging=[])),
          noop),
         (ReadReference(self.recently_converged), lambda i: pmap()),
         (Func(time.time), lambda i: 100),
         parallel_sequence([[self._expect_group_converged('00', 'g1')],
                            [self._expect_group_converged('01', 'g2')]])
     ]
     self.assertEqual(perform_sequence(sequence, eff),
                      ['converged g1!', 'converged g2!'])
Example #32
 def get_seq(self, with_cache=True):
     exec_seq = [
         parallel_sequence([
             [(self.gsgi, lambda i: self.gsgi_result)],
             [(("gacd", self.tenant_id, self.group_id, self.now),
              lambda i: (self.servers, ()))]
         ])
     ]
     if with_cache:
         exec_seq.append(
             (UpdateServersCache(
                 self.tenant_id, self.group_id, self.now, self.cache),
              noop)
         )
     return [
         (Log("begin-convergence", {}), noop),
         (Func(datetime.utcnow), lambda i: self.now),
         (MsgWithTime("gather-convergence-data", mock.ANY),
          nested_sequence(exec_seq))
     ]
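
`get_seq` supplies the expectation prefix shared by the convergence tests above, which each extend it with `self.get_seq() + sequence`; the composition is plain list concatenation. A toy illustration (the `auth`/`get` intents and `fetch` function are invented):

from effect import Effect
from effect.testing import perform_sequence

def common_prefix():
    # Shared expectations, analogous to get_seq() above.
    return [("auth", lambda i: "token")]

def fetch():
    # A callback that returns an Effect chains it automatically.
    return Effect("auth").on(lambda t: Effect(("get", t)))

seq = common_prefix() + [(("get", "token"), lambda i: "body")]
assert perform_sequence(seq, fetch()) == "body"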
Example #33
    def test_first_error_extraction(self):
        """
        If the GetScalingGroupInfo effect fails, its exception is raised
        directly, without the FirstError wrapper.
        """
        # Perform the GetScalingGroupInfo by raising an exception
        sequence = [
            (Log("begin-convergence", {}), noop),
            (Func(datetime.utcnow), lambda i: self.now),
            (MsgWithTime("gather-convergence-data", mock.ANY),
             nested_sequence([
                parallel_sequence([
                    [(self.gsgi, lambda i: raise_(RuntimeError('foo')))],
                    [("anything", noop)]
                ])
             ]))
        ]

        # And make sure that exception isn't wrapped in FirstError.
        e = self.assertRaises(
            RuntimeError, perform_sequence, sequence, self._invoke(),
            test_dispatcher())
        self.assertEqual(str(e), 'foo')
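
`raise_` appears to be a helper that simply raises its argument when invoked, so the scripted performer fails exactly when the GetScalingGroupInfo intent is performed, and `test_dispatcher()` is supplied as the fallback dispatcher for intents outside the scripted sequence. A minimal sketch of how an exception raised by a performer propagates out of `perform_sequence` (the `boom` intent is illustrative):

from effect import Effect
from effect.testing import perform_sequence

def raise_(exception):
    # Sketch of the helper used above: raise when invoked.
    raise exception

try:
    perform_sequence([("boom", lambda i: raise_(RuntimeError("foo")))],
                     Effect("boom"))
except RuntimeError as e:
    assert str(e) == "foo"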