Esempio n. 1
0
    def sample(self, sentence, phrase, h):
        """Resample the linear order of the dependents of head ``h``.

        Extracts the head and its left/right dependent multiset from the
        conditioned sequence, then samples a new left ordering and a new
        right ordering one dependent at a time, weighting each candidate
        by dynamic-programming permutation normalizers (the Z terms
        below).  Returns the result of ``o.sample_indices_in`` applied to
        the original and reordered sequences (presumably index positions
        of the new order in the old one -- confirm against caller).
        """
        # use dynamic programming to find the probability of a given context C
        # and generated multiset A:
        #
        # p(a | C, A) = ( p(a|C) + Z_{A-{a} | trunc(C, N-1)} ) / Z_{A | C}
        #
        sequence = self.depgen.conditioning_fn(sentence, phrase, h)
        h_index = cond.head_index(sequence)
        head, (l_deps, r_deps) = pop(sequence, h_index)
        deps = l_deps + r_deps

        def gen_left_probs(context, xs_bag):
            # Yield (candidate, unnormalized weight) pairs for the next
            # left dependent, including HALT (stop generating on the left).
            left_score = self.depgen.left_distro.rfs[head].score
            right_score = self.depgen.right_distro.rfs[head].score
            n = self.depgen.n
            # Halting on the left must account for the normalizer over all
            # right-side permutations of the remaining bag.
            yield HALT, exp(left_score(HALT, context)
                            + ngram_perm_norm_from((right_score, n, context, xs_bag)))
            for x, rest in rfutils.thing_and_rest(xs_bag): # potential inefficiency
                next_context = truncate(context + (x,), n - 1)
                # Z term over the remaining multiset given the new context.
                A_rest = double_ngram_left_perm_norm_from((left_score,
                                                          right_score,
                                                          n,
                                                          next_context,
                                                          pbag(rest))) 
                yield x, exp(left_score(x, context) + A_rest)

        def gen_right_probs(context, xs_bag):
            # Yield (candidate, unnormalized weight) pairs for the next
            # right dependent; HALT is only offered once the bag is empty,
            # so the right side exhausts all remaining dependents.
            score = self.depgen.right_distro.rfs[head].score
            n = self.depgen.n
            if not xs_bag:
                yield HALT, exp(score(HALT, context))
            else:
                for x, rest in rfutils.thing_and_rest(xs_bag): # potential inefficiency
                    next_context = truncate(context + (x,), n - 1)
                    A_rest = ngram_perm_norm_from((score, n, next_context, pbag(rest))) 
                    yield x, exp(score(x, context) + A_rest)

        def sample_part(get_probs, context, xs_bag):
            # Draw dependents one at a time until HALT is chosen or the
            # bag is exhausted, sliding the n-gram context window along.
            while xs_bag:
                probs = get_probs(context, xs_bag)
                x = weighted_choice(list(probs))
                if x is HALT:
                    break
                else:
                    yield x
                    # Window length stays n - 1 (the length of `start`).
                    context = truncate(context + (x,), len(context))
                    xs_bag = xs_bag.remove(x)

        deps_bag = pbag(deps)
        start = (HALT,) * (self.depgen.n - 1)
        left = list(sample_part(gen_left_probs, start, deps_bag))
        remainder = deps_bag - pbag(left)
        right = sample_part(gen_right_probs, start, remainder)

        # reversed(left) restores surface order (left dependents appear to
        # be generated outward from the head -- TODO confirm).
        new_sequence = list(it.chain(reversed(left), [head], right))

        # NOTE(review): normalizer caches are cleared after every call --
        # presumably entries are keyed on this call's score closures and
        # would be stale (or leak) otherwise; confirm.
        rfutils.get_cache(double_ngram_left_perm_norm_from).clear()
        rfutils.get_cache(ngram_perm_norm_from).clear()
        
        return o.sample_indices_in(sequence, new_sequence)
Esempio n. 2
0
    def test_optimize_clb_adds_maintain_unique_ports(self):
        """
        Two adds for the same address and LB ID but different ports are
        merged into one step that keeps both port configurations.
        """
        desc_80 = CLBDescription(lb_id='5', port=80)
        desc_8080 = CLBDescription(lb_id='5', port=8080)
        steps = pbag([
            AddNodesToCLB(lb_id='5',
                          address_configs=s(('1.1.1.1', desc_80))),
            AddNodesToCLB(lb_id='5',
                          address_configs=s(('1.1.1.1', desc_8080)))])

        expected = pbag([
            AddNodesToCLB(lb_id='5',
                          address_configs=s(('1.1.1.1', desc_80),
                                            ('1.1.1.1', desc_8080)))])
        self.assertEqual(optimize_steps(steps), expected)
Esempio n. 3
0
    def test_optimize_clb_adds_maintain_unique_ports(self):
        """
        Multiple ports may target a single address on one load balancer;
        the optimizer keeps every (address, port) pair when merging adds.
        """
        steps = pbag([
            AddNodesToCLB(
                lb_id='5',
                address_configs=s(
                    ('1.1.1.1', CLBDescription(lb_id='5', port=80)))),
            AddNodesToCLB(
                lb_id='5',
                address_configs=s(
                    ('1.1.1.1', CLBDescription(lb_id='5', port=8080)))),
        ])

        result = optimize_steps(steps)
        self.assertEqual(
            result,
            pbag([AddNodesToCLB(
                lb_id='5',
                address_configs=s(
                    ('1.1.1.1', CLBDescription(lb_id='5', port=80)),
                    ('1.1.1.1', CLBDescription(lb_id='5', port=8080))))]))
Esempio n. 4
0
 def test_filters_clb_types(self):
     """
     Only one CLB step is returned per CLB
     """
     add_step = AddNodesToCLB(
         lb_id='5',
         address_configs=s(('1.1.1.1', CLBDescription(lb_id='5', port=80))))
     remove_step = RemoveNodesFromCLB(lb_id='5', node_ids=s('1'))
     # A step the optimizer cannot combine with anything.
     create_step = CreateServer(server_config=pmap({}))
     steps = pbag([add_step, remove_step, create_step])
     # `one_clb_step` may keep either CLB step for lb '5'; since pbag is
     # unordered, either of these two bags is an acceptable result.
     self.assertEqual(
         matches(MatchesAny(Equals(pbag([add_step, create_step])),
                            Equals(pbag([remove_step, create_step])))),
         optimize_steps(steps))
Esempio n. 5
0
 def test_filters_clb_types(self):
     """
     Only one CLB step is returned per CLB
     """
     clb_add = AddNodesToCLB(
         lb_id='5',
         address_configs=s(('1.1.1.1', CLBDescription(lb_id='5', port=80))))
     clb_remove = RemoveNodesFromCLB(lb_id='5', node_ids=s('1'))
     other = CreateServer(server_config=pmap({}))  # unoptimizable step
     steps = pbag([clb_add, clb_remove, other])
     # Which CLB step survives depends on how `one_clb_step` iterates the
     # (unordered) pbag, so accept either outcome.
     possible = MatchesAny(Equals(pbag([clb_add, other])),
                           Equals(pbag([clb_remove, other])))
     self.assertEqual(matches(possible), optimize_steps(steps))
Esempio n. 6
0
    def test_mixed_optimization(self):
        """
        Mixes of optimizable and unoptimizable steps still get optimized
        correctly.
        """
        desc5 = CLBDescription(lb_id='5', port=80)
        desc6 = CLBDescription(lb_id='6', port=80)
        steps = pbag([
            # CLB adds
            AddNodesToCLB(lb_id='5', address_configs=s(('1.1.1.1', desc5))),
            AddNodesToCLB(lb_id='5', address_configs=s(('1.1.1.2', desc5))),
            AddNodesToCLB(lb_id='6', address_configs=s(('1.1.1.1', desc6))),
            AddNodesToCLB(lb_id='6', address_configs=s(('1.1.1.2', desc6))),
            # CLB removes
            RemoveNodesFromCLB(lb_id='5', node_ids=s('1')),
            RemoveNodesFromCLB(lb_id='5', node_ids=s('2')),
            RemoveNodesFromCLB(lb_id='6', node_ids=s('3')),
            RemoveNodesFromCLB(lb_id='6', node_ids=s('4')),
            # Unoptimizable steps
            CreateServer(server_config=pmap({})),
        ])

        expected = pbag([
            # Adds are merged per load balancer...
            AddNodesToCLB(lb_id='5',
                          address_configs=s(('1.1.1.1', desc5),
                                            ('1.1.1.2', desc5))),
            AddNodesToCLB(lb_id='6',
                          address_configs=s(('1.1.1.1', desc6),
                                            ('1.1.1.2', desc6))),
            # ...and so are removes.
            RemoveNodesFromCLB(lb_id='5', node_ids=s('1', '2')),
            RemoveNodesFromCLB(lb_id='6', node_ids=s('3', '4')),
            # Unoptimizable steps pass through untouched.
            CreateServer(server_config=pmap({})),
        ])
        self.assertEqual(optimize_steps(steps), expected)
Esempio n. 7
0
    def test_mixed_optimization(self):
        """
        A bag mixing CLB adds, CLB removes, and steps the optimizer cannot
        touch is optimized per load balancer; the rest is left untouched.
        """
        def add(lb, address):
            # Local shorthand for an AddNodesToCLB with one config.
            return AddNodesToCLB(
                lb_id=lb,
                address_configs=s(
                    (address, CLBDescription(lb_id=lb, port=80))))

        steps = pbag([
            add('5', '1.1.1.1'),
            add('5', '1.1.1.2'),
            add('6', '1.1.1.1'),
            add('6', '1.1.1.2'),
            RemoveNodesFromCLB(lb_id='5', node_ids=s('1')),
            RemoveNodesFromCLB(lb_id='5', node_ids=s('2')),
            RemoveNodesFromCLB(lb_id='6', node_ids=s('3')),
            RemoveNodesFromCLB(lb_id='6', node_ids=s('4')),
            CreateServer(server_config=pmap({})),  # unoptimizable
        ])

        self.assertEqual(
            optimize_steps(steps),
            pbag([
                AddNodesToCLB(
                    lb_id='5',
                    address_configs=s(
                        ('1.1.1.1', CLBDescription(lb_id='5', port=80)),
                        ('1.1.1.2', CLBDescription(lb_id='5', port=80)))),
                AddNodesToCLB(
                    lb_id='6',
                    address_configs=s(
                        ('1.1.1.1', CLBDescription(lb_id='6', port=80)),
                        ('1.1.1.2', CLBDescription(lb_id='6', port=80)))),
                RemoveNodesFromCLB(lb_id='5', node_ids=s('1', '2')),
                RemoveNodesFromCLB(lb_id='6', node_ids=s('3', '4')),
                CreateServer(server_config=pmap({}))
            ]))
Esempio n. 8
0
File: actor.py Progetto: tlvu/mochi
def decode(obj):
    """Recursively convert a msgpack-deserialized object graph into
    persistent (pyrsistent) data structures.

    ExtType payloads are unpacked once and dispatched on their type code:
    TYPE_PSET / TYPE_PLIST / TYPE_PBAG become pset / plist / pbag; any
    other code is treated as ``(module_name, class_name, *args)`` and the
    named class is called with the decoded args.  Tuples become pvectors,
    dicts become pmaps (keys and values both decoded); anything else is
    returned unchanged.
    """
    if isinstance(obj, ExtType):
        # Every ExtType branch unpacked obj.data identically; do it once.
        unpacked_data = unpackb(obj.data, use_list=False, encoding='utf-8')
        if obj.code == TYPE_PSET:
            return pset(decode(item) for item in unpacked_data)
        if obj.code == TYPE_PLIST:
            return plist(decode(item) for item in unpacked_data)
        if obj.code == TYPE_PBAG:
            return pbag(decode(item) for item in unpacked_data)
        # Fallback: payload is (module_name, class_name, *constructor args).
        module_name, class_name, *data = unpacked_data
        cls = getattr(sys.modules[module_name], class_name)
        return cls(*(decode(item) for item in data))
    if isinstance(obj, tuple):
        return pvector(decode(item) for item in obj)
    if isinstance(obj, dict):
        # items() avoids the double lookup of iterating keys() then
        # indexing; a comprehension replaces the manual build loop.
        return pmap({decode(key): decode(value)
                     for key, value in obj.items()})
    return obj
Esempio n. 9
0
    def test_optimize_clb_removes(self):
        """
        Aggregation is done on a per-load-balancer basis when remove nodes from
        a CLB.
        """
        removes = [RemoveNodesFromCLB(lb_id='5', node_ids=s(node))
                   for node in ('1', '2', '3', '4')]
        expected = pbag([
            RemoveNodesFromCLB(lb_id='5', node_ids=s('1', '2', '3', '4'))])
        self.assertEqual(optimize_steps(pbag(removes)), expected)
Esempio n. 10
0
    def test_plan(self):
        """An optimized plan is returned. Steps are limited."""
        desc = CLBDescription(lb_id='5', port=80)
        desired_lbs = s(desc)
        group_state = DesiredGroupState(
            server_config={}, capacity=20, desired_lbs=desired_lbs)
        servers = set([
            server('server1', state=ServerState.ACTIVE,
                   servicenet_address='1.1.1.1', desired_lbs=desired_lbs),
            server('server2', state=ServerState.ACTIVE,
                   servicenet_address='1.2.3.4', desired_lbs=desired_lbs)])

        result = plan(group_state, servers, set(), 0, build_timeout=3600)

        # One merged CLB add plus ten CreateServer steps (limited).
        expected = pbag(
            [AddNodesToCLB(
                lb_id='5',
                address_configs=s(('1.1.1.1', desc), ('1.2.3.4', desc)))] +
            [CreateServer(server_config=pmap({}))] * 10)
        self.assertEqual(result, expected)
Esempio n. 11
0
    def test_draining_server_has_all_enabled_lb_set_to_draining(self):
        """
        If a draining server is associated with any load balancers, those
        load balancer nodes will be set to draining and the server is not
        deleted.  The metadata on the server is not re-set to draining.

        This can happen for instance if the server was supposed to be deleted
        in a previous convergence run, and the server metadata was set but
        the load balancers update failed.

        Or if the server is set to be manually deleted via the API.
        """
        group_state = DesiredGroupState(server_config={}, capacity=0,
                                        draining_timeout=10.0)
        servers = set([server('abc', state=ServerState.ACTIVE,
                              metadata=dict([DRAINING_METADATA]),
                              servicenet_address='1.1.1.1',
                              desired_lbs=s(self.clb_desc, self.rcv3_desc))])
        lb_nodes = set([CLBNode(node_id='1', address='1.1.1.1',
                                description=self.clb_desc)])
        expected = pbag([
            ChangeCLBNode(lb_id='1', node_id='1', weight=1,
                          condition=CLBNodeCondition.DRAINING,
                          type=CLBNodeType.PRIMARY)])
        self.assertEqual(converge(group_state, servers, lb_nodes, 1),
                         expected)
Esempio n. 12
0
    def wait_for_stack_list(self, expected_states, timeout=180, period=10):
        """
        Poll the stack list until the bag of stack statuses equals
        ``expected_states``, retrying every ``period`` seconds until
        ``timeout`` elapses.
        """
        # Compare as bags: status multiplicities matter, order does not.
        expected = pbag(expected_states)

        def check(content):
            actual = pbag([stack['stack_status']
                           for stack in content['stacks']])
            if actual == expected:
                msg("Success: desired group state reached:\n{}".format(
                    expected))
                return self.rcs
            msg("Waiting for group {} to reach desired group state.\n"
                "{} (actual) {} (expected)".format(self.group.group_id,
                                                   actual,
                                                   expected))
            raise TransientRetryError(
                "Group states of {} did not match expected {})".format(
                    actual, expected))

        def poll():
            return self.get_stack_list().addCallback(check)

        return retry_and_timeout(
            poll,
            timeout,
            can_retry=terminal_errors_except(TransientRetryError),
            next_interval=repeating_interval(period),
            clock=reactor,
            deferred_description=(
                "Waiting for group {} to reach state {}".format(
                    self.group.group_id, str(expected))))
Esempio n. 13
0
    def test_active_server_is_drained_even_if_all_already_in_draining(self):
        """
        If an active server is attached to load balancers, and all those load
        balancer nodes are already in draining but it cannot be removed yet,
        the server is set to draining state even though no load balancer
        actions need to be performed.

        This can happen for instance if the server was supposed to be deleted
        in a previous convergence run, and the load balancers were set to
        draining but setting the server metadata failed.
        """
        draining_desc = copy_clb_desc(self.clb_desc,
                                      condition=CLBNodeCondition.DRAINING)
        servers = set([server('abc', state=ServerState.ACTIVE,
                              servicenet_address='1.1.1.1',
                              desired_lbs=s(self.clb_desc, self.rcv3_desc))])
        lb_nodes = set([CLBNode(node_id='1', address='1.1.1.1',
                                description=draining_desc,
                                connections=1, drained_at=0.0)])
        result = converge(
            DesiredGroupState(server_config={}, capacity=0,
                              draining_timeout=10.0),
            servers, lb_nodes, 1)
        self.assertEqual(
            result,
            pbag([SetMetadataItemOnServer(server_id='abc',
                                          key=DRAINING_METADATA[0],
                                          value=DRAINING_METADATA[1]),
                  self.clstep]))
Esempio n. 14
0
    def test_same_clb_multiple_ports(self):
        """
        It's possible to have the same cloud load balancer using multiple ports
        on the host.

        (use case: running multiple single-threaded server processes on a
        machine)
        """
        desc_8080 = CLBDescription(lb_id='5', port=8080)
        desc_8081 = CLBDescription(lb_id='5', port=8081)
        result = converge(
            DesiredGroupState(server_config={}, capacity=1),
            set([server('abc', ServerState.ACTIVE,
                        servicenet_address='1.1.1.1',
                        desired_lbs=s(desc_8080, desc_8081))]),
            set(),
            0)
        # One add step per port, both against the same load balancer.
        self.assertEqual(
            result,
            pbag([AddNodesToCLB(
                      lb_id='5',
                      address_configs=s(('1.1.1.1', desc_8080))),
                  AddNodesToCLB(
                      lb_id='5',
                      address_configs=s(('1.1.1.1', desc_8081)))]))
Esempio n. 15
0
 def test_draining_server_can_be_deleted_if_all_lbs_can_be_removed(self):
     """
     If draining server can be removed from all the load balancers, the
     server can be deleted.
     """
     servers = set([server('abc', state=ServerState.ACTIVE,
                           metadata=dict([DRAINING_METADATA]),
                           servicenet_address='1.1.1.1',
                           desired_lbs=s(self.clb_desc, self.rcv3_desc))])
     lb_nodes = set([
         CLBNode(node_id='1', address='1.1.1.1',
                 description=copy_clb_desc(
                     self.clb_desc,
                     condition=CLBNodeCondition.DRAINING)),
         RCv3Node(node_id='2', cloud_server_id='abc',
                  description=self.rcv3_desc)])
     expected = pbag([
         DeleteServer(server_id='abc'),
         RemoveNodesFromCLB(lb_id='1', node_ids=s('1')),
         BulkRemoveFromRCv3(lb_node_pairs=s(
             (self.rcv3_desc.lb_id, 'abc')))])
     self.assertEqual(
         converge(DesiredGroupState(server_config={}, capacity=0),
                  servers, lb_nodes, 0),
         expected)
Esempio n. 16
0
    def test_scale_down_order(self):
        """Preferred order of servers to delete when scaling down:

        - WAIT_WITH_TIMEOUT
        - WAIT
        - AVOID_REPLACING
        - CONSIDER_AVAILABLE

        Every ordered pair of destinies is checked: a server whose destiny
        appears earlier in the list is deleted before one appearing later.
        """
        # Destinies from most-preferred-to-delete to least.
        order = (Destiny.WAIT_WITH_TIMEOUT, Destiny.WAIT,
                 Destiny.AVOID_REPLACING, Destiny.CONSIDER_AVAILABLE)
        # A representative server state mapping to each destiny.
        examples = {Destiny.WAIT_WITH_TIMEOUT: ServerState.BUILD,
                    Destiny.WAIT: ServerState.HARD_REBOOT,
                    Destiny.AVOID_REPLACING: ServerState.RESCUE,
                    Destiny.CONSIDER_AVAILABLE: ServerState.ACTIVE}
        for combo in combinations(order, 2):
            before, after = combo
            also = []
            if after == Destiny.WAIT:
                # If we're waiting for some other servers we need to also
                # expect a ConvergeLater
                also = [ConvergeLater(reasons=[
                    ErrorReason.String(
                        'waiting for temporarily unavailable server to become '
                        'ACTIVE')],
                    limited=True)]

            # Capacity 2 with three servers forces exactly one deletion;
            # 'def' carries the more-preferred destiny so it should go.
            self.assertEqual(
                converge(
                    DesiredGroupState(server_config={}, capacity=2),
                    set([server('abc', examples[after], created=0),
                         server('def', examples[before], created=1),
                         server('ghi', examples[after], created=2)]),
                    set(),
                    0),
                pbag([DeleteServer(server_id='def')] + also))
Esempio n. 17
0
    def test_change_lb_node(self):
        """
        If a desired CLB mapping is in the set of current configs,
        but the configuration is wrong, `converge_lb_state` returns a
        :class:`ChangeCLBNode` object.  RCv3 nodes cannot be changed - they are
        either right or wrong.
        """
        clb_desc = CLBDescription(lb_id='5', port=80)
        rcv3_desc = RCv3Description(
            lb_id='c6fe49fa-114a-4ea4-9425-0af8b30ff1e7')
        # The current CLB node maps the right address but carries the
        # wrong weight (5 instead of the desired 1).
        current = set([
            CLBNode(node_id='123', address='1.1.1.1',
                    description=copy_clb_desc(clb_desc, weight=5)),
            RCv3Node(node_id='234', cloud_server_id='abc',
                     description=rcv3_desc)])
        result = converge(
            DesiredGroupState(server_config={}, capacity=1),
            set([server('abc', ServerState.ACTIVE,
                        servicenet_address='1.1.1.1',
                        desired_lbs=s(clb_desc, rcv3_desc))]),
            current,
            0)
        self.assertEqual(
            result,
            pbag([ChangeCLBNode(lb_id='5', node_id='123', weight=1,
                                condition=CLBNodeCondition.ENABLED,
                                type=CLBNodeType.PRIMARY)]))
Esempio n. 18
0
 def test_converge_active_servers_ignores_servers_to_be_deleted(self):
     """
     Only servers in active that are not being deleted will have their
     load balancers converged.
     """
     desc = CLBDescription(lb_id='5', port=80)
     desired_lbs = s(desc)
     abc = server('abc', ServerState.ACTIVE,
                  servicenet_address='1.1.1.1', created=0,
                  desired_lbs=desired_lbs)
     bcd = server('bcd', ServerState.ACTIVE,
                  servicenet_address='2.2.2.2', created=1,
                  desired_lbs=desired_lbs)
     # Capacity 1 means one of the two servers must be deleted; only the
     # surviving server's address is expected to be added to the CLB.
     self.assertEqual(
         converge(DesiredGroupState(server_config={}, capacity=1,
                                    desired_lbs=desired_lbs),
                  set([abc, bcd]),
                  set(),
                  0),
         pbag([DeleteServer(server_id='abc'),
               AddNodesToCLB(lb_id='5',
                             address_configs=s(('2.2.2.2', desc)))]))
Esempio n. 19
0
 def test_rcv3_mixed(self):
     """
     Multiple BulkAddToRCv3 and BulkRemoveFromRCv3 steps are combined
     into one BulkAddToRCv3 step and one BulkRemoveFromRCv3 step
     """
     steps = [
         BulkAddToRCv3(lb_node_pairs=pset([("l1", "s1"), ("l1", "s2")])),
         # The ("l1", "s1") pair appearing in both an add and a remove
         # step is not a conflict: merging is per step class.
         BulkRemoveFromRCv3(lb_node_pairs=pset([("l1", "s1")])),
         BulkAddToRCv3(lb_node_pairs=pset([("l1", "s3")])),
         BulkRemoveFromRCv3(lb_node_pairs=pset([("l3", "s3"),
                                                ("l2", "s3")])),
     ]
     expected = pbag([
         BulkAddToRCv3(lb_node_pairs=pset([("l1", "s1"), ("l1", "s2"),
                                           ("l1", "s3")])),
         BulkRemoveFromRCv3(lb_node_pairs=pset([("l1", "s1"), ("l3", "s3"),
                                                ("l2", "s3")])),
     ])
     self.assertEqual(optimize_steps(steps), expected)
Esempio n. 20
0
    def wait_for_stack_list(self, expected_states, timeout=180, period=10):
        """
        Poll the stack list until the multiset of stack statuses equals
        ``expected_states``, retrying every ``period`` seconds up to
        ``timeout`` seconds; fires with ``self.rcs`` on success.
        """
        def check(content):
            # Compare as bags: status multiplicities matter, order does not.
            states = pbag([s['stack_status'] for s in content['stacks']])
            if not (states == expected_states):
                msg("Waiting for group {} to reach desired group state.\n"
                    "{} (actual) {} (expected)"
                    .format(self.group.group_id, states, expected_states))
                # Transient: signals retry_and_timeout to poll again.
                raise TransientRetryError(
                    "Group states of {} did not match expected {})"
                    .format(states, expected_states))

            msg("Success: desired group state reached:\n{}"
                .format(expected_states))
            return self.rcs

        def poll():
            return self.get_stack_list().addCallback(check)

        # Rebind before polling begins; `check` closes over this pbag.
        expected_states = pbag(expected_states)

        return retry_and_timeout(
            poll, timeout,
            can_retry=terminal_errors_except(TransientRetryError),
            next_interval=repeating_interval(period),
            clock=reactor,
            deferred_description=(
                "Waiting for group {} to reach state {}".format(
                    self.group.group_id, str(expected_states))))
Esempio n. 21
0
 def _filter_only_lb_steps(self, steps):
     """
     Converge may do other things to a server depending on its draining
     state; these tests only care about load-balancer behaviour, so keep
     only steps whose exact type is in ``self.LB_STEPS``.
     """
     lb_steps = [each for each in steps if type(each) in self.LB_STEPS]
     return pbag(lb_steps)
Esempio n. 22
0
 def test_bulk_remove_from_rcv3(self):
     """Logs :obj:`BulkRemoveFromRCv3`."""
     removes = pbag([
         BulkRemoveFromRCv3(lb_node_pairs=pset([
             ('lb1', 'node1'), ('lb1', 'node2'),
             ('lb2', 'node2'), ('lb2', 'node3'),
             ('lb3', 'node4')])),
         BulkRemoveFromRCv3(lb_node_pairs=pset([
             ('lba', 'nodea'), ('lba', 'nodeb'),
             ('lb1', 'nodea')]))
     ])
     # One log per load balancer, with servers gathered across steps.
     expected_logs = [
         Log('convergence-remove-rcv3-nodes',
             fields={'lb_id': 'lb1', 'servers': ['node1', 'node2', 'nodea'],
                     'cloud_feed': True}),
         Log('convergence-remove-rcv3-nodes',
             fields={'lb_id': 'lb2', 'servers': ['node2', 'node3'],
                     'cloud_feed': True}),
         Log('convergence-remove-rcv3-nodes',
             fields={'lb_id': 'lb3', 'servers': ['node4'],
                     'cloud_feed': True}),
         Log('convergence-remove-rcv3-nodes',
             fields={'lb_id': 'lba', 'servers': ['nodea', 'nodeb'],
                     'cloud_feed': True}),
     ]
     self.assert_logs(removes, expected_logs)
Esempio n. 23
0
 def test_add_nodes_to_clbs(self):
     """Logs :obj:`AddNodesToCLB`."""
     adds = pbag([
         AddNodesToCLB(
             lb_id='lbid1',
             address_configs=pset([('10.0.0.1', _clbd('lbid1', 1234))])),
         AddNodesToCLB(
             lb_id='lbid1',
             address_configs=pset([('10.0.0.2', _clbd('lbid1', 1235))])),
         AddNodesToCLB(
             lb_id='lbid2',
             address_configs=pset([('10.0.0.1', _clbd('lbid2', 4321))])),
     ])
     # One aggregated log entry per load balancer.
     expected = [
         Log('convergence-add-clb-nodes',
             fields={'lb_id': 'lbid1',
                     'addresses': ['10.0.0.1:1234', '10.0.0.2:1235'],
                     'cloud_feed': True}),
         Log('convergence-add-clb-nodes',
             fields={'lb_id': 'lbid2',
                     'addresses': ['10.0.0.1:4321'],
                     'cloud_feed': True}),
     ]
     self.assert_logs(adds, expected)
Esempio n. 24
0
def decode(obj):
    """
    Recursively convert a msgpack-decoded object into its persistent /
    domain representation.

    :param obj: the raw object produced by msgpack unpacking.
    :return: ``pset``/``plist``/``pbag`` for the matching :obj:`ExtType`
        codes; a function or a reconstructed class instance (looked up in
        ``sys.modules``) for the remaining ext codes; ``pvector`` for
        tuples; ``pmap`` for dicts; any other object unchanged.

    NOTE(review): names are resolved via ``getattr`` on ``sys.modules``
    and instances are constructed from the payload, so this must only be
    applied to trusted input.
    """
    if isinstance(obj, ExtType):
        if obj.code == TYPE_PSET:
            unpacked_data = unpackb(obj.data, use_list=False, encoding='utf-8')
            return pset(decode(item) for item in unpacked_data)
        if obj.code == TYPE_PLIST:
            unpacked_data = unpackb(obj.data, use_list=False, encoding='utf-8')
            return plist(decode(item) for item in unpacked_data)
        if obj.code == TYPE_PBAG:
            unpacked_data = unpackb(obj.data, use_list=False, encoding='utf-8')
            return pbag(decode(item) for item in unpacked_data)
        if obj.code == TYPE_FUNC:
            module_name, func_name = unpackb(obj.data,
                                             use_list=False,
                                             encoding='utf-8')

            return getattr(sys.modules[module_name], func_name)
        # Any other ext code encodes (module, class, *constructor args).
        module_name, class_name, *data = unpackb(obj.data,
                                                 use_list=False,
                                                 encoding='utf-8')
        cls = getattr(sys.modules[module_name], class_name)
        if obj.code == TYPE_MBOX:
            return cls.decode(data)
        return cls(*(decode(item) for item in data))
    if isinstance(obj, tuple):
        # msgpack yields tuples for sequences when use_list=False.
        return pvector(decode(item) for item in obj)
    if isinstance(obj, dict):
        # Decode both keys and values before freezing into a pmap.
        return pmap({decode(key): decode(value)
                     for key, value in obj.items()})
    return obj
Esempio n. 25
0
 def test_delete_error_state_servers_with_lb_nodes(self):
     """
     If a server we created enters error state and it is attached to one
     or more load balancers, it will be removed from its load balancers
     as well as get deleted.  (Tests that error state servers are not
     excluded from converging load balancer state.)
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=1),
             # One ERROR server attached to two CLB ports and one RCv3 LB.
             set([server('abc', ServerState.ERROR,
                         servicenet_address='1.1.1.1',
                         desired_lbs=s(CLBDescription(lb_id='5', port=80),
                                       CLBDescription(lb_id='5', port=8080),
                                       RCv3Description(lb_id='6')))]),
             set([CLBNode(address='1.1.1.1', node_id='3',
                          description=CLBDescription(lb_id='5',
                                                     port=80)),
                  CLBNode(address='1.1.1.1', node_id='5',
                          description=CLBDescription(lb_id='5',
                                                     port=8080)),
                  RCv3Node(node_id='123', cloud_server_id='abc',
                           description=RCv3Description(lb_id='6'))]),
             0),
         # The errored server is deleted, every LB node referencing it is
         # removed, and a replacement is created to meet capacity=1.
         pbag([
             DeleteServer(server_id='abc'),
             RemoveNodesFromCLB(lb_id='5', node_ids=s('3')),
             RemoveNodesFromCLB(lb_id='5', node_ids=s('5')),
             BulkRemoveFromRCv3(lb_node_pairs=s(('6', 'abc'))),
             CreateServer(server_config=pmap()),
         ]))
Esempio n. 26
0
    def test_add_to_lb(self):
        """
        If a desired LB config is not in the set of current configs,
        `converge_lb_state` returns the relevant adding-to-load-balancer
        steps (:class:`AddNodesToCLB` in the case of CLB,
        :class:`BulkAddToRCv3` in the case of RCv3).
        """
        clb_desc = CLBDescription(lb_id='5', port=80)
        rcv3_desc = RCv3Description(
            lb_id='c6fe49fa-114a-4ea4-9425-0af8b30ff1e7')

        self.assertEqual(
            converge(
                DesiredGroupState(server_config={}, capacity=1),
                # One ACTIVE server that desires both LB types but is
                # currently attached to neither (empty current-nodes set).
                set([server('abc', ServerState.ACTIVE,
                            servicenet_address='1.1.1.1',
                            desired_lbs=s(clb_desc, rcv3_desc))]),
                set(),
                0),
            pbag([
                AddNodesToCLB(
                    lb_id='5',
                    address_configs=s(('1.1.1.1', clb_desc))),
                BulkAddToRCv3(
                    lb_node_pairs=s(
                        ('c6fe49fa-114a-4ea4-9425-0af8b30ff1e7', 'abc')))
            ]))
Esempio n. 27
0
 def test_clean_up_deleted_servers_with_lb_nodes(self):
     """
     If a server has been deleted, we want to remove any dangling LB nodes
     referencing the server.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=0),
             # The server is already DELETED, so no DeleteServer step is
             # expected -- only removal of its dangling LB nodes.
             set([server('abc', ServerState.DELETED,
                         servicenet_address='1.1.1.1',
                         desired_lbs=s(CLBDescription(lb_id='5', port=80),
                                       CLBDescription(lb_id='5', port=8080),
                                       RCv3Description(lb_id='6')))]),
             set([CLBNode(address='1.1.1.1', node_id='3',
                          description=CLBDescription(lb_id='5',
                                                     port=80)),
                  CLBNode(address='1.1.1.1', node_id='5',
                          description=CLBDescription(lb_id='5',
                                                     port=8080)),
                  RCv3Node(node_id='123', cloud_server_id='abc',
                           description=RCv3Description(lb_id='6'))]),
             0),
         pbag([
             RemoveNodesFromCLB(lb_id='5', node_ids=s('3')),
             RemoveNodesFromCLB(lb_id='5', node_ids=s('5')),
             BulkRemoveFromRCv3(lb_node_pairs=s(('6', 'abc'))),
         ]))
Esempio n. 28
0
 def test_active_server_is_drained_if_not_all_lbs_can_be_removed(self):
     """
     If an active server to be deleted cannot be removed from all the load
     balancers, it is set to draining state and all the nodes are set to
     draining condition.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=0,
                               draining_timeout=10.0),
             set([server('abc', state=ServerState.ACTIVE,
                         servicenet_address='1.1.1.1',
                         desired_lbs=s(self.clb_desc, self.rcv3_desc))]),
             set([CLBNode(node_id='1', address='1.1.1.1',
                          description=self.clb_desc),
                  RCv3Node(node_id='2', cloud_server_id='abc',
                           description=self.rcv3_desc)]),
             0),
         # The CLB node is put into DRAINING and the server is tagged
         # with the draining metadata; the RCv3 pair is removed outright
         # (NOTE(review): RCv3 appears to lack a draining condition --
         # confirm against the planner).
         pbag([
             ChangeCLBNode(lb_id='1', node_id='1', weight=1,
                           condition=CLBNodeCondition.DRAINING,
                           type=CLBNodeType.PRIMARY),
             SetMetadataItemOnServer(server_id='abc',
                                     key=DRAINING_METADATA[0],
                                     value=DRAINING_METADATA[1]),
             BulkRemoveFromRCv3(lb_node_pairs=s(
                 (self.rcv3_desc.lb_id, 'abc')))
         ]))
Esempio n. 29
0
    def test_optimize_clb_removes(self):
        """
        Aggregation is done on a per-load-balancer basis when remove nodes from
        a CLB.
        """
        steps = pbag([
            RemoveNodesFromCLB(lb_id='5', node_ids=s('1')),
            RemoveNodesFromCLB(lb_id='5', node_ids=s('2')),
            RemoveNodesFromCLB(lb_id='5', node_ids=s('3')),
            RemoveNodesFromCLB(lb_id='5', node_ids=s('4'))
        ])

        # All four single-node removals for LB '5' collapse into one step.
        self.assertEqual(
            optimize_steps(steps),
            pbag([
                RemoveNodesFromCLB(lb_id='5', node_ids=s('1', '2', '3', '4'))
            ]))
Esempio n. 30
0
    def test_clb_remove_multiple_load_balancers(self):
        """
        Multiple :class:`RemoveNodesFromCLB` steps for the same LB
        are merged into one.
        """
        steps = pbag([
            RemoveNodesFromCLB(lb_id='5', node_ids=s('1')),
            RemoveNodesFromCLB(lb_id='5', node_ids=s('2')),
            RemoveNodesFromCLB(lb_id='6', node_ids=s('3')),
            RemoveNodesFromCLB(lb_id='6', node_ids=s('4'))])

        # Merging is per lb_id: '5' and '6' each yield one combined step.
        self.assertEqual(
            optimize_steps(steps),
            pbag([
                RemoveNodesFromCLB(lb_id='5', node_ids=s('1', '2')),
                RemoveNodesFromCLB(lb_id='6', node_ids=s('3', '4'))
            ]))
Esempio n. 31
0
def double_ngram_perm_norm(score_left, score_right, n, xs):  # xs :: Iterable
    """Log norm over permutations of xs under a pair of n-gram scores.

    Seeds the dynamic program with an all-HALT context of length n-1 and
    clears the memo cache afterwards so nothing persists between calls.
    """
    initial_context = (HALT,) * (n - 1)
    state = (score_left, score_right, n, initial_context, pbag(xs))
    total = double_ngram_left_perm_norm_from(state)
    rfutils.get_cache(double_ngram_left_perm_norm_from).clear()
    return total
Esempio n. 32
0
    def test_clb_remove_multiple_load_balancers(self):
        """
        Multiple :class:`RemoveNodesFromCLB` steps for the same LB
        are merged into one.
        """
        steps = pbag([
            RemoveNodesFromCLB(lb_id='5', node_ids=s('1')),
            RemoveNodesFromCLB(lb_id='5', node_ids=s('2')),
            RemoveNodesFromCLB(lb_id='6', node_ids=s('3')),
            RemoveNodesFromCLB(lb_id='6', node_ids=s('4'))
        ])

        # Steps merge per lb_id, so two steps remain: one for each LB.
        self.assertEqual(
            optimize_steps(steps),
            pbag([
                RemoveNodesFromCLB(lb_id='5', node_ids=s('1', '2')),
                RemoveNodesFromCLB(lb_id='6', node_ids=s('3', '4'))
            ]))
Esempio n. 33
0
 def test_delete_servers(self):
     """Logs :obj:`DeleteServer`: all server IDs appear in one log entry."""
     deletes = pbag([DeleteServer(server_id='1'),
                     DeleteServer(server_id='2'),
                     DeleteServer(server_id='3')])
     self.assert_logs(deletes, [
         Log('convergence-delete-servers',
             fields={'servers': ['1', '2', '3'], 'cloud_feed': True})
     ])
Esempio n. 34
0
 def gen_right_probs(context, xs_bag):
     # Yield (token, unnormalized probability) pairs for the next right
     # dependent given ``context`` and the remaining multiset ``xs_bag``.
     # ``self`` and ``head`` are free variables from the enclosing method.
     score = self.depgen.right_distro.rfs[head].score
     n = self.depgen.n
     if not xs_bag:
         # No dependents remain: halting is the only option.
         yield HALT, exp(score(HALT, context))
     else:
         for x, rest in rfutils.thing_and_rest(xs_bag): # potential inefficiency
             # Keep only the last n-1 tokens as the n-gram context.
             next_context = truncate(context + (x,), n - 1)
             # Log norm over all orderings of the remaining dependents.
             A_rest = ngram_perm_norm_from((score, n, next_context, pbag(rest)))
             yield x, exp(score(x, context) + A_rest)
Esempio n. 35
0
 def test_scale_down(self):
     """If we have more servers than desired, we delete the oldest."""
     # 'abc' (created=0) is older than 'def' (created=1), so it goes.
     self.assertEqual(
         converge(
             DesiredGroupState(launch_config={}, desired=1),
             [server('abc', ACTIVE, created=0),
              server('def', ACTIVE, created=1)],
             {},
             0),
         Convergence(steps=pbag([DeleteServer(server_id='abc')])))
Esempio n. 36
0
 def test_scale_down(self):
     """If we have more servers than desired, we delete the oldest."""
     # With capacity=1 and two ACTIVE servers, the older 'abc' is deleted.
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=1),
             set([server('abc', ServerState.ACTIVE, created=0),
                  server('def', ServerState.ACTIVE, created=1)]),
             set(),
             0),
         pbag([DeleteServer(server_id='abc')]))
Esempio n. 37
0
def converge(desired_state, servers_with_cheese, load_balancer_contents, now,
             timeout=3600):
    """
    Build a :obj:`Convergence` describing the steps needed to move the
    group from its current state to ``desired_state``.

    :param DesiredGroupState desired_state: The desired group state.
    :param list servers_with_cheese: :obj:`NovaServer` instances; must
        only contain servers managed for this group.
    :param dict load_balancer_contents: maps load balancer IDs to lists of
        (IP address, loadbalancer node ID) 2-tuples.
    :param float now: seconds since the POSIX epoch at which convergence
        was requested.
    :param float timeout: seconds after which a server stuck in BUILD is
        deleted.

    :rtype: obj:`Convergence`
    """
    by_age = sorted(servers_with_cheese, key=lambda s: -s.created)
    in_error, in_active, in_build = partition_groups(
        lambda s: s.state, by_age, [ERROR, ACTIVE, BUILD])

    # Servers that have exceeded the BUILD timeout get deleted.
    stuck_in_build, still_building = partition_bool(
        lambda server: now - server.created >= timeout,
        in_build)

    # Create enough servers to reach the desired count (counting both
    # active servers and those still building toward it).
    shortfall = (desired_state.desired
                 - (len(in_active) + len(still_building)))
    steps = [CreateServer(launch_config=desired_state.launch_config)] * shortfall

    # Delete over-capacity servers, preferring to keep newer ones; the
    # list is newest-first, so slicing keeps the newest `desired`.
    excess = (in_active + still_building)[desired_state.desired:]
    steps += [DeleteServer(server_id=server.id) for server in excess]

    # All errored servers are deleted unconditionally.
    steps += [DeleteServer(server_id=server.id) for server in in_error]

    # Finally, delete the servers that built for too long.
    steps += [DeleteServer(server_id=server.id) for server in stuck_in_build]

    return Convergence(steps=pbag(steps))
Esempio n. 38
0
def ngram_perm_norm(score, n, xs):
    """ N-gram Permutation Norm

    Get the log norm of N-gram model scores of permutations of xs.

    """
    # Seed the dynamic program with an (n-1)-long all-HALT context, then
    # drop the memo cache so results don't leak across calls.
    seed = (HALT,) * (n - 1)
    norm = ngram_perm_norm_from((score, n, seed, pbag(xs)))
    rfutils.get_cache(ngram_perm_norm_from).clear()
    return norm
Esempio n. 39
0
def ngram_perm_norm(score, n, xs):
    """Return the log normalizer of n-gram model scores over all
    permutations of xs (dynamic-programming solution)."""
    context0 = (HALT,) * (n - 1)
    answer = ngram_perm_norm_from((score, n, context0, pbag(xs)))
    # The memoization table is per-call; discard it before returning.
    rfutils.get_cache(ngram_perm_norm_from).clear()
    return answer
Esempio n. 40
0
 def test_change_clb_node(self):
     """
     Logs :obj:`ChangeCLBNode`.

     Entries are grouped by (lb_id, condition, type, weight): node1 and
     node2 (lbid1, DRAINING) share one entry, while node3 (lbid1,
     ENABLED) and node4 (lbid2, ENABLED) each get their own.
     """
     changes = pbag([
         ChangeCLBNode(lb_id='lbid1',
                       node_id='node1',
                       condition=CLBNodeCondition.DRAINING,
                       type=CLBNodeType.PRIMARY,
                       weight=50),
         ChangeCLBNode(lb_id='lbid1',
                       node_id='node2',
                       condition=CLBNodeCondition.DRAINING,
                       type=CLBNodeType.PRIMARY,
                       weight=50),
         ChangeCLBNode(lb_id='lbid1',
                       node_id='node3',
                       condition=CLBNodeCondition.ENABLED,
                       type=CLBNodeType.PRIMARY,
                       weight=50),
         ChangeCLBNode(lb_id='lbid2',
                       node_id='node4',
                       condition=CLBNodeCondition.ENABLED,
                       type=CLBNodeType.PRIMARY,
                       weight=50),
     ])
     self.assert_logs(changes, [
         Log('convergence-change-clb-nodes',
             fields={
                 'lb_id': 'lbid1',
                 'nodes': ['node3'],
                 'type': 'PRIMARY',
                 'condition': 'ENABLED',
                 'weight': 50,
                 'cloud_feed': True
             }),
         Log('convergence-change-clb-nodes',
             fields={
                 'lb_id': 'lbid1',
                 'nodes': ['node1', 'node2'],
                 'type': 'PRIMARY',
                 'condition': 'DRAINING',
                 'weight': 50,
                 'cloud_feed': True
             }),
         Log('convergence-change-clb-nodes',
             fields={
                 'lb_id': 'lbid2',
                 'nodes': ['node4'],
                 'type': 'PRIMARY',
                 'condition': 'ENABLED',
                 'weight': 50,
                 'cloud_feed': True
             }),
     ])
Esempio n. 41
0
 def gen_right_probs(context, xs_bag):
     # Yield (token, unnormalized probability) pairs for the next right
     # dependent. ``self`` and ``head`` are free variables closed over
     # from the enclosing method.
     score = self.depgen.right_distro.rfs[head].score
     n = self.depgen.n
     if not xs_bag:
         # Empty multiset: halting is the only continuation.
         yield HALT, exp(score(HALT, context))
     else:
         for x, rest in rfutils.thing_and_rest(
                 xs_bag):  # potential inefficiency
             # Keep only the last n-1 tokens as the n-gram context.
             next_context = truncate(context + (x, ), n - 1)
             # Log norm over all orderings of the remaining dependents.
             A_rest = ngram_perm_norm_from(
                 (score, n, next_context, pbag(rest)))
             yield x, exp(score(x, context) + A_rest)
Esempio n. 42
0
 def _test_rcv3_step(self, step_class):
     """
     Helper: several ``step_class`` steps (a bulk RCv3 step type) are
     merged by the optimizer into a single step containing the union of
     their (lb, node) pairs.
     """
     steps = [
         step_class(lb_node_pairs=pset([("l1", "s1"), ("l1", "s2")])),
         step_class(lb_node_pairs=pset([("l2", "s1")])),
         step_class(lb_node_pairs=pset([("l1", "s3"), ("l2", "s3")]))
     ]
     self.assertEqual(
         optimize_steps(steps),
         pbag([
             step_class(lb_node_pairs=pset([("l1", "s1"), (
                 "l1", "s2"), ("l2", "s1"), ("l1", "s3"), ("l2", "s3")]))
         ]))
Esempio n. 43
0
 def gen_left_probs(context, xs_bag):
     # Yield (token, unnormalized probability) pairs for the next left
     # dependent. ``self`` and ``head`` are free variables closed over
     # from the enclosing method.
     left_score = self.depgen.left_distro.rfs[head].score
     right_score = self.depgen.right_distro.rfs[head].score
     n = self.depgen.n
     # Halting on the left hands the remaining multiset to the right side,
     # hence the right-side permutation norm in the HALT weight.
     yield HALT, exp(
         left_score(HALT, context) +
         ngram_perm_norm_from((right_score, n, context, xs_bag)))
     for x, rest in rfutils.thing_and_rest(
             xs_bag):  # potential inefficiency
         # Keep only the last n-1 tokens as the n-gram context.
         next_context = truncate(context + (x, ), n - 1)
         A_rest = double_ngram_left_perm_norm_from(
             (left_score, right_score, n, next_context, pbag(rest)))
         yield x, exp(left_score(x, context) + A_rest)
Esempio n. 44
0
def limit_steps_by_count(steps, step_limits):
    """
    Limits step count by type.

    :param steps: An iterable of steps.
    :param step_limits: A dict mapping step classes to their maximum allowable
        count. Classes not present in this dict have no limit: ``.get``
        returns ``None`` and ``seq[:None]`` keeps the whole sequence.
    :return: The steps, with each limited type truncated to its maximum.
    :rtype: pbag
    """
    return pbag(concat(typed_steps[:step_limits.get(cls)]
                       for (cls, typed_steps)
                       in groupby(type, steps).iteritems()))
Esempio n. 45
0
        def check(content):
            # Compare the multiset of stack statuses in ``content`` to the
            # expected states (``expected_states`` is a free variable from
            # the enclosing scope); retry transiently on mismatch.
            states = pbag([s['stack_status'] for s in content['stacks']])
            if not (states == expected_states):
                msg("Waiting for group {} to reach desired group state.\n"
                    "{} (actual) {} (expected)".format(self.group.group_id,
                                                       states,
                                                       expected_states))
                raise TransientRetryError(
                    "Group states of {} did not match expected {})".format(
                        states, expected_states))

            msg("Success: desired group state reached:\n{}".format(
                expected_states))
            # Pass the test resources through for further chaining.
            return self.rcs
Esempio n. 46
0
 def test_delete_servers(self):
     """Logs :obj:`DeleteServer`: all server IDs share one log entry."""
     deletes = pbag([
         DeleteServer(server_id='1'),
         DeleteServer(server_id='2'),
         DeleteServer(server_id='3')
     ])
     self.assert_logs(deletes, [
         Log('convergence-delete-servers',
             fields={
                 'servers': ['1', '2', '3'],
                 'cloud_feed': True
             })
     ])
Esempio n. 47
0
 def test_optimize_clb_adds(self):
     """
     Multiple :class:`AddNodesToCLB` steps for the same LB
     are merged into one.
     """
     steps = pbag([
         AddNodesToCLB(lb_id='5',
                       address_configs=s(
                           ('1.1.1.1', CLBDescription(lb_id='5',
                                                      port=80)))),
         AddNodesToCLB(lb_id='5',
                       address_configs=s(
                           ('1.2.3.4', CLBDescription(lb_id='5', port=80))))
     ])
     # Both address configs end up in a single step for LB '5'.
     self.assertEqual(
         optimize_steps(steps),
         pbag([
             AddNodesToCLB(
                 lb_id='5',
                 address_configs=s(
                     ('1.1.1.1', CLBDescription(lb_id='5', port=80)),
                     ('1.2.3.4', CLBDescription(lb_id='5', port=80))))
         ]))
Esempio n. 48
0
 def test_clb_adds_multiple_load_balancers(self):
     """
     Aggregation is done on a per-load-balancer basis when adding to a CLB.
     """
     steps = pbag([
         AddNodesToCLB(lb_id='5',
                       address_configs=s(
                           ('1.1.1.1', CLBDescription(lb_id='5',
                                                      port=80)))),
         AddNodesToCLB(lb_id='5',
                       address_configs=s(
                           ('1.1.1.2', CLBDescription(lb_id='5',
                                                      port=80)))),
         AddNodesToCLB(lb_id='6',
                       address_configs=s(
                           ('1.1.1.1', CLBDescription(lb_id='6',
                                                      port=80)))),
         AddNodesToCLB(lb_id='6',
                       address_configs=s(
                           ('1.1.1.2', CLBDescription(lb_id='6',
                                                      port=80)))),
     ])
     # Steps merge per lb_id: one combined step each for '5' and '6'.
     self.assertEqual(
         optimize_steps(steps),
         pbag([
             AddNodesToCLB(
                 lb_id='5',
                 address_configs=s(
                     ('1.1.1.1', CLBDescription(lb_id='5', port=80)),
                     ('1.1.1.2', CLBDescription(lb_id='5', port=80)))),
             AddNodesToCLB(
                 lb_id='6',
                 address_configs=s(
                     ('1.1.1.1', CLBDescription(lb_id='6', port=80)),
                     ('1.1.1.2', CLBDescription(lb_id='6', port=80)))),
         ]))
Esempio n. 49
0
 def test_optimize_leaves_other_steps(self):
     """
     Unoptimizable steps pass the optimizer unchanged.
     """
     # One of each step type, none of which can be merged with another.
     steps = pbag([
         AddNodesToCLB(lb_id='5',
                       address_configs=s(
                           ('1.1.1.1', CLBDescription(lb_id='5',
                                                      port=80)))),
         RemoveNodesFromCLB(lb_id='6', node_ids=s('1')),
         CreateServer(server_config=pmap({})),
         BulkRemoveFromRCv3(lb_node_pairs=pset([("lb-1", "node-a")])),
         BulkAddToRCv3(lb_node_pairs=pset([("lb-2", "node-b")]))
         # Note that the add & remove pair should not be the same;
         # the optimizer might reasonably optimize opposite
         # operations away in the future.
     ])
     self.assertEqual(optimize_steps(steps), steps)
Esempio n. 50
0
    def test_remove_nodes_from_clbs(self):
        """Logs :obj:`RemoveNodesFromCLB`: one log entry per ``lb_id``."""
        removes = pbag([
            RemoveNodesFromCLB(lb_id='lbid1', node_ids=pset(['a', 'b', 'c'])),
            RemoveNodesFromCLB(lb_id='lbid2', node_ids=pset(['d', 'e', 'f']))
        ])

        self.assert_logs(removes, [
            Log('convergence-remove-clb-nodes',
                fields={
                    'lb_id': 'lbid1',
                    'nodes': ['a', 'b', 'c'],
                    'cloud_feed': True
                }),
            Log('convergence-remove-clb-nodes',
                fields={
                    'lb_id': 'lbid2',
                    'nodes': ['d', 'e', 'f'],
                    'cloud_feed': True
                }),
        ])
Esempio n. 51
0
 def test_bulk_remove_from_rcv3(self):
     """
     Logs :obj:`BulkRemoveFromRCv3`.

     Pairs from all steps are regrouped so each ``lb_id`` gets one log
     entry listing every server removed from it.
     """
     adds = pbag([
         BulkRemoveFromRCv3(lb_node_pairs=pset([(
             'lb1',
             'node1'), ('lb1',
                        'node2'), ('lb2',
                                   'node2'), ('lb2',
                                              'node3'), ('lb3', 'node4')])),
         BulkRemoveFromRCv3(
             lb_node_pairs=pset([('lba',
                                  'nodea'), ('lba',
                                             'nodeb'), ('lb1', 'nodea')]))
     ])
     self.assert_logs(adds, [
         Log('convergence-remove-rcv3-nodes',
             fields={
                 'lb_id': 'lb1',
                 'servers': ['node1', 'node2', 'nodea'],
                 'cloud_feed': True
             }),
         Log('convergence-remove-rcv3-nodes',
             fields={
                 'lb_id': 'lb2',
                 'servers': ['node2', 'node3'],
                 'cloud_feed': True
             }),
         Log('convergence-remove-rcv3-nodes',
             fields={
                 'lb_id': 'lb3',
                 'servers': ['node4'],
                 'cloud_feed': True
             }),
         Log('convergence-remove-rcv3-nodes',
             fields={
                 'lb_id': 'lba',
                 'servers': ['nodea', 'nodeb'],
                 'cloud_feed': True
             })
     ])
Esempio n. 52
0
def optimize_steps(steps):
    """
    Optimize steps.

    Currently only optimizes per step type. See the :func:`_optimizer`
    decorator for more information on how to register an optimizer.

    :param pbag steps: Collection of steps.
    :return: a pbag of steps.
    """
    def classify(step):
        # Group by concrete step class when an optimizer is registered
        # for it; everything else falls into one catch-all bucket.
        cls = type(step)
        return cls if cls in _optimizers else "unoptimizable"

    grouped = groupby(classify, steps)
    leftovers = grouped.pop("unoptimizable", [])
    optimized = concat(_optimizers[cls](group)
                       for cls, group in grouped.iteritems())
    return pbag(concatv(optimized, leftovers))
Esempio n. 53
0
    def _create_some_steps(self, counts=None):
        """
        Creates some steps for testing.

        :param counts: A mapping of supported step classes to the number of
            those steps to create. If unspecified, assumed to be zero.
        :return: A pbag of steps.
        """
        # Use None instead of a shared mutable ``{}`` default (backward
        # compatible: the dict was only read, but the idiom is unsafe).
        if counts is None:
            counts = {}
        create_servers = [
            CreateServer(server_config=pmap({"sentinel": i}))
            for i in xrange(counts.get(CreateServer, 0))
        ]
        delete_servers = [
            DeleteServer(server_id='abc-' + str(i))
            for i in xrange(counts.get(DeleteServer, 0))
        ]
        remove_from_clbs = [
            RemoveNodesFromCLB(lb_id='1', node_ids=(str(i), ))
            for i in xrange(counts.get(RemoveNodesFromCLB, 0))
        ]

        return pbag(create_servers + delete_servers + remove_from_clbs)
Esempio n. 54
0
 def test_create_servers(self):
     """
     Logs :obj:`CreateServer`.

     Steps with an identical server config are aggregated into one entry
     with a ``num_servers`` count.
     """
     cfg = {'configgy': 'configged', 'nested': {'a': 'b'}}
     cfg2 = {'configgy': 'configged', 'nested': {'a': 'c'}}
     creates = pbag([
         CreateServer(server_config=freeze(cfg)),
         CreateServer(server_config=freeze(cfg)),
         CreateServer(server_config=freeze(cfg2))
     ])
     self.assert_logs(creates, [
         Log('convergence-create-servers',
             fields={
                 'num_servers': 2,
                 'server_config': cfg,
                 'cloud_feed': True
             }),
         Log('convergence-create-servers',
             fields={
                 'num_servers': 1,
                 'server_config': cfg2,
                 'cloud_feed': True
             })
     ])
Esempio n. 55
0
 def test_rcv3_mixed(self):
     """
     Multiple BulkAddToRCv3 and BulkRemoveFromRCv3 steps are combined
     into one BulkAddToRCv3 step and one BulkRemoveFromRCv3 step
     """
     steps = [
         BulkAddToRCv3(lb_node_pairs=pset([("l1", "s1"), ("l1", "s2")])),
         # Same pair for different class does not conflict
         BulkRemoveFromRCv3(lb_node_pairs=pset([("l1", "s1")])),
         BulkAddToRCv3(lb_node_pairs=pset([("l1", "s3")])),
         BulkRemoveFromRCv3(lb_node_pairs=pset([("l3", "s3"), ("l2",
                                                               "s3")]))
     ]
     # Merging is per step class: all adds unite, all removes unite.
     self.assertEqual(
         optimize_steps(steps),
         pbag([
             BulkAddToRCv3(lb_node_pairs=pset([("l1",
                                                "s1"), ("l1",
                                                        "s2"), ("l1",
                                                                "s3")])),
             BulkRemoveFromRCv3(
                 lb_node_pairs=pset([("l1", "s1"), ("l3", "s3"), ("l2",
                                                                  "s3")]))
         ]))
Esempio n. 56
0
    def test_set_metadata_item_on_server(self):
        """
        Logs :obj:`SetMetadataItemOnServer`: entries are grouped per
        (key, value) pair, each listing the affected server IDs.
        """
        sets = pbag([
            SetMetadataItemOnServer(server_id='s1', key='k1', value='v1'),
            SetMetadataItemOnServer(server_id='s2', key='k1', value='v1'),
            SetMetadataItemOnServer(server_id='s3', key='k2', value='v2'),
        ])

        self.assert_logs(sets, [
            Log('convergence-set-server-metadata',
                fields={
                    'servers': ['s1', 's2'],
                    'key': 'k1',
                    'value': 'v1',
                    'cloud_feed': True
                }),
            Log('convergence-set-server-metadata',
                fields={
                    'servers': ['s3'],
                    'key': 'k2',
                    'value': 'v2',
                    'cloud_feed': True
                })
        ])
Esempio n. 57
0
def converge_launch_stack(desired_state, stacks):
    """
    Create steps that indicate how to transition from the state provided
    by the given parameters to the :obj:`DesiredStackGroupState` described by
    ``desired_state``.

    See note [Converging stacks] for more information.

    :param DesiredStackGroupState desired_state: The desired group state.
    :param set stacks: a set of :obj:`HeatStack` instances.
        This must only contain stacks that are being managed for the specified
        group.
    :rtype: :obj:`pbag` of `IStep`

    """
    config = desired_state.stack_config

    # Bucket every stack by its current Heat state.
    by_state = groupby(lambda stack: stack.get_state(), stacks)

    stacks_complete = by_state.get(StackState.CREATE_UPDATE_COMPLETE, [])
    stacks_failed = by_state.get(StackState.CREATE_UPDATE_FAILED, [])
    stacks_check_complete = by_state.get(StackState.CHECK_COMPLETE, [])
    stacks_check_failed = by_state.get(StackState.CHECK_FAILED, [])
    stacks_in_progress = by_state.get(StackState.IN_PROGRESS, [])
    stacks_delete_in_progress = by_state.get(StackState.DELETE_IN_PROGRESS, [])
    stacks_delete_failed = by_state.get(StackState.DELETE_FAILED, [])

    # "good" stacks need no repair; "amiss" stacks need work or waiting.
    stacks_good = stacks_complete + stacks_check_complete
    stacks_amiss = (stacks_failed +
                    stacks_check_failed +
                    stacks_in_progress +
                    stacks_delete_in_progress)

    # DELETE_FAILED is unrecoverable here: fail the whole convergence.
    if stacks_delete_failed:
        reasons = [ErrorReason.String("Stacks in DELETE_FAILED found.")]
        return pbag([FailConvergence(reasons)])

    # If there are no stacks in CHECK_* or other work to be done, we assume
    # we're at the beginning of a convergence cycle and need to perform stack
    # checks.
    if stacks_complete and not (stacks_check_complete or stacks_amiss):
        return pbag([CheckStack(stack) for stack in stacks_complete])

    # Otherwise, if all stacks are in a good state and we have the right number
    # of stacks, we call update on the stacks in CHECK_COMPLETE and return
    # SUCCESS without waiting for it to finish (calling update on a stack in
    # CREATE_COMPLETE is essentially a no-op) so that there will be no stacks
    # in CREATE_* the next time otter tries to converge this group. This will
    # cause all of the stacks to be checked at that time and let otter know
    # if there are any stacks that have fallen into an error state.
    elif not stacks_amiss and len(stacks_good) == desired_state.capacity:
        return pbag([UpdateStack(stack=stack, stack_config=config, retry=False)
                     for stack in stacks_check_complete])

    def get_create_steps():
        # One CreateStack per stack missing from the desired capacity,
        # counting good, in-progress and fixable (CHECK_FAILED) stacks.
        create_stack = CreateStack(stack_config=config)
        good_or_fixable_stack_count = (len(stacks_good) +
                                       len(stacks_in_progress) +
                                       len(stacks_check_failed))
        return [create_stack] * (desired_state.capacity -
                                 good_or_fixable_stack_count)

    def get_scale_down_steps():
        # Delete excess stacks, preferring to keep good ones, then
        # in-progress, then check-failed.
        stacks_in_preferred_order = (
            stacks_good + stacks_in_progress + stacks_check_failed)
        unneeded_stacks = stacks_in_preferred_order[desired_state.capacity:]
        return map(DeleteStack, unneeded_stacks)

    def get_fix_steps(scale_down_steps):
        # Repair (update) only the CHECK_FAILED stacks that are not
        # already slated for deletion by scale-down.
        num_stacks_to_update = len(stacks_check_failed) - len(scale_down_steps)
        stacks_to_update = (stacks_check_failed[:num_stacks_to_update]
                            if num_stacks_to_update > 0 else [])
        return [UpdateStack(stack=s, stack_config=config)
                for s in stacks_to_update]

    create_steps = get_create_steps()
    scale_down_steps = get_scale_down_steps()
    fix_steps = get_fix_steps(scale_down_steps)
    delete_stacks_failed_steps = map(DeleteStack, stacks_failed)

    # If anything is still moving, schedule another convergence pass.
    converge_later = (
        [ConvergeLater([ErrorReason.String("Waiting for stacks to finish.")])]
        if stacks_delete_in_progress or stacks_in_progress
        else [])

    return pbag(create_steps +
                fix_steps +
                scale_down_steps +
                delete_stacks_failed_steps +
                converge_later)
Esempio n. 58
0
def converge_launch_server(desired_state, servers_with_cheese,
                           load_balancer_nodes, load_balancers,
                           now, timeout=3600):
    """
    Create steps that indicate how to transition from the state provided
    by the given parameters to the :obj:`DesiredServerGroupState` described by
    ``desired_state``.

    :param DesiredServerGroupState desired_state: The desired group state.
    :param set servers_with_cheese: a list of :obj:`NovaServer` instances.
        This must only contain servers that are being managed for the specified
        group.
    :param load_balancer_nodes: a set of :obj:`ILBNode` providers. This
        must contain all the load balancer mappings for all the load balancers
        (of all types) on the tenant.
    :param dict load_balancers: Collection of load balancer objects accessed
        based on its ID. The object is opaque and is not used by planner
        directly. It is intended to contain extra info for specific LB provider
    :param float now: number of seconds since the POSIX epoch indicating the
        time at which the convergence was requested.
    :param float timeout: Number of seconds after which we will delete a server
        in BUILD.
    :rtype: :obj:`pbag` of `IStep`

    """
    # Newest first, so that slicing the tail off the preference-ordered
    # list below removes the OLDEST servers when scaling down.
    newest_to_oldest = sorted(servers_with_cheese, key=lambda s: -s.created)

    # Bucket servers by their "destiny" (what the planner intends to do with
    # each).  NOTE(review): this `groupby` must be the dict-returning
    # toolz.groupby, not itertools.groupby -- confirm against the imports.
    servers = defaultdict(lambda: [], groupby(get_destiny, newest_to_oldest))
    servers_in_active = servers[Destiny.CONSIDER_AVAILABLE]

    # Split building servers into those past the build timeout (deleted
    # below) and those we keep waiting on.
    building_too_long, waiting_for_build = partition_bool(
        lambda server: now - server.created >= timeout,
        servers[Destiny.WAIT_WITH_TIMEOUT])

    create_server = CreateServer(server_config=desired_state.server_config)

    # delete any servers that have been building for too long
    delete_timeout_steps = [DeleteServer(server_id=server.id)
                            for server in building_too_long]

    # create servers: only the shortfall after counting every server that
    # is (or may become) usable; a negative count yields no steps.
    create_steps = [create_server] * (
        desired_state.capacity - (
            len(servers_in_active) +
            len(waiting_for_build) +
            len(servers[Destiny.WAIT]) +
            len(servers[Destiny.AVOID_REPLACING])))

    # Scale down over capacity, starting with building, then WAIT, then
    # AVOID_REPLACING, then active, preferring older.  Also, finish
    # draining/deleting servers already in draining state
    servers_in_preferred_order = (
        servers_in_active +
        servers[Destiny.AVOID_REPLACING] +
        servers[Destiny.WAIT] +
        waiting_for_build)
    servers_to_delete = servers_in_preferred_order[desired_state.capacity:]

    def drain_and_delete_a_server(server):
        # Delegate to the shared drain-then-delete policy, passing only the
        # LB nodes that belong to this server.
        return _drain_and_delete(
            server,
            desired_state.draining_timeout,
            [node for node in load_balancer_nodes if node.matches(server)],
            now)

    try:
        scale_down_steps = list(
                mapcat(drain_and_delete_a_server,
                       servers_to_delete + servers[Destiny.DRAIN]))
    except DrainingUnavailable as de:
        # Draining cannot proceed at all; surface a single failure step
        # instead of a partial plan.
        return pbag([fail_convergence(de)])

    # delete all servers in error - draining does not need to be
    # handled because servers in error presumably are not serving
    # traffic anyway
    delete_error_steps = [DeleteServer(server_id=server.id)
                          for server in servers[Destiny.DELETE]]

    # clean up all the load balancers from deleted and errored servers
    cleanup_errored_and_deleted_steps = [
        remove_node_from_lb(lb_node)
        for server in servers[Destiny.DELETE] + servers[Destiny.CLEANUP]
        for lb_node in load_balancer_nodes if lb_node.matches(server)]

    # converge all the servers that remain to their desired load balancer state
    still_active_servers = filter(lambda s: s not in servers_to_delete,
                                  servers_in_active)
    try:
        lb_converge_steps = [
            step
            for server in still_active_servers
            for step in _converge_lb_state(
                server,
                [node for node in load_balancer_nodes if node.matches(server)],
                load_balancers,
                now,
                # Temporarily using build timeout as node offline timeout.
                # See https://github.com/rackerlabs/otter/issues/1905
                timeout)
            ]
    except DrainingUnavailable as de:
        return pbag([fail_convergence(de)])

    # Converge again if we expect state transitions on any servers
    converge_later = []
    if any((s not in servers_to_delete
            for s in waiting_for_build)):
        converge_later = [
            ConvergeLater(reasons=[ErrorReason.String('waiting for servers')])]

    # Servers stuck in a non-ACTIVE, non-building state that we are keeping:
    # report them and re-converge (rate-limited via limited=True).
    unavail_fmt = ('Waiting for server {server_id} to transition to ACTIVE '
                   'from {status}')
    reasons = [ErrorReason.UserMessage(unavail_fmt.format(server_id=s.id,
                                                          status=s.state.name))
               for s in servers[Destiny.WAIT] if s not in servers_to_delete]
    if reasons:
        converge_later.append(ConvergeLater(limited=True, reasons=reasons))

    return pbag(create_steps +
                scale_down_steps +
                delete_error_steps +
                cleanup_errored_and_deleted_steps +
                delete_timeout_steps +
                lb_converge_steps +
                converge_later)
# ---- Example 59 (score: 0) ----
def test_literalish_works():
    """The ``b(...)`` literal constructor builds the same bag as ``pbag``."""
    expected = pbag([1, 2])
    assert b(1, 2) == expected
# ---- Example 60 (score: 0) ----
def test_iterable():
    """
    PBags can be created from iterables even though they can't be len() hinted.
    """
    first = pbag(iter("a"))
    second = pbag(iter("a"))
    assert first == second