def test_contains_all_ips_success(self):
     """
     :class:`ContainsAllIPs` succeeds when the nodes contain all the IPs
     given.
     """
     matcher = ContainsAllIPs(['10.0.0.1', '10.0.0.2', '10.0.0.2'])
     mismatch = matcher.match([
         {'id': i, 'address': '10.0.0.{0}'.format(i)}
         for i in (1, 2)
     ])
     self.assertEqual(None, mismatch)
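The tests on this page follow the testtools matcher protocol: ``match()`` returns ``None`` on success and a mismatch object on failure, which is why they compare the result against ``None``. The real ``ContainsAllIPs`` lives in Otter's integration-test helpers and is not shown here; the following is only a minimal sketch of what such a matcher could look like, with the class name and details assumed for illustration.

from testtools.matchers import Matcher, Mismatch


class ContainsAllIPsSketch(Matcher):
    """Hypothetical stand-in for Otter's ContainsAllIPs matcher."""

    def __init__(self, ips):
        # Duplicate expected IPs, as in the test above, collapse here.
        self.ips = set(ips)

    def __str__(self):
        return 'ContainsAllIPsSketch({0!r})'.format(sorted(self.ips))

    def match(self, nodes):
        # Each node is a dict with an 'address' key, as in the tests.
        addresses = set(node['address'] for node in nodes)
        missing = self.ips - addresses
        if missing:
            return Mismatch(
                'IPs not found in nodes: {0}'.format(sorted(missing)))
        return None  # None signals a successful match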
Example #2
 def test_contains_all_ips_success(self):
     """
     :class:`ContainsAllIPs` succeeds when the nodes contain all the IPs
     given.
     """
     matcher = ContainsAllIPs(['10.0.0.1', '10.0.0.2', '10.0.0.2'])
     mismatch = matcher.match([{
         'id': i,
         'address': '10.0.0.{0}'.format(i)
     } for i in (1, 2)])
     self.assertEqual(None, mismatch)
 def test_contains_all_ips_failure(self):
     """
     :class:`ContainsAllIPs` fails when the nodes contain only some or
     none of the IPs given.
     """
     matcher = ContainsAllIPs(['10.0.0.1', '10.0.0.2', '10.0.0.2'])
     self.assertNotEqual(
         None,
         matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)}
                        for i in (1, 3)]),
         "Partial match succeeds when all should be required."
     )
     self.assertNotEqual(None, matcher.match([]), "No matches succed.")
Example #4
 def test_contains_all_ips_failure(self):
     """
     :class:`ContainsAllIPs` fails when the nodes contain only some or
     none of the IPs given.
     """
     matcher = ContainsAllIPs(['10.0.0.1', '10.0.0.2', '10.0.0.2'])
     self.assertNotEqual(
         None,
         matcher.match([{
             'id': i,
             'address': '10.0.0.{0}'.format(i)
         } for i in (1, 3)]),
         "Partial match succeeds when all should be required.")
     self.assertNotEqual(None, matcher.match([]), "No matches succed.")
Example #5
    def test_oob_deleted_clb_node(self):
        """
        If an autoscaled server is removed out of band from the CLB it is
        supposed to be on, Otter will put it back.

        1. Create a scaling group with 1 CLB and 1 server
        2. Wait for server to be active
        3. Delete server from the CLB
        4. Converge
        5. Assert that the server is put back on the CLB.
        """
        clb = self.helper.clbs[0]
        yield self.confirm_clb_nodecounts([(clb, 0)])

        group, _ = self.helper.create_group(min_entities=1)
        yield self.helper.start_group_and_wait(group, self.rcs)

        clbs_nodes = yield self.confirm_clb_nodecounts([(clb, 1)])
        the_node = clbs_nodes[0][0]

        yield clb.delete_nodes(self.rcs, [the_node['id']])

        yield clb.wait_for_nodes(self.rcs,
                                 HasLength(0),
                                 timeout=timeout_default)
        yield group.trigger_convergence(self.rcs)

        yield clb.wait_for_nodes(self.rcs,
                                 MatchesAll(
                                     HasLength(1),
                                     ContainsAllIPs([the_node["address"]])),
                                 timeout=timeout_default)
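Several of the CLB tests, including the one above, call a ``confirm_clb_nodecounts`` helper whose implementation is not shown on this page. A plausible sketch, assuming ``wait_for_nodes`` accepts a matcher and fires with the CLB's node listing, and that ``timeout_default`` is a module-level constant as in the snippets above:

from testtools.matchers import HasLength
from twisted.internet.defer import (
    gatherResults, inlineCallbacks, returnValue)


@inlineCallbacks
def confirm_clb_nodecounts(self, clbs_and_counts):
    """
    Wait until each CLB reports the expected node count, then return the
    node listings in the same order as ``clbs_and_counts``.

    This is an assumed shape, written as a method of the test helper
    like the snippets above, not the actual Otter implementation.

    :param clbs_and_counts: iterable of ``(clb, expected_count)`` pairs.
    """
    nodes = yield gatherResults([
        clb.wait_for_nodes(self.rcs, HasLength(count),
                           timeout=timeout_default)
        for clb, count in clbs_and_counts
    ])
    returnValue(nodes)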
Example #6
    def test_move_node_to_oob_lb(self):
        """
        1 group, LB1 in config, LB2 not in any autoscale configs:

        Server node moved from LB1 to LB2
        Assert: Server put back on LB1
        Assert: Server remains in LB2
        """
        clb_as = self.helper.clbs[0]
        clb_other = yield self.create_another_clb()

        yield self.confirm_clb_nodecounts([(clb_as, 0), (clb_other, 0)])

        group, _ = self.helper.create_group(min_entities=1)
        yield self.helper.start_group_and_wait(group, self.rcs)

        nodes = yield self.confirm_clb_nodecounts([(clb_as, 1),
                                                   (clb_other, 0)])
        nodes_as = nodes[0]

        the_node = nodes_as[0]
        node_info = {
            "address": the_node["address"],
            "port": the_node["port"],
            "condition": the_node["condition"],
            "weight": 2
        }

        yield clb_as.delete_nodes(self.rcs, [the_node['id']])
        yield clb_other.add_nodes(self.rcs, [node_info])
        yield self.confirm_clb_nodecounts([(clb_as, 0), (clb_other, 1)])

        yield group.trigger_convergence(self.rcs)

        yield gatherResults([
            clb_as.wait_for_nodes(self.rcs,
                                  MatchesAll(
                                      HasLength(1),
                                      ContainsAllIPs([the_node["address"]])),
                                  timeout=timeout_default),
            clb_other.wait_for_nodes(
                self.rcs,
                MatchesAll(HasLength(1),
                           ContainsAllIPs([the_node["address"]])),
                timeout=timeout_default)
        ])
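Both ``wait_for_nodes`` assertions above combine ``HasLength`` and ``ContainsAllIPs`` with testtools' ``MatchesAll``, which succeeds only when every sub-matcher succeeds. A small standalone illustration using only testtools built-ins:

from testtools.matchers import HasLength, MatchesAll

nodes = [{'id': 1, 'address': '10.0.0.1'}]
# All sub-matchers succeed, so match() returns None.
assert MatchesAll(HasLength(1)).match(nodes) is None
# A failing sub-matcher yields a mismatch object instead of None.
mismatch = MatchesAll(HasLength(2)).match(nodes)
assert mismatch is not None
print(mismatch.describe())  # describes the length difference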
Example #7
    def test_changing_disowned_server_is_not_converged_2(self):
        """
        Copying a disowned autoscale server to a different CLB and converging
        will not move the disowned server back on its intended CLB.

        1. Create an AS group with CLB and 2 servers.
        2. Disown 1 server.
        3. Place both servers on a different CLB
        4. Converge group.
        5. Assert that everything remains the same, since the group's server
           is still on its CLB and Otter does not touch the other CLB.

        This is slightly different than
        :func:`test_changing_disowned_server_is_not_converged_1` in that it
        does not remove the servers from their original CLB.  This tests
        that autoscale will not remove disowned servers from the original
        autoscale CLB.
        """
        group, clb_as, clb_other, gone_ip, stay_ip = (
            yield self._disown_change_and_converge(False))

        yield gatherResults([
            clb_as.wait_for_nodes(self.rcs,
                                  MatchesAll(
                                      ContainsAllIPs([gone_ip, stay_ip]),
                                      HasLength(2)),
                                  timeout=timeout_default),
            clb_other.wait_for_nodes(self.rcs,
                                     MatchesAll(
                                         ContainsAllIPs([gone_ip, stay_ip]),
                                         HasLength(2)),
                                     timeout=timeout_default),
            group.wait_for_state(self.rcs,
                                 MatchesAll(
                                     ContainsDict({
                                         'pendingCapacity': Equals(0),
                                         'desiredCapacity': Equals(1),
                                         'status': Equals('ACTIVE'),
                                         'active': HasLength(1)
                                     })),
                                 timeout=timeout_default)
        ])
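The ``wait_for_state`` assertions use testtools' ``ContainsDict``, which checks only the keys it is given and ignores any extra keys in the observed dictionary, so the tests can name just the group-state fields they care about. For example (the ``state`` dict below is made up for illustration):

from testtools.matchers import ContainsDict, Equals, HasLength

state = {'pendingCapacity': 0, 'desiredCapacity': 1, 'status': 'ACTIVE',
         'active': [{'id': 'server-1'}], 'paused': False}
matcher = ContainsDict({'pendingCapacity': Equals(0),
                        'desiredCapacity': Equals(1),
                        'status': Equals('ACTIVE'),
                        'active': HasLength(1)})
# Extra keys such as 'paused' are ignored by ContainsDict.
assert matcher.match(state) is None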
Example #8
    def test_changing_disowned_server_is_not_converged_1(self):
        """
        Moving a disowned autoscale server to a different CLB and converging
        will not move the disowned server back on its intended CLB.

        1. Create an AS group with CLB and 2 servers.
        2. Disown 1 server.
        3. Remove both servers from group CLB and add to a different CLB.
        4. Converge group.
        5. Assert that the group's server is back on its CLB and is not
           removed from the other CLB. The disowned server remains on the
           other CLB.
        """
        group, clb_as, clb_other, gone_ip, stay_ip = (
            yield self._disown_change_and_converge(True))

        yield gatherResults([
            clb_as.wait_for_nodes(self.rcs,
                                  MatchesAll(ExcludesAllIPs([gone_ip]),
                                             ContainsAllIPs([stay_ip]),
                                             HasLength(1)),
                                  timeout=timeout_default),
            clb_other.wait_for_nodes(self.rcs,
                                     MatchesAll(
                                         ContainsAllIPs([stay_ip, gone_ip]),
                                         HasLength(2)),
                                     timeout=timeout_default),
            group.wait_for_state(self.rcs,
                                 MatchesAll(
                                     ContainsDict({
                                         'pendingCapacity': Equals(0),
                                         'desiredCapacity': Equals(1),
                                         'status': Equals('ACTIVE'),
                                         'active': HasLength(1)
                                     })),
                                 timeout=timeout_default),
        ])
Example #9
    def start_group_and_wait(self, group, rcs, desired=None):
        """
        Start a group and, if ``desired`` is supplied, create and execute a
        policy that scales the group to that number.  This is useful, for
        example, when we want to scale to a group's max without making the
        min equal to the max.

        This also waits for the desired number of servers to be reached:
        ``desired`` if it was provided, or the group's ``minEntities``
        otherwise.

        :param TestResources rcs: An instance of
            :class:`otter.integration.lib.resources.TestResources`
        :param ScalingGroup group: An instance of
            :class:`otter.integration.lib.autoscale.ScalingGroup` to start -
            this group should not have been started already.
        :param int desired: A desired number to scale to.
        """
        yield group.start(rcs, self.test_case)
        if desired is not None:
            p = ScalingPolicy(set_to=desired, scaling_group=group)
            yield p.start(rcs, self.test_case)
            yield p.execute(rcs)

        if desired is None:
            desired = group.group_config['groupConfiguration'].get(
                'minEntities', 0)

        yield group.wait_for_state(rcs,
                                   MatchesAll(
                                       HasActive(desired),
                                       ContainsDict({
                                           'pendingCapacity':
                                           Equals(0),
                                           'desiredCapacity':
                                           Equals(desired)
                                       })),
                                   timeout=600)

        if self.clbs:
            ips = yield group.get_servicenet_ips(rcs)
            yield gatherResults([
                clb.wait_for_nodes(rcs,
                                   ContainsAllIPs(ips.values()),
                                   timeout=600) for clb in self.clbs
            ])

        returnValue(rcs)
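``start_group_and_wait`` is written in Twisted's ``inlineCallbacks`` style (``yield`` plus ``returnValue``), so callers yield it from their own generator-based code. A hedged usage sketch, assuming a helper and resources set up as in the snippets above and assuming ``create_group`` accepts a ``max_entities`` keyword:

from twisted.internet.defer import inlineCallbacks, returnValue


@inlineCallbacks
def scale_group_to_three(helper, rcs):
    # `helper` and `rcs` stand for the helper and TestResources objects
    # used in the snippets above; max_entities is an assumed keyword
    # argument of create_group.
    group, _ = helper.create_group(min_entities=1, max_entities=5)
    # Scale the new group to 3 servers and wait until they are active
    # and registered on any configured CLBs.
    yield helper.start_group_and_wait(group, rcs, desired=3)
    returnValue(group)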