Example #1
 def test_pause_and_create_policy(self):
     """
     A policy can be created on a paused group
     """
     group, _ = self.helper.create_group()
     yield group.start(self.rcs, self)
     yield group.pause(self.rcs)
     policy = ScalingPolicy(set_to=1, scaling_group=group)
     yield policy.start(self.rcs, self)
     returnValue((group, policy))
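
Because this test returns the group and policy via returnValue, a follow-on test
can reuse the setup; a minimal sketch (the test below is hypothetical and not part
of the suite, and the [403] argument is assumed to be the list of acceptable
response codes, as in the next example):

 def test_pause_create_then_execute_policy(self):
     """
     Hypothetical follow-on: a policy created on a paused group still
     cannot be executed until the group is resumed.
     """
     group, policy = yield self.test_pause_and_create_policy()
     yield policy.execute(self.rcs, [403])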
Example #2
 def test_pause_and_execute_policy(self):
     """
     Executing any policy on a paused group results in a 403
     """
     group, _ = self.helper.create_group()
     yield group.start(self.rcs, self)
     policy = ScalingPolicy(set_to=1, scaling_group=group)
     yield policy.start(self.rcs, self)
     yield group.pause(self.rcs)
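     # [403] below is taken to be the list of acceptable response codes:
     # execution on a paused group should be rejected.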
     yield policy.execute(self.rcs, [403])
Example #3
    def start_group_and_wait(self, group, rcs, desired=None):
        """
        Start a group and, if ``desired`` is supplied, create and execute a
        policy that scales to that number.  This is useful, for example, when
        we want to scale to a group's max without making the min equal to
        the max.

        This also waits for that number of servers to be reached: ``desired``
        if provided, or the group's ``minEntities`` otherwise.

        :param ScalingGroup group: An instance of
            :class:`otter.integration.lib.autoscale.ScalingGroup` to start -
            this group should not have been started already.
        :param TestResources rcs: An instance of
            :class:`otter.integration.lib.resources.TestResources`
        :param int desired: A desired number to scale to.
        """
        yield group.start(rcs, self.test_case)
        if desired is not None:
            p = ScalingPolicy(set_to=desired, scaling_group=group)
            yield p.start(rcs, self.test_case)
            yield p.execute(rcs)

        if desired is None:
            desired = group.group_config['groupConfiguration'].get(
                'minEntities', 0)

        yield group.wait_for_state(rcs,
                                   MatchesAll(
                                       HasActive(desired),
                                       ContainsDict({
                                           'pendingCapacity':
                                           Equals(0),
                                           'desiredCapacity':
                                           Equals(desired)
                                       })),
                                   timeout=600)

        if self.clbs:
            ips = yield group.get_servicenet_ips(rcs)
            yield gatherResults([
                clb.wait_for_nodes(rcs,
                                   ContainsAllIPs(ips.values()),
                                   timeout=600) for clb in self.clbs
            ])

        returnValue(rcs)
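
A usage sketch for this helper (the test name and the max_entities value are
hypothetical, for illustration only):

    def test_scale_to_max(self):
        """
        Hypothetical caller: scale a freshly created group up to an assumed
        max of 3 and wait for convergence to finish.
        """
        group, _ = self.helper.create_group(max_entities=3)
        yield self.helper.start_group_and_wait(group, self.rcs, desired=3)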
Example #4
    def test_draining(self):
        """
        When a draining timeout is provided in the launch config, the server
        is put in DRAINING for that long before being removed from the CLB
        """
        # Create group with CLB and draining timeout
        group, _ = self.helper.create_group(max_entities=5,
                                            draining_timeout=30)
        yield group.start(self.rcs, self)

        # Execute policy to scale up and extract server IP
        policy = ScalingPolicy(scale_by=1, scaling_group=group)
        yield policy.start(self.rcs, self)
        yield policy.execute(self.rcs)
        yield group.wait_for_state(
            self.rcs,
            ContainsDict({
                "activeCapacity": Equals(1),
                "pendingCapacity": Equals(0),
                "desiredCapacity": Equals(1),
                "status": Equals("ACTIVE")
            }))
        ip = (yield group.get_servicenet_ips(self.rcs)).values()[0]

        # Scale down
        policy = ScalingPolicy(scale_by=-1, scaling_group=group)
        yield policy.start(self.rcs, self)
        yield policy.execute(self.rcs)

        # Corresponding CLB node should be draining
        clb = self.helper.clbs[0]
        yield clb.wait_for_nodes(
            self.rcs,
            MatchesListwise([
                ContainsDict({
                    "address": Equals(ip),
                    "condition": Equals("DRAINING")
                })
            ]),
            15,  # timeout
            2)  # interval

        # After 30s the node should be removed.
        # Allow an extra 5s for feed latency, plus 2 convergence intervals:
        # if the 35s elapse right at the end of one cycle, the next cycle
        # removes the node, and we wait one more cycle for safety.
        yield clb.wait_for_nodes(self.rcs, HasLength(0),
                                 30 + 5 + convergence_interval * 2)
        yield group.wait_for_state(
            self.rcs,
            ContainsDict({
                "activeCapacity": Equals(0),
                "pendingCapacity": Equals(0),
                "desiredCapacity": Equals(0),
                "status": Equals("ACTIVE")
            }))
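
To make the timeout arithmetic above concrete, here is the same sum with a
hypothetical convergence_interval value (illustration only; the real value
comes from configuration):

        # Hypothetical numbers, for illustration only:
        draining_timeout = 30      # from the launch config above
        feed_latency = 5           # allowance for CLB feed latency
        convergence_interval = 10  # assumed; configured elsewhere in reality
        timeout = draining_timeout + feed_latency + convergence_interval * 2
        # => 30 + 5 + 10 * 2 = 55 seconds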
Example #5
 def test_pause_and_scheduled_policy(self):
     """
     A scheduled policy is not executed on a paused group
     """
     group, _ = self.helper.create_group()
     yield group.start(self.rcs, self)
     policy = ScalingPolicy(
         set_to=1, scaling_group=group,
         schedule={"at": get_utcstr_from_now(5)})
     yield policy.start(self.rcs, self)
     yield group.pause(self.rcs)
     yield sleep(reactor, 5 + scheduler_interval + 2)
     yield self.helper.assert_group_state(
         group,
         ContainsDict({
             "pendingCapacity": Equals(0),
             "activeCapacity": Equals(0),
             "desiredCapacity": Equals(0)}))
Example #6
    def test_resume(self):
        """
        Calling resume on a paused group sets {"paused": False} in the group
        state and triggers convergence. The group can then execute policies
        and webhooks, or trigger convergence directly.
        """
        group = yield self.test_pause_stops_convergence()
        yield group.resume(self.rcs)
        yield self.helper.assert_group_state(
            group, ContainsDict({"paused": Equals(False)}))
        yield group.wait_for_state(
            self.rcs,
            ContainsDict({"activeCapacity": Equals(1),
                          "pendingCapacity": Equals(0),
                          "desiredCapacity": Equals(1),
                          "status": Equals("ACTIVE")}))

        # can create and execute policy
        policy = ScalingPolicy(scale_by=1, scaling_group=group)
        yield policy.start(self.rcs, self)
        yield policy.execute(self.rcs)
        yield group.wait_for_state(
            self.rcs,
            ContainsDict({"activeCapacity": Equals(2),
                          "pendingCapacity": Equals(0),
                          "desiredCapacity": Equals(2),
                          "status": Equals("ACTIVE")}))

        # can create and execute webhooks
        webhook = yield policy.create_webhook(self.rcs)
        resp = yield treq.post(webhook.capurl, pool=self.helper.pool)
        self.assertEqual(resp.code, 202)
        yield treq.content(resp)
        yield sleep(reactor, 2)
        yield group.wait_for_state(
            self.rcs,
            ContainsDict({"activeCapacity": Equals(3),
                          "pendingCapacity": Equals(0),
                          "desiredCapacity": Equals(3),
                          "status": Equals("ACTIVE")}))

        # can trigger convergence
        yield group.trigger_convergence(self.rcs)
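
test_pause_stops_convergence, reused above, is not shown in this section; a
rough sketch of what it presumably does, inferred from its name and from the
state expected after resume (the details here are assumptions and may differ
from the real test):

    def test_pause_stops_convergence(self):
        """
        Sketch (assumption): pausing a group sets {"paused": True} and stops
        further convergence, so a policy executed just before pausing is not
        acted upon while the group stays paused.
        """
        group, _ = self.helper.create_group()
        yield group.start(self.rcs, self)
        policy = ScalingPolicy(set_to=1, scaling_group=group)
        yield policy.start(self.rcs, self)
        yield policy.execute(self.rcs)
        yield group.pause(self.rcs)
        yield self.helper.assert_group_state(
            group, ContainsDict({"paused": Equals(True)}))
        # Assumed check: convergence does not proceed while paused
        yield sleep(reactor, convergence_interval)
        yield self.helper.assert_group_state(
            group, ContainsDict({"activeCapacity": Equals(0),
                                 "desiredCapacity": Equals(1)}))
        returnValue(group)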
Example #7
    def start_group_and_wait(self, group, rcs, desired=None):
        """
        Start a group and, if ``desired`` is supplied, create and execute a
        policy that scales to that number.  This is useful, for example, when
        we want to scale to a group's max without making the min equal to
        the max.

        This also waits for that number of servers to be reached: ``desired``
        if provided, or the group's ``minEntities`` otherwise.

        :param ScalingGroup group: An instance of
            :class:`otter.integration.lib.autoscale.ScalingGroup` to start -
            this group should not have been started already.
        :param TestResources rcs: An instance of
            :class:`otter.integration.lib.resources.TestResources`
        :param int desired: A desired number to scale to.
        """
        yield group.start(rcs, self.test_case)
        if desired is not None:
            p = ScalingPolicy(set_to=desired, scaling_group=group)
            yield p.start(rcs, self.test_case)
            yield p.execute(rcs)

        if desired is None:
            desired = group.group_config['groupConfiguration'].get(
                'minEntities', 0)

        yield group.wait_for_state(
            rcs,
            MatchesAll(HasActive(desired),
                       ContainsDict({'pendingCapacity': Equals(0),
                                     'desiredCapacity': Equals(desired)})),
            timeout=600)

        if self.clbs:
            ips = yield group.get_servicenet_ips(rcs)
            yield gatherResults([
                clb.wait_for_nodes(
                    rcs, ContainsAllIPs(ips.values()), timeout=600)
                for clb in self.clbs])

        returnValue(rcs)
Example #8
    def test_draining(self):
        """
        When a draining timeout is provided in the launch config, the server
        is put in DRAINING for that long before being removed from the CLB
        """
        # Create group with CLB and draining timeout
        group, _ = self.helper.create_group(max_entities=5, draining_timeout=30)
        yield group.start(self.rcs, self)

        # Execute policy to scale up and extract server IP
        policy = ScalingPolicy(scale_by=1, scaling_group=group)
        yield policy.start(self.rcs, self)
        yield policy.execute(self.rcs)
        yield group.wait_for_state(
            self.rcs,
            ContainsDict(
                {
                    "activeCapacity": Equals(1),
                    "pendingCapacity": Equals(0),
                    "desiredCapacity": Equals(1),
                    "status": Equals("ACTIVE"),
                }
            ),
        )
        ip = (yield group.get_servicenet_ips(self.rcs)).values()[0]

        # Scale down
        policy = ScalingPolicy(scale_by=-1, scaling_group=group)
        yield policy.start(self.rcs, self)
        yield policy.execute(self.rcs)

        # Corresponding CLB node should be draining
        clb = self.helper.clbs[0]
        yield clb.wait_for_nodes(
            self.rcs,
            MatchesListwise([ContainsDict({"address": Equals(ip), "condition": Equals("DRAINING")})]),
            15,  # timeout
            2,
        )  # interval

        # After 30s the node should be removed.
        # Allow an extra 5s for feed latency, plus 2 convergence intervals:
        # if the 35s elapse right at the end of one cycle, the next cycle
        # removes the node, and we wait one more cycle for safety.
        yield clb.wait_for_nodes(self.rcs, HasLength(0), 30 + 5 + convergence_interval * 2)
        yield group.wait_for_state(
            self.rcs,
            ContainsDict(
                {
                    "activeCapacity": Equals(0),
                    "pendingCapacity": Equals(0),
                    "desiredCapacity": Equals(0),
                    "status": Equals("ACTIVE"),
                }
            ),
        )
Example #9
    def test_create(self):
        """
        For a launch_stack config, stacks are created, checked, updated, and
        deleted through Heat.
        """
        p = ScalingPolicy(set_to=5, scaling_group=self.group)
        scale_up = ScalingPolicy(set_to=7, scaling_group=self.group)
        scale_down = ScalingPolicy(set_to=1, scaling_group=self.group)

        yield self.group.start(self.rcs, self)

        yield p.start(self.rcs, self)
        yield p.execute(self.rcs)
        yield self.wait_for_stack_list([u'UPDATE_COMPLETE'] * 5)

        yield scale_up.start(self.rcs, self)
        yield scale_up.execute(self.rcs)
        yield self.wait_for_stack_list(
            [u'UPDATE_COMPLETE'] * 5 + [u'CREATE_COMPLETE'] * 2)

        yield scale_down.start(self.rcs, self)
        yield scale_down.execute(self.rcs)
        yield self.wait_for_stack_list([u'UPDATE_COMPLETE'])
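
One way to read the expected status lists above (an assumed interpretation of
the create/check/update behaviour described in the docstring):

        # Assumed reading of the assertions above:
        # - scale to 5: the five new stacks are checked and updated during
        #   convergence, so they end in UPDATE_COMPLETE, not CREATE_COMPLETE.
        # - scale to 7: the five existing stacks remain UPDATE_COMPLETE and
        #   the two new ones still show CREATE_COMPLETE.
        # - scale to 1: a single checked/updated stack remains in
        #   UPDATE_COMPLETE.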
Example #10
    def test_create(self):
        """
        For a launch_stack config, stacks are created, checked, updated, and
        deleted through Heat.
        """
        p = ScalingPolicy(set_to=5, scaling_group=self.group)
        scale_up = ScalingPolicy(set_to=7, scaling_group=self.group)
        scale_down = ScalingPolicy(set_to=1, scaling_group=self.group)

        yield self.group.start(self.rcs, self)

        yield p.start(self.rcs, self)
        yield p.execute(self.rcs)
        yield self.wait_for_stack_list([u'UPDATE_COMPLETE'] * 5)

        yield scale_up.start(self.rcs, self)
        yield scale_up.execute(self.rcs)
        yield self.wait_for_stack_list([u'UPDATE_COMPLETE'] * 5 +
                                       [u'CREATE_COMPLETE'] * 2)

        yield scale_down.start(self.rcs, self)
        yield scale_down.execute(self.rcs)
        yield self.wait_for_stack_list([u'UPDATE_COMPLETE'])