Example #1
    def assert_converge_clb_steps(self, clb_descs, clb_nodes, clb_steps,
                                  draining_timeout, now):
        """
        Run the converge function on a server with the given
        :class:`CLBDescription`s and :class:`CLBNode`s, the given
        draining timeout, and the given time.

        Assert that the LB steps produced are equivalent to the given
        CLB steps.

        Run the converge function again, this time with a default
        :class:`RCv3Description` and a default :class:`RCv3Node` added, and
        assert that the LB steps produced are equivalent to the given
        CLB steps plus an RCv3 node removal, because RCv3 nodes are not
        drainable and are hence unaffected by timeouts.
        """
        without_rcv3_steps = converge(
            DesiredGroupState(server_config={}, capacity=0,
                              draining_timeout=draining_timeout),
            s(server('abc',
                     ServerState.ACTIVE,
                     servicenet_address=self.address,
                     desired_lbs=s(*clb_descs))),
            s(*clb_nodes),
            now=now)

        self.assertEqual(self._filter_only_lb_steps(without_rcv3_steps),
                         b(*clb_steps))

        rcv3_desc = RCv3Description(
            lb_id='e762e42a-8a4e-4ffb-be17-f9dc672729b2')
        rcv3_step = BulkRemoveFromRCv3(
            lb_node_pairs=s(('e762e42a-8a4e-4ffb-be17-f9dc672729b2', 'abc')))

        with_rcv3_steps = converge(
            DesiredGroupState(server_config={}, capacity=0,
                              draining_timeout=draining_timeout),
            s(server('abc',
                     ServerState.ACTIVE,
                     servicenet_address=self.address,
                     desired_lbs=s(rcv3_desc, *clb_descs))),
            s(RCv3Node(node_id='43a39c18-8cad-4bb1-808e-450d950be289',
                       cloud_server_id='abc', description=rcv3_desc),
              *clb_nodes),
            now=now)

        self.assertEqual(self._filter_only_lb_steps(with_rcv3_steps),
                         b(rcv3_step, *clb_steps))
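
A note on helpers: these examples lean on two one-letter conveniences from
the surrounding test module (``s`` appears throughout, ``b`` in this
example). A minimal sketch, assuming they are thin aliases over pyrsistent's
persistent-set and persistent-bag constructors:

    # Hypothetical sketch; the examples only rely on these semantics.
    from pyrsistent import pbag, pset

    def s(*elements):
        """Build a persistent set from the given elements."""
        return pset(elements)

    def b(*steps):
        """Build a persistent bag (multiset) of convergence steps."""
        return pbag(steps)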
Example #2
    def test_change_lb_node(self):
        """
        If a desired CLB mapping is in the set of current configs,
        but the configuration is wrong, `converge_lb_state` returns a
        :class:`ChangeCLBNode` object.  RCv3 nodes cannot be changed - they are
        either right or wrong.
        """
        clb_desc = CLBDescription(lb_id='5', port=80)
        rcv3_desc = RCv3Description(
            lb_id='c6fe49fa-114a-4ea4-9425-0af8b30ff1e7')

        current = [CLBNode(node_id='123', address='1.1.1.1',
                           description=copy_clb_desc(clb_desc, weight=5)),
                   RCv3Node(node_id='234', cloud_server_id='abc',
                            description=rcv3_desc)]
        self.assertEqual(
            converge(
                DesiredGroupState(server_config={}, capacity=1),
                set([server('abc', ServerState.ACTIVE,
                            servicenet_address='1.1.1.1',
                            desired_lbs=s(clb_desc, rcv3_desc))]),
                set(current),
                0),
            pbag([
                ChangeCLBNode(lb_id='5', node_id='123', weight=1,
                              condition=CLBNodeCondition.ENABLED,
                              type=CLBNodeType.PRIMARY)]))
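
The ``copy_clb_desc`` helper is not shown in these examples. A plausible
sketch, assuming :class:`CLBDescription` is an immutable pyrsistent-style
record whose fields can be replaced with ``set``:

    # Hypothetical helper: copy a CLBDescription with some fields replaced.
    def copy_clb_desc(clb_desc, **kwargs):
        return clb_desc.set(**kwargs)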
Example #3
    def test_add_to_lb(self):
        """
        If a desired LB config is not in the set of current configs,
        `converge_lb_state` returns the relevant adding-to-load-balancer
        steps (:class:`AddNodesToCLB` in the case of CLB,
        :class:`BulkAddToRCv3` in the case of RCv3).
        """
        clb_desc = CLBDescription(lb_id='5', port=80)
        rcv3_desc = RCv3Description(
            lb_id='c6fe49fa-114a-4ea4-9425-0af8b30ff1e7')

        self.assertEqual(
            converge(
                DesiredGroupState(server_config={}, capacity=1),
                set([server('abc', ServerState.ACTIVE,
                            servicenet_address='1.1.1.1',
                            desired_lbs=s(clb_desc, rcv3_desc))]),
                set(),
                0),
            pbag([
                AddNodesToCLB(
                    lb_id='5',
                    address_configs=s(('1.1.1.1', clb_desc))),
                BulkAddToRCv3(
                    lb_node_pairs=s(
                        ('c6fe49fa-114a-4ea4-9425-0af8b30ff1e7', 'abc')))
            ]))
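
For reference, the names used throughout these examples resolve roughly as
follows. The module paths follow otter's convergence package layout but are
a best-effort guess rather than verified against the source:

    # Assumed imports behind the examples (module paths are a guess).
    from pyrsistent import pbag, pmap, pset

    from otter.convergence.model import (
        CLBDescription, CLBNode, CLBNodeCondition, CLBNodeType,
        DesiredGroupState, Destiny, ErrorReason, RCv3Description, RCv3Node,
        ServerState)
    from otter.convergence.planning import DRAINING_METADATA, converge
    from otter.convergence.steps import (
        AddNodesToCLB, BulkAddToRCv3, BulkRemoveFromRCv3, ChangeCLBNode,
        ConvergeLater, CreateServer, DeleteServer, RemoveNodesFromCLB,
        SetMetadataItemOnServer)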
Example #4
 def test_converge_active_servers_ignores_servers_to_be_deleted(self):
     """
     Only active servers that are not being deleted will have their
     load balancers converged.
     """
     desc = CLBDescription(lb_id='5', port=80)
     desired_lbs = s(desc)
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=1,
                               desired_lbs=desired_lbs),
             set([server('abc', ServerState.ACTIVE,
                         servicenet_address='1.1.1.1', created=0,
                         desired_lbs=desired_lbs),
                  server('bcd', ServerState.ACTIVE,
                         servicenet_address='2.2.2.2', created=1,
                         desired_lbs=desired_lbs)]),
             set(),
             0),
         pbag([
             DeleteServer(server_id='abc'),
             AddNodesToCLB(
                 lb_id='5',
                 address_configs=s(('2.2.2.2', desc)))
         ]))
Example #5
    def test_same_clb_multiple_ports(self):
        """
        It's possible to have the same cloud load balancer using multiple ports
        on the host.

        (use case: running multiple single-threaded server processes on a
        machine)
        """
        desired = s(CLBDescription(lb_id='5', port=8080),
                    CLBDescription(lb_id='5', port=8081))
        current = []
        self.assertEqual(
            converge(
                DesiredGroupState(server_config={}, capacity=1),
                set([server('abc', ServerState.ACTIVE,
                            servicenet_address='1.1.1.1',
                            desired_lbs=desired)]),
                set(current),
                0),
            pbag([
                AddNodesToCLB(
                    lb_id='5',
                    address_configs=s(('1.1.1.1',
                                       CLBDescription(lb_id='5', port=8080)))),
                AddNodesToCLB(
                    lb_id='5',
                    address_configs=s(('1.1.1.1',
                                       CLBDescription(lb_id='5', port=8081))))
                ]))
Example #6
    def test_active_server_is_drained_even_if_all_already_in_draining(self):
        """
        If an active server is attached to load balancers, and all those load
        balancer nodes are already in draining but cannot be removed yet,
        the server is set to draining state even though no load balancer
        actions need to be performed.

        This can happen for instance if the server was supposed to be deleted
        in a previous convergence run, and the load balancers were set to
        draining but setting the server metadata failed.
        """
        self.assertEqual(
            converge(
                DesiredGroupState(server_config={}, capacity=0,
                                  draining_timeout=10.0),
                set([server('abc', state=ServerState.ACTIVE,
                            servicenet_address='1.1.1.1',
                            desired_lbs=s(self.clb_desc, self.rcv3_desc))]),
                set([CLBNode(node_id='1', address='1.1.1.1',
                             description=copy_clb_desc(
                                 self.clb_desc,
                                 condition=CLBNodeCondition.DRAINING),
                             connections=1, drained_at=0.0)]),
                1),
            pbag([
                SetMetadataItemOnServer(server_id='abc',
                                        key=DRAINING_METADATA[0],
                                        value=DRAINING_METADATA[1]),
                self.clstep
            ]))
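
Several examples (including this one) reference fixtures defined elsewhere
in the test case: ``self.clb_desc``, ``self.rcv3_desc``, and
``self.clstep``. A plausible reconstruction; the RCv3 ``lb_id`` and the
exact ConvergeLater reason text are assumptions, not values taken from the
source:

    def setUp(self):
        # lb_id='1' matches the ChangeCLBNode steps asserted below.
        self.clb_desc = CLBDescription(lb_id='1', port=80)
        # Placeholder UUID; the real value is not shown in the examples.
        self.rcv3_desc = RCv3Description(
            lb_id='00000000-0000-0000-0000-000000000000')
        # Step requesting another convergence run while nodes drain
        # (the reason text is a guess).
        self.clstep = ConvergeLater(
            reasons=[ErrorReason.String('draining servers')])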
Example #7
    def test_scale_down_order(self):
        """Preferred order of servers to delete when scaling down:

        - WAIT_WITH_TIMEOUT
        - WAIT
        - AVOID_REPLACING
        - CONSIDER_AVAILABLE
        """
        order = (Destiny.WAIT_WITH_TIMEOUT, Destiny.WAIT,
                 Destiny.AVOID_REPLACING, Destiny.CONSIDER_AVAILABLE)
        examples = {Destiny.WAIT_WITH_TIMEOUT: ServerState.BUILD,
                    Destiny.WAIT: ServerState.HARD_REBOOT,
                    Destiny.AVOID_REPLACING: ServerState.RESCUE,
                    Destiny.CONSIDER_AVAILABLE: ServerState.ACTIVE}
        for combo in combinations(order, 2):
            before, after = combo
            also = []
            if after == Destiny.WAIT:
                # If we're waiting for some other servers we need to also
                # expect a ConvergeLater
                also = [ConvergeLater(reasons=[
                    ErrorReason.String(
                        'waiting for temporarily unavailable server to become '
                        'ACTIVE')],
                    limited=True)]

            self.assertEqual(
                converge(
                    DesiredGroupState(server_config={}, capacity=2),
                    set([server('abc', examples[after], created=0),
                         server('def', examples[before], created=1),
                         server('ghi', examples[after], created=2)]),
                    set(),
                    0),
                pbag([DeleteServer(server_id='def')] + also))
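
For clarity: ``combinations`` preserves input order, so each pair the loop
draws from ``order`` is (preferred-to-delete, kept), and every relation in
the preference order is checked exactly once:

    from itertools import combinations

    order = ('WAIT_WITH_TIMEOUT', 'WAIT', 'AVOID_REPLACING',
             'CONSIDER_AVAILABLE')
    for before, after in combinations(order, 2):
        print(before, '->', after)
    # WAIT_WITH_TIMEOUT -> WAIT
    # WAIT_WITH_TIMEOUT -> AVOID_REPLACING
    # WAIT_WITH_TIMEOUT -> CONSIDER_AVAILABLE
    # WAIT -> AVOID_REPLACING
    # WAIT -> CONSIDER_AVAILABLE
    # AVOID_REPLACING -> CONSIDER_AVAILABLE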
Example #8
    def test_draining_server_has_all_enabled_lb_set_to_draining(self):
        """
        If a draining server is associated with any load balancers, those
        load balancer nodes will be set to draining and the server is not
        deleted.  The metadata on the server is not re-set to draining.

        This can happen for instance if the server was supposed to be deleted
        in a previous convergence run, and the server metadata was set but
        the load balancers update failed.

        Or if the server is set to be manually deleted via the API.
        """
        self.assertEqual(
            converge(
                DesiredGroupState(server_config={}, capacity=0,
                                  draining_timeout=10.0),
                set([server('abc', state=ServerState.ACTIVE,
                            metadata=dict([DRAINING_METADATA]),
                            servicenet_address='1.1.1.1',
                            desired_lbs=s(self.clb_desc, self.rcv3_desc))]),
                set([CLBNode(node_id='1', address='1.1.1.1',
                             description=self.clb_desc)]),
                1),
            pbag([
                ChangeCLBNode(lb_id='1', node_id='1', weight=1,
                              condition=CLBNodeCondition.DRAINING,
                              type=CLBNodeType.PRIMARY)
            ]))
Example #9
 def test_active_server_is_drained_if_not_all_lbs_can_be_removed(self):
     """
     If an active server to be deleted cannot be removed from all the load
     balancers, it is set to draining state and its drainable load
     balancer nodes are set to draining condition.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=0,
                               draining_timeout=10.0),
             set([server('abc', state=ServerState.ACTIVE,
                         servicenet_address='1.1.1.1',
                         desired_lbs=s(self.clb_desc, self.rcv3_desc))]),
             set([CLBNode(node_id='1', address='1.1.1.1',
                          description=self.clb_desc),
                  RCv3Node(node_id='2', cloud_server_id='abc',
                           description=self.rcv3_desc)]),
             0),
         pbag([
             ChangeCLBNode(lb_id='1', node_id='1', weight=1,
                           condition=CLBNodeCondition.DRAINING,
                           type=CLBNodeType.PRIMARY),
             SetMetadataItemOnServer(server_id='abc',
                                     key=DRAINING_METADATA[0],
                                     value=DRAINING_METADATA[1]),
             BulkRemoveFromRCv3(lb_node_pairs=s(
                 (self.rcv3_desc.lb_id, 'abc')))
         ]))
Example #10
 def test_delete_error_state_servers_with_lb_nodes(self):
     """
     If a server we created enters error state while attached to one or
     more load balancers, it is removed from those load balancers as well
     as deleted.  (Tests that error state servers are not excluded from
     converging load balancer state.)
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=1),
             set([server('abc', ServerState.ERROR,
                         servicenet_address='1.1.1.1',
                         desired_lbs=s(CLBDescription(lb_id='5', port=80),
                                       CLBDescription(lb_id='5', port=8080),
                                       RCv3Description(lb_id='6')))]),
             set([CLBNode(address='1.1.1.1', node_id='3',
                          description=CLBDescription(lb_id='5',
                                                     port=80)),
                  CLBNode(address='1.1.1.1', node_id='5',
                          description=CLBDescription(lb_id='5',
                                                     port=8080)),
                  RCv3Node(node_id='123', cloud_server_id='abc',
                           description=RCv3Description(lb_id='6'))]),
             0),
         pbag([
             DeleteServer(server_id='abc'),
             RemoveNodesFromCLB(lb_id='5', node_ids=s('3')),
             RemoveNodesFromCLB(lb_id='5', node_ids=s('5')),
             BulkRemoveFromRCv3(lb_node_pairs=s(('6', 'abc'))),
             CreateServer(server_config=pmap()),
         ]))
Example #11
 def test_draining_server_can_be_deleted_if_all_lbs_can_be_removed(self):
     """
     If a draining server can be removed from all the load balancers, the
     server can be deleted.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=0),
             set([server('abc', state=ServerState.ACTIVE,
                         metadata=dict([DRAINING_METADATA]),
                         servicenet_address='1.1.1.1',
                         desired_lbs=s(self.clb_desc, self.rcv3_desc))]),
             set([CLBNode(node_id='1', address='1.1.1.1',
                          description=copy_clb_desc(
                              self.clb_desc,
                              condition=CLBNodeCondition.DRAINING)),
                  RCv3Node(node_id='2', cloud_server_id='abc',
                           description=self.rcv3_desc)]),
             0),
         pbag([
             DeleteServer(server_id='abc'),
             RemoveNodesFromCLB(lb_id='1', node_ids=s('1')),
             BulkRemoveFromRCv3(lb_node_pairs=s(
                 (self.rcv3_desc.lb_id, 'abc')))
         ]))
Example #12
 def test_clean_up_deleted_servers_with_lb_nodes(self):
     """
     If a server has been deleted, we want to remove any dangling LB nodes
     referencing the server.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=0),
             set([server('abc', ServerState.DELETED,
                         servicenet_address='1.1.1.1',
                         desired_lbs=s(CLBDescription(lb_id='5', port=80),
                                       CLBDescription(lb_id='5', port=8080),
                                       RCv3Description(lb_id='6')))]),
             set([CLBNode(address='1.1.1.1', node_id='3',
                          description=CLBDescription(lb_id='5',
                                                     port=80)),
                  CLBNode(address='1.1.1.1', node_id='5',
                          description=CLBDescription(lb_id='5',
                                                     port=8080)),
                  RCv3Node(node_id='123', cloud_server_id='abc',
                           description=RCv3Description(lb_id='6'))]),
             0),
         pbag([
             RemoveNodesFromCLB(lb_id='5', node_ids=s('3')),
             RemoveNodesFromCLB(lb_id='5', node_ids=s('5')),
             BulkRemoveFromRCv3(lb_node_pairs=s(('6', 'abc'))),
         ]))
Example #13
 def test_scale_down(self):
     """If we have more servers than desired, we delete the oldest."""
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=1),
             set([server('abc', ServerState.ACTIVE, created=0),
                  server('def', ServerState.ACTIVE, created=1)]),
             set(),
             0),
         pbag([DeleteServer(server_id='abc')]))
Example #14
 def test_converge_give_me_a_server(self):
     """
     A server is added if there are not enough servers to meet
     the desired capacity.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=1),
             set(),
             set(),
             0),
         pbag([CreateServer(server_config=pmap())]))
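
This is the smallest possible call, which makes the contract easy to read
off: ``converge`` is a pure planner from desired state, current servers,
current load-balancer nodes, and the current time to a bag of steps. A
sketch of the signature as inferred from usage in these examples, not the
verified definition:

    def converge(desired_group_state, servers, lb_nodes, now):
        """
        Compare desired state against the observed servers and
        load-balancer nodes, and return a pbag of steps (CreateServer,
        DeleteServer, AddNodesToCLB, ...) that moves the group toward
        the desired state.  ``now`` is the current time in seconds,
        used to evaluate build and draining timeouts.
        """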
Example #15
 def test_count_AVOID_REPLACING_as_meeting_capacity(self):
     """
     If a server's destiny is AVOID_REPLACING, we won't provision more
     servers to take up the slack; we just leave it alone, without causing
     another convergence iteration, because servers in this state are only
     transitioned to other states manually.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=1),
             set([server('abc', ServerState.RESCUE)]),
             set(),
             0),
         pbag([]))
Example #16
 def test_ignore_ignored(self):
     """
     If a server we created enters a state that otter ignores (here,
     UNKNOWN_TO_OTTER), we leave it be and provision a replacement server.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=1),
             set([server('abc', ServerState.UNKNOWN_TO_OTTER)]),
             set(),
             0),
         pbag([
             CreateServer(server_config=pmap()),
         ]))
Example #17
 def test_converge_give_me_multiple_servers(self):
     """
     Multiple servers are added at a time if there are not enough servers to
     meet the desired capacity.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=2),
             set(),
             set(),
             0),
         pbag([
             CreateServer(server_config=pmap()),
             CreateServer(server_config=pmap())]))
Example #18
 def test_active_server_without_load_balancers_can_be_deleted(self):
     """
     If an active server to be scaled down is not attached to any load
     balancers, even if it should be, it can be deleted.
     It is not first put into draining state.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=0,
                               draining_timeout=10.0),
             set([server('abc', state=ServerState.ACTIVE,
                         desired_lbs=s(self.clb_desc, self.rcv3_desc))]),
             set(),
             0),
         pbag([DeleteServer(server_id='abc')]))
Example #19
 def test_delete_nodes_in_error_state(self):
     """
     If a server we created enters error state, it will be deleted and
     replaced.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=1),
             set([server('abc', ServerState.ERROR)]),
             set(),
             0),
         pbag([
             DeleteServer(server_id='abc'),
             CreateServer(server_config=pmap()),
         ]))
Example #20
 def test_count_building_as_meeting_capacity(self):
     """
     No servers are created if there are building servers that, together
     with active servers, meet capacity.  :class:`ConvergeLater` is returned
     as a step if the building servers are not being deleted.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=1),
             set([server('abc', ServerState.BUILD)]),
             set(),
             0),
         pbag([
             ConvergeLater(
                 reasons=[ErrorReason.String('waiting for servers')])]))
Example #21
 def test_scale_down_building_first(self):
     """
     When scaling down, first we delete building servers, in preference
     to older servers.  :class:`ConvergeLater` does not get returned, even
     though there is a building server, because the building server gets
     deleted.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=2),
             set([server('abc', ServerState.ACTIVE, created=0),
                  server('def', ServerState.BUILD, created=1),
                  server('ghi', ServerState.ACTIVE, created=2)]),
             set(),
             0),
         pbag([DeleteServer(server_id='def')]))
Example #22
 def test_clean_up_deleted_servers_with_no_lb_nodes(self):
     """
     If a server has been deleted, but it is not attached to any load
     balancers, we do nothing.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=0),
             set([server('abc', ServerState.DELETED,
                         servicenet_address='1.1.1.1',
                         desired_lbs=s(CLBDescription(lb_id='5', port=80),
                                       CLBDescription(lb_id='5', port=8080),
                                       RCv3Description(lb_id='6')))]),
             set(),
             0),
         pbag([]))
Example #23
 def test_timeout_replace_only_when_necessary(self):
     """
     If a server is timing out *and* we're over capacity, it will be
     deleted without replacement.  :class:`ConvergeLater` does not get
     returned, even though there is a building server, because the building
     server gets deleted.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=2),
             set([server('slowpoke', ServerState.BUILD, created=0),
                  server('old-ok', ServerState.ACTIVE, created=0),
                  server('new-ok', ServerState.ACTIVE, created=3600)]),
             set(),
             3600),
         pbag([DeleteServer(server_id='slowpoke')]))
Example #24
 def test_draining_server_without_load_balancers_can_be_deleted(self):
     """
     If a draining server is not attached to any load balancers, even if
     it should be, it can be deleted.  "Draining" is not re-set on its
     metadata.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=0,
                               draining_timeout=10.0),
             set([server('abc', state=ServerState.ACTIVE,
                         metadata=dict([DRAINING_METADATA]),
                         desired_lbs=s(self.clb_desc, self.rcv3_desc))]),
             set(),
             0),
         pbag([DeleteServer(server_id='abc')]))
Example #25
 def test_timeout_building(self):
     """
     Servers that have been building for too long will be deleted and
     replaced. :class:`ConvergeLater` does not get returned, even
     though there is a building server, because the building server gets
     deleted.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=2),
             set([server('slowpoke', ServerState.BUILD, created=0),
                  server('ok', ServerState.ACTIVE, created=0)]),
             set(),
             3600),
         pbag([
             DeleteServer(server_id='slowpoke'),
             CreateServer(server_config=pmap())]))
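
The ``now=3600`` in the build-timeout examples implies a rule along these
lines; the one-hour threshold is inferred from the tests, not taken from
the source:

    # Assumed build-timeout rule behind the 3600-second examples.
    def build_timed_out(server, now, build_timeout=3600):
        return (server.state == ServerState.BUILD
                and now - server.created >= build_timeout)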
Example #26
 def test_count_waiting_as_meeting_capacity(self):
     """
     If a server's destiny is WAIT, we won't provision more servers to take
     up the slack, but rather just wait for it to come back.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=1),
             set([server('abc', ServerState.HARD_REBOOT)]),
             set(),
             0),
         pbag([
             ConvergeLater(
                 reasons=[ErrorReason.String(
                     'waiting for temporarily unavailable server to become '
                     'ACTIVE')],
                 limited=True)]))
Example #27
 def test_draining_server_ignored_if_waiting_for_timeout(self):
     """
     If a server already in draining state is waiting for the draining
     timeout on some load balancers, and no further load balancers can be
     removed, nothing is done to it and :class:`ConvergeLater` is returned.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=0,
                               draining_timeout=10.0),
             set([server('abc', state=ServerState.ACTIVE,
                         metadata=dict([DRAINING_METADATA]),
                         servicenet_address='1.1.1.1',
                         desired_lbs=s(self.clb_desc, self.rcv3_desc))]),
             set([CLBNode(node_id='1', address='1.1.1.1',
                          description=CLBDescription(
                              lb_id='1', port=80,
                              condition=CLBNodeCondition.DRAINING),
                          drained_at=1.0, connections=1)]),
             2),
         pbag([self.clstep]))
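
Example #27 above and example #29 below hinge on when a DRAINING CLB node
may finally be removed. The rule their inputs encode appears to be: remove
once the draining timeout has elapsed since ``drained_at``, or once the
node has no active connections. A sketch of that predicate (inferred from
the tests, not lifted from the source):

    def draining_node_removable(node, now, draining_timeout):
        timed_out = now - node.drained_at >= draining_timeout
        drained = node.connections == 0
        return timed_out or drained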
Example #28
    def test_all_changes(self):
        """
        Remove, change, and add load balancer nodes, all together.
        """
        descs = [CLBDescription(lb_id='5', port=80),
                 CLBDescription(lb_id='6', port=80, weight=2),
                 RCv3Description(lb_id='c6fe49fa-114a-4ea4-9425-0af8b30ff1e7')]

        current = [
            CLBNode(node_id='123', address='1.1.1.1',
                    description=CLBDescription(lb_id='5', port=8080)),
            CLBNode(node_id='234', address='1.1.1.1',
                    description=copy_clb_desc(descs[1], weight=1)),
            RCv3Node(node_id='345', cloud_server_id='abc',
                     description=RCv3Description(
                         lb_id='e762e42a-8a4e-4ffb-be17-f9dc672729b2'))
        ]

        self.assertEqual(
            converge(
                DesiredGroupState(server_config={}, capacity=1),
                set([server('abc', ServerState.ACTIVE,
                            servicenet_address='1.1.1.1',
                            desired_lbs=pset(descs))]),
                set(current),
                0),
            pbag([
                AddNodesToCLB(
                    lb_id='5',
                    address_configs=s(('1.1.1.1',
                                       CLBDescription(lb_id='5', port=80)))),
                ChangeCLBNode(lb_id='6', node_id='234', weight=2,
                              condition=CLBNodeCondition.ENABLED,
                              type=CLBNodeType.PRIMARY),
                RemoveNodesFromCLB(lb_id='5', node_ids=s('123')),
                BulkAddToRCv3(lb_node_pairs=s(
                    ('c6fe49fa-114a-4ea4-9425-0af8b30ff1e7', 'abc'))),
                BulkRemoveFromRCv3(lb_node_pairs=s(
                    ('e762e42a-8a4e-4ffb-be17-f9dc672729b2', 'abc')))
            ]))
Example #29
    def test_draining_server_waiting_for_timeout_some_lbs_removed(self):
        """
        Load balancers that can be removed are removed, even if the server,
        already in draining state, is still waiting for the draining
        timeout on some other load balancers.
        """
        other_clb_desc = CLBDescription(lb_id='9', port=80)

        self.assertEqual(
            converge(
                DesiredGroupState(server_config={}, capacity=0,
                                  draining_timeout=2.0),
                set([server('abc', state=ServerState.ACTIVE,
                            metadata=dict([DRAINING_METADATA]),
                            servicenet_address='1.1.1.1',
                            desired_lbs=s(self.clb_desc, self.rcv3_desc,
                                          other_clb_desc))]),
                set([
                    # This node is in draining - nothing will be done to it
                    CLBNode(node_id='1', address='1.1.1.1',
                            description=copy_clb_desc(
                                self.clb_desc,
                                condition=CLBNodeCondition.DRAINING),
                            drained_at=1.0, connections=1),
                    # This node is done draining, it can be removed
                    CLBNode(node_id='2', address='1.1.1.1',
                            description=copy_clb_desc(
                                other_clb_desc,
                                condition=CLBNodeCondition.DRAINING),
                            drained_at=0.0),
                    # This node is not drainable, it can be removed
                    RCv3Node(node_id='3', cloud_server_id='abc',
                             description=self.rcv3_desc)]),
                2),
            pbag([
                RemoveNodesFromCLB(lb_id='9', node_ids=s('2')),
                BulkRemoveFromRCv3(lb_node_pairs=s(
                    (self.rcv3_desc.lb_id, 'abc'))),
                self.clstep
            ]))
Example #30
 def test_active_server_can_be_deleted_if_all_lbs_can_be_removed(self):
     """
     If an active server to be scaled down can be removed from all the load
     balancers, the server can be deleted.  It is not first put into
     draining state.
     """
     self.assertEqual(
         converge(
             DesiredGroupState(server_config={}, capacity=0),
             set([server('abc', state=ServerState.ACTIVE,
                         servicenet_address='1.1.1.1',
                         desired_lbs=s(self.clb_desc, self.rcv3_desc))]),
             set([CLBNode(node_id='1', address='1.1.1.1',
                          description=self.clb_desc),
                  RCv3Node(node_id='2', cloud_server_id='abc',
                           description=self.rcv3_desc)]),
             0),
         pbag([
             DeleteServer(server_id='abc'),
             RemoveNodesFromCLB(lb_id='1', node_ids=s('1')),
             BulkRemoveFromRCv3(lb_node_pairs=s(
                 (self.rcv3_desc.lb_id, 'abc')))
         ]))