def add_server_to_lb(server, description, load_balancer):
    """
    Add a server to a load balancing entity as described by `description`.

    :ivar server: The server to be added
    :type server: :class:`NovaServer`
    :ivar description: The description of the load balancer and how to add
        the server to it.
    :type description: :class:`ILBDescription` provider
    """
    if isinstance(description, CLBDescription):
        # A CLB node can only be added once the server has a ServiceNet
        # address to register.
        if not server.servicenet_address:
            return None
        if load_balancer is None:
            return fail_convergence(
                CLBHealthInfoNotFound(description.lb_id))
        # When the LB has a health monitor, add the node in DRAINING
        # condition instead of the described one.
        if load_balancer.health_monitor:
            description = assoc_obj(
                description, condition=CLBNodeCondition.DRAINING)
        return AddNodesToCLB(
            lb_id=description.lb_id,
            address_configs=pset(
                [(server.servicenet_address, description)]))
    if isinstance(description, RCv3Description):
        return BulkAddToRCv3(
            lb_node_pairs=pset([(description.lb_id, server.id)]))
def update_drained_at(node):
    """
    Return ``node`` updated with ``drained_at`` extracted from its feed.

    Returns ``None`` when the node's load balancer is known to have been
    deleted, and the node unchanged when no feed was fetched for it.
    """
    feed = nodes_to_feeds.get(node)
    if node.description.lb_id in deleted_lbs:
        # The whole LB vanished while we were fetching feeds; drop the node.
        return None
    if feed is None:
        return node
    return assoc_obj(node, drained_at=extract_CLB_drained_at(feed))
def convergence_remove_server_from_group(log, transaction_id, server_id,
                                         replace, purge, group, state):
    """
    Remove a specific server from the group, optionally decrementing the
    desired capacity.

    The server may just be scheduled for deletion, or it may be evicted from
    the group by removing otter-specific metadata from the server.

    :param log: A bound logger
    :param bytes transaction_id: The transaction id for this operation.
    :param bytes server_id: The id of the server to be removed.
    :param bool replace: Should the server be replaced?
    :param bool purge: Should the server be deleted from Nova?
    :param group: The scaling group to remove a server from.
    :type group: :class:`~otter.models.interface.IScalingGroup`
    :param state: The current state of the group.
    :type state: :class:`~otter.models.interface.GroupState`

    :return: The updated state.
    :rtype: Effect of :class:`~otter.models.interface.GroupState`

    :raise: :class:`CannotDeleteServerBelowMinError` if the server cannot be
        deleted without replacement, and :class:`ServerNotFoundError` if
        there is no such server to be deleted.
    """
    # Membership is always checked; the capacity check only matters when the
    # server is not being replaced.
    effects = [_is_server_in_group(group, server_id)]
    if not replace:
        effects.append(_can_scale_down(group, server_id))
    # the (possibly) two checks can happen in parallel, but we want
    # ServerNotFoundError to take precedence over
    # CannotDeleteServerBelowMinError
    both_checks = yield parallel_all_errors(effects)
    for is_error, result in both_checks:
        if is_error:
            # result is an exc_info tuple captured by parallel_all_errors
            reraise(*result)
    # Remove the server
    if purge:
        # Presumably marking the server with DRAINING metadata causes a later
        # convergence cycle to delete it from Nova -- confirm with
        # DRAINING_METADATA's consumers.
        eff = set_nova_metadata_item(server_id, *DRAINING_METADATA)
    else:
        eff = Effect(
            EvictServerFromScalingGroup(log=log,
                                        transaction_id=transaction_id,
                                        scaling_group=group,
                                        server_id=server_id))
    # All Nova/group calls are retried and scoped to the group's tenant.
    yield Effect(
        TenantScope(
            retry_effect(eff, retry_times(3),
                         exponential_backoff_interval(2)),
            group.tenant_id))
    if not replace:
        # Not replacing the server shrinks the desired capacity by one.
        yield do_return(assoc_obj(state, desired=state.desired - 1))
    else:
        yield do_return(state)
def convergence_remove_server_from_group(
        log, transaction_id, server_id, replace, purge, group, state):
    """
    Remove a specific server from the group, optionally decrementing the
    desired capacity.

    The server may just be scheduled for deletion, or it may be evicted from
    the group by removing otter-specific metadata from the server.

    :param log: A bound logger
    :param bytes transaction_id: The transaction id for this operation.
    :param bytes server_id: The id of the server to be removed.
    :param bool replace: Should the server be replaced?
    :param bool purge: Should the server be deleted from Nova?
    :param group: The scaling group to remove a server from.
    :type group: :class:`~otter.models.interface.IScalingGroup`
    :param state: The current state of the group.
    :type state: :class:`~otter.models.interface.GroupState`

    :return: The updated state.
    :rtype: Effect of :class:`~otter.models.interface.GroupState`

    :raise: :class:`CannotDeleteServerBelowMinError` if the server cannot be
        deleted without replacement, and :class:`ServerNotFoundError` if
        there is no such server to be deleted.
    """
    checks = [_is_server_in_group(group, server_id)]
    if not replace:
        checks.append(_can_scale_down(group, server_id))

    # Run the checks in parallel; re-raising in list order makes
    # ServerNotFoundError take precedence over
    # CannotDeleteServerBelowMinError.
    check_results = yield parallel_all_errors(checks)
    for failed, outcome in check_results:
        if failed:
            reraise(*outcome)

    # Build the effect that actually removes the server.
    if purge:
        removal = set_nova_metadata_item(server_id, *DRAINING_METADATA)
    else:
        removal = Effect(
            EvictServerFromScalingGroup(log=log,
                                        transaction_id=transaction_id,
                                        scaling_group=group,
                                        server_id=server_id))
    retried = retry_effect(
        removal, retry_times(3), exponential_backoff_interval(2))
    yield Effect(TenantScope(retried, group.tenant_id))

    # Shrink the desired capacity only when the server is not replaced.
    if replace:
        yield do_return(state)
    else:
        yield do_return(assoc_obj(state, desired=state.desired - 1))
def test_success(self):
    """
    Gets LB contents with drained_at correctly
    """
    draining1 = node('11', 'a11', condition='DRAINING')
    plain = node('12', 'a12')
    weighted = node('21', 'a21', weight=3)
    draining2 = node('22', 'a22', weight=None, condition='DRAINING')
    seq = [
        lb_req('loadbalancers', True,
               {'loadBalancers': [{'id': 1}, {'id': 2}]}),
        parallel_sequence([[nodes_req(1, [draining1, plain])],
                           [nodes_req(2, [weighted, draining2])]]),
        parallel_sequence([[node_feed_req(1, '11', '11feed')],
                           [node_feed_req(2, '22', '22feed')]]),
    ]
    expected = [
        assoc_obj(CLBNode.from_node_json(1, draining1), drained_at=1.0),
        CLBNode.from_node_json(1, plain),
        CLBNode.from_node_json(2, weighted),
        assoc_obj(CLBNode.from_node_json(2, draining2), drained_at=2.0)]
    self.assertEqual(perform_sequence(seq, get_clb_contents()), expected)
def test_success(self):
    """
    Gets LB contents with drained_at correctly
    """
    lb1_nodes = [node("11", "a11", condition="DRAINING"),
                 node("12", "a12")]
    lb2_nodes = [node("21", "a21", weight=3),
                 node("22", "a22", weight=None, condition="DRAINING")]
    seq = [
        lb_req("loadbalancers", True,
               {"loadBalancers": [{"id": 1}, {"id": 2}]}),
        parallel_sequence(
            [[nodes_req(1, lb1_nodes)], [nodes_req(2, lb2_nodes)]]),
        parallel_sequence(
            [[node_feed_req(1, "11", "11feed")],
             [node_feed_req(2, "22", "22feed")]]),
    ]
    result = perform_sequence(seq, get_clb_contents())
    self.assertEqual(
        result,
        [assoc_obj(CLBNode.from_node_json(1, lb1_nodes[0]), drained_at=1.0),
         CLBNode.from_node_json(1, lb1_nodes[1]),
         CLBNode.from_node_json(2, lb2_nodes[0]),
         assoc_obj(CLBNode.from_node_json(2, lb2_nodes[1]), drained_at=2.0)])
def test_assoc(self):
    """
    Creates a new object that's a copy of the old one, with the specified
    attributes rebound.  Existing attributes are identical.
    """
    class Dummy(object):
        def __init__(self):
            self.l = [1, 2]
            self.name = "foo"

    original = Dummy()
    updated = assoc_obj(original, name="bar")
    # The source object must be left untouched.
    self.assertEqual(original.name, "foo")
    self.assertEqual(updated.name, "bar")
    # Unbound attributes are shared by identity, not copied.
    self.assertIs(original.l, updated.l)
def test_lb_disappeared_during_feed_fetch(self):
    """
    If a load balancer gets deleted while fetching feeds, no nodes will be
    returned for it.
    """
    surviving = node("21", "a21", condition="DRAINING", weight=None)
    lb1_nodes = [node("11", "a11", condition="DRAINING"),
                 node("12", "a12")]
    seq = [
        lb_req("loadbalancers", True,
               {"loadBalancers": [{"id": 1}, {"id": 2}]}),
        parallel_sequence(
            [[nodes_req(1, lb1_nodes)], [nodes_req(2, [surviving])]]),
        parallel_sequence(
            [[node_feed_req(1, "11", CLBNotFoundError(lb_id=u"1"))],
             [node_feed_req(2, "21", "22feed")]]),
    ]
    # Only LB 2's node survives; LB 1 vanished mid-fetch.
    self.assertEqual(
        perform_sequence(seq, get_clb_contents()),
        [assoc_obj(CLBNode.from_node_json(2, surviving), drained_at=2.0)])
def test_lb_disappeared_during_feed_fetch(self):
    """
    If a load balancer gets deleted while fetching feeds, no nodes will be
    returned for it.
    """
    remaining = node('21', 'a21', condition='DRAINING', weight=None)
    listing = lb_req('loadbalancers', True,
                     {'loadBalancers': [{'id': 1}, {'id': 2}]})
    node_fetch = parallel_sequence([
        [nodes_req(1, [node('11', 'a11', condition='DRAINING'),
                       node('12', 'a12')])],
        [nodes_req(2, [remaining])]])
    # LB 1's feed fetch fails with "not found" -- its nodes are dropped.
    feed_fetch = parallel_sequence([
        [node_feed_req(1, '11', CLBNotFoundError(lb_id=u'1'))],
        [node_feed_req(2, '21', '22feed')]])
    expected = [
        assoc_obj(CLBNode.from_node_json(2, remaining), drained_at=2.0)]
    self.assertEqual(
        perform_sequence([listing, node_fetch, feed_fetch],
                         get_clb_contents()),
        expected)
def update_paused(_group, state):
    """Return a copy of ``state`` whose ``paused`` flag mirrors the intent's."""
    desired_flag = intent.paused
    return assoc_obj(state, paused=desired_flag)