def test_no_draining(self):
    """
    Doesn't fetch feeds if all nodes are ENABLED.
    """
    seq = [
        lb_req('loadbalancers', True,
               {'loadBalancers': [{'id': 1}, {'id': 2}]}),
        parallel_sequence([[nodes_req(1, [node('11', 'a11')])],
                           [nodes_req(2, [node('21', 'a21')])],
                           [lb_hm_req(1, {})],
                           [lb_hm_req(2, {})]]),
        parallel_sequence([])  # No node feeds to fetch
    ]
    make_desc = partial(CLBDescription, port=20, weight=2,
                        condition=CLBNodeCondition.ENABLED,
                        type=CLBNodeType.PRIMARY)
    eff = get_clb_contents()
    self.assertEqual(
        perform_sequence(seq, eff),
        ([CLBNode(node_id='11', address='a11',
                  description=make_desc(lb_id='1')),
          CLBNode(node_id='21', address='a21',
                  description=make_desc(lb_id='2'))],
         {'1': CLB(False), '2': CLB(False)}))

def test_success(self):
    """
    Gets LB contents with drained_at correctly.
    """
    node11 = node('11', 'a11', condition='DRAINING')
    node12 = node('12', 'a12')
    node21 = node('21', 'a21', weight=3)
    node22 = node('22', 'a22', weight=None, condition='DRAINING')
    seq = [
        lb_req('loadbalancers', True,
               {'loadBalancers': [{'id': 1}, {'id': 2}]}),
        parallel_sequence([[nodes_req(1, [node11, node12])],
                           [nodes_req(2, [node21, node22])],
                           [lb_hm_req(1, {"type": "CONNECT"})],
                           [lb_hm_req(2, {})]]),
        parallel_sequence([[node_feed_req('1', '11', '11feed')],
                           [node_feed_req('2', '22', '22feed')]]),
    ]
    eff = get_clb_contents()
    self.assertEqual(
        perform_sequence(seq, eff),
        ([attr.assoc(CLBNode.from_node_json(1, node11), _drained_at=1.0),
          CLBNode.from_node_json(1, node12),
          CLBNode.from_node_json(2, node21),
          attr.assoc(CLBNode.from_node_json(2, node22), _drained_at=2.0)],
         {'1': CLB(True), '2': CLB(False)}))

def test_success(self):
    """
    The data is returned as a dict with ``servers`` (list of NovaServer),
    ``lb_nodes`` (list of CLBNode/RCv3Node) and ``lbs`` (CLB info by LB ID).
    """
    clb_nodes = [CLBNode(node_id='node1', address='ip1',
                         description=CLBDescription(lb_id='lb1', port=80))]
    rcv3_nodes = [RCv3Node(node_id='node2', cloud_server_id='a',
                           description=RCv3Description(lb_id='lb2'))]
    eff = get_all_launch_server_data(
        'tid',
        'gid',
        self.now,
        get_scaling_group_servers=_constant_as_eff(
            ('tid', 'gid', self.now), self.servers),
        get_clb_contents=_constant_as_eff(
            (), (clb_nodes, {'lb1': CLB(True), 'lb2': CLB(False)})),
        get_rcv3_contents=_constant_as_eff((), rcv3_nodes))
    expected_servers = [
        server('a', ServerState.ACTIVE, servicenet_address='10.0.0.1',
               links=freeze([{'href': 'link1', 'rel': 'self'}]),
               json=freeze(self.servers[0])),
        server('b', ServerState.ACTIVE, created=1,
               servicenet_address='10.0.0.2',
               links=freeze([{'href': 'link2', 'rel': 'self'}]),
               json=freeze(self.servers[1]))
    ]
    self.assertEqual(
        resolve_stubs(eff),
        {'servers': expected_servers,
         'lb_nodes': clb_nodes + rcv3_nodes,
         'lbs': {'lb1': CLB(True), 'lb2': CLB(False)}})

def test_lb_disappeared_during_feed_fetch(self):
    """
    If a load balancer gets deleted while fetching feeds, no nodes will be
    returned for it.
    """
    node21 = node('21', 'a21', condition='DRAINING', weight=None)
    seq = [
        lb_req('loadbalancers', True,
               {'loadBalancers': [{'id': 1}, {'id': 2}]}),
        parallel_sequence([[nodes_req(1, [node('11', 'a11',
                                               condition='DRAINING'),
                                          node('12', 'a12')])],
                           [nodes_req(2, [node21])],
                           [lb_hm_req(1, {"type": "CONNECT"})],
                           [lb_hm_req(2, {"type": "CONNECT"})]]),
        parallel_sequence(
            [[node_feed_req('1', '11', CLBNotFoundError(lb_id=u'1'))],
             [node_feed_req('2', '21', '22feed')]]),
    ]
    eff = get_clb_contents()
    self.assertEqual(
        perform_sequence(seq, eff),
        ([attr.assoc(CLBNode.from_node_json(2, node21), _drained_at=2.0)],
         {'2': CLB(True)}))

def test_lb_disappeared_during_node_fetch(self):
    """
    If a load balancer gets deleted while fetching nodes, no nodes will be
    returned for it.
    """
    seq = [
        lb_req('loadbalancers', True,
               {'loadBalancers': [{'id': 1}, {'id': 2}]}),
        parallel_sequence([[nodes_req(1, [node('11', 'a11')])],
                           [lb_req('loadbalancers/2/nodes', True,
                                   CLBNotFoundError(lb_id=u'2'))],
                           [lb_hm_req(1, {"type": "CONNECT"})],
                           [lb_req('loadbalancers/2/healthmonitor', True,
                                   CLBNotFoundError(lb_id=u'2'))]]),
        parallel_sequence([])  # No node feeds to fetch
    ]
    make_desc = partial(CLBDescription, port=20, weight=2,
                        condition=CLBNodeCondition.ENABLED,
                        type=CLBNodeType.PRIMARY)
    eff = get_clb_contents()
    self.assertEqual(
        perform_sequence(seq, eff),
        ([CLBNode(node_id='11', address='a11',
                  description=make_desc(lb_id='1'))],
         {'1': CLB(True)}))

def test_no_nodes(self):
    """
    Return empty if there are LBs but no nodes in them.
    """
    seq = [
        lb_req('loadbalancers', True,
               {'loadBalancers': [{'id': 1}, {'id': 2}]}),
        parallel_sequence([[nodes_req(1, [])], [nodes_req(2, [])],
                           [lb_hm_req(1, {})],
                           [lb_hm_req(2, {"type": "a"})]]),
        parallel_sequence([]),  # No node feeds to fetch
    ]
    self.assertEqual(
        perform_sequence(seq, get_clb_contents()),
        ([], {'1': CLB(False), '2': CLB(True)}))

def test_no_group_servers(self):
    """
    If there are no servers in a group, get_all_launch_server_data includes
    an empty list.
    """
    eff = get_all_launch_server_data(
        'tid',
        'gid',
        self.now,
        get_scaling_group_servers=_constant_as_eff(
            ('tid', 'gid', self.now), []),
        get_clb_contents=_constant_as_eff((), ([], {'a': CLB(False)})),
        get_rcv3_contents=_constant_as_eff((), []))
    self.assertEqual(
        resolve_stubs(eff),
        {'servers': [], 'lb_nodes': [], 'lbs': {'a': CLB(False)}})

@do
def get_clb_contents():
    """
    Get Rackspace Cloud Load Balancer contents as a list of `CLBNode`. CLB
    health monitor information is also returned as a pmap of :obj:`CLB`
    objects mapped on LB ID.

    :return: Effect of (``list`` of :obj:`CLBNode`, `pmap` of :obj:`CLB`)
    :rtype: :obj:`Effect`
    """
    # If we get a CLBNotFoundError while fetching feeds, we should throw away
    # all nodes related to that load balancer, because we don't want to act on
    # data that we know is invalid/outdated (for example, if we can't fetch a
    # feed because the CLB was deleted, we don't want to say that we have a
    # node in DRAINING with a draining time of 0; we should just say that the
    # node is gone).
    def gone(r):
        return catch(CLBNotFoundError, lambda exc: r)

    lb_ids = [lb['id'] for lb in (yield _retry(get_clbs()))]
    node_reqs = [_retry(get_clb_nodes(lb_id).on(error=gone([])))
                 for lb_id in lb_ids]
    healthmon_reqs = [
        _retry(get_clb_health_monitor(lb_id).on(error=gone(None)))
        for lb_id in lb_ids]
    all_nodes_hms = yield parallel(node_reqs + healthmon_reqs)
    all_nodes, hms = all_nodes_hms[:len(lb_ids)], all_nodes_hms[len(lb_ids):]
    lb_nodes = {
        lb_id: [CLBNode.from_node_json(lb_id, node) for node in nodes]
        for lb_id, nodes in zip(lb_ids, all_nodes)}
    clbs = {
        str(lb_id): CLB(bool(health_mon))
        for lb_id, health_mon in zip(lb_ids, hms) if health_mon is not None}
    draining = [n for n in concat(lb_nodes.values())
                if n.description.condition == CLBNodeCondition.DRAINING]
    feeds = yield parallel(
        [_retry(get_clb_node_feed(n.description.lb_id, n.node_id).on(
            error=gone(None)))
         for n in draining])
    nodes_to_feeds = dict(zip(draining, feeds))
    deleted_lbs = set([node.description.lb_id
                       for (node, feed) in nodes_to_feeds.items()
                       if feed is None])

    def update_drained_at(node):
        feed = nodes_to_feeds.get(node)
        if node.description.lb_id in deleted_lbs:
            return None
        if feed is not None:
            return attr.assoc(node, _drained_at=extract_clb_drained_at(feed))
        return node

    nodes = map(update_drained_at, concat(lb_nodes.values()))
    yield do_return(
        (list(filter(bool, nodes)),
         pmap(keyfilter(lambda k: k not in deleted_lbs, clbs))))
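

# Usage sketch (illustrative only, not part of the module above): how the
# Effect returned by `get_clb_contents` might be performed and consumed.
# `dispatcher` is assumed to be a fully wired Effect dispatcher scoped to the
# appropriate tenant/region; building that dispatcher is not shown here, and
# `list_draining_nodes` is a hypothetical helper, not an otter API.
from effect import sync_perform


def list_draining_nodes(dispatcher):
    """
    Perform `get_clb_contents` and return only the DRAINING nodes together
    with the health-monitor map keyed by LB ID.
    """
    nodes, clbs = sync_perform(dispatcher, get_clb_contents())
    draining = [n for n in nodes
                if n.description.condition == CLBNodeCondition.DRAINING]
    return draining, clbs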