def test_active_if_node_is_draining(self):
    """
    A DRAINING node is still considered active by
    :func:`CLBNode.is_active`.
    """
    draining_node = CLBNode(
        node_id='1234', address='10.1.1.1',
        description=self.drain_desc, drained_at=0.0, connections=1)
    self.assertTrue(draining_node.is_active())
def test_no_draining(self):
    """
    Node feeds are not fetched when every node is ENABLED.
    """
    seq = [
        lb_req('loadbalancers', True,
               {'loadBalancers': [{'id': 1}, {'id': 2}]}),
        parallel_sequence(
            [[nodes_req(1, [node('11', 'a11')])],
             [nodes_req(2, [node('21', 'a21')])],
             [lb_hm_req(1, {})],
             [lb_hm_req(2, {})]]),
        parallel_sequence([])  # nothing is DRAINING, so no feeds
    ]
    desc = partial(CLBDescription, port=20, weight=2,
                   condition=CLBNodeCondition.ENABLED,
                   type=CLBNodeType.PRIMARY)
    expected_nodes = [
        CLBNode(node_id='11', address='a11', description=desc(lb_id='1')),
        CLBNode(node_id='21', address='a21', description=desc(lb_id='2'))]
    self.assertEqual(
        perform_sequence(seq, get_clb_contents()),
        (expected_nodes, {'1': CLB(False), '2': CLB(False)}))
def test_success(self):
    """
    LB contents are returned with ``drained_at`` correctly populated
    from the node feeds of DRAINING nodes.
    """
    n11 = node('11', 'a11', condition='DRAINING')
    n12 = node('12', 'a12')
    n21 = node('21', 'a21', weight=3)
    n22 = node('22', 'a22', weight=None, condition='DRAINING')
    seq = [
        lb_req('loadbalancers', True,
               {'loadBalancers': [{'id': 1}, {'id': 2}]}),
        parallel_sequence(
            [[nodes_req(1, [n11, n12])],
             [nodes_req(2, [n21, n22])],
             [lb_hm_req(1, {"type": "CONNECT"})],
             [lb_hm_req(2, {})]]),
        parallel_sequence(
            [[node_feed_req('1', '11', '11feed')],
             [node_feed_req('2', '22', '22feed')]]),
    ]
    expected_nodes = [
        attr.assoc(CLBNode.from_node_json(1, n11), _drained_at=1.0),
        CLBNode.from_node_json(1, n12),
        CLBNode.from_node_json(2, n21),
        attr.assoc(CLBNode.from_node_json(2, n22), _drained_at=2.0)]
    self.assertEqual(
        perform_sequence(seq, get_clb_contents()),
        (expected_nodes, {'1': CLB(True), '2': CLB(False)}))
def test_success(self):
    """
    LB contents are returned with ``drained_at`` correctly populated
    from the node feeds of DRAINING nodes.
    """
    node11 = node('11', 'a11', condition='DRAINING')
    node12 = node('12', 'a12')
    node21 = node('21', 'a21', weight=3)
    node22 = node('22', 'a22', weight=None, condition='DRAINING')
    lb_listing = {'loadBalancers': [{'id': 1}, {'id': 2}]}
    seq = [
        lb_req('loadbalancers', True, lb_listing),
        parallel_sequence(
            [[nodes_req(1, [node11, node12])],
             [nodes_req(2, [node21, node22])],
             [lb_hm_req(1, {"type": "CONNECT"})],
             [lb_hm_req(2, {})]]),
        parallel_sequence(
            [[node_feed_req('1', '11', '11feed')],
             [node_feed_req('2', '22', '22feed')]]),
    ]
    result = perform_sequence(seq, get_clb_contents())
    self.assertEqual(
        result,
        ([attr.assoc(CLBNode.from_node_json(1, node11), _drained_at=1.0),
          CLBNode.from_node_json(1, node12),
          CLBNode.from_node_json(2, node21),
          attr.assoc(CLBNode.from_node_json(2, node22), _drained_at=2.0)],
         {'1': CLB(True), '2': CLB(False)}))
def test_current_draining_false_if_description_not_draining(self):
    """
    :func:`CLBNode.currently_draining` is `False` when the node's
    description condition is not :obj:`CLBNodeCondition.DRAINING`.
    """
    enabled_node = CLBNode(address='10.1.1.1', node_id='1234',
                           description=self.desc)
    self.assertFalse(enabled_node.currently_draining())
def test_matches_only_if_server_address_matches_node_address(self):
    """
    :func:`CLBNode.matches` is True only when the :class:`NovaServer`
    has the same ServiceNet address as the node's address.
    """
    lb_node = CLBNode(node_id="1234", description=self.desc,
                      address="10.1.1.1")

    def make_server(addr):
        # Identical server apart from the ServiceNet address under test.
        return NovaServer(id="1", state=ServerState.ACTIVE, created=0.0,
                          servicenet_address=addr, image_id="image",
                          flavor_id="flavor")

    self.assertFalse(lb_node.matches(make_server("10.1.1.2")))
    self.assertTrue(lb_node.matches(make_server("10.1.1.1")))
def test_matches_only_works_with_NovaServers(self):
    """
    :func:`CLBNode.matches` returns false for any server that is not a
    :class:`NovaServer`, even when the address matches.
    """
    lb_node = CLBNode(node_id="1234", address="10.1.1.1",
                      description=self.desc)
    dummy = DummyServer(servicenet_address="10.1.1.1")
    self.assertFalse(lb_node.matches(dummy))
def test_done_draining_past_timeout_even_if_there_are_connections(self):
    """
    Once the drain timeout has elapsed, :func:`CLBNode.is_done_draining`
    is `True` even while connections remain.
    """
    draining = CLBNode(node_id="1234", address="10.1.1.1",
                       description=self.drain_desc,
                       drained_at=0.0, connections=1)
    self.assertTrue(draining.is_done_draining(timeout=15, now=30))
def test_current_draining_false_if_description_not_draining(self):
    """
    :func:`CLBNode.currently_draining` is `False` when the node's
    description condition is not :obj:`CLBNodeCondition.DRAINING`.
    """
    non_draining = CLBNode(address="10.1.1.1", node_id="1234",
                           description=self.desc)
    self.assertFalse(non_draining.currently_draining())
def test_current_draining_true_if_description_is_draining(self):
    """
    :func:`CLBNode.currently_draining` is `True` when the node's
    description condition is :obj:`CLBNodeCondition.DRAINING`.
    """
    draining = CLBNode(address="10.1.1.1", node_id="1234",
                       description=self.drain_desc)
    self.assertTrue(draining.currently_draining())
def test_done_draining_before_timeout_if_there_are_no_connections(self):
    """
    With zero remaining connections, :func:`CLBNode.is_done_draining`
    is `True` even before the drain timeout elapses.
    """
    draining = CLBNode(node_id="1234", address="10.1.1.1",
                       description=self.drain_desc,
                       drained_at=0.0, connections=0)
    self.assertTrue(draining.is_done_draining(timeout=30, now=15))
def test_done_draining_past_timeout_even_if_no_connection_info(self):
    """
    Without connection information, :func:`CLBNode.is_done_draining`
    is `True` once the drain timeout has elapsed.
    """
    draining = CLBNode(node_id="1234", address="10.1.1.1",
                       description=self.drain_desc, drained_at=0.0)
    self.assertTrue(draining.is_done_draining(timeout=15, now=30))
def test_done_draining_before_timeout_if_there_are_no_connections(self):
    """
    With zero remaining connections, :func:`CLBNode.is_done_draining`
    is `True` even before the drain timeout elapses.
    """
    the_node = CLBNode(connections=0, drained_at=0.0,
                       node_id='1234', address='10.1.1.1',
                       description=self.drain_desc)
    result = the_node.is_done_draining(now=15, timeout=30)
    self.assertTrue(result)
def test_done_draining_past_timeout_even_if_there_are_connections(self):
    """
    Once the drain timeout has elapsed, :func:`CLBNode.is_done_draining`
    is `True` even while connections remain.
    """
    the_node = CLBNode(connections=1, drained_at=0.0,
                       node_id='1234', address='10.1.1.1',
                       description=self.drain_desc)
    result = the_node.is_done_draining(now=30, timeout=15)
    self.assertTrue(result)
def test_current_draining_true_if_description_is_draining(self):
    """
    :func:`CLBNode.currently_draining` is `True` when the node's
    description condition is :obj:`CLBNodeCondition.DRAINING`.
    """
    the_node = CLBNode(description=self.drain_desc,
                       node_id='1234', address='10.1.1.1')
    self.assertTrue(the_node.currently_draining())
def test_not_done_draining_before_timeout_if_no_connection_info(self):
    """
    Without connection information, :func:`CLBNode.is_done_draining`
    is `False` while the drain timeout has not yet elapsed.
    """
    draining = CLBNode(node_id="1234", address="10.1.1.1",
                       description=self.drain_desc, drained_at=0.0)
    self.assertFalse(draining.is_done_draining(timeout=30, now=15))
def test_not_done_draining_before_timeout_if_no_connection_info(self):
    """
    Without connection information, :func:`CLBNode.is_done_draining`
    is `False` while the drain timeout has not yet elapsed.
    """
    the_node = CLBNode(drained_at=0.0, node_id='1234',
                       address='10.1.1.1',
                       description=self.drain_desc)
    result = the_node.is_done_draining(now=15, timeout=30)
    self.assertFalse(result)
def test_done_draining_past_timeout_even_if_no_connection_info(self):
    """
    Without connection information, :func:`CLBNode.is_done_draining`
    is `True` once the drain timeout has elapsed.
    """
    the_node = CLBNode(drained_at=0.0, node_id='1234',
                       address='10.1.1.1',
                       description=self.drain_desc)
    result = the_node.is_done_draining(now=30, timeout=15)
    self.assertTrue(result)
def test_matches_only_works_with_NovaServers(self):
    """
    :func:`CLBNode.matches` returns false for any server that is not a
    :class:`NovaServer`, even when the address matches.
    """
    the_node = CLBNode(description=self.desc, node_id='1234',
                      address='10.1.1.1')
    not_a_nova_server = DummyServer(servicenet_address="10.1.1.1")
    self.assertFalse(the_node.matches(not_a_nova_server))
def test_inactive_if_node_is_disabled(self):
    """
    If the node is DISABLED, :func:`CLBNode.is_active` returns `False`.
    """
    # NOTE: the docstring previously claimed "DRAINING ... returns True",
    # a copy-paste error; this test exercises DISABLED -> inactive.
    node = CLBNode(node_id='1234',
                   description=CLBDescription(
                       lb_id='12345', port=80,
                       condition=CLBNodeCondition.DISABLED),
                   address='10.1.1.1', drained_at=0.0, connections=1)
    self.assertFalse(node.is_active())
def test_matches_only_if_server_address_matches_node_address(self):
    """
    :func:`CLBNode.matches` is True only when the :class:`NovaServer`
    has the same ServiceNet address as the node's address.
    """
    the_node = CLBNode(description=self.desc, node_id='1234',
                       address='10.1.1.1')
    mismatched = NovaServer(id='1', state=ServerState.ACTIVE,
                            created=0.0,
                            servicenet_address="10.1.1.2",
                            image_id='image', flavor_id='flavor')
    matched = NovaServer(id='1', state=ServerState.ACTIVE,
                         created=0.0,
                         servicenet_address="10.1.1.1",
                         image_id='image', flavor_id='flavor')
    self.assertFalse(the_node.matches(mismatched))
    self.assertTrue(the_node.matches(matched))
def test_lb_disappeared_during_node_fetch(self):
    """
    Nodes of a load balancer that is deleted while its nodes are being
    fetched are omitted from the result.
    """
    seq = [
        lb_req('loadbalancers', True,
               {'loadBalancers': [{'id': 1}, {'id': 2}]}),
        parallel_sequence(
            [[nodes_req(1, [node('11', 'a11')])],
             [lb_req('loadbalancers/2/nodes', True,
                     CLBNotFoundError(lb_id=u'2'))],
             [lb_hm_req(1, {"type": "CONNECT"})],
             [lb_req('loadbalancers/2/healthmonitor', True,
                     CLBNotFoundError(lb_id=u'2'))]]),
        parallel_sequence([])  # nothing is DRAINING, so no feeds
    ]
    make_desc = partial(CLBDescription, port=20, weight=2,
                        condition=CLBNodeCondition.ENABLED,
                        type=CLBNodeType.PRIMARY)
    result = perform_sequence(seq, get_clb_contents())
    self.assertEqual(
        result,
        ([CLBNode(node_id='11', address='a11',
                  description=make_desc(lb_id='1'))],
         {'1': CLB(True)}))
def test_from_node_json_with_weight(self):
    """
    A node's JSON representation can be parsed to a :obj:`CLBNode`
    object with a `CLBDescription`. When weight is specified in the
    JSON, it is used as the description's weight.
    """
    # NOTE: the docstring previously said weight "defaults to 1" — that
    # describes the no-weight test; here weight=50 is explicitly given.
    node_json = {
        "id": "node1",
        "address": "1.2.3.4",
        "port": 20,
        "condition": "DRAINING",
        "type": "SECONDARY",
        "weight": 50,
    }
    node = CLBNode.from_node_json(123, node_json)
    self.assertEqual(
        node,
        CLBNode(
            node_id="node1", address="1.2.3.4", connections=None,
            drained_at=0.0,
            description=CLBDescription(
                lb_id="123", port=20, weight=50,
                condition=CLBNodeCondition.DRAINING,
                type=CLBNodeType.SECONDARY)))
def test_lb_disappeared_during_feed_fetch(self):
    """
    Nodes of a load balancer that is deleted while its feeds are being
    fetched are omitted from the result.
    """
    surviving = node('21', 'a21', condition='DRAINING', weight=None)
    seq = [
        lb_req('loadbalancers', True,
               {'loadBalancers': [{'id': 1}, {'id': 2}]}),
        parallel_sequence(
            [[nodes_req(1, [node('11', 'a11', condition='DRAINING'),
                            node('12', 'a12')])],
             [nodes_req(2, [surviving])],
             [lb_hm_req(1, {"type": "CONNECT"})],
             [lb_hm_req(2, {"type": "CONNECT"})]]),
        parallel_sequence(
            [[node_feed_req('1', '11', CLBNotFoundError(lb_id=u'1'))],
             [node_feed_req('2', '21', '22feed')]]),
    ]
    result = perform_sequence(seq, get_clb_contents())
    self.assertEqual(
        result,
        ([attr.assoc(CLBNode.from_node_json(2, surviving),
                     _drained_at=2.0)],
         {'2': CLB(True)}))
def test_success(self):
    """
    The data is returned as a tuple of ([NovaServer], [CLBNode/RCv3Node]).
    """
    clb_nodes = [
        CLBNode(node_id='node1', address='ip1',
                description=CLBDescription(lb_id='lb1', port=80))]
    rcv3_nodes = [
        RCv3Node(node_id='node2', cloud_server_id='a',
                 description=RCv3Description(lb_id='lb2'))]
    effect = get_all_launch_server_data(
        'tid', 'gid', self.now,
        get_scaling_group_servers=_constant_as_eff(
            ('tid', 'gid', self.now), self.servers),
        get_clb_contents=_constant_as_eff((), clb_nodes),
        get_rcv3_contents=_constant_as_eff((), rcv3_nodes))
    server_a = server('a', ServerState.ACTIVE,
                      servicenet_address='10.0.0.1',
                      links=freeze([{'href': 'link1', 'rel': 'self'}]),
                      json=freeze(self.servers[0]))
    server_b = server('b', ServerState.ACTIVE, created=1,
                      servicenet_address='10.0.0.2',
                      links=freeze([{'href': 'link2', 'rel': 'self'}]),
                      json=freeze(self.servers[1]))
    self.assertEqual(
        resolve_stubs(effect),
        {'servers': [server_a, server_b],
         'lb_nodes': clb_nodes + rcv3_nodes})
def test_lb_disappeared_during_feed_fetch(self):
    """
    Nodes of a load balancer that is deleted while its feeds are being
    fetched are omitted from the result.
    """
    node21 = node('21', 'a21', condition='DRAINING', weight=None)
    lb1_nodes = [node('11', 'a11', condition='DRAINING'),
                 node('12', 'a12')]
    seq = [
        lb_req('loadbalancers', True,
               {'loadBalancers': [{'id': 1}, {'id': 2}]}),
        parallel_sequence(
            [[nodes_req(1, lb1_nodes)],
             [nodes_req(2, [node21])],
             [lb_hm_req(1, {"type": "CONNECT"})],
             [lb_hm_req(2, {"type": "CONNECT"})]]),
        parallel_sequence(
            [[node_feed_req('1', '11', CLBNotFoundError(lb_id=u'1'))],
             [node_feed_req('2', '21', '22feed')]]),
    ]
    expected_node = attr.assoc(CLBNode.from_node_json(2, node21),
                               _drained_at=2.0)
    self.assertEqual(
        perform_sequence(seq, get_clb_contents()),
        ([expected_node], {'2': CLB(True)}))
def test_provides_ILBDescription_and_IDrainable(self):
    """
    :class:`CLBNode` instances provide both :class:`ILBNode` and
    :class:`IDrainable`.
    """
    lb_node = CLBNode(node_id='1234', address='10.1.1.1',
                      description=self.desc)
    for interface in (ILBNode, IDrainable):
        self.assertTrue(interface.providedBy(lb_node))
def test_from_node_json_no_weight(self):
    """
    Parsing node JSON without a ``weight`` key yields a :obj:`CLBNode`
    with a `CLBDescription` whose weight defaults to 1.
    """
    node_json = {'id': 'node1', 'address': '1.2.3.4', 'port': 20,
                 'condition': 'DRAINING', 'type': 'SECONDARY'}
    parsed = CLBNode.from_node_json(123, node_json)
    expected = CLBNode(
        node_id='node1', address='1.2.3.4', connections=None,
        drained_at=0.0,
        description=CLBDescription(
            lb_id='123', port=20, weight=1,
            condition=CLBNodeCondition.DRAINING,
            type=CLBNodeType.SECONDARY))
    self.assertEqual(parsed, expected)
def get_clb_contents():
    """
    Get Rackspace Cloud Load Balancer contents as list of `CLBNode`. CLB
    health monitor information is also returned as a pmap of :obj:`CLB`
    objects mapped on LB ID.

    :return: Effect of (``list`` of :obj:`CLBNode`, `pmap` of :obj:`CLB`)
    :rtype: :obj:`Effect`
    """
    # If we get a CLBNotFoundError while fetching feeds, we should throw away
    # all nodes related to that load balancer, because we don't want to act on
    # data that we know is invalid/outdated (for example, if we can't fetch a
    # feed because CLB was deleted, we don't want to say that we have a node in
    # DRAINING with draining time of 0; we should just say that the node is
    # gone).
    def gone(r):
        # Convert a CLBNotFoundError into the substitute result `r`.
        return catch(CLBNotFoundError, lambda exc: r)
    lb_ids = [lb['id'] for lb in (yield _retry(get_clbs()))]
    node_reqs = [_retry(get_clb_nodes(lb_id).on(error=gone([])))
                 for lb_id in lb_ids]
    healthmon_reqs = [
        _retry(get_clb_health_monitor(lb_id).on(error=gone(None)))
        for lb_id in lb_ids]
    # One parallel batch for both request lists: the first len(lb_ids)
    # results are node lists, the remainder are health monitors.
    all_nodes_hms = yield parallel(node_reqs + healthmon_reqs)
    all_nodes, hms = all_nodes_hms[:len(lb_ids)], all_nodes_hms[len(lb_ids):]
    lb_nodes = {
        lb_id: [CLBNode.from_node_json(lb_id, node) for node in nodes]
        for lb_id, nodes in zip(lb_ids, all_nodes)}
    # LBs whose health-monitor fetch hit `gone` (None) are dropped here.
    clbs = {
        str(lb_id): CLB(bool(health_mon))
        for lb_id, health_mon in zip(lb_ids, hms) if health_mon is not None}
    # Only DRAINING nodes need their atom feed (to recover drained_at).
    draining = [n for n in concat(lb_nodes.values())
                if n.description.condition == CLBNodeCondition.DRAINING]
    feeds = yield parallel(
        [_retry(get_clb_node_feed(n.description.lb_id, n.node_id).on(
            error=gone(None))) for n in draining]
    )
    nodes_to_feeds = dict(zip(draining, feeds))
    # A feed of None means `gone` fired: that LB vanished mid-fetch.
    deleted_lbs = set([
        node.description.lb_id
        for (node, feed) in nodes_to_feeds.items() if feed is None])

    def update_drained_at(node):
        # Return the node with drained_at taken from its feed, or None if
        # its LB was deleted (None entries are filtered out below).
        feed = nodes_to_feeds.get(node)
        if node.description.lb_id in deleted_lbs:
            return None
        if feed is not None:
            node.drained_at = extract_clb_drained_at(feed)
        return node

    nodes = map(update_drained_at, concat(lb_nodes.values()))
    yield do_return((
        list(filter(bool, nodes)),
        pmap(keyfilter(lambda k: k not in deleted_lbs, clbs))))
def test_success(self):
    """
    LB contents are returned with ``drained_at`` correctly populated
    from the node feeds of DRAINING nodes.
    """
    n11 = node("11", "a11", condition="DRAINING")
    n12 = node("12", "a12")
    n21 = node("21", "a21", weight=3)
    n22 = node("22", "a22", weight=None, condition="DRAINING")
    seq = [
        lb_req("loadbalancers", True,
               {"loadBalancers": [{"id": 1}, {"id": 2}]}),
        parallel_sequence([[nodes_req(1, [n11, n12])],
                           [nodes_req(2, [n21, n22])]]),
        parallel_sequence([[node_feed_req(1, "11", "11feed")],
                           [node_feed_req(2, "22", "22feed")]]),
    ]
    self.assertEqual(
        perform_sequence(seq, get_clb_contents()),
        [assoc_obj(CLBNode.from_node_json(1, n11), drained_at=1.0),
         CLBNode.from_node_json(1, n12),
         CLBNode.from_node_json(2, n21),
         assoc_obj(CLBNode.from_node_json(2, n22), drained_at=2.0)])
def get_clb_contents(): """Get Rackspace Cloud Load Balancer contents as list of `CLBNode`.""" # If we get a CLBNotFoundError while fetching feeds, we should throw away # all nodes related to that load balancer, because we don't want to act on # data that we know is invalid/outdated (for example, if we can't fetch a # feed because CLB was deleted, we don't want to say that we have a node in # DRAINING with draining time of 0; we should just say that the node is # gone). def gone(r): return catch(CLBNotFoundError, lambda exc: r) lb_ids = [lb['id'] for lb in (yield _retry(get_clbs()))] node_reqs = [ _retry(get_clb_nodes(lb_id).on(error=gone([]))) for lb_id in lb_ids ] all_nodes = yield parallel(node_reqs) lb_nodes = { lb_id: [CLBNode.from_node_json(lb_id, node) for node in nodes] for lb_id, nodes in zip(lb_ids, all_nodes) } draining = [ n for n in concat(lb_nodes.values()) if n.description.condition == CLBNodeCondition.DRAINING ] feeds = yield parallel([ _retry( get_clb_node_feed(n.description.lb_id, n.node_id).on(error=gone(None))) for n in draining ]) nodes_to_feeds = dict(zip(draining, feeds)) deleted_lbs = set([ node.description.lb_id for (node, feed) in nodes_to_feeds.items() if feed is None ]) def update_drained_at(node): feed = nodes_to_feeds.get(node) if node.description.lb_id in deleted_lbs: return None if feed is not None: return assoc_obj(node, drained_at=extract_CLB_drained_at(feed)) else: return node nodes = map(update_drained_at, concat(lb_nodes.values())) yield do_return(list(filter(bool, nodes)))
def test_lb_disappeared_during_feed_fetch(self):
    """
    Nodes of a load balancer that is deleted while its feeds are being
    fetched are omitted from the result.
    """
    surviving = node("21", "a21", condition="DRAINING", weight=None)
    seq = [
        lb_req("loadbalancers", True,
               {"loadBalancers": [{"id": 1}, {"id": 2}]}),
        parallel_sequence(
            [[nodes_req(1, [node("11", "a11", condition="DRAINING"),
                            node("12", "a12")])],
             [nodes_req(2, [surviving])]]),
        parallel_sequence(
            [[node_feed_req(1, "11", CLBNotFoundError(lb_id=u"1"))],
             [node_feed_req(2, "21", "22feed")]]),
    ]
    result = perform_sequence(seq, get_clb_contents())
    self.assertEqual(
        result,
        [assoc_obj(CLBNode.from_node_json(2, surviving), drained_at=2.0)])
def get_clb_contents(): """Get Rackspace Cloud Load Balancer contents as list of `CLBNode`.""" # If we get a CLBNotFoundError while fetching feeds, we should throw away # all nodes related to that load balancer, because we don't want to act on # data that we know is invalid/outdated (for example, if we can't fetch a # feed because CLB was deleted, we don't want to say that we have a node in # DRAINING with draining time of 0; we should just say that the node is # gone). def gone(r): return catch(CLBNotFoundError, lambda exc: r) lb_ids = [lb['id'] for lb in (yield _retry(get_clbs()))] node_reqs = [_retry(get_clb_nodes(lb_id).on(error=gone([]))) for lb_id in lb_ids] all_nodes = yield parallel(node_reqs) lb_nodes = {lb_id: [CLBNode.from_node_json(lb_id, node) for node in nodes] for lb_id, nodes in zip(lb_ids, all_nodes)} draining = [n for n in concat(lb_nodes.values()) if n.description.condition == CLBNodeCondition.DRAINING] feeds = yield parallel( [_retry(get_clb_node_feed(n.description.lb_id, n.node_id).on( error=gone(None))) for n in draining] ) nodes_to_feeds = dict(zip(draining, feeds)) deleted_lbs = set([ node.description.lb_id for (node, feed) in nodes_to_feeds.items() if feed is None]) def update_drained_at(node): feed = nodes_to_feeds.get(node) if node.description.lb_id in deleted_lbs: return None if feed is not None: return assoc_obj(node, drained_at=extract_CLB_drained_at(feed)) else: return node nodes = map(update_drained_at, concat(lb_nodes.values())) yield do_return(list(filter(bool, nodes)))
def get_clb_contents(): """ Get Rackspace Cloud Load Balancer contents as list of `CLBNode`. CLB health monitor information is also returned as a pmap of :obj:`CLB` objects mapped on LB ID. :return: Effect of (``list`` of :obj:`CLBNode`, `pmap` of :obj:`CLB`) :rtype: :obj:`Effect` """ # If we get a CLBNotFoundError while fetching feeds, we should throw away # all nodes related to that load balancer, because we don't want to act on # data that we know is invalid/outdated (for example, if we can't fetch a # feed because CLB was deleted, we don't want to say that we have a node in # DRAINING with draining time of 0; we should just say that the node is # gone). def gone(r): return catch(CLBNotFoundError, lambda exc: r) lb_ids = [lb['id'] for lb in (yield _retry(get_clbs()))] node_reqs = [ _retry(get_clb_nodes(lb_id).on(error=gone([]))) for lb_id in lb_ids ] healthmon_reqs = [ _retry(get_clb_health_monitor(lb_id).on(error=gone(None))) for lb_id in lb_ids ] all_nodes_hms = yield parallel(node_reqs + healthmon_reqs) all_nodes, hms = all_nodes_hms[:len(lb_ids)], all_nodes_hms[len(lb_ids):] lb_nodes = { lb_id: [CLBNode.from_node_json(lb_id, node) for node in nodes] for lb_id, nodes in zip(lb_ids, all_nodes) } clbs = { str(lb_id): CLB(bool(health_mon)) for lb_id, health_mon in zip(lb_ids, hms) if health_mon is not None } draining = [ n for n in concat(lb_nodes.values()) if n.description.condition == CLBNodeCondition.DRAINING ] feeds = yield parallel([ _retry( get_clb_node_feed(n.description.lb_id, n.node_id).on(error=gone(None))) for n in draining ]) nodes_to_feeds = dict(zip(draining, feeds)) deleted_lbs = set([ node.description.lb_id for (node, feed) in nodes_to_feeds.items() if feed is None ]) def update_drained_at(node): feed = nodes_to_feeds.get(node) if node.description.lb_id in deleted_lbs: return None if feed is not None: node.drained_at = extract_clb_drained_at(feed) return node nodes = map(update_drained_at, concat(lb_nodes.values())) yield 
do_return((list(filter(bool, nodes)), pmap(keyfilter(lambda k: k not in deleted_lbs, clbs))))
def test_active_if_node_is_enabled(self):
    """
    An ENABLED node is considered active by :func:`CLBNode.is_active`.
    """
    enabled_node = CLBNode(node_id="1234", address="10.1.1.1",
                           description=self.desc,
                           drained_at=0.0, connections=1)
    self.assertTrue(enabled_node.is_active())