Example #1
    def test_context(self):
        # Context must not rely only on node_id, in order to prohibit multiple
        # instances of a node_id on the DSE
        part = helper.get_new_partition()
        n1 = helper.make_dsenode_same_partition(part, 'node_id',
                                                self.messaging_config, [])
        n2 = helper.make_dsenode_same_partition(part, 'node_id',
                                                self.messaging_config, [])
        self.assertEqual(n1._message_context, n1._message_context,
                         "Comparison of context from the same node is equal")
        self.assertNotEqual(n1._message_context, n2._message_context,
                            "Comparison of context from the different nodes "
                            "is not equal")
Example #2
    def test_broadcast_service_rpc(self):
        part = helper.get_new_partition()
        nodes = []
        services = []
        for i in range(3):
            nid = 'svc_rpc_node%s' % i
            node = helper.make_dsenode_same_partition(part, nid,
                                                      self.messaging_config)
            service = _PingRpcService('tbsr_svc', nid)
            node.register_service(service)
            nodes.append(node)
            services.append(service)

        # Send from each node to all services
        for i, source in enumerate(nodes):
            scounts = []
            for j, target in enumerate(nodes):
                ep = nodes[j]._services[-1].endpoints[0]
                scounts.append(ep.ping_receive_count)
            source.broadcast_service_rpc('tbsr_svc', 'ping_test', {
                'arg1': 1,
                'arg2': 'a'
            })
            eventlet.sleep(0.5)  # wait for async delivery
            for j, target in enumerate(nodes):
                ep = nodes[j]._services[-1].endpoints[0]
                ecount = ep.ping_receive_count
                self.assertEqual(
                    ecount - scounts[j], 1,
                    "Node %s received ping (%s was sending)" %
                    (nodes[j].node_id, source.node_id))
                self.assertEqual(
                    ep.ping_received_from[-1]['node_id'], source.node_id,
                    "Last ping received on %s was from %s" %
                    (nodes[j].node_id, source.node_id))
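Note: the _PingRpcService and _PingRpcEndpoint classes used in the RPC examples on this page belong to the Congress test suite and are not shown here. A minimal sketch, reconstructed from how the examples use them (attribute and method names are taken from the examples above; the real upstream definitions may differ), could look like this:

# Sketch only: a ping endpoint that counts calls, records the caller context
# and echoes its arguments, plus a DataService wrapper exposing it over RPC.
# Reconstructed from the usage in these examples, not verbatim upstream code.
from congress.dse2 import data_service


class _PingRpcEndpoint(object):
    def __init__(self, node_id):
        self.node_id = node_id
        self.ping_receive_count = 0
        self.ping_received_from = []

    def ping(self, client_ctxt, **args):
        # kwargs variant, e.g. invoke_node_rpc(node_id, 'ping', **args)
        self.ping_receive_count += 1
        self.ping_received_from.append(client_ctxt)  # ctxt carries 'node_id'
        return args

    def ping_test(self, client_ctxt, args):
        # dict variant, e.g. invoke_node_rpc(node_id, 'ping_test', args)
        self.ping_receive_count += 1
        self.ping_received_from.append(client_ctxt)
        return args


class _PingRpcService(data_service.DataService):
    def __init__(self, service_id, node_id):
        self.endpoints = [_PingRpcEndpoint(node_id)]
        super(_PingRpcService, self).__init__(service_id)

    def rpc_endpoints(self):
        return self.endpoints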
Example #3
    def test_service_rpc(self):
        part = helper.get_new_partition()
        nodes = []
        services = []
        for i in range(3):
            nid = 'svc_rpc_node%s' % i
            node = helper.make_dsenode_same_partition(
                part, nid, self.messaging_config)
            service = _PingRpcService('srpc_node_svc%s' % i, nid)
            node.register_service(service)
            nodes.append(node)
            services.append(service)

        # Send from each node to each other node
        for i, source in enumerate(nodes):
            # intentionally including self in RPC target
            for j, service in enumerate(services):
                ep = nodes[j]._services[-1].endpoints[0]
                scount = ep.ping_receive_count
                args = {'arg1': 1, 'arg2': 'a'}
                ret = source.invoke_service_rpc(service.service_id, 'ping',
                                                **args)
                self.assertEqual(ret, args, "Ping echoed arguments")
                ecount = ep.ping_receive_count
                self.assertEqual(ecount - scount, 1,
                                 "Node %s received ping (%s was sending)"
                                 % (nodes[j].node_id, nodes[i].node_id))
                self.assertEqual(
                    ep.ping_received_from[-1]['node_id'],
                    nodes[i].node_id,
                    "Last ping received on %s was from %s" % (
                        nodes[j].node_id, nodes[i].node_id))
Example #4
    def test_broadcast_service_rpc(self):
        part = helper.get_new_partition()
        nodes = []
        services = []
        for i in range(3):
            nid = 'svc_rpc_node%s' % i
            node = helper.make_dsenode_same_partition(
                part, nid, self.messaging_config)
            service = _PingRpcService('tbsr_svc', nid)
            node.register_service(service)
            nodes.append(node)
            services.append(service)

        # Send from each node to all services
        for i, source in enumerate(nodes):
            scounts = []
            for j, target in enumerate(nodes):
                ep = nodes[j]._services[-1].endpoints[0]
                scounts.append(ep.ping_receive_count)
            source.broadcast_service_rpc('tbsr_svc', 'ping', arg1=1, arg2='a')
            eventlet.sleep(0.5)  # wait for async delivery
            for j, target in enumerate(nodes):
                ep = nodes[j]._services[-1].endpoints[0]
                ecount = ep.ping_receive_count
                self.assertEqual(ecount - scounts[j], 1,
                                 "Node %s received ping (%s was sending)"
                                 % (nodes[j].node_id, source.node_id))
                self.assertEqual(
                    ep.ping_received_from[-1]['node_id'],
                    source.node_id,
                    "Last ping received on %s was from %s" % (
                        nodes[j].node_id, source.node_id))
Example #5
    def test_node_rpc(self):
        """Validate calling RPCs on DseNode"""
        part = helper.get_new_partition()
        nodes = []
        endpoints = []
        for i in range(3):
            nid = 'rpcnode%s' % i
            endpoints.append(_PingRpcEndpoint(nid))
            nodes.append(
                helper.make_dsenode_same_partition(
                    part, nid, self.messaging_config, [endpoints[-1]]))

        # Send from each node to each other node
        for i, source in enumerate(nodes):
            # intentionally including self in RPC target
            for j, target in enumerate(nodes):
                scount = endpoints[j].ping_receive_count
                args = {'arg1': 1, 'arg2': 'a'}
                ret = source.invoke_node_rpc(target.node_id, 'ping', **args)
                self.assertEqual(ret, args, "Ping echoed arguments")
                ecount = endpoints[j].ping_receive_count
                self.assertEqual(ecount - scount, 1,
                                 "Node %s received ping (%s was sending)"
                                 % (nodes[j].node_id, nodes[i].node_id))
                self.assertEqual(
                    endpoints[j].ping_received_from[-1]['node_id'],
                    nodes[i].node_id,
                    "Last ping received on %s was from %s" % (
                        nodes[j].node_id, nodes[i].node_id))
Example #6
    def test_node_broadcast_rpc(self):
        """Validate calling RPCs on DseNode"""
        part = helper.get_new_partition()
        nodes = []
        endpoints = []
        for i in range(3):
            nid = 'rpcnode%s' % i
            endpoints.append(_PingRpcEndpoint(nid))
            nodes.append(
                helper.make_dsenode_same_partition(
                    part, nid, self.messaging_config, [endpoints[-1]]))

        # Send from each node to all other nodes
        for i, source in enumerate(nodes):
            scounts = []
            for j, target in enumerate(nodes):
                scounts.append(endpoints[j].ping_receive_count)
            source.broadcast_node_rpc('ping', arg1=1, arg2='a')
            eventlet.sleep(0.5)  # wait for async delivery
            for j, target in enumerate(nodes):
                ecount = endpoints[j].ping_receive_count
                self.assertEqual(ecount - scounts[j], 1,
                                 "Node %s received ping (%s was sending)"
                                 % (nodes[j].node_id, source.node_id))
                self.assertEqual(
                    endpoints[j].ping_received_from[-1]['node_id'],
                    source.node_id,
                    "Last ping received on %s was from %s" % (
                        nodes[j].node_id, source.node_id))
Example #7
    def test_service_rpc(self):
        part = helper.get_new_partition()
        nodes = []
        services = []
        for i in range(3):
            nid = 'svc_rpc_node%s' % i
            node = helper.make_dsenode_same_partition(part, nid,
                                                      self.messaging_config)
            service = _PingRpcService('srpc_node_svc%s' % i, nid)
            node.register_service(service)
            nodes.append(node)
            services.append(service)

        # Send from each node to each other node
        for i, source in enumerate(nodes):
            # intentionally including self in RPC target
            for j, service in enumerate(services):
                ep = nodes[j]._services[-1].endpoints[0]
                scount = ep.ping_receive_count
                args = {'arg1': 1, 'arg2': 'a'}
                ret = source.invoke_service_rpc(service.service_id,
                                                'ping_test', args)
                self.assertEqual(ret, args, "Ping echoed arguments")
                ecount = ep.ping_receive_count
                self.assertEqual(
                    ecount - scount, 1,
                    "Node %s received ping (%s was sending)" %
                    (nodes[j].node_id, nodes[i].node_id))
                self.assertEqual(
                    ep.ping_received_from[-1]['node_id'], nodes[i].node_id,
                    "Last ping received on %s was from %s" %
                    (nodes[j].node_id, nodes[i].node_id))
Example #8
    def test_node_broadcast_rpc(self):
        """Validate calling RPCs on DseNode"""
        part = helper.get_new_partition()
        nodes = []
        endpoints = []
        for i in range(3):
            nid = 'rpcnode%s' % i
            endpoints.append(_PingRpcEndpoint(nid))
            nodes.append(
                helper.make_dsenode_same_partition(part, nid,
                                                   self.messaging_config,
                                                   [endpoints[-1]]))

        # Send from each node to all other nodes
        for i, source in enumerate(nodes):
            scounts = []
            for j, target in enumerate(nodes):
                scounts.append(endpoints[j].ping_receive_count)
            source.broadcast_node_rpc('ping_test', {'arg1': 1, 'arg2': 'a'})
            eventlet.sleep(0.5)  # wait for async delivery
            for j, target in enumerate(nodes):
                ecount = endpoints[j].ping_receive_count
                self.assertEqual(
                    ecount - scounts[j], 1,
                    "Node %s received ping (%s was sending)" %
                    (nodes[j].node_id, source.node_id))
                self.assertEqual(
                    endpoints[j].ping_received_from[-1]['node_id'],
                    source.node_id, "Last ping received on %s was from %s" %
                    (nodes[j].node_id, source.node_id))
Example #9
    def test_node_rpc(self):
        """Validate calling RPCs on DseNode"""
        part = helper.get_new_partition()
        nodes = []
        endpoints = []
        for i in range(3):
            nid = 'rpcnode%s' % i
            endpoints.append(_PingRpcEndpoint(nid))
            nodes.append(
                helper.make_dsenode_same_partition(part, nid,
                                                   self.messaging_config,
                                                   [endpoints[-1]]))

        # Send from each node to each other node
        for i, source in enumerate(nodes):
            # intentionally including self in RPC target
            for j, target in enumerate(nodes):
                scount = endpoints[j].ping_receive_count
                args = {'arg1': 1, 'arg2': 'a'}
                ret = source.invoke_node_rpc(target.node_id, 'ping_test', args)
                self.assertEqual(ret, args, "Ping echoed arguments")
                ecount = endpoints[j].ping_receive_count
                self.assertEqual(
                    ecount - scount, 1,
                    "Node %s received ping (%s was sending)" %
                    (nodes[j].node_id, nodes[i].node_id))
                self.assertEqual(
                    endpoints[j].ping_received_from[-1]['node_id'],
                    nodes[i].node_id, "Last ping received on %s was from %s" %
                    (nodes[j].node_id, nodes[i].node_id))
Example #10
def setup_config(with_fake_datasource=True,
                 node_id='testnode',
                 same_partition_as_node=None,
                 api=True,
                 policy=True,
                 datasources=True):
    """Setup DseNode for testing.

    :param: services is an array of DataServices
    :param: api is a dictionary mapping api name to API model instance
    """
    config.set_config_defaults()
    # Load the fake driver.
    cfg.CONF.set_override('drivers',
                          ['congress.tests.fake_datasource.FakeDataSource'])

    if same_partition_as_node is None:
        node = helper.make_dsenode_new_partition(node_id)
    else:
        node = helper.make_dsenode_same_partition(same_partition_as_node,
                                                  node_id)

    if datasources:
        cfg.CONF.set_override('datasources', True)

    with mock.patch.object(periodics, 'PeriodicWorker', autospec=True):
        services = harness.create2(existing_node=node,
                                   policy_engine=policy,
                                   api=api,
                                   datasources=datasources)

    data = None
    if with_fake_datasource:
        data = fake_datasource.FakeDataSource('data')
        # FIXME(ekcs): this is a hack to prevent the synchronizer from
        # attempting to delete this DSD because it's not in DB
        data.type = 'no_sync_datasource_driver'
        node.register_service(data)

    engine_service = None
    library_service = None
    api_service = None
    ds_manager = None
    if policy:
        engine_service = services[api_base.ENGINE_SERVICE_ID]
        library_service = services[api_base.LIBRARY_SERVICE_ID]
    if api:
        api_service = services['api']
    if datasources:
        ds_manager = services['ds_manager']

    return {
        'node': node,
        'engine': engine_service,
        'library': library_service,
        'data': data,
        'api': api_service,
        'ds_manager': ds_manager
    }
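Note: a typical caller simply unpacks the dict returned by setup_config. The following usage is illustrative only; the test class name and imports are hypothetical:

# Illustrative only: consuming the dict returned by setup_config above.
# from congress.tests import base  (assumed import)
class TestExample(base.SqlTestCase):  # hypothetical test case
    def setUp(self):
        super(TestExample, self).setUp()
        services = setup_config(with_fake_datasource=True)
        self.node = services['node']        # DseNode hosting the services
        self.engine = services['engine']    # policy engine service
        self.data = services['data']        # FakeDataSource named 'data'
        self.api = services['api']          # API model instances
        self.addCleanup(self.node.stop)     # shut the node down after the test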
Example #11
    def test_replicated_pe_exec(self):
        """Test correct local leader behavior with 2 PEs requesting exec"""
        node1 = helper.make_dsenode_new_partition('testnode1')
        node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
        dsd = fake_datasource.FakeDataSource('dsd')
        # faster time-out for testing
        dsd.LEADER_TIMEOUT = 2
        pe1 = agnostic.DseRuntime('pe1')
        pe2 = agnostic.DseRuntime('pe2')
        node1.register_service(pe1)
        node2.register_service(pe2)
        node1.register_service(dsd)
        assert dsd._running
        assert node1._running
        assert node2._running
        assert node1._control_bus._running

        # first exec request obeyed and leader set
        pe2.rpc('dsd', 'request_execute',
                {'action': 'fake_act', 'action_args': {'name': 'testnode2'},
                 'wait': True})
        helper.retry_check_function_return_value(
            lambda: len(dsd.exec_history), 1)
        self.assertEqual(dsd._leader_node_id, 'testnode2')

        # second exec request from leader obeyed and leader remains
        pe2.rpc('dsd', 'request_execute',
                {'action': 'fake_act', 'action_args': {'name': 'testnode2'},
                 'wait': True})
        helper.retry_check_function_return_value(
            lambda: len(dsd.exec_history), 2)
        self.assertEqual(dsd._leader_node_id, 'testnode2')

        # exec request from non-leader not obeyed
        pe1.rpc('dsd', 'request_execute',
                {'action': 'fake_act', 'action_args': {'name': 'testnode1'},
                 'wait': True})
        self.assertRaises(
            tenacity.RetryError,
            helper.retry_check_function_return_value,
            lambda: len(dsd.exec_history), 3)

        # leader vacated after heartbeat stops
        node2.stop()
        node2.wait()
        helper.retry_check_function_return_value(
            lambda: dsd._leader_node_id, None)

        # next exec request obeyed and new leader set
        pe1.rpc('dsd', 'request_execute',
                {'action': 'fake_act', 'action_args': {'name': 'testnode1'},
                 'wait': True})
        helper.retry_check_function_return_value(
            lambda: len(dsd.exec_history), 3)
        self.assertEqual(dsd._leader_node_id, 'testnode1')
        node1.stop()
        node2.stop()
Example #12
    def _create_node_with_services(self, nodes, services, num, partition_id):
        nid = 'cbd_node%s' % num
        nodes.append(helper.make_dsenode_same_partition(partition_id, nid))
        ns = []
        for s in range(num):
            # intentionally starting a different number of services per node
            ns.append(FakeDataSource('cbd-%d_svc-%d' % (num, s)))
            nodes[-1].register_service(ns[-1])
        services.append(ns)
        return nodes[-1]
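Note: callers of this helper usually build several nodes on one partition, each hosting a different number of services. An illustrative (assumed) driver, as it would appear inside a test method of the same class:

        # Illustrative only: build three nodes on one partition hosting
        # 1, 2 and 3 services respectively.
        part = helper.get_new_partition()
        nodes = []
        services = []
        for num in range(1, 4):
            self._create_node_with_services(nodes, services, num, part)
        # services[k] is the list of FakeDataSource instances on nodes[k]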
Example #13
def setup_config(with_fake_datasource=True, node_id='testnode',
                 same_partition_as_node=None, api=True, policy=True,
                 datasources=True, with_fake_json_ingester=False):
    """Setup DseNode for testing.

    :param: services is an array of DataServices
    :param: api is a dictionary mapping api name to API model instance
    """
    config.set_config_defaults()
    # Load the fake driver.
    cfg.CONF.set_override(
        'drivers',
        ['congress.tests.fake_datasource.FakeDataSource'])

    if same_partition_as_node is None:
        node = helper.make_dsenode_new_partition(node_id)
    else:
        node = helper.make_dsenode_same_partition(
            same_partition_as_node, node_id)

    if datasources:
        cfg.CONF.set_override('datasources', True)

    with mock.patch.object(periodics, 'PeriodicWorker', autospec=True):
        services = harness.create2(
            existing_node=node, policy_engine=policy, api=api,
            datasources=datasources)

    data = None
    if with_fake_datasource:
        data = fake_datasource.FakeDataSource('data')
        # FIXME(ekcs): this is a hack to prevent the synchronizer from
        # attempting to delete this DSD because it's not in DB
        data.type = 'no_sync_datasource_driver'
        node.register_service(data)

    ingester = None
    if with_fake_json_ingester:
        ingester = fake_datasource.FakeJsonIngester()
        node.register_service(ingester)

    engine_service = None
    library_service = None
    api_service = None
    ds_manager = None
    if policy:
        engine_service = services[api_base.ENGINE_SERVICE_ID]
        library_service = services[api_base.LIBRARY_SERVICE_ID]
    if api:
        api_service = services['api']
    if datasources:
        ds_manager = services['ds_manager']

    return {'node': node, 'engine': engine_service, 'library': library_service,
            'data': data, 'api': api_service, 'ds_manager': ds_manager,
            'json_ingester': ingester}
Example #14
    def _create_node_with_services(self, nodes, services, num, partition_id):
        nid = 'cbd_node%s' % num
        nodes.append(helper.make_dsenode_same_partition(partition_id, nid))
        ns = []
        for s in range(num):
            # intentionally starting a different number of services per node
            ns.append(
                fake_datasource.FakeDataSource('cbd-%d_svc-%d' % (num, s)))
            nodes[-1].register_service(ns[-1])
        services.append(ns)
        return nodes[-1]
Example #15
    def test_internode_pubsub(self):
        node1 = helper.make_dsenode_new_partition('testnode1')
        test1 = fake_datasource.FakeDataSource('test1')
        node1.register_service(test1)
        node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
        test2 = fake_datasource.FakeDataSource('test2')
        node2.register_service(test2)

        test1.subscribe('test2', 'p')
        helper.retry_check_function_return_value(
            lambda: hasattr(test1, 'last_msg'), True)
        test2.publish('p', 42)
        helper.retry_check_function_return_value(
            lambda: test1.last_msg['data'], 42)
        self.assertFalse(hasattr(test2, "last_msg"))
Example #16
    def test_internode_pubsub(self):
        node1 = helper.make_dsenode_new_partition('testnode1')
        test1 = fake_datasource.FakeDataSource('test1')
        node1.register_service(test1)
        node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
        test2 = fake_datasource.FakeDataSource('test2')
        node2.register_service(test2)

        test1.subscribe('test2', 'p')
        helper.retry_check_function_return_value(
            lambda: hasattr(test1, 'last_msg'), True)
        test2.publish('p', 42, use_snapshot=True)
        helper.retry_check_function_return_value(
            lambda: test1.last_msg['data'], 42)
        self.assertFalse(hasattr(test2, "last_msg"))
        node1.stop()
        node2.stop()
Example #17
    def test_internode_partial_unsub(self):
        node1 = helper.make_dsenode_new_partition('testnode1')
        node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
        test1 = fake_datasource.FakeDataSource('test1')
        test2 = fake_datasource.FakeDataSource('test2')
        node1.register_service(test1)
        node2.register_service(test2)

        test1.subscribe('test2', 'p')
        test1.subscribe('test2', 'q')
        test1.unsubscribe('test2', 'q')  # unsub from q should not affect p
        helper.retry_check_function_return_value(
            lambda: hasattr(test1, 'last_msg'), True)
        test2.publish('p', [42], use_snapshot=True)
        helper.retry_check_function_return_value(
            lambda: test1.last_msg['data'], [42])
        self.assertFalse(hasattr(test2, "last_msg"))
        node1.stop()
        node2.stop()
Example #18
    def test_replicated_pe_exec(self):
        """Test correct local leader behavior with 2 PEs requesting exec"""
        node1 = helper.make_dsenode_new_partition('testnode1')
        node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
        dsd = fake_datasource.FakeDataSource('dsd')
        # faster time-out for testing
        dsd.LEADER_TIMEOUT = 2
        pe1 = agnostic.DseRuntime('pe1')
        pe2 = agnostic.DseRuntime('pe2')
        node1.register_service(pe1)
        node2.register_service(pe2)
        node1.register_service(dsd)
        assert dsd._running
        assert node1._running
        assert node2._running
        assert node1._control_bus._running

        # first exec request obeyed and leader set
        pe2.rpc(
            'dsd', 'request_execute', {
                'action': 'fake_act',
                'action_args': {
                    'name': 'testnode2'
                },
                'wait': True
            })
        helper.retry_check_function_return_value(lambda: len(dsd.exec_history),
                                                 1)
        self.assertEqual(dsd._leader_node_id, 'testnode2')

        # second exec request from leader obeyed and leader remains
        pe2.rpc(
            'dsd', 'request_execute', {
                'action': 'fake_act',
                'action_args': {
                    'name': 'testnode2'
                },
                'wait': True
            })
        helper.retry_check_function_return_value(lambda: len(dsd.exec_history),
                                                 2)
        self.assertEqual(dsd._leader_node_id, 'testnode2')

        # exec request from non-leader not obeyed
        pe1.rpc(
            'dsd', 'request_execute', {
                'action': 'fake_act',
                'action_args': {
                    'name': 'testnode1'
                },
                'wait': True
            })
        self.assertRaises(tenacity.RetryError,
                          helper.retry_check_function_return_value,
                          lambda: len(dsd.exec_history), 3)

        # leader vacated after heartbeat stops
        node2.stop()
        node2.wait()
        helper.retry_check_function_return_value(lambda: dsd._leader_node_id,
                                                 None)

        # next exec request obeyed and new leader set
        pe1.rpc(
            'dsd', 'request_execute', {
                'action': 'fake_act',
                'action_args': {
                    'name': 'testnode1'
                },
                'wait': True
            })
        helper.retry_check_function_return_value(lambda: len(dsd.exec_history),
                                                 3)
        self.assertEqual(dsd._leader_node_id, 'testnode1')
        node1.stop()
        node2.stop()