Example #1
    def __init__(self,
                 context,
                 num_nodes,
                 zk,
                 security_protocol=SecurityConfig.PLAINTEXT,
                 interbroker_security_protocol=SecurityConfig.PLAINTEXT,
                 topics=None,
                 quota_config=None,
                 jmx_object_names=None,
                 jmx_attributes=[]):
        """
        :type context
        :type zk: ZookeeperService
        :type topics: dict
        """
        Service.__init__(self, context, num_nodes)
        JmxMixin.__init__(self, num_nodes, jmx_object_names, jmx_attributes)
        self.zk = zk
        if security_protocol == SecurityConfig.SSL or interbroker_security_protocol == SecurityConfig.SSL:
            self.security_config = SecurityConfig(SecurityConfig.SSL)
        else:
            self.security_config = SecurityConfig(SecurityConfig.PLAINTEXT)
        self.security_protocol = security_protocol
        self.interbroker_security_protocol = interbroker_security_protocol
        self.port = 9092 if security_protocol == SecurityConfig.PLAINTEXT else 9093
        self.topics = topics
        self.quota_config = quota_config
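The selection logic in this constructor is compact enough to illustrate on its own: SSL material is enabled if either the client-facing or the interbroker protocol is SSL, while the advertised port depends only on the client-facing protocol. A minimal, self-contained sketch with stand-in constants (the real values come from kafkatest's SecurityConfig; the names below are illustrative only):

    # Stand-in constants; the real ones come from kafkatest's SecurityConfig.
    PLAINTEXT = "PLAINTEXT"
    SSL = "SSL"

    def effective_security(client_protocol, interbroker_protocol):
        # SSL material is needed as soon as either side speaks SSL.
        needs_ssl = SSL in (client_protocol, interbroker_protocol)
        config = SSL if needs_ssl else PLAINTEXT
        # The advertised client port depends only on the client-facing protocol.
        port = 9092 if client_protocol == PLAINTEXT else 9093
        return config, port

    assert effective_security(PLAINTEXT, SSL) == (SSL, 9092)
    assert effective_security(SSL, SSL) == (SSL, 9093)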
Example #2
    def __init__(self, context, nodes, target, mirror, persist="/mnt/kibosh"):
        """
        Create a Kibosh service.

        :param context:             The TestContext object.
        :param nodes:               The nodes to put the Kibosh FS on.  Kibosh allocates no
                                    nodes of its own.
        :param target:              The target directory, which Kibosh exports a view of.
        :param mirror:              The mirror directory, where Kibosh injects faults.
        :param persist:             Where the log files and pid files will be created.
        """
        Service.__init__(self, context, num_nodes=0)
        if (len(nodes) == 0):
            raise RuntimeError("You must supply at least one node to run the service on.")
        for node in nodes:
            self.nodes.append(node)

        self.target = target
        self.mirror = mirror
        self.persist = persist

        self.control_path = os.path.join(self.mirror, "kibosh_control")
        self.pidfile_path = os.path.join(self.persist, "pidfile")
        self.stdout_stderr_path = os.path.join(self.persist, "kibosh-stdout-stderr.log")
        self.log_path = os.path.join(self.persist, "kibosh.log")
        self.logs = {
            "kibosh-stdout-stderr.log": {
                "path": self.stdout_stderr_path,
                "collect_default": True},
            "kibosh.log": {
                "path": self.log_path,
                "collect_default": True}
        }
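For orientation, a test typically points this service at nodes allocated by another service, since Kibosh allocates none of its own. A sketch only: the class name KiboshService, the kafka variable, and both directory paths are assumptions, while start() and stop() are the standard ducktape Service lifecycle calls.

    # Sketch: run Kibosh on the broker nodes, exporting a hypothetical data
    # directory through a fault-injecting mirror directory.
    kibosh = KiboshService(test_context,
                           nodes=kafka.nodes,
                           target="/mnt/kafka-data",     # hypothetical path
                           mirror="/mnt/kibosh-mirror")  # hypothetical path
    kibosh.start()
    # ... run the workload; faults are presumably driven via kibosh.control_path ...
    kibosh.stop()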
Example #3
    def __init__(self,
                 context,
                 num_nodes,
                 zk,
                 security_protocol=SecurityConfig.PLAINTEXT,
                 interbroker_security_protocol=SecurityConfig.PLAINTEXT,
                 sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI,
                 topics=None,
                 version=TRUNK,
                 quota_config=None,
                 jmx_object_names=None,
                 jmx_attributes=[]):
        """
        :type context
        :type zk: ZookeeperService
        :type topics: dict
        """
        Service.__init__(self, context, num_nodes)
        JmxMixin.__init__(self, num_nodes, jmx_object_names, jmx_attributes)
        self.log_level = "DEBUG"

        self.zk = zk
        self.quota_config = quota_config

        self.security_protocol = security_protocol
        self.interbroker_security_protocol = interbroker_security_protocol
        self.sasl_mechanism = sasl_mechanism
        self.topics = topics

        for node in self.nodes:
            node.version = version
            node.config = KafkaConfig(
                **{config_property.BROKER_ID: self.idx(node)})
Example #4
    def __init__(self,
                 context,
                 agent_nodes=None,
                 client_services=None,
                 agent_port=DEFAULT_AGENT_PORT,
                 coordinator_port=DEFAULT_COORDINATOR_PORT):
        """
        Create a Trogdor service.

        :param context:             The test context.
        :param agent_nodes:         The nodes to run the agents on.
        :param client_services:     Services whose nodes we should run agents on.
        :param agent_port:          The port to use for the trogdor_agent daemons.
        :param coordinator_port:    The port to use for the trogdor_coordinator daemons.
        """
        Service.__init__(self, context, num_nodes=1)
        self.coordinator_node = self.nodes[0]
        if client_services is not None:
            for client_service in client_services:
                for node in client_service.nodes:
                    self.nodes.append(node)
        if agent_nodes is not None:
            for agent_node in agent_nodes:
                self.nodes.append(agent_node)
        if (len(self.nodes) == 1):
            raise RuntimeError(
                "You must supply at least one agent node to run the service on."
            )
        self.agent_port = agent_port
        self.coordinator_port = coordinator_port
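A common instantiation reuses the nodes of the services under test as agent hosts, so Trogdor itself only allocates the single coordinator node. A sketch, assuming the class is exposed as TrogdorService and that kafka is a previously created service with at least one node (which keeps the guard above from firing):

    # Sketch: one coordinator node, plus an agent on every node of the Kafka service.
    trogdor = TrogdorService(test_context, client_services=[kafka])
    trogdor.start()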
Example #5
    def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAINTEXT, interbroker_security_protocol=SecurityConfig.PLAINTEXT,
                 client_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI, interbroker_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI,
                 authorizer_class_name=None, topics=None, version=DEV_BRANCH, jmx_object_names=None,
                 jmx_attributes=None, zk_connect_timeout=5000, zk_session_timeout=6000, server_prop_overides=None, zk_chroot=None):
        """
        :type context
        :type zk: ZookeeperService
        :type topics: dict
        """
        Service.__init__(self, context, num_nodes)
        JmxMixin.__init__(self, num_nodes=num_nodes, jmx_object_names=jmx_object_names, jmx_attributes=(jmx_attributes or []),
                          root=KafkaService.PERSISTENT_ROOT)

        self.zk = zk

        self.security_protocol = security_protocol
        self.interbroker_security_protocol = interbroker_security_protocol
        self.client_sasl_mechanism = client_sasl_mechanism
        self.interbroker_sasl_mechanism = interbroker_sasl_mechanism
        self.topics = topics
        self.minikdc = None
        self.authorizer_class_name = authorizer_class_name
        self.zk_set_acl = False
        if server_prop_overides is None:
            self.server_prop_overides = []
        else:
            self.server_prop_overides = server_prop_overides
        self.log_level = "DEBUG"
        self.zk_chroot = zk_chroot

        #
        # In a heavily loaded and not very fast machine, it is
        # sometimes necessary to give more time for the zk client
        # to have its session established, especially if the client
        # is authenticating and waiting for the SaslAuthenticated
        # in addition to the SyncConnected event.
        #
        # The default value for zookeeper.connect.timeout.ms is
        # 2 seconds and here we increase it to 5 seconds, but
        # it can be overridden by setting the corresponding parameter
        # for this constructor.
        self.zk_connect_timeout = zk_connect_timeout

        # Also allow the session timeout to be provided explicitly,
        # primarily so that test cases can depend on it when waiting
        # e.g. brokers to deregister after a hard kill.
        self.zk_session_timeout = zk_session_timeout

        self.port_mappings = {
            'PLAINTEXT': Port('PLAINTEXT', 9092, False),
            'SSL': Port('SSL', 9093, False),
            'SASL_PLAINTEXT': Port('SASL_PLAINTEXT', 9094, False),
            'SASL_SSL': Port('SASL_SSL', 9095, False)
        }

        for node in self.nodes:
            node.version = version
            node.config = KafkaConfig(**{config_property.BROKER_ID: self.idx(node)})
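The fixed protocol-to-port table above is what client-facing helpers such as bootstrap_servers ultimately key off. A self-contained sketch of that lookup, using a local namedtuple whose field names are assumed (the real Port type lives in kafkatest and may differ):

    from collections import namedtuple

    # Local stand-in for the Port tuples shown above (field names assumed).
    Port = namedtuple("Port", ["name", "number", "open"])

    PORT_MAPPINGS = {
        'PLAINTEXT': Port('PLAINTEXT', 9092, False),
        'SSL': Port('SSL', 9093, False),
        'SASL_PLAINTEXT': Port('SASL_PLAINTEXT', 9094, False),
        'SASL_SSL': Port('SASL_SSL', 9095, False)
    }

    def bootstrap_servers(hostnames, security_protocol):
        # Join host:port pairs using the port registered for the given protocol.
        port = PORT_MAPPINGS[security_protocol].number
        return ",".join("%s:%d" % (host, port) for host in hostnames)

    assert bootstrap_servers(["worker1", "worker2"], "SSL") == "worker1:9093,worker2:9093"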
Example #7
    def __init__(self, test_context, kafka, streams_class_name, user_test_args1, user_test_args2=None, user_test_args3=None, user_test_args4=None):
        Service.__init__(self, test_context, num_nodes=1)
        self.kafka = kafka
        self.args = {'streams_class_name': streams_class_name,
                     'user_test_args1': user_test_args1,
                     'user_test_args2': user_test_args2,
                     'user_test_args3': user_test_args3,
                     'user_test_args4': user_test_args4}
        self.log_level = "DEBUG"
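A test hands this wrapper the fully qualified name of the Streams application class to launch plus its positional arguments. A sketch only: the service class name, the Java class, and the argument values below are placeholders rather than values taken from the real tests.

    streams = StreamsTestService(test_context,
                                 kafka,
                                 "org.example.MyStreamsSmokeTest",  # placeholder class
                                 user_test_args1="input-topic")     # placeholder arg
    streams.start()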
Example #8
    def __init__(self,
                 context,
                 num_nodes,
                 zk,
                 security_protocol=SecurityConfig.PLAINTEXT,
                 interbroker_security_protocol=SecurityConfig.PLAINTEXT,
                 sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI,
                 authorizer_class_name=None,
                 topics=None,
                 version=TRUNK,
                 quota_config=None,
                 jmx_object_names=None,
                 jmx_attributes=[],
                 zk_connect_timeout=5000):
        """
        :type context
        :type zk: ZookeeperService
        :type topics: dict
        """
        Service.__init__(self, context, num_nodes)
        JmxMixin.__init__(self, num_nodes, jmx_object_names, jmx_attributes)

        self.zk = zk
        self.quota_config = quota_config

        self.security_protocol = security_protocol
        self.interbroker_security_protocol = interbroker_security_protocol
        self.sasl_mechanism = sasl_mechanism
        self.topics = topics
        self.minikdc = None
        self.authorizer_class_name = authorizer_class_name
        #
        # In a heavily loaded and not very fast machine, it is
        # sometimes necessary to give more time for the zk client
        # to have its session established, especially if the client
        # is authenticating and waiting for the SaslAuthenticated
        # in addition to the SyncConnected event.
        #
        # The default value for zookeeper.connect.timeout.ms is
        # 2 seconds and here we increase it to 5 seconds, but
        # it can be overridden by setting the corresponding parameter
        # for this constructor.
        self.zk_connect_timeout = zk_connect_timeout

        self.port_mappings = {
            'PLAINTEXT': Port('PLAINTEXT', 9092, False),
            'SSL': Port('SSL', 9093, False),
            'SASL_PLAINTEXT': Port('SASL_PLAINTEXT', 9094, False),
            'SASL_SSL': Port('SASL_SSL', 9095, False)
        }

        for node in self.nodes:
            node.version = version
            node.config = KafkaConfig(
                **{config_property.BROKER_ID: self.idx(node)})
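As the comment explains, the 5-second connect timeout is itself only this constructor's default; a test running a SASL-authenticated cluster on a slow machine can pass a larger value. A sketch, assuming this constructor belongs to KafkaService and that zk is an already-created ZookeeperService:

    # Sketch: allow 10 seconds for the ZooKeeper session to be established,
    # e.g. when SASL authentication adds a SaslAuthenticated round trip.
    kafka = KafkaService(test_context, num_nodes=3, zk=zk,
                         security_protocol=SecurityConfig.SASL_SSL,
                         interbroker_security_protocol=SecurityConfig.SASL_SSL,
                         zk_connect_timeout=10000)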
Example #9
    def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAINTEXT, interbroker_security_protocol=SecurityConfig.PLAINTEXT,
                 client_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI, interbroker_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI,
                 authorizer_class_name=None, topics=None, version=TRUNK, quota_config=None, jmx_object_names=None,
                 jmx_attributes=[], zk_connect_timeout=5000):
        """
        :type context
        :type zk: ZookeeperService
        :type topics: dict
        """
        Service.__init__(self, context, num_nodes)
        JmxMixin.__init__(self, num_nodes, jmx_object_names, jmx_attributes)

        self.zk = zk
        self.quota_config = quota_config

        self.security_protocol = security_protocol
        self.interbroker_security_protocol = interbroker_security_protocol
        self.client_sasl_mechanism = client_sasl_mechanism
        self.interbroker_sasl_mechanism = interbroker_sasl_mechanism
        self.topics = topics
        self.minikdc = None
        self.authorizer_class_name = authorizer_class_name
        #
        # In a heavily loaded and not very fast machine, it is
        # sometimes necessary to give more time for the zk client
        # to have its session established, especially if the client
        # is authenticating and waiting for the SaslAuthenticated
        # in addition to the SyncConnected event.
        #
        # The default value for zookeeper.connect.timeout.ms is
        # 2 seconds and here we increase it to 5 seconds, but
        # it can be overridden by setting the corresponding parameter
        # for this constructor.
        self.zk_connect_timeout = zk_connect_timeout

        self.port_mappings = {
            'PLAINTEXT': Port('PLAINTEXT', 9092, False),
            'SSL': Port('SSL', 9093, False),
            'SASL_PLAINTEXT': Port('SASL_PLAINTEXT', 9094, False),
            'SASL_SSL': Port('SASL_SSL', 9095, False)
        }

        for node in self.nodes:
            node.version = version
            node.config = KafkaConfig(**{config_property.BROKER_ID: self.idx(node)})
Example #10
    def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAINTEXT, interbroker_security_protocol=SecurityConfig.PLAINTEXT,
                 sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI, topics=None, version=TRUNK, quota_config=None, jmx_object_names=None, jmx_attributes=[]):
        """
        :type context
        :type zk: ZookeeperService
        :type topics: dict
        """
        Service.__init__(self, context, num_nodes)
        JmxMixin.__init__(self, num_nodes, jmx_object_names, jmx_attributes)
        self.log_level = "DEBUG"

        self.zk = zk
        self.quota_config = quota_config

        self.security_protocol = security_protocol
        self.interbroker_security_protocol = interbroker_security_protocol
        self.sasl_mechanism = sasl_mechanism
        self.topics = topics

        for node in self.nodes:
            node.version = version
            node.config = KafkaConfig(**{config_property.BROKER_ID: self.idx(node)})
Example #11
    def __init__(self,
                 context,
                 agent_nodes,
                 agent_port=DEFAULT_AGENT_PORT,
                 coordinator_port=DEFAULT_COORDINATOR_PORT):
        """
        Create a Trogdor service.

        :param context:             The test context.
        :param agent_nodes:         The nodes to run the agents on.
        :param agent_port:          The port to use for the trogdor_agent daemons.
        :param coordinator_port:    The port to use for the trogdor_coordinator daemons.
        """
        Service.__init__(self, context, num_nodes=1)
        self.coordinator_node = self.nodes[0]
        if (len(agent_nodes) == 0):
            raise RuntimeError(
                "You must supply at least one node to run the service on.")
        for agent_node in agent_nodes:
            self.nodes.append(agent_node)
        self.agent_port = agent_port
        self.coordinator_port = coordinator_port
Example #12
    def __init__(
            self,
            context,
            num_nodes,
            zk,
            security_protocol=SecurityConfig.PLAINTEXT,
            interbroker_security_protocol=SecurityConfig.PLAINTEXT,
            client_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI,
            interbroker_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI,
            authorizer_class_name=None,
            topics=None,
            version=DEV_BRANCH,
            jmx_object_names=None,
            jmx_attributes=None,
            zk_connect_timeout=5000,
            zk_session_timeout=6000,
            server_prop_overides=None,
            zk_chroot=None,
            listener_security_config=ListenerSecurityConfig(),
            per_node_server_prop_overrides={}):
        """
        :param context: test context
        :param ZookeeperService zk:
        :param dict topics: which topics to create automatically
        :param str security_protocol: security protocol for clients to use
        :param str interbroker_security_protocol: security protocol to use for broker-to-broker communication
        :param str client_sasl_mechanism: sasl mechanism for clients to use
        :param str interbroker_sasl_mechanism: sasl mechanism to use for broker-to-broker communication
        :param str authorizer_class_name: which authorizer class to use
        :param str version: which kafka version to use. Defaults to "dev" branch
        :param jmx_object_names:
        :param jmx_attributes:
        :param int zk_connect_timeout:
        :param int zk_session_timeout:
        :param dict server_prop_overides: overrides for kafka.properties file
        :param zk_chroot:
        :param ListenerSecurityConfig listener_security_config: listener config to use
        """
        Service.__init__(self, context, num_nodes)
        JmxMixin.__init__(self,
                          num_nodes=num_nodes,
                          jmx_object_names=jmx_object_names,
                          jmx_attributes=(jmx_attributes or []),
                          root=KafkaService.PERSISTENT_ROOT)

        self.zk = zk

        self.security_protocol = security_protocol
        self.client_sasl_mechanism = client_sasl_mechanism
        self.topics = topics
        self.minikdc = None
        self.authorizer_class_name = authorizer_class_name
        self.zk_set_acl = False
        if server_prop_overides is None:
            self.server_prop_overides = []
        else:
            self.server_prop_overides = server_prop_overides
        if per_node_server_prop_overrides is None:
            self.per_node_server_prop_overrides = {}
        else:
            self.per_node_server_prop_overrides = per_node_server_prop_overrides
        self.log_level = "DEBUG"
        self.zk_chroot = zk_chroot
        self.listener_security_config = listener_security_config

        #
        # In a heavily loaded and not very fast machine, it is
        # sometimes necessary to give more time for the zk client
        # to have its session established, especially if the client
        # is authenticating and waiting for the SaslAuthenticated
        # in addition to the SyncConnected event.
        #
        # The default value for zookeeper.connect.timeout.ms is
        # 2 seconds and here we increase it to 5 seconds, but
        # it can be overridden by setting the corresponding parameter
        # for this constructor.
        self.zk_connect_timeout = zk_connect_timeout

        # Also allow the session timeout to be provided explicitly,
        # primarily so that test cases can depend on it when waiting
        # e.g. brokers to deregister after a hard kill.
        self.zk_session_timeout = zk_session_timeout

        self.port_mappings = {
            'PLAINTEXT': KafkaListener('PLAINTEXT', 9092, 'PLAINTEXT', False),
            'SSL': KafkaListener('SSL', 9093, 'SSL', False),
            'SASL_PLAINTEXT': KafkaListener('SASL_PLAINTEXT', 9094, 'SASL_PLAINTEXT', False),
            'SASL_SSL': KafkaListener('SASL_SSL', 9095, 'SASL_SSL', False),
            KafkaService.INTERBROKER_LISTENER_NAME: KafkaListener(KafkaService.INTERBROKER_LISTENER_NAME, 9099, None, False)
        }

        self.interbroker_listener = None
        self.setup_interbroker_listener(
            interbroker_security_protocol,
            self.listener_security_config.use_separate_interbroker_listener)
        self.interbroker_sasl_mechanism = interbroker_sasl_mechanism

        for node in self.nodes:
            node.version = version
            node.config = KafkaConfig(
                **{config_property.BROKER_ID: self.idx(node)})
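Unlike the earlier constructors, this one registers a dedicated interbroker listener (port 9099 above) alongside the per-protocol listeners and defers the choice to setup_interbroker_listener, whose body is not shown here. The decision it appears to make can be sketched as a standalone function; this is a reconstruction under that assumption, with the dedicated listener's name passed in rather than guessed:

    def pick_interbroker_listener(port_mappings, interbroker_security_protocol,
                                  use_separate_listener, dedicated_listener_name):
        # Reconstruction, not the real setup_interbroker_listener: brokers either
        # get their own isolated listener, or they share the listener matching
        # the configured interbroker security protocol.
        if use_separate_listener:
            return port_mappings[dedicated_listener_name]
        return port_mappings[interbroker_security_protocol]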
Example #13
    def __init__(self, context, kafka):
        Service.__init__(self, context, num_nodes=1)
        self.bootstrap_servers = kafka.bootstrap_servers(validate=False)
        self.consumer_node = self.nodes[0].account.hostname
Example #14
    def __init__(self, context, kafka):
        Service.__init__(self, context, num_nodes=1)
        self.bootstrap_servers = kafka.bootstrap_servers(validate=False)
        self.producer_node = self.nodes[0].account.hostname
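Taken together, the last two examples show the usual shape of a single-node client wrapper: resolve the broker list once from the Kafka service, remember which node will run the client, and let the ducktape start() call do the rest. A wiring sketch in which both class names are placeholders for whatever classes these constructors belong to:

    producer = SomeProducerService(test_context, kafka)  # Example #14 pattern
    consumer = SomeConsumerService(test_context, kafka)  # Example #13 pattern
    producer.start()
    consumer.start()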