Example #1
 def instance(cls, broker_id, zk_host, zk_port, zk_chroot=None,
              host=None, port=None,
              transport='PLAINTEXT', replicas=1, partitions=2):
     if zk_chroot is None:
         zk_chroot = "kafka-python_" + str(uuid.uuid4()).replace("-", "_")
     if "KAFKA_URI" in os.environ:
         parse = urlparse(os.environ["KAFKA_URI"])
         (host, port) = (parse.hostname, parse.port)
         fixture = ExternalService(host, port)
     else:
         if port is None:
             port = get_open_port()
         # force IPv6 here because of a confusing point:
         #
         #  - if the string "localhost" is passed, Kafka will *only* bind to the IPv4 address of localhost
         #    (127.0.0.1); however, kafka-python will attempt to connect on ::1 and fail
         #
         #  - if the address literal 127.0.0.1 is passed, the metadata request during bootstrap will return
         #    the name "localhost" and we'll go back to the first case. This is odd!
         #
         # Ideally, Kafka would bind to all loopback addresses when we tell it to listen on "localhost" the
         # way it makes an IPv6 socket bound to both 0.0.0.0/0 and ::/0 when we tell it to bind to "" (that is
         #    to say, when we make a listener of PLAINTEXT://:port).
         #
         # Note that even though we specify the bind host in bracket notation, Kafka responds to the bootstrap
         # metadata request without square brackets later.
         if host is None:
             host = "[::1]"
         fixture = KafkaFixture(host, port, broker_id,
                                zk_host, zk_port, zk_chroot,
                                transport=transport,
                                replicas=replicas, partitions=partitions)
         fixture.open()
     return fixture
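
All of these fixtures lean on a get_open_port() helper that is not shown in the excerpts. A minimal sketch of such a helper (an assumption, not the project's actual implementation) could look like this:

import socket

def get_open_port():
    # Ask the OS for a free ephemeral port by binding to port 0,
    # then release the socket and hand the port number back.
    sock = socket.socket()
    sock.bind(("", 0))
    port = sock.getsockname()[1]
    sock.close()
    return port
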
Example #2
    def instance(cls):
        if "ZOOKEEPER_URI" in os.environ:
            parse = urlparse(os.environ["ZOOKEEPER_URI"])
            (host, port) = (parse.hostname, parse.port)
            fixture = ExternalService(host, port)
        else:
            (host, port) = ("127.0.0.1", get_open_port())
            fixture = cls(host, port)

        fixture.open()
        return fixture
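
For reference, the ZOOKEEPER_URI branch relies on urlparse pulling the host and port out of a URI-shaped environment variable. A quick illustration (the zookeeper:// scheme is only an assumption about how the variable is formatted):

from urllib.parse import urlparse

parse = urlparse("zookeeper://10.0.0.5:2181")
print(parse.hostname, parse.port)  # -> 10.0.0.5 2181
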
Example #3
    def instance(cls):
        if "ZOOKEEPER_URI" in os.environ:
            parse = urlparse(os.environ["ZOOKEEPER_URI"])
            (host, port) = (parse.hostname, parse.port)
            fixture = ExternalService(host, port)
        else:
            (host, port) = ("127.0.0.1", get_open_port())
            fixture = cls(host, port)

        fixture.open()
        return fixture
Example #4
 def instance(cls, broker_id, zk_host, zk_port, zk_chroot=None, replicas=1, partitions=2):
     if zk_chroot is None:
         zk_chroot = "kafka-python_" + str(uuid.uuid4()).replace("-", "_")
     if "KAFKA_URI" in os.environ:
         parse = urlparse(os.environ["KAFKA_URI"])
         (host, port) = (parse.hostname, parse.port)
         fixture = ExternalService(host, port)
     else:
         (host, port) = ("127.0.0.1", get_open_port())
         fixture = KafkaFixture(host, port, broker_id, zk_host, zk_port, zk_chroot, replicas, partitions)
         fixture.open()
     return fixture
Example #5
 def instance(cls,
              broker_id,
              zk_host,
              zk_port,
              zk_chroot=None,
              host=None,
              port=None,
              transport='PLAINTEXT',
              replicas=1,
              partitions=2):
     if zk_chroot is None:
         zk_chroot = "kafka-python_" + str(uuid.uuid4()).replace("-", "_")
     if "KAFKA_URI" in os.environ:
         parse = urlparse(os.environ["KAFKA_URI"])
         (host, port) = (parse.hostname, parse.port)
         fixture = ExternalService(host, port)
     else:
         if port is None:
             port = get_open_port()
         # force IPv6 here because of a confusing point:
         #
         #  - if the string "localhost" is passed, Kafka will *only* bind to the IPv4 address of localhost
         #    (127.0.0.1); however, kafka-python will attempt to connect on ::1 and fail
         #
         #  - if the address literal 127.0.0.1 is passed, the metadata request during bootstrap will return
         #    the name "localhost" and we'll go back to the first case. This is odd!
         #
         # Ideally, Kafka would bind to all loopback addresses when we tell it to listen on "localhost" the
         # way it makes an IPv6 socket bound to both 0.0.0.0/0 and ::/0 when we tell it to bind to "" (that is
         #    to say, when we make a listener of PLAINTEXT://:port).
         #
         # Note that even though we specify the bind host in bracket notation, Kafka responds to the bootstrap
         # metadata request without square brackets later.
         if host is None:
             host = "[::1]"
         fixture = KafkaFixture(host,
                                port,
                                broker_id,
                                zk_host,
                                zk_port,
                                zk_chroot,
                                transport=transport,
                                replicas=replicas,
                                partitions=partitions)
         fixture.open()
     return fixture
Example #6
 def instance(cls,
              broker_id,
              zk_host,
              zk_port,
              zk_chroot=None,
              replicas=1,
              partitions=2):
     if zk_chroot is None:
         zk_chroot = "kafka-python_" + str(uuid.uuid4()).replace("-", "_")
     if "KAFKA_URI" in os.environ:
         parse = urlparse(os.environ["KAFKA_URI"])
         (host, port) = (parse.hostname, parse.port)
         fixture = ExternalService(host, port)
     else:
         (host, port) = ("127.0.0.1", get_open_port())
         fixture = KafkaFixture(host, port, broker_id, zk_host, zk_port,
                                zk_chroot, replicas, partitions)
         fixture.open()
     return fixture
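
A rough usage sketch of how these factory methods are typically combined in a test harness; the ZookeeperFixture name and the exact call order are assumptions based on the excerpts above:

# Hypothetical test setup: start Zookeeper, then a single Kafka broker
# chrooted under it, and tear both down afterwards.
zk = ZookeeperFixture.instance()
kafka = KafkaFixture.instance(0, zk.host, zk.port)
try:
    pass  # exercise a client against kafka.host / kafka.port here
finally:
    kafka.close()   # close() is also registered via atexit in open()
    zk.close()
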
Example #7
    def open(self):
        self.tmp_dir = tempfile.mkdtemp()
        self.out("Running local instance...")
        log.info("  host    = %s", self.host)
        log.info("  port    = %s", self.port or '(auto)')
        log.info("  tmp_dir = %s", self.tmp_dir)

        # Configure Zookeeper child process
        template = self.test_resource("zookeeper.properties")
        properties = os.path.join(self.tmp_dir, "zookeeper.properties")
        args = self.kafka_run_class_args(
            "org.apache.zookeeper.server.quorum.QuorumPeerMain", properties)
        env = self.kafka_run_class_env()

        # Party!
        timeout = 5
        max_timeout = 120
        backoff = 1
        end_at = time.time() + max_timeout
        tries = 1
        auto_port = (self.port is None)
        while time.time() < end_at:
            if auto_port:
                self.port = get_open_port()
            self.out('Attempting to start on port %d (try #%d)' %
                     (self.port, tries))
            self.render_template(template, properties, vars(self))
            self.child = SpawnedService(args, env)
            self.child.start()
            timeout = min(timeout, max(end_at - time.time(), 0))
            if self.child.wait_for(r"binding to port", timeout=timeout):
                break
            self.child.dump_logs()
            self.child.stop()
            timeout *= 2
            time.sleep(backoff)
            tries += 1
            backoff += 1
        else:
            raise RuntimeError('Failed to start Zookeeper before max_timeout')
        self.out("Done!")
        atexit.register(self.close)
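
The loop above follows a start/verify/back-off pattern that is easy to miss among the details. Stripped down to its skeleton (illustrative only, with hypothetical launch/is_ready callables), it is:

import time

def start_with_retries(launch, is_ready, max_timeout=120):
    end_at = time.time() + max_timeout
    timeout, backoff, tries = 5, 1, 1
    while time.time() < end_at:
        child = launch(tries)                 # e.g. pick a fresh port per attempt
        timeout = min(timeout, max(end_at - time.time(), 0))
        if is_ready(child, timeout):          # e.g. wait for "binding to port" in the logs
            return child
        child.stop()                          # give up on this attempt
        timeout *= 2                          # allow more time on the next attempt
        time.sleep(backoff)
        backoff += 1
        tries += 1
    raise RuntimeError('Failed to start before max_timeout')
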
Example #8
    def open(self):
        self.tmp_dir = tempfile.mkdtemp()
        self.out("Running local instance...")
        log.info("  host    = %s", self.host)
        log.info("  port    = %s", self.port or '(auto)')
        log.info("  tmp_dir = %s", self.tmp_dir)

        # Configure Zookeeper child process
        template = self.test_resource("zookeeper.properties")
        properties = os.path.join(self.tmp_dir, "zookeeper.properties")
        args = self.kafka_run_class_args("org.apache.zookeeper.server.quorum.QuorumPeerMain", properties)
        env = self.kafka_run_class_env()

        # Party!
        timeout = 5
        max_timeout = 30
        backoff = 1
        end_at = time.time() + max_timeout
        tries = 1
        auto_port = (self.port is None)
        while time.time() < end_at:
            if auto_port:
                self.port = get_open_port()
            self.out('Attempting to start on port %d (try #%d)' % (self.port, tries))
            self.render_template(template, properties, vars(self))
            self.child = SpawnedService(args, env)
            self.child.start()
            timeout = min(timeout, max(end_at - time.time(), 0))
            if self.child.wait_for(r"binding to port", timeout=timeout):
                break
            self.child.dump_logs()
            self.child.stop()
            timeout *= 2
            time.sleep(backoff)
            tries += 1
        else:
            raise Exception('Failed to start Zookeeper before max_timeout')
        self.out("Done!")
        atexit.register(self.close)
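
Both Zookeeper variants call render_template() to turn a .properties template into a concrete config using the fixture's attributes (vars(self)). A plausible sketch of such a helper, assuming string.Template-style $placeholders in the template file (the real implementation is not part of the excerpt):

import string

def render_template(source_file, target_file, binding):
    # Substitute $host, $port, $tmp_dir, ... placeholders with fixture attributes.
    with open(source_file) as f:
        template = string.Template(f.read())
    with open(target_file, "w") as f:
        f.write(template.safe_substitute(binding))
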
Example #9
    def open(self):
        if self.running:
            self.out("Instance already running")
            return

        self.tmp_dir = tempfile.mkdtemp()
        self.out("Running local instance...")
        log.info("  host       = %s", self.host)
        log.info("  port       = %s", self.port or '(auto)')
        log.info("  transport  = %s", self.transport)
        log.info("  broker_id  = %s", self.broker_id)
        log.info("  zk_host    = %s", self.zk_host)
        log.info("  zk_port    = %s", self.zk_port)
        log.info("  zk_chroot  = %s", self.zk_chroot)
        log.info("  replicas   = %s", self.replicas)
        log.info("  partitions = %s", self.partitions)
        log.info("  tmp_dir    = %s", self.tmp_dir)

        # Create directories
        os.mkdir(os.path.join(self.tmp_dir, "logs"))
        os.mkdir(os.path.join(self.tmp_dir, "data"))

        self.out("Creating Zookeeper chroot node...")
        args = self.kafka_run_class_args(
            "org.apache.zookeeper.ZooKeeperMain", "-server",
            "%s:%d" % (self.zk_host, self.zk_port), "create",
            "/%s" % self.zk_chroot, "kafka-python")
        env = self.kafka_run_class_env()
        proc = subprocess.Popen(args,
                                env=env,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)

        if proc.wait() != 0:
            self.out("Failed to create Zookeeper chroot node")
            self.out(proc.stdout.read())
            self.out(proc.stderr.read())
            raise RuntimeError("Failed to create Zookeeper chroot node")
        self.out("Done!")

        # Configure Kafka child process
        properties = os.path.join(self.tmp_dir, "kafka.properties")
        template = self.test_resource("kafka.properties")
        args = self.kafka_run_class_args("kafka.Kafka", properties)
        env = self.kafka_run_class_env()

        timeout = 5
        max_timeout = 30
        backoff = 1
        end_at = time.time() + max_timeout
        tries = 1
        auto_port = (self.port is None)
        while time.time() < end_at:
            # We have had problems with port conflicts on travis
            # so we will try a different port on each retry
            # unless the fixture was passed a specific port
            if auto_port:
                self.port = get_open_port()
            self.out('Attempting to start on port %d (try #%d)' %
                     (self.port, tries))
            self.render_template(template, properties, vars(self))
            self.child = SpawnedService(args, env)
            self.child.start()
            timeout = min(timeout, max(end_at - time.time(), 0))
            if self.child.wait_for(r"\[Kafka Server %d\], Started" %
                                   self.broker_id,
                                   timeout=timeout):
                break
            self.child.dump_logs()
            self.child.stop()
            timeout *= 2
            time.sleep(backoff)
            tries += 1
        else:
            raise Exception('Failed to start KafkaInstance before max_timeout')
        self.out("Done!")
        self.running = True
        atexit.register(self.close)
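
One detail worth noting in the chroot-creation step: reading proc.stdout/proc.stderr only after proc.wait() can deadlock if the child fills a pipe buffer. A variant using communicate(), which drains both pipes while waiting, might look like this (illustrative only, reusing an args/env pair built as above):

import subprocess

def create_zk_chroot(args, env):
    proc = subprocess.Popen(args, env=env,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()   # waits and reads both streams
    if proc.returncode != 0:
        raise RuntimeError("Failed to create Zookeeper chroot node:\n%s\n%s"
                           % (stdout, stderr))
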
Example #10
    def open(self):
        if self.running:
            self.out("Instance already running")
            return

        self.tmp_dir = tempfile.mkdtemp()
        self.out("Running local instance...")
        log.info("  host       = %s", self.host)
        log.info("  port       = %s", self.port or '(auto)')
        log.info("  transport  = %s", self.transport)
        log.info("  broker_id  = %s", self.broker_id)
        log.info("  zk_host    = %s", self.zk_host)
        log.info("  zk_port    = %s", self.zk_port)
        log.info("  zk_chroot  = %s", self.zk_chroot)
        log.info("  replicas   = %s", self.replicas)
        log.info("  partitions = %s", self.partitions)
        log.info("  tmp_dir    = %s", self.tmp_dir)

        # Create directories
        os.mkdir(os.path.join(self.tmp_dir, "logs"))
        os.mkdir(os.path.join(self.tmp_dir, "data"))

        self.out("Creating Zookeeper chroot node...")
        args = self.kafka_run_class_args("org.apache.zookeeper.ZooKeeperMain",
                                         "-server", "%s:%d" % (self.zk_host, self.zk_port),
                                         "create",
                                         "/%s" % self.zk_chroot,
                                         "kafka-python")
        env = self.kafka_run_class_env()
        proc = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        if proc.wait() != 0:
            self.out("Failed to create Zookeeper chroot node")
            self.out(proc.stdout.read())
            self.out(proc.stderr.read())
            raise RuntimeError("Failed to create Zookeeper chroot node")
        self.out("Done!")

        # Configure Kafka child process
        properties = os.path.join(self.tmp_dir, "kafka.properties")
        template = self.test_resource("kafka.properties")
        args = self.kafka_run_class_args("kafka.Kafka", properties)
        env = self.kafka_run_class_env()

        timeout = 5
        max_timeout = 30
        backoff = 1
        end_at = time.time() + max_timeout
        tries = 1
        auto_port = (self.port is None)
        while time.time() < end_at:
            # We have had problems with port conflicts on travis
            # so we will try a different port on each retry
            # unless the fixture was passed a specific port
            if auto_port:
                self.port = get_open_port()
            self.out('Attempting to start on port %d (try #%d)' % (self.port, tries))
            self.render_template(template, properties, vars(self))
            self.child = SpawnedService(args, env)
            self.child.start()
            timeout = min(timeout, max(end_at - time.time(), 0))
            if self.child.wait_for(r"\[Kafka Server %d\], Started" %
                                   self.broker_id, timeout=timeout):
                break
            self.child.dump_logs()
            self.child.stop()
            timeout *= 2
            time.sleep(backoff)
            tries += 1
        else:
            raise Exception('Failed to start KafkaInstance before max_timeout')
        self.out("Done!")
        self.running = True
        atexit.register(self.close)