Example #1
    def prop_file(self):
        properties = {STATE_DIR: self.PERSISTENT_ROOT}
        if self.UPGRADE_FROM is not None:
            properties['upgrade.from'] = self.UPGRADE_FROM

        cfg = KafkaConfig(**properties)
        return cfg.render()
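All of the snippets on this page follow the same pattern: assemble a dict of Streams (and sometimes consumer) properties, pass it to KafkaConfig, and return the rendered text. As a rough, self-contained sketch of the assumed contract, Example #1 effectively produces a block of key=value lines; the stand-in class below is not the real kafkatest KafkaConfig, and the STATE_DIR / PERSISTENT_ROOT values are placeholders for illustration.

    # Hedged stand-in: NOT the real kafkatest KafkaConfig, only an illustration of
    # the assumed contract that render() emits one "key=value" line per property.
    class KafkaConfig(dict):
        def render(self):
            return "".join("%s=%s\n" % (key, value) for key, value in self.items())

    STATE_DIR = "state.dir"           # assumed Streams property name, for illustration
    PERSISTENT_ROOT = "/mnt/streams"  # assumed state directory, for illustration

    properties = {STATE_DIR: PERSISTENT_ROOT, "upgrade.from": "2.8"}
    print(KafkaConfig(**properties).render())
    # state.dir=/mnt/streams
    # upgrade.from=2.8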
Example #2
    def prop_file(self):
        properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
                      streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()}

        properties['input.topic'] = self.INPUT_TOPIC
        properties['aggregation.topic'] = self.AGGREGATION_TOPIC
        properties['add.operations'] = self.ADD_ADDITIONAL_OPS

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #3
    def prop_file(self):
        properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
                      streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()}
        if self.UPGRADE_FROM is not None:
            properties['upgrade.from'] = self.UPGRADE_FROM
        if self.UPGRADE_TO == "future_version":
            properties['test.future.metadata'] = "any_value"

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #4
    def prop_file(self):
        properties = {
            streams_property.STATE_DIR: self.PERSISTENT_ROOT,
            streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
            streams_property.PROCESSING_GUARANTEE: self.PROCESSING_GUARANTEE,
            streams_property.NUM_THREADS: self.NUM_THREADS
        }

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #5
    def prop_file(self):
        properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
                      streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
                      streams_property.NUM_THREADS: self.NUM_THREADS,
                      consumer_property.GROUP_INSTANCE_ID: self.GROUP_INSTANCE_ID,
                      consumer_property.SESSION_TIMEOUT_MS: 60000}

        properties['input.topic'] = self.INPUT_TOPIC

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #6
    def prop_file(self):
        properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
                      streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()}

        properties['topology.optimization'] = self.OPTIMIZED_CONFIG
        properties['input.topic'] = self.INPUT_TOPIC
        properties['aggregation.topic'] = self.AGGREGATION_TOPIC
        properties['reduce.topic'] = self.REDUCE_TOPIC
        properties['join.topic'] = self.JOIN_TOPIC

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #7
    def prop_file(self):
        properties = {
            streams_property.STATE_DIR: self.PERSISTENT_ROOT,
            streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
            streams_property.PROCESSING_GUARANTEE: self.PROCESSING_GUARANTEE
        }

        # Long.MAX_VALUE lets us do the assignment without a warmup
        properties['acceptable.recovery.lag'] = "9223372036854775807"

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #8
    def prop_file(self):
        properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
                      streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
                      streams_property.NUM_THREADS: self.NUM_THREADS,
                      consumer_property.GROUP_INSTANCE_ID: self.GROUP_INSTANCE_ID,
                      consumer_property.SESSION_TIMEOUT_MS: 60000}

        properties['input.topic'] = self.INPUT_TOPIC
        # TODO KIP-441: consider rewriting the test for HighAvailabilityTaskAssignor
        properties['internal.task.assignor.class'] = "org.apache.kafka.streams.processor.internals.assignment.StickyTaskAssignor"

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #9
    def prop_file(self):
        properties = {
            streams_property.STATE_DIR: self.PERSISTENT_ROOT,
            streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
            # the old broker (< 2.4) does not support configuration replication.factor=-1
            "replication.factor": 1
        }

        # Long.MAX_VALUE lets us do the assignment without a warmup
        properties['acceptable.recovery.lag'] = "9223372036854775807"

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #10
    def prop_file(self):
        properties = {
            streams_property.STATE_DIR: self.PERSISTENT_ROOT,
            streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
            streams_property.PROCESSING_GUARANTEE: self.PROCESSING_GUARANTEE,
            "acceptable.recovery.lag":
            "9223372036854775807",  # enable a one-shot assignment
            "session.timeout.ms":
            "10000"  # set back to 10s for tests. See KIP-735
        }

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #11
    def prop_file(self):
        properties = {
            streams_property.STATE_DIR: self.PERSISTENT_ROOT,
            streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
            # the old broker (< 2.4) does not support configuration replication.factor=-1
            "replication.factor": 1,
            "acceptable.recovery.lag":
            "9223372036854775807",  # enable a one-shot assignment
            "session.timeout.ms":
            "10000"  # set back to 10s for tests. See KIP-735
        }

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #12
    def prop_file(self):
        properties = {
            streams_property.STATE_DIR: self.PERSISTENT_ROOT,
            streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
            streams_property.NUM_THREADS: self.NUM_THREADS,
            consumer_property.GROUP_INSTANCE_ID: self.GROUP_INSTANCE_ID,
            consumer_property.SESSION_TIMEOUT_MS: 60000,  # set longer session timeout for static member test
            'input.topic': self.INPUT_TOPIC,
            "acceptable.recovery.lag": "9223372036854775807"  # enable a one-shot assignment
        }

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #13
    def prop_file(self):
        properties = {
            streams_property.STATE_DIR: self.PERSISTENT_ROOT,
            streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
            'input.topic': self.INPUT_TOPIC,
            'aggregation.topic': self.AGGREGATION_TOPIC,
            'add.operations': self.ADD_ADDITIONAL_OPS,
            "acceptable.recovery.lag":
            "9223372036854775807",  # enable a one-shot assignment
            "session.timeout.ms":
            "10000"  # set back to 10s for tests. See KIP-735
        }

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #14
    def prop_file(self):
        properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
                      streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()}

        properties['topology.optimization'] = self.OPTIMIZED_CONFIG
        properties['input.topic'] = self.INPUT_TOPIC
        properties['aggregation.topic'] = self.AGGREGATION_TOPIC
        properties['reduce.topic'] = self.REDUCE_TOPIC
        properties['join.topic'] = self.JOIN_TOPIC

        # Long.MAX_VALUE lets us do the assignment without a warmup
        properties['acceptable.recovery.lag'] = "9223372036854775807"

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #15
    def prop_file(self):
        properties = {
            streams_property.STATE_DIR: self.PERSISTENT_ROOT,
            streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()
        }

        properties['input.topic'] = self.INPUT_TOPIC
        properties['aggregation.topic'] = self.AGGREGATION_TOPIC
        properties['add.operations'] = self.ADD_ADDITIONAL_OPS

        # Long.MAX_VALUE lets us do the assignment without a warmup
        properties['acceptable.recovery.lag'] = "9223372036854775807"

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #16
    def prop_file(self):
        properties = {
            streams_property.STATE_DIR: self.PERSISTENT_ROOT,
            streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
            streams_property.NUM_THREADS: self.NUM_THREADS,
            consumer_property.GROUP_INSTANCE_ID: self.GROUP_INSTANCE_ID,
            consumer_property.SESSION_TIMEOUT_MS: 60000
        }

        properties['input.topic'] = self.INPUT_TOPIC

        # Long.MAX_VALUE lets us do the assignment without a warmup
        properties['acceptable.recovery.lag'] = "9223372036854775807"

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #17
    def prop_file(self):
        properties = self.extra_properties.copy()
        properties[streams_property.STATE_DIR] = self.PERSISTENT_ROOT
        properties[streams_property.KAFKA_SERVERS] = self.kafka.bootstrap_servers()

        if self.UPGRADE_FROM is not None:
            properties['upgrade.from'] = self.UPGRADE_FROM
        if self.UPGRADE_TO == "future_version":
            properties['test.future.metadata'] = "any_value"

        # Long.MAX_VALUE lets us do the assignment without a warmup
        properties['acceptable.recovery.lag'] = "9223372036854775807"

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #18
    def prop_file(self):
        properties = {
            streams_property.STATE_DIR: self.PERSISTENT_ROOT,
            streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
            'topology.optimization': self.OPTIMIZED_CONFIG,
            'input.topic': self.INPUT_TOPIC,
            'aggregation.topic': self.AGGREGATION_TOPIC,
            'reduce.topic': self.REDUCE_TOPIC,
            'join.topic': self.JOIN_TOPIC,
            "acceptable.recovery.lag":
            "9223372036854775807",  # enable a one-shot assignment
            "session.timeout.ms":
            "10000"  # set back to 10s for tests. See KIP-735
        }

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #19
    def prop_file(self):
        properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
                      streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
                      streams_property.PROCESSING_GUARANTEE: self.PROCESSING_GUARANTEE,
                      streams_property.NUM_THREADS: self.NUM_THREADS,
                      "replication.factor": self.REPLICATION_FACTOR,
                      "num.standby.replicas": 2,
                      "buffered.records.per.partition": 100,
                      "commit.interval.ms": 1000,
                      "auto.offset.reset": "earliest",
                      "acks": "all"}

        if self.UPGRADE_FROM is not None:
            properties['upgrade.from'] = self.UPGRADE_FROM

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #20
    def prop_file(self):
        properties = self.extra_properties.copy()
        properties[streams_property.STATE_DIR] = self.PERSISTENT_ROOT
        properties[streams_property.KAFKA_SERVERS] = self.kafka.bootstrap_servers()

        if self.UPGRADE_FROM is not None:
            properties['upgrade.from'] = self.UPGRADE_FROM
        if (self.UPGRADE_FROM is not None and KafkaVersion(self.UPGRADE_FROM).supports_fk_joins()) or \
            (self.KAFKA_STREAMS_VERSION is not None and KafkaVersion(self.KAFKA_STREAMS_VERSION).supports_fk_joins()):
            properties['test.run_fk_join'] = "true"
        if self.UPGRADE_TO == "future_version":
            properties['test.future.metadata'] = "any_value"

        # Long.MAX_VALUE lets us do the assignment without a warmup
        properties['acceptable.recovery.lag'] = "9223372036854775807"
        properties["session.timeout.ms"] = "10000"  # set back to 10s for tests. See KIP-735

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #21
    def prop_file(self):
        properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
                      streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()}

        if self.UPGRADE_FROM is not None:
            properties['upgrade.from'] = self.UPGRADE_FROM
        else:
            try:
                del properties['upgrade.from']
            except KeyError:
                self.logger.info("Key 'upgrade.from' not there, better safe than sorry")

        if self.upgrade_phase is not None:
            properties['upgrade.phase'] = self.upgrade_phase

        properties['source.topic'] = self.SOURCE_TOPIC
        properties['sink.topic'] = self.SINK_TOPIC
        properties['task.delimiter'] = self.TASK_DELIMITER
        properties['report.interval'] = self.REPORT_INTERVAL

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #22
    def prop_file(self):
        properties = {
            streams_property.STATE_DIR: self.PERSISTENT_ROOT,
            streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
            streams_property.PROCESSING_GUARANTEE: self.PROCESSING_GUARANTEE,
            streams_property.NUM_THREADS: self.NUM_THREADS,
            "replication.factor": self.REPLICATION_FACTOR,
            "num.standby.replicas": 2,
            "buffered.records.per.partition": 100,
            "commit.interval.ms": 1000,
            "auto.offset.reset": "earliest",
            "acks": "all",
            "acceptable.recovery.lag":
            "9223372036854775807",  # enable a one-shot assignment
            "session.timeout.ms":
            "10000"  # set back to 10s for tests. See KIP-735
        }

        if self.UPGRADE_FROM is not None:
            properties['upgrade.from'] = self.UPGRADE_FROM

        cfg = KafkaConfig(**properties)
        return cfg.render()
Example #23
    def prop_file(self):
        cfg = KafkaConfig(**{streams_property.STATE_DIR: self.PERSISTENT_ROOT,
                             streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()})
        return cfg.render()
Example #24
    def prop_file(self, node):
        cfg = KafkaConfig(**{STATE_DIR: self.PERSISTENT_ROOT})
        return cfg.render()
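What the caller does with the rendered string is outside the scope of these snippets, but in ducktape-based system tests the usual pattern is to write it to a properties file on the worker node before the Streams application is started. Below is a hedged, self-contained sketch of that idea; CONFIG_FILE, write_prop_file and the Fake* classes are placeholders standing in for the real service class and for ducktape's node and account objects, not code taken from the examples above.

    # Hedged usage sketch: the Fake* classes imitate the shape of ducktape's
    # ClusterNode/RemoteAccount so the example runs on its own; CONFIG_FILE is an
    # assumed path, not one taken from the snippets above.
    CONFIG_FILE = "/mnt/streams/streams.properties"

    class FakeAccount:
        def __init__(self):
            self.files = {}

        def create_file(self, path, contents):
            # mirrors ducktape's RemoteAccount.create_file(path, contents)
            self.files[path] = contents

    class FakeNode:
        def __init__(self):
            self.account = FakeAccount()

    def write_prop_file(node, rendered_props):
        # in the real services this would call self.prop_file() on the service class
        node.account.create_file(CONFIG_FILE, rendered_props)

    node = FakeNode()
    write_prop_file(node, "state.dir=/mnt/streams\nbootstrap.servers=worker1:9092\n")
    print(node.account.files[CONFIG_FILE])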