Example #1
    def __init__(self,
                 cluster,
                 topic,
                 group,
                 backoff_increment=1,
                 connect_retries=4,
                 fetch_size=307200,
                 offset_reset='nearest',
                 rebalance_retries=4,
                 ):
        """
        For more info see: samsa.topics.Topic.subscribe

        :param cluster: The cluster this consumer connects to.
        :type cluster: :class:`samsa.cluster.Cluster`.
        :param topic: The topic to consume messages from.
        :type topic: :class:`samsa.topics.Topic`.
        :param group: The consumer group to join.
        :param backoff_increment: How fast to incrementally backoff when a
                                  partition has no messages to read.
        :param connect_retries: Retries before giving up on connecting
        :param fetch_size: Default fetch size (in bytes) to get from Kafka
        :param offset_reset: Where to reset when an OffsetOutOfRange happens
        :param rebalance_retries: Retries before giving up on rebalance
        :rtype: :class:`samsa.consumer.consumer.Consumer`
        """
        self.connect_retries = connect_retries
        self.rebalance_retries = rebalance_retries

        self.cluster = cluster
        self.topic = topic
        self.group = group
        self.id = "%s:%s" % (socket.gethostname(), uuid4())

        self.id_path = '/consumers/%s/ids' % self.group

        self.partition_owner_registry = PartitionOwnerRegistry(
            self, cluster, topic, group, backoff_increment=backoff_increment,
            fetch_size=fetch_size, offset_reset=offset_reset)
        self.partitions = self.partition_owner_registry.get()

        # Keep track of the partition being read and how much has been read
        self._current_partition = None
        self._current_read_ct = 0

        # Watches
        # TODO: This is a *ton* of watches, some of which are duplicated
        #       elsewhere. This should be cleaned up and all watches put
        #       in a single zookeeper connector, like in the Scala driver.
        self._broker_watcher = None
        self._consumer_watcher = None
        self._topic_watcher = None
        self._topics_watcher = None
        self._rebalancing = True # To stop rebalance while setting watches

        self._add_self()
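
As the docstring notes, a Consumer is normally obtained through samsa.topics.Topic.subscribe rather than by calling this constructor directly. A minimal usage sketch, assuming a local ZooKeeper ensemble, a topic named 'test-topic', and a group 'my-group' (all placeholders), and assuming the consumer yields messages when iterated:

from kazoo.client import KazooClient
from samsa.cluster import Cluster

# Placeholder ZooKeeper address; point this at your own ensemble.
client = KazooClient(hosts='localhost:2181')
client.start()

cluster = Cluster(client)
topic = cluster.topics['test-topic']

# Topic.subscribe builds a Consumer like the one defined above and
# joins it to the given consumer group.
consumer = topic.subscribe('my-group')

# Assumption: the consumer yields messages as it reads its partitions.
for message in consumer:
    print message  # samsa targets Python 2, hence the print statement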

Example #2
    def setUp(self, bm, *args):
        super(TestPartitionOwnerRegistry, self).setUp()
        self.c = Cluster(self.client)
        # Mock broker with id 1; every broker lookup on the cluster
        # returns this same broker.
        broker = mock.Mock()
        broker.id = 1
        self.c.brokers.__getitem__.return_value = broker

        self.consumer = mock.Mock()
        self.consumer.id = '1234'
        self.topic = mock.Mock()
        self.topic.name = 'topic'

        self.por = PartitionOwnerRegistry(self.consumer, self.c, self.topic,
                                          'group')

        # Create 5 partitions on the same topic and broker, all feeding
        # a shared message queue
        self.partitions = []
        self.message_set_queue = Queue.Queue()
        for i in xrange(5):
            self.partitions.append(
                OwnedPartition(Partition(self.c, self.topic, broker, i),
                               'group', self.message_set_queue))
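
A test built on this fixture would then exercise the registry's ownership interface. A hedged sketch of such a test method, added alongside setUp in the same class; it assumes PartitionOwnerRegistry exposes add(), remove() and get(), as the Consumer code above suggests:

    def test_add_and_remove_partitions(self):
        # Claim three of the five partitions; the registry should report
        # exactly those as owned.
        self.por.add(self.partitions[:3])
        self.assertEquals(set(self.partitions[:3]), self.por.get())

        # Release two of them; only the third should remain owned.
        self.por.remove(self.partitions[:2])
        self.assertEquals(set(self.partitions[2:3]), self.por.get())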