def test_wait_on_event_uses_no_timeout_on_py3(self, mock_six):
    """On Python 3, wait_on_event() calls Event.wait() with no timeout."""
    mock_six.PY2 = False
    event = Mock()

    events.wait_on_event(event)

    event.wait.assert_called_once_with()
def test_wait_on_event_set_event_is_noop_on_py2(self, mock_six, mock_sys):
    """On Python 2, an already-set event is never waited on at all."""
    mock_six.PY2 = True
    event = Mock()
    event.is_set.return_value = True

    events.wait_on_event(event)

    assert event.wait.called is False
def test_wait_on_event_uses_maxint_on_py2(self, mock_six, mock_sys):
    """On Python 2, an unset event is waited on with sys.maxint as timeout."""
    mock_six.PY2 = True
    event = Mock()
    event.is_set.return_value = False

    def mark_event_set(*_):
        # Flip is_set on the first wait() call; otherwise wait_on_event's
        # polling loop would never terminate under the mock.
        event.is_set.return_value = True

    event.wait.side_effect = mark_event_set

    events.wait_on_event(event)

    event.wait.assert_called_once_with(mock_sys.maxint)
def remove_partitions(self, old_partitions):
    """
    Ensures that the ``SharedSet`` does *not* contain the given partitions.

    The ``old_partitions`` argument should be a dictionary keyed on topic
    names whose values are lists of associated partition IDs.
    """
    # Flatten {topic: [id, ...]} into "topic:id" strings — the same
    # encoding add_partitions() stores.  The previous code joined the
    # topic with the *list* value itself, which raises TypeError inside
    # str.join and could never match the stored items.
    gone = set()
    for topic, partition_ids in six.iteritems(old_partitions):
        gone.update(
            ":".join([topic, str(partition_id)])
            for partition_id in partition_ids
        )
    # Log the number of partition entries (not topics), consistent with
    # the message text and with add_partitions().
    log.info("Attempting to remove %d partitions from consumer group '%s'",
             len(gone), self.group_name)
    wait_on_event(self.connected)
    self.shared_set.remove_items(gone)
def remove_partitions(self, old_partitions):
    """
    Ensures that the ``SharedSet`` does *not* contain the given partitions.

    The ``old_partitions`` argument should be a dictionary keyed on topic
    names whose values are lists of associated partition IDs.
    """
    # Flatten {topic: [id, ...]} into "topic:id" strings — the same
    # encoding add_partitions() stores.  The previous code joined the
    # topic with the *list* value itself, which raises TypeError inside
    # str.join and could never match the stored items.
    gone = set()
    for topic, partition_ids in six.iteritems(old_partitions):
        gone.update(
            ":".join([topic, str(partition_id)])
            for partition_id in partition_ids
        )
    # Log the number of partition entries (not topics), consistent with
    # the message text and with add_partitions().
    log.info(
        "Attempting to remove %d partitions from consumer group '%s'",
        len(gone), self.group_name
    )
    wait_on_event(self.connected)
    self.shared_set.remove_items(gone)
def add_partitions(self, partitions):
    """
    Ensures that the ``SharedSet`` contains the given partitions.

    The ``partitions`` argument should be a dictionary keyed on topic
    names who's values are lists of associated partition IDs.
    """
    # Each stored item is the "topic:id" encoding of one partition.
    new_partitions = {
        ":".join([topic, str(partition_id)])
        for topic, partition_ids in six.iteritems(partitions)
        for partition_id in partition_ids
    }

    log.info("Attempting to add %d partitions to consumer group '%s'",
             len(new_partitions), self.group_name)

    wait_on_event(self.connected)

    self.shared_set.add_items(new_partitions)
def start(self, seed_partitions):
    """
    Connects to zookeeper and collects member and partition data.

    Bridges threaded async and tornado async via the ``create_attempter()``
    and ``wait_on_event()`` helpers.  Returns a ``Future`` once done so
    that coroutine methods may yield to it.
    """
    log.info("Starting partitioner for group '%s'", self.group_name)

    result = concurrent.Future()
    attempt = create_attempter(result)

    attempt(self.connect)
    wait_on_event(self.connected)

    # Each step short-circuits (becomes a no-op) once the future holds
    # an error from an earlier attempt.
    for step in (self.party.start, self.shared_set.start, self.party.join):
        attempt(step)
    attempt(self.add_partitions, seed_partitions)

    if not result.done():
        wait_on_event(self.members_collected)
        wait_on_event(self.partitions_collected)
        result.set_result(None)

    return result
def add_partitions(self, partitions):
    """
    Ensures that the ``SharedSet`` contains the given partitions.

    The ``partitions`` argument should be a dictionary keyed on topic
    names who's values are lists of associated partition IDs.
    """
    # Each stored item is the "topic:id" encoding of one partition.
    new_partitions = {
        ":".join([topic, str(partition_id)])
        for topic, partition_ids in six.iteritems(partitions)
        for partition_id in partition_ids
    }

    log.info(
        "Attempting to add %d partitions to consumer group '%s'",
        len(new_partitions), self.group_name
    )

    wait_on_event(self.connected)

    self.shared_set.add_items(new_partitions)
def test_wait_on_event_with_timeout(self):
    """An explicit timeout is forwarded straight through to Event.wait()."""
    event = Mock()

    events.wait_on_event(event, timeout=60)

    event.wait.assert_called_once_with(60)