def setUp(self):
        # mock seconds between batches to speed the tests up
        self._orig_secs = BatchingBolt.secs_between_batches

        BatchingBolt.secs_between_batches = 0.05

        self.tup_dicts = [{
            'id': 14,
            'comp': 'some_spout',
            'stream': 'default',
            'task': 'some_bolt',
            'tuple': [1, 2, 3]
        }, {
            'id': 15,
            'comp': 'some_spout',
            'stream': 'default',
            'task': 'some_bolt',
            'tuple': [4, 5, 6]
        }, {
            'id': 16,
            'comp': 'some_spout',
            'stream': 'default',
            'task': 'some_bolt',
            'tuple': [7, 8, 9]
        }]
        tups_json = '\nend\n'.join(
            [json.dumps(tup_dict) for tup_dict in self.tup_dicts] + [''])
        self.tups = [
            Tuple(tup_dict['id'], tup_dict['comp'], tup_dict['stream'],
                  tup_dict['task'], tup_dict['tuple'])
            for tup_dict in self.tup_dicts
        ]
        self.bolt = BatchingBolt(input_stream=StringIO(tups_json),
                                 output_stream=BytesIO())
        self.bolt.initialize({}, {})
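The fixture above builds Tuple positionally as (id, component, stream, task, values); later examples pass the same fields as keywords. For reading these snippets without the library installed, a stand-in with that inferred field order might look like this sketch (the library's own Tuple definition may differ):

from collections import namedtuple

# Hypothetical stand-in; field names and order are inferred from the calls
# in these snippets, not taken from the library itself.
Tuple = namedtuple('Tuple', ['id', 'component', 'stream', 'task', 'values'])

tup = Tuple(14, 'some_spout', 'default', 'some_bolt', [1, 2, 3])
assert tup.stream == 'default' and tup.values == [1, 2, 3]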
Example #2
    def setUp(self):
        # mock seconds between batches to speed the tests up
        self._orig_secs = BatchingBolt.secs_between_batches

        BatchingBolt.secs_between_batches = 0.05

        self.tup_dicts = [{'id': 14,
                           'comp': 'some_spout',
                           'stream': 'default',
                           'task': 'some_bolt',
                           'tuple': [1, 2, 3]},
                          {'id': 15,
                           'comp': 'some_spout',
                           'stream': 'default',
                           'task': 'some_bolt',
                           'tuple': [4, 5, 6]},
                          {'id': 16,
                           'comp': 'some_spout',
                           'stream': 'default',
                           'task': 'some_bolt',
                           'tuple': [7, 8, 9]}]
        tups_json = '\nend\n'.join([json.dumps(tup_dict) for tup_dict in
                                    self.tup_dicts] + [''])
        self.tups = [Tuple(tup_dict['id'], tup_dict['comp'], tup_dict['stream'],
                           tup_dict['task'], tup_dict['tuple']) for tup_dict in
                     self.tup_dicts]
        self.bolt = BatchingBolt(
            input_stream=itertools.cycle(tups_json.splitlines(True)),
            output_stream=BytesIO())
        self.bolt.initialize({}, {})
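Both setUp variants frame every JSON message with a trailing end line before feeding it to input_stream. Purely as an illustration of that framing (not the library's actual reader), a minimal parser could look like this:

import json
from io import StringIO

def read_messages(stream):
    """Yield each JSON message from a stream where an 'end' line terminates a message."""
    lines = []
    for line in stream:
        if line.strip() == 'end':
            yield json.loads(''.join(lines))
            lines = []
        else:
            lines.append(line)

msgs = '\nend\n'.join([json.dumps({'id': 14}), json.dumps({'id': 15}), ''])
assert [m['id'] for m in read_messages(StringIO(msgs))] == [14, 15]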
Example #3
    def setUp(self):
        self.ticks_between_batches = 1
        self.tup_dicts = [{
            'id': 14,
            'comp': 'some_spout',
            'stream': 'default',
            'task': 'some_bolt',
            'tuple': [1, 2, 3]
        }, {
            'id': 15,
            'comp': 'some_spout',
            'stream': 'default',
            'task': 'some_bolt',
            'tuple': [4, 5, 6]
        }, {
            'id': None,
            'comp': '__system',
            'stream': '__tick',
            'task': -1,
            'tuple': [1]
        }, {
            'id': 16,
            'comp': 'some_spout',
            'stream': 'default',
            'task': 'some_bolt',
            'tuple': [7, 8, 9]
        }, {
            'id': None,
            'comp': '__system',
            'stream': '__tick',
            'task': -1,
            'tuple': [2]
        }]
        tups_json = '\nend\n'.join(
            [json.dumps(tup_dict) for tup_dict in self.tup_dicts] + [''])
        self.tups = [
            Tuple(tup_dict['id'], tup_dict['comp'], tup_dict['stream'],
                  tup_dict['task'], tup_dict['tuple'])
            for tup_dict in self.tup_dicts
        ]
        self.nontick_tups = [
            tup for tup in self.tups if tup.stream != '__tick'
        ]
        self.bolt = BatchingBolt(
            input_stream=BytesIO(tups_json.encode('utf-8')),
            output_stream=BytesIO())
        self.bolt.initialize({}, {})
Example #4
    def setUp(self):
        # mock seconds between batches to speed the tests up
        self._orig_secs = BatchingBolt.secs_between_batches

        BatchingBolt.secs_between_batches = 0.05
        self.bolt = BatchingBolt(output_stream=BytesIO())
        self.bolt.initialize({}, {})

        # Mock read_tuple manually, since it all needs to be mocked
        self.tups = [
            Tuple(14, 'some_spout', 'default', 'some_bolt', [1, 2, 3]),
            Tuple(15, 'some_spout', 'default', 'some_bolt', [4, 5, 6]),
            Tuple(16, 'some_spout', 'default', 'some_bolt', [7, 8, 9])
        ]
        self._orig_read_tuple = self.bolt.read_tuple
        tups_cycle = itertools.cycle(self.tups)
        self.bolt.read_tuple = lambda: next(tups_cycle)
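Instead of hand-rolling the monkeypatch and remembering to restore _orig_read_tuple, the same cycling behaviour can be had from mock.patch.object with an iterable side_effect, which restores the attribute automatically. A self-contained sketch of that pattern on a toy class (FakeBolt is hypothetical, only there to make the snippet runnable):

import itertools
from unittest import mock

class FakeBolt:
    def read_tuple(self):
        raise NotImplementedError

bolt = FakeBolt()
tups = ['tup14', 'tup15', 'tup16']    # stand-ins for real Tuple objects

# An iterable side_effect makes each call return the next item; patch.object
# puts the original read_tuple back when the with-block exits.
with mock.patch.object(bolt, 'read_tuple', side_effect=itertools.cycle(tups)):
    assert [bolt.read_tuple() for _ in range(4)] == ['tup14', 'tup15', 'tup16', 'tup14']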
Example #5
    def setUp(self):
        self.ticks_between_batches = 1
        self.tup_dicts = [{'id': 14,
                           'comp': 'some_spout',
                           'stream': 'default',
                           'task': 'some_bolt',
                           'tuple': [1, 2, 3]},
                          {'id': 15,
                           'comp': 'some_spout',
                           'stream': 'default',
                           'task': 'some_bolt',
                           'tuple': [4, 5, 6]},
                          {'id': None,
                           'comp': '__system',
                           'stream': '__tick',
                           'task': -1,
                           'tuple': [1]},
                          {'id': 16,
                           'comp': 'some_spout',
                           'stream': 'default',
                           'task': 'some_bolt',
                           'tuple': [7, 8, 9]},
                          {'id': None,
                           'comp': '__system',
                           'stream': '__tick',
                           'task': -1,
                           'tuple': [1]}]
        tups_json = '\nend\n'.join([json.dumps(tup_dict) for tup_dict in
                                    self.tup_dicts] + [''])
        self.tups = [Tuple(tup_dict['id'], tup_dict['comp'], tup_dict['stream'],
                           tup_dict['task'], tup_dict['tuple']) for tup_dict in
                     self.tup_dicts]
        self.nontick_tups = [tup for tup in self.tups if tup.stream != '__tick']
        self.bolt = BatchingBolt(input_stream=StringIO(tups_json),
                                 output_stream=BytesIO())
        self.bolt.initialize({}, {})
Example #6
class BatchingBoltTests(unittest.TestCase):
    def setUp(self):
        # mock seconds between batches to speed the tests up
        self._orig_secs = BatchingBolt.secs_between_batches

        BatchingBolt.secs_between_batches = 0.05
        self.bolt = BatchingBolt(output_stream=BytesIO())
        self.bolt.initialize({}, {})

        # Mock read_tuple manually, since it all needs to be mocked
        self.tups = [
            Tuple(14, 'some_spout', 'default', 'some_bolt', [1, 2, 3]),
            Tuple(15, 'some_spout', 'default', 'some_bolt', [4, 5, 6]),
            Tuple(16, 'some_spout', 'default', 'some_bolt', [7, 8, 9])
        ]
        self._orig_read_tuple = self.bolt.read_tuple
        tups_cycle = itertools.cycle(self.tups)
        self.bolt.read_tuple = lambda: next(tups_cycle)

    def tearDown(self):
        # undo the mocking
        BatchingBolt.secs_between_batches = self._orig_secs
        self.bolt.read_tuple = self._orig_read_tuple

    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    def test_batching(self, process_batch_mock):
        # Add a bunch of tuples
        for __ in range(3):
            self.bolt._run()

        # Wait a bit, and see if process_batch was called
        time.sleep(0.5)
        process_batch_mock.assert_called_with(self.bolt, None, self.tups[:3])

    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    def test_group_key(self, process_batch_mock):
        # Change the group key to even/odd grouping
        self.bolt.group_key = lambda t: sum(t.values) % 2

        # Add a bunch of tuples
        for __ in range(3):
            self.bolt._run()

        # Wait a bit, and see if process_batch was called correctly
        time.sleep(0.5)
        process_batch_mock.assert_has_calls([
            mock.call(self.bolt, 0, [self.tups[0], self.tups[2]]),
            mock.call(self.bolt, 1, [self.tups[1]])
        ],
                                            any_order=True)

    def test_exception_handling(self):
        # Make sure the exception propagates from the worker thread to the
        # main thread
        with self.assertRaises(NotImplementedError):
            self.bolt._run()
            time.sleep(0.5)

    @patch.object(BatchingBolt, 'ack', autospec=True)
    @patch.object(BatchingBolt, 'process_batch', new=lambda *args: None)
    def test_auto_ack(self, ack_mock):
        # Test auto-ack on (the default)
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)
        ack_mock.assert_has_calls([
            mock.call(self.bolt, self.tups[0]),
            mock.call(self.bolt, self.tups[1]),
            mock.call(self.bolt, self.tups[2])
        ],
                                  any_order=True)
        ack_mock.reset_mock()

        # Test auto-ack off
        self.bolt.auto_ack = False
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)
        # Assert ack was not called; comparing call_args_list to [] shows any
        # unexpected calls in the failure message.
        self.assertListEqual(ack_mock.call_args_list, [])

    @patch.object(BatchingBolt, '_handle_worker_exception', autospec=True)
    @patch.object(BatchingBolt, 'fail', autospec=True)
    def test_auto_fail(self, fail_mock, worker_exception_mock):
        # Need to re-register signal handler with mocked version, because
        # mock gets created after handler was originally registered.
        self.setUp()
        # Test auto-fail on (the default)
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)

        # All waiting tuples should have failed at this point
        fail_mock.assert_has_calls([
            mock.call(self.bolt, self.tups[0]),
            mock.call(self.bolt, self.tups[1]),
            mock.call(self.bolt, self.tups[2])
        ],
                                   any_order=True)
        self.assertEqual(worker_exception_mock.call_count, 1)
        fail_mock.reset_mock()
        worker_exception_mock.reset_mock()

        # Test auto-fail off
        self.bolt.auto_fail = False
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)
        # Assert that neither fail nor the worker exception handler was called;
        # comparing call_args_list to [] shows any unexpected calls on failure.
        self.assertListEqual(fail_mock.call_args_list, [])
        self.assertListEqual(worker_exception_mock.call_args_list, [])

    @patch.object(BatchingBolt, '_handle_worker_exception', autospec=True)
    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    @patch.object(BatchingBolt, 'fail', autospec=True)
    def test_auto_fail_partial(self, fail_mock, process_batch_mock,
                               worker_exception_mock):
        # Need to re-register signal handler with mocked version, because
        # mock gets created after handler was originally registered.
        self.setUp()
        # Change the group key to be the sum of values, which makes 3 separate
        # batches
        self.bolt.group_key = lambda t: sum(t.values)
        # Make sure we fail on the second batch
        work = {'status': True}  # to avoid scoping problems

        def work_once(*args):
            if work['status']:
                work['status'] = False
            else:
                raise Exception('borkt')

        process_batch_mock.side_effect = work_once
        # Run the batches
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)
        # Only some tuples should have failed at this point. The key is that
        # all un-acked tuples should be failed, even for batches we haven't
        # started processing yet.
        self.assertEqual(fail_mock.call_count, 2)
        self.assertEqual(worker_exception_mock.call_count, 1)
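The class above patches process_batch and group_key on BatchingBolt itself; for orientation, a minimal subclass of the kind these tests stand in for might look like the sketch below. The import path and the emit call are assumptions drawn from the attributes exercised above, not a verbatim copy of any project's code.

from pystorm import BatchingBolt    # assumed import path; adjust per library version


class WordSumBolt(BatchingBolt):
    secs_between_batches = 2          # flush accumulated batches every 2 seconds

    def group_key(self, tup):
        # Tuples sharing a key are batched together; the default is a single
        # batch under the key None.
        return tup.values[0]

    def process_batch(self, key, tups):
        # Called once per key with all tuples collected for that key since the
        # last flush; acking/failing is left to the base class when
        # auto_ack/auto_fail stay on.
        self.emit([key, sum(sum(t.values) for t in tups)])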
Example #7
class BatchingBoltTests(unittest.TestCase):

    def setUp(self):
        self.ticks_between_batches = 1
        self.tup_dicts = [{'id': 14,
                           'comp': 'some_spout',
                           'stream': 'default',
                           'task': 'some_bolt',
                           'tuple': [1, 2, 3]},
                          {'id': 15,
                           'comp': 'some_spout',
                           'stream': 'default',
                           'task': 'some_bolt',
                           'tuple': [4, 5, 6]},
                          {'id': None,
                           'comp': '__system',
                           'stream': '__tick',
                           'task': -1,
                           'tuple': [1]},
                          {'id': 16,
                           'comp': 'some_spout',
                           'stream': 'default',
                           'task': 'some_bolt',
                           'tuple': [7, 8, 9]},
                          {'id': None,
                           'comp': '__system',
                           'stream': '__tick',
                           'task': -1,
                           'tuple': [1]}]
        tups_json = '\nend\n'.join([json.dumps(tup_dict) for tup_dict in
                                    self.tup_dicts] + [''])
        self.tups = [Tuple(tup_dict['id'], tup_dict['comp'], tup_dict['stream'],
                           tup_dict['task'], tup_dict['tuple']) for tup_dict in
                     self.tup_dicts]
        self.nontick_tups = [tup for tup in self.tups if tup.stream != '__tick']
        self.bolt = BatchingBolt(input_stream=StringIO(tups_json),
                                 output_stream=BytesIO())
        self.bolt.initialize({}, {})

    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    def test_batching(self, process_batch_mock):
        # Add a bunch of tuples
        for __ in self.tups:
            self.bolt._run()

        process_batch_mock.assert_called_with(self.bolt, None,
                                              self.nontick_tups)

    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    def test_group_key(self, process_batch_mock):
        # Change the group key to even/odd grouping
        self.bolt.group_key = lambda t: sum(t.values) % 2

        # Add a bunch of tuples
        for __ in self.tups:
            self.bolt._run()

        process_batch_mock.assert_has_calls([mock.call(self.bolt, 0,
                                                       [self.nontick_tups[0],
                                                        self.nontick_tups[2]]),
                                             mock.call(self.bolt, 1,
                                                       [self.nontick_tups[1]])],
                                            any_order=True)

    @patch.object(BatchingBolt, 'ack', autospec=True)
    @patch.object(BatchingBolt, 'process_batch', new=lambda *args: None)
    def test_auto_ack_on(self, ack_mock):
        # Test auto-ack on (the default)
        for __ in self.tups:
            self.bolt._run()
        ack_mock.assert_has_calls([mock.call(self.bolt, self.nontick_tups[0]),
                                   mock.call(self.bolt, self.nontick_tups[1]),
                                   mock.call(self.bolt, self.nontick_tups[2])],
                                  any_order=True)
        self.assertEqual(ack_mock.call_count, 3)

    @patch.object(BatchingBolt, 'ack', autospec=True)
    @patch.object(BatchingBolt, 'process_batch', new=lambda *args: None)
    def test_auto_ack_off(self, ack_mock):
        # Test auto-ack off
        self.bolt.auto_ack = False
        for __ in self.tups:
            self.bolt._run()
        # Assert ack was not called; comparing call_args_list to [] shows any
        # unexpected calls in the failure message.
        self.assertListEqual(ack_mock.call_args_list, [])
        self.assertEqual(ack_mock.call_count, 0)

    @patch.object(BatchingBolt, 'read_handshake', new=lambda x: ({}, {}))
    @patch.object(BatchingBolt, 'raise_exception', new=lambda *a: None)
    @patch('sys.exit', autospec=True)
    @patch.object(BatchingBolt, 'fail', autospec=True)
    def test_auto_fail_on(self, fail_mock, exit_mock):
        # Need to re-register signal handler with mocked version, because
        # mock gets created after handler was originally registered.
        self.setUp()
        # Test auto-fail on (the default)
        self.bolt.run()

        # All waiting tuples should have failed at this point
        fail_mock.assert_has_calls([mock.call(self.bolt, self.nontick_tups[0]),
                                    mock.call(self.bolt, self.nontick_tups[1]),
                                    mock.call(self.bolt, self.nontick_tups[2])],
                                   any_order=True)
        self.assertEqual(fail_mock.call_count, 3)
        self.assertEqual(exit_mock.call_count, 1)

    @patch.object(BatchingBolt, 'read_handshake', new=lambda x: ({}, {}))
    @patch.object(BatchingBolt, 'raise_exception', new=lambda *a: None)
    @patch('sys.exit', autospec=True)
    @patch.object(BatchingBolt, 'fail', autospec=True)
    def test_auto_fail_off(self, fail_mock, exit_mock):
        # Need to re-register signal handler with mocked version, because
        # mock gets created after handler was originally registered.
        self.setUp()
        # Test auto-fail off
        self.bolt.auto_fail = False
        self.bolt.run()

        # All waiting tuples should have failed at this point
        self.assertEqual(exit_mock.call_count, 1)
        self.assertListEqual(fail_mock.call_args_list, [])

    @patch.object(BatchingBolt, 'read_handshake', new=lambda x: ({}, {}))
    @patch('sys.exit', autospec=True)
    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    @patch.object(BatchingBolt, 'fail', autospec=True)
    def test_auto_fail_partial(self, fail_mock, process_batch_mock, exit_mock):
        # Need to re-register signal handler with mocked version, because
        # mock gets created after handler was originally registered.
        self.setUp()
        # Change the group key to be the sum of values, which makes 3 separate
        # batches
        self.bolt.group_key = lambda t: sum(t.values)
        # Make sure we fail on the second batch
        work = {'status': True} # to avoid scoping problems
        def work_once(*args):
            if work['status']:
                work['status'] = False
            else:
                raise Exception('borkt')
        process_batch_mock.side_effect = work_once
        # Run the batches
        self.bolt.run()
        # Only some tuples should have failed at this point. The key is that
        # all un-acked tuples should be failed, even for batches we haven't
        # started processing yet.
        self.assertEqual(fail_mock.call_count, 2)
        self.assertEqual(exit_mock.call_count, 1)

    @patch.object(BatchingBolt, 'read_tuple', autospec=True)
    @patch.object(BatchingBolt, 'send_message', autospec=True)
    def test_heartbeat_response(self, send_message_mock, read_tuple_mock):
        # Make sure we send sync for heartbeats
        read_tuple_mock.return_value = Tuple(id='foo', task=-1,
                                             stream='__heartbeat', values=[],
                                             component='__system')
        self.bolt._run()
        send_message_mock.assert_called_with(self.bolt, {'command': 'sync'})

    @patch.object(BatchingBolt, 'read_tuple', autospec=True)
    @patch.object(BatchingBolt, 'process_tick', autospec=True)
    def test_process_tick(self, process_tick_mock, read_tuple_mock):
        # Make sure process_tick is called for tick tuples
        read_tuple_mock.return_value = Tuple(id=None, task=-1,
                                             component='__system',
                                             stream='__tick', values=[50])
        self.bolt._run()
        process_tick_mock.assert_called_with(self.bolt,
                                             read_tuple_mock.return_value)
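This variant drives batching with __tick tuples and ticks_between_batches = 1 instead of a timer, which is why each test simply runs _run() once per queued tuple and needs no sleeps. Purely as an illustration of that flushing rule (not the library's implementation), the counting logic amounts to:

class TickBatcher:
    """Toy model of tick-driven flushing; illustration only."""
    ticks_between_batches = 1

    def __init__(self):
        self._tick_count = 0
        self._batches = {}                      # group key -> list of tuples

    def add(self, key, tup):
        self._batches.setdefault(key, []).append(tup)

    def tick(self):
        self._tick_count += 1
        if self._tick_count >= self.ticks_between_batches:
            self._tick_count = 0
            flushed, self._batches = self._batches, {}
            return flushed                      # caller invokes process_batch per key
        return {}

batcher = TickBatcher()
batcher.add(None, [1, 2, 3])
batcher.add(None, [4, 5, 6])
assert batcher.tick() == {None: [[1, 2, 3], [4, 5, 6]]}
assert batcher.tick() == {}                     # nothing pending after a flush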
Example #8
class BatchingBoltTests(unittest.TestCase):
    def setUp(self):
        self.ticks_between_batches = 1
        self.tup_dicts = [{
            'id': 14,
            'comp': 'some_spout',
            'stream': 'default',
            'task': 'some_bolt',
            'tuple': [1, 2, 3]
        }, {
            'id': 15,
            'comp': 'some_spout',
            'stream': 'default',
            'task': 'some_bolt',
            'tuple': [4, 5, 6]
        }, {
            'id': None,
            'comp': '__system',
            'stream': '__tick',
            'task': -1,
            'tuple': [1]
        }, {
            'id': 16,
            'comp': 'some_spout',
            'stream': 'default',
            'task': 'some_bolt',
            'tuple': [7, 8, 9]
        }, {
            'id': None,
            'comp': '__system',
            'stream': '__tick',
            'task': -1,
            'tuple': [2]
        }]
        tups_json = '\nend\n'.join(
            [json.dumps(tup_dict) for tup_dict in self.tup_dicts] + [''])
        self.tups = [
            Tuple(tup_dict['id'], tup_dict['comp'], tup_dict['stream'],
                  tup_dict['task'], tup_dict['tuple'])
            for tup_dict in self.tup_dicts
        ]
        self.nontick_tups = [
            tup for tup in self.tups if tup.stream != '__tick'
        ]
        self.bolt = BatchingBolt(
            input_stream=BytesIO(tups_json.encode('utf-8')),
            output_stream=BytesIO())
        self.bolt.initialize({}, {})

    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    def test_batching(self, process_batch_mock):
        # Add a bunch of tuples
        for __ in self.tups:
            self.bolt._run()

        process_batch_mock.assert_called_with(self.bolt, None,
                                              self.nontick_tups)

    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    def test_group_key(self, process_batch_mock):
        # Change the group key to even/odd grouping
        self.bolt.group_key = lambda t: sum(t.values) % 2

        # Add a bunch of tuples
        for __ in self.tups:
            self.bolt._run()

        process_batch_mock.assert_has_calls([
            mock.call(self.bolt, 0,
                      [self.nontick_tups[0], self.nontick_tups[2]]),
            mock.call(self.bolt, 1, [self.nontick_tups[1]])
        ],
                                            any_order=True)

    @patch.object(BatchingBolt, 'ack', autospec=True)
    @patch.object(BatchingBolt, 'process_batch', new=lambda *args: None)
    def test_auto_ack_on(self, ack_mock):
        # Test auto-ack on (the default)
        for __ in self.tups:
            self.bolt._run()
        ack_mock.assert_has_calls(
            [mock.call(self.bolt, tup) for tup in self.tups], any_order=True)
        self.assertEqual(ack_mock.call_count, 5)

    @patch.object(BatchingBolt, 'ack', autospec=True)
    @patch.object(BatchingBolt, 'process_batch', new=lambda *args: None)
    def test_auto_ack_off(self, ack_mock):
        # Test auto-ack off
        self.bolt.auto_ack = False
        for __ in self.tups:
            self.bolt._run()
        # Even with auto_ack off, the tick tuples themselves are still acked
        ack_mock.assert_has_calls([
            mock.call(self.bolt, tup)
            for tup in self.tups if self.bolt.is_tick(tup)
        ],
                                  any_order=True)
        self.assertEqual(ack_mock.call_count, 2)

    @patch.object(BatchingBolt, 'read_handshake', new=lambda x: ({}, {}))
    @patch.object(BatchingBolt, 'raise_exception', new=lambda *a: None)
    @patch('sys.exit', autospec=True)
    @patch.object(BatchingBolt, 'fail', autospec=True)
    def test_auto_fail_on(self, fail_mock, exit_mock):
        # Need to re-register signal handler with mocked version, because
        # mock gets created after handler was originally registered.
        self.setUp()
        # Test auto-fail on (the default)
        self.bolt.run()

        # All waiting tuples should have failed at this point
        fail_mock.assert_has_calls([
            mock.call(self.bolt, self.nontick_tups[0]),
            mock.call(self.bolt, self.nontick_tups[1]),
            mock.call(self.bolt, self.nontick_tups[2])
        ],
                                   any_order=True)
        self.assertEqual(fail_mock.call_count, 3)
        self.assertEqual(exit_mock.call_count, 1)

    @patch.object(BatchingBolt, 'read_handshake', new=lambda x: ({}, {}))
    @patch.object(BatchingBolt, 'raise_exception', new=lambda *a: None)
    @patch('sys.exit', autospec=True)
    @patch.object(BatchingBolt, 'fail', autospec=True)
    def test_auto_fail_off(self, fail_mock, exit_mock):
        # Need to re-register signal handler with mocked version, because
        # mock gets created after handler was originally registered.
        self.setUp()
        # Test auto-fail off
        self.bolt.auto_fail = False
        self.bolt.run()

        # All waiting tuples should have failed at this point
        self.assertEqual(exit_mock.call_count, 1)
        self.assertListEqual(fail_mock.call_args_list, [])

    @patch.object(BatchingBolt, 'read_handshake', new=lambda x: ({}, {}))
    @patch('sys.exit', autospec=True)
    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    @patch.object(BatchingBolt, 'fail', autospec=True)
    def test_auto_fail_partial(self, fail_mock, process_batch_mock, exit_mock):
        # Need to re-register signal handler with mocked version, because
        # mock gets created after handler was originally registered.
        self.setUp()
        # Change the group key to be the sum of values, which makes 3 separate
        # batches
        self.bolt.group_key = lambda t: sum(t.values)
        # Make sure we fail on the second batch
        work = {'status': True}  # to avoid scoping problems

        def work_once(*args):
            if work['status']:
                work['status'] = False
            else:
                raise Exception('borkt')

        process_batch_mock.side_effect = work_once
        # Run the batches
        self.bolt.run()
        # Only some tuples should have failed at this point. The key is that
        # all un-acked tuples should be failed, even for batches we haven't
        # started processing yet.
        self.assertEqual(fail_mock.call_count, 2)
        self.assertEqual(exit_mock.call_count, 1)

    @patch.object(BatchingBolt, 'read_tuple', autospec=True)
    @patch.object(BatchingBolt, 'send_message', autospec=True)
    def test_heartbeat_response(self, send_message_mock, read_tuple_mock):
        # Make sure we send sync for heartbeats
        read_tuple_mock.return_value = Tuple(id='foo',
                                             task=-1,
                                             stream='__heartbeat',
                                             values=[],
                                             component='__system')
        self.bolt._run()
        send_message_mock.assert_called_with(self.bolt, {'command': 'sync'})

    @patch.object(BatchingBolt, 'read_tuple', autospec=True)
    @patch.object(BatchingBolt, 'process_tick', autospec=True)
    def test_process_tick(self, process_tick_mock, read_tuple_mock):
        # Make sure process_tick is called for tick tuples
        read_tuple_mock.return_value = Tuple(id=None,
                                             task=-1,
                                             component='__system',
                                             stream='__tick',
                                             values=[50])
        self.bolt._run()
        process_tick_mock.assert_called_with(self.bolt,
                                             read_tuple_mock.return_value)
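The expected groupings in test_group_key and test_auto_fail_partial follow directly from the fixture values; a quick self-contained check of that arithmetic:

values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]

# Even/odd grouping (test_group_key): key 0 collects the 1st and 3rd tuples,
# key 1 collects the 2nd.
assert [sum(v) % 2 for v in values] == [0, 1, 0]

# Plain-sum grouping (test_auto_fail_partial): three distinct keys, hence
# three single-tuple batches, of which only the first is processed before the
# injected exception.
assert [sum(v) for v in values] == [6, 15, 24]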
Example #9
class BatchingBoltTests(unittest.TestCase):

    def setUp(self):
        # mock seconds between batches to speed the tests up
        self._orig_secs = BatchingBolt.secs_between_batches

        BatchingBolt.secs_between_batches = 0.05

        self.tup_dicts = [{'id': 14,
                           'comp': 'some_spout',
                           'stream': 'default',
                           'task': 'some_bolt',
                           'tuple': [1, 2, 3]},
                          {'id': 15,
                           'comp': 'some_spout',
                           'stream': 'default',
                           'task': 'some_bolt',
                           'tuple': [4, 5, 6]},
                          {'id': 16,
                           'comp': 'some_spout',
                           'stream': 'default',
                           'task': 'some_bolt',
                           'tuple': [7, 8, 9]}]
        tups_json = '\nend\n'.join([json.dumps(tup_dict) for tup_dict in
                                    self.tup_dicts] + [''])
        self.tups = [Tuple(tup_dict['id'], tup_dict['comp'], tup_dict['stream'],
                           tup_dict['task'], tup_dict['tuple']) for tup_dict in
                     self.tup_dicts]
        self.bolt = BatchingBolt(
            input_stream=itertools.cycle(tups_json.splitlines(True)),
            output_stream=BytesIO())
        self.bolt.initialize({}, {})

    def tearDown(self):
        # undo the mocking
        BatchingBolt.secs_between_batches = self._orig_secs

    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    def test_batching(self, process_batch_mock):
        # Add a bunch of tuples
        for __ in range(3):
            self.bolt._run()

        # Wait a bit, and see if process_batch was called
        time.sleep(0.5)
        process_batch_mock.assert_called_with(self.bolt, None, self.tups[:3])

    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    def test_group_key(self, process_batch_mock):
        # Change the group key to even/odd grouping
        self.bolt.group_key = lambda t: sum(t.values) % 2

        # Add a bunch of tuples
        for __ in range(3):
            self.bolt._run()

        # Wait a bit, and see if process_batch was called correctly
        time.sleep(0.5)
        process_batch_mock.assert_has_calls([mock.call(self.bolt, 0,
                                                       [self.tups[0],
                                                        self.tups[2]]),
                                             mock.call(self.bolt, 1,
                                                       [self.tups[1]])],
                                            any_order=True)

    def test_exception_handling(self):
        # Make sure the exception propagates from the worker thread to the
        # main thread
        with self.assertRaises(NotImplementedError):
            self.bolt._run()
            time.sleep(0.5)

    @patch.object(BatchingBolt, 'ack', autospec=True)
    @patch.object(BatchingBolt, 'process_batch', new=lambda *args: None)
    def test_auto_ack(self, ack_mock):
        # Test auto-ack on (the default)
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)
        ack_mock.assert_has_calls([mock.call(self.bolt, self.tups[0]),
                                   mock.call(self.bolt, self.tups[1]),
                                   mock.call(self.bolt, self.tups[2])],
                                  any_order=True)
        ack_mock.reset_mock()

        # Test auto-ack off
        self.bolt.auto_ack = False
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)
        # Assert ack was not called; comparing call_args_list to [] shows any
        # unexpected calls in the failure message.
        self.assertListEqual(ack_mock.call_args_list, [])

    @patch.object(BatchingBolt, '_handle_worker_exception', autospec=True)
    @patch.object(BatchingBolt, 'fail', autospec=True)
    def test_auto_fail(self, fail_mock, worker_exception_mock):
        # Need to re-register signal handler with mocked version, because
        # mock gets created after handler was originally registered.
        self.setUp()
        # Test auto-fail on (the default)
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)

        # All waiting tuples should have failed at this point
        fail_mock.assert_has_calls([mock.call(self.bolt, self.tups[0]),
                                    mock.call(self.bolt, self.tups[1]),
                                    mock.call(self.bolt, self.tups[2])],
                                   any_order=True)
        self.assertEqual(worker_exception_mock.call_count, 1)
        fail_mock.reset_mock()
        worker_exception_mock.reset_mock()

        # Test auto-fail off
        self.bolt.auto_fail = False
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)
        # Assert that neither fail nor the worker exception handler was called;
        # comparing call_args_list to [] shows any unexpected calls on failure.
        self.assertListEqual(fail_mock.call_args_list, [])
        self.assertListEqual(worker_exception_mock.call_args_list, [])

    @patch.object(BatchingBolt, '_handle_worker_exception', autospec=True)
    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    @patch.object(BatchingBolt, 'fail', autospec=True)
    def test_auto_fail_partial(self, fail_mock, process_batch_mock,
                               worker_exception_mock):
        # Need to re-register signal handler with mocked version, because
        # mock gets created after handler was originally registered.
        self.setUp()
        # Change the group key to be the sum of values, which makes 3 separate
        # batches
        self.bolt.group_key = lambda t: sum(t.values)
        # Make sure we fail on the second batch
        work = {'status': True} # to avoid scoping problems
        def work_once(*args):
            if work['status']:
                work['status'] = False
            else:
                raise Exception('borkt')
        process_batch_mock.side_effect = work_once
        # Run the batches
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)
        # Only some tuples should have failed at this point. The key is that
        # all un-acked tuples should be failed, even for batches we haven't
        # started processing yet.
        self.assertEqual(fail_mock.call_count, 2)
        self.assertEqual(worker_exception_mock.call_count, 1)

    @patch.object(BatchingBolt, 'read_tuple', autospec=True)
    @patch.object(BatchingBolt, 'send_message', autospec=True)
    def test_heartbeat_response(self, send_message_mock, read_tuple_mock):
        # Make sure we send sync for heartbeats
        read_tuple_mock.return_value = Tuple(id='foo', task=-1,
                                             stream='__heartbeat', values=[],
                                             component='__system')
        self.bolt._run()
        send_message_mock.assert_called_with(self.bolt, {'command': 'sync'})

    @patch.object(BatchingBolt, 'read_tuple', autospec=True)
    @patch.object(BatchingBolt, 'process_tick', autospec=True)
    def test_process_tick(self, process_tick_mock, read_tuple_mock):
        # Make sure process_tick is called for tick tuples
        read_tuple_mock.return_value = Tuple(id='foo', task=-1,
                                             component='__system',
                                             stream='__tick', values=[50])
        self.bolt._run()
        process_tick_mock.assert_called_with(self.bolt, 50)
Example #10
class BatchingBoltTests(unittest.TestCase):
    def setUp(self):
        # mock seconds between batches to speed the tests up
        self._orig_secs = BatchingBolt.secs_between_batches

        BatchingBolt.secs_between_batches = 0.05

        self.tup_dicts = [{
            'id': 14,
            'comp': 'some_spout',
            'stream': 'default',
            'task': 'some_bolt',
            'tuple': [1, 2, 3]
        }, {
            'id': 15,
            'comp': 'some_spout',
            'stream': 'default',
            'task': 'some_bolt',
            'tuple': [4, 5, 6]
        }, {
            'id': 16,
            'comp': 'some_spout',
            'stream': 'default',
            'task': 'some_bolt',
            'tuple': [7, 8, 9]
        }]
        tups_json = '\nend\n'.join(
            [json.dumps(tup_dict) for tup_dict in self.tup_dicts] + [''])
        self.tups = [
            Tuple(tup_dict['id'], tup_dict['comp'], tup_dict['stream'],
                  tup_dict['task'], tup_dict['tuple'])
            for tup_dict in self.tup_dicts
        ]
        self.bolt = BatchingBolt(input_stream=StringIO(tups_json),
                                 output_stream=BytesIO())
        self.bolt.initialize({}, {})

    def tearDown(self):
        # undo the mocking
        BatchingBolt.secs_between_batches = self._orig_secs

    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    def test_batching(self, process_batch_mock):
        # Add a bunch of tuples
        for __ in range(3):
            self.bolt._run()

        # Wait a bit, and see if process_batch was called
        time.sleep(0.5)
        process_batch_mock.assert_called_with(self.bolt, None, self.tups[:3])

    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    def test_group_key(self, process_batch_mock):
        # Change the group key to even/odd grouping
        self.bolt.group_key = lambda t: sum(t.values) % 2

        # Add a bunch of tuples
        for __ in range(3):
            self.bolt._run()

        # Wait a bit, and see if process_batch was called correctly
        time.sleep(0.5)
        process_batch_mock.assert_has_calls([
            mock.call(self.bolt, 0, [self.tups[0], self.tups[2]]),
            mock.call(self.bolt, 1, [self.tups[1]])
        ],
                                            any_order=True)

    def test_exception_handling(self):
        # Make sure the exception propagates from the worker thread to the
        # main thread
        with self.assertRaises(NotImplementedError):
            self.bolt._run()
            time.sleep(0.5)

    @patch.object(BatchingBolt, 'ack', autospec=True)
    @patch.object(BatchingBolt, 'process_batch', new=lambda *args: None)
    def test_auto_ack_on(self, ack_mock):
        # Test auto-ack on (the default)
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)
        ack_mock.assert_has_calls([
            mock.call(self.bolt, self.tups[0]),
            mock.call(self.bolt, self.tups[1]),
            mock.call(self.bolt, self.tups[2])
        ],
                                  any_order=True)

    @patch.object(BatchingBolt, 'ack', autospec=True)
    @patch.object(BatchingBolt, 'process_batch', new=lambda *args: None)
    def test_auto_ack_off(self, ack_mock):
        # Test auto-ack off
        self.bolt.auto_ack = False
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)
        # Assert ack was not called; comparing call_args_list to [] shows any
        # unexpected calls in the failure message.
        self.assertListEqual(ack_mock.call_args_list, [])

    @patch.object(BatchingBolt, '_handle_worker_exception', autospec=True)
    @patch.object(BatchingBolt, 'fail', autospec=True)
    def test_auto_fail_on(self, fail_mock, worker_exception_mock):
        # Need to re-register signal handler with mocked version, because
        # mock gets created after handler was originally registered.
        self.setUp()
        # Test auto-fail on (the default)
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)

        # All waiting tuples should have failed at this point
        fail_mock.assert_has_calls([
            mock.call(self.bolt, self.tups[0]),
            mock.call(self.bolt, self.tups[1]),
            mock.call(self.bolt, self.tups[2])
        ],
                                   any_order=True)
        self.assertEqual(worker_exception_mock.call_count, 1)

    @patch.object(BatchingBolt, '_handle_worker_exception', autospec=True)
    @patch.object(BatchingBolt, 'fail', autospec=True)
    def test_auto_fail_off(self, fail_mock, worker_exception_mock):
        # Need to re-register signal handler with mocked version, because
        # mock gets created after handler was originally registered.
        self.setUp()
        self.bolt.auto_fail = False
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)
        # Assert that fail was not called; comparing call_args_list to [] shows
        # any unexpected calls in the failure message.
        self.assertListEqual(fail_mock.call_args_list, [])
        self.assertEqual(worker_exception_mock.call_count, 1)

    @patch.object(BatchingBolt, '_handle_worker_exception', autospec=True)
    @patch.object(BatchingBolt, 'process_batch', autospec=True)
    @patch.object(BatchingBolt, 'fail', autospec=True)
    def test_auto_fail_partial(self, fail_mock, process_batch_mock,
                               worker_exception_mock):
        # Need to re-register signal handler with mocked version, because
        # mock gets created after handler was originally registered.
        self.setUp()
        # Change the group key to be the sum of values, which makes 3 separate
        # batches
        self.bolt.group_key = lambda t: sum(t.values)
        # Make sure we fail on the second batch
        work = {'status': True}  # to avoid scoping problems

        def work_once(*args):
            if work['status']:
                work['status'] = False
            else:
                raise Exception('borkt')

        process_batch_mock.side_effect = work_once
        # Run the batches
        for __ in range(3):
            self.bolt._run()
        time.sleep(0.5)
        # Only some tuples should have failed at this point. The key is that
        # all un-acked tuples should be failed, even for batches we haven't
        # started processing yet.
        self.assertEqual(fail_mock.call_count, 2)
        self.assertEqual(worker_exception_mock.call_count, 1)

    @patch.object(BatchingBolt, 'read_tuple', autospec=True)
    @patch.object(BatchingBolt, 'send_message', autospec=True)
    def test_heartbeat_response(self, send_message_mock, read_tuple_mock):
        # Make sure we send sync for heartbeats
        read_tuple_mock.return_value = Tuple(id='foo',
                                             task=-1,
                                             stream='__heartbeat',
                                             values=[],
                                             component='__system')
        self.bolt._run()
        send_message_mock.assert_called_with(self.bolt, {'command': 'sync'})

    @patch.object(BatchingBolt, 'read_tuple', autospec=True)
    @patch.object(BatchingBolt, 'process_tick', autospec=True)
    def test_process_tick(self, process_tick_mock, read_tuple_mock):
        # Make sure process_tick is called for tick tuples
        read_tuple_mock.return_value = Tuple(id='foo',
                                             task=-1,
                                             component='__system',
                                             stream='__tick',
                                             values=[50])
        self.bolt._run()
        process_tick_mock.assert_called_with(self.bolt, 50)
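None of the snippets show their imports; a plausible preamble they all assume is sketched below. The pystorm import path in particular is a guess, so depending on the library version under test, BatchingBolt and Tuple may live in a different module.

import itertools
import json
import time
import unittest
from io import BytesIO, StringIO

try:                                   # Python 3
    from unittest import mock
    from unittest.mock import patch
except ImportError:                    # Python 2 fallback
    import mock
    from mock import patch

from pystorm import BatchingBolt, Tuple   # assumed; adjust to your version

if __name__ == '__main__':
    unittest.main()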