def start(self):
    super().start()
    # Like crontab, check to run jobs every minute
    self._job = Job(self._cron, timedelta(minutes=1), True)
    # Run a cron cycle immediately, but in a new thread since it
    # might take some time and we don't want it to hold up start
    spawn(self._cron)
def _cron(self):
    """ Called every minute to check if the cron job should notify signals """
    self._logger.debug("Checking if cron emit should run")
    now = datetime.utcnow()
    now = [str(now.minute), str(now.hour), str(now.day),
           str(now.month), str(now.weekday())]
    if self._check_cron(now):
        spawn(self._emit)
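# Illustrative sketch only -- `_check_cron` is not shown in this snippet. It
# assumes a hypothetical `self._cron_spec`, a list of five strings
# (minute, hour, day, month, weekday) where "*" matches any value:
def _check_cron(self, now):
    """Return True when every field of `now` matches the cron spec."""
    return all(spec in ("*", field)
               for spec, field in zip(self._cron_spec, now))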
def test_successful_retry(self):
    """All signals are notified and a retry is called for each one"""
    e = Event()
    block = RetryBlock(e, 5)
    self.configure_block(block, {})
    block.start()
    for _ in range(5):
        spawn(block.process_signals, [Signal({'error': 1})])
    e.wait(2)
    block.stop()
    self.assert_num_signals_notified(5)
    # retry_count is 0 because before_retry was called on 0th fail
    self.assertEqual(block._retry_count, 0)
def test_with_no_retry(self):
    """All signals are notified and no retry is ever called"""
    e = Event()
    block = RetryBlock(e, 5)
    self.configure_block(block, {})
    block.start()
    for _ in range(5):
        spawn(block.process_signals, [Signal()])
    e.wait(2)
    block.stop()
    self.assert_num_signals_notified(5)
    # retry_count was never set because we had no failed calls
    self.assertEqual(block._retry_count, None)
def test_with_enough_locks(self):
    """All the signals get through since we have enough locks"""
    e = Event()
    block = LockBlock(e, 5, 5)
    self.configure_block(block, {})
    block.start()
    for _ in range(5):
        spawn(block.process_signals, [Signal()])
    e.wait(2)
    block.stop()
    self.assert_num_signals_notified(5)
    self.assertEqual(block._number_of_locks, 0)
    # All 5 signals are notified and none get blocked
    self.assertEqual(block._number_of_lock_queue_full_errors, 0)
    # Confirm that all signals were processed
    self.assertEqual(block._num_processes_count, 5)
def test_with_one_lock(self):
    """Only the first signal is notified and the others are dropped"""
    e = Event()
    block = LockBlock(e, 5, 1)
    self.configure_block(block, {})
    block.start()
    for _ in range(5):
        spawn(block.process_signals, [Signal()])
    e.wait(2)
    block.stop()
    self.assert_num_signals_notified(1)
    self.assertEqual(block._number_of_locks, 0)
    # One signal is notified and 4 are blocked
    self.assertEqual(block._number_of_lock_queue_full_errors, 4)
    # Confirm that all signals were processed
    self.assertEqual(block._num_processes_count, 5)
def test_failed_retry(self):
    """No signals are notified and a retry is called for each one"""
    e = Event()
    block = RetryBlock(e, 5)
    self.configure_block(block, {})
    # Let the block retry up to 100 times
    block.num_retries = 100
    block.start()
    for _ in range(5):
        spawn(block.process_signals, [Signal({'error': 99})])
    e.wait(2)
    block.stop()
    # No signals get notified since we retry forever
    self.assert_num_signals_notified(0)
    # Assert that there were at least 10 retries (2 for each signal)
    self.assertTrue(block._retry_count >= 10)
def test_count_simultanious(self, back_patch):
    block = Counter()
    self.configure_block(block, {})
    block.start()
    signals = list(Signal() for _ in range(100000))
    process_times = 5
    spawns = []
    for _ in range(process_times):
        spawns.append(spawn(block.process_signals, signals))
    # it should take a while to complete
    with self.assertRaises(Exception):
        self.assertEqual(block._cumulative_count['null'],
                         100000 * process_times)
        self.assert_num_signals_notified(process_times)
    # wait for spawns to be done
    while spawns:
        time.sleep(0.1)
        spawns = tuple(s for s in spawns if s.isAlive())
    # make sure everything works as expected
    self.assertEqual(block._cumulative_count['null'],
                     100000 * process_times)
    self.assert_num_signals_notified(process_times)
    block.stop()
def _reconnect(self):
    self._logger.debug('Attempting reconnect in {} seconds'.format(
        self._reconnect_delay))
    sleep(self._reconnect_delay)
    self._reconnect_delay *= 2
    spawn(self._connect)
def start(self):
    super().start()
    self.counter = 0
    self._stop_event.clear()
    spawn(self.run)
def _reconnect(self):
    self._logger.debug(
        'Attempting reconnect in {} seconds'.format(self._reconnect_delay))
    sleep(self._reconnect_delay)
    self._reconnect_delay *= 2
    spawn(self._connect)
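# Illustrative sketch only: an exponential-backoff reconnect like the one
# above typically resets its delay once a connection succeeds, otherwise the
# doubling carries over to later disconnects. Assuming a hypothetical
# `_connect` with a client handle and a starting delay of 1 second:
def _connect(self):
    try:
        self._client.connect()       # hypothetical client handle
        self._reconnect_delay = 1    # back to the initial delay on success
    except Exception:
        self._logger.exception('Connection failed')
        spawn(self._reconnect)       # schedule another retry at double delay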
def start(self):
    super().start()
    self._job = Job(self._emit, self.interval, True)
    # Run an emit cycle immediately, but in a new thread since it
    # might take some time and we don't want it to hold up start
    spawn(self._emit)
def put(self, name, value):
    '''Use this function to put new data in'''
    spawn(self._put, name, value)
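# Illustrative sketch only: `_put` is not shown here. Spawning it keeps the
# public `put` call non-blocking when the underlying write is slow. Assuming
# a hypothetical backing store guarded by a lock:
def _put(self, name, value):
    with self._store_lock:               # hypothetical lock guarding the store
        self._store.write(name, value)   # hypothetical blocking write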