def test_caching():
  """A fetched URL is cached on disk and only re-fetched once its TTL lapses."""
  url = 'http://www.google.com'
  payload = b'This is google.com!'
  clock = ThreadedClock()
  moxer = mox.Mox()
  # Freeze the cache file's mtime at 0 so expiry is driven purely by the clock.
  moxer.StubOutWithMock(os.path, 'getmtime')
  os.path.getmtime(mox.IgnoreArg()).MultipleTimes().AndReturn(0)
  moxer.ReplayAll()
  opener = MockOpener(payload)
  web = CachedWeb(clock=clock, opener=opener)

  # First open: nothing cached yet, so the opener is hit and the cache file appears.
  assert not os.path.exists(web.translate_url(url))
  with contextlib.closing(web.open(url)) as fp:
    assert fp.read() == payload
  assert os.path.exists(web.translate_url(url))
  assert opener.opened.is_set()
  opener.clear()

  # Advance past the TTL and confirm expiry is reported.
  assert web.expired(url, ttl=0.5) is False
  clock.tick(1)
  assert web.expired(url, ttl=0.5)

  # Open without a TTL: served from cache, no network hit.
  with contextlib.closing(web.open(url)) as fp:
    assert fp.read() == payload
  assert not opener.opened.is_set()

  # Open with an elapsed TTL: must go back to the network.
  with contextlib.closing(web.open(url, ttl=0.5)) as fp:
    assert fp.read() == payload
  assert opener.opened.is_set(), 'expect expired url to cause http get'

  moxer.UnsetStubs()
  moxer.VerifyAll()
def test_ttl_decrement_works():
  """A ping sent with ttl=1 is consumed and never forwarded after one delay."""
  clock = ThreadedClock()
  server = TestPingPongServer('foo', 31337, clock=clock)
  server.ping('hello world', ttl=1)
  clock.tick(TestPingPongServer.PING_DELAY.as_(Time.SECONDS))
  # No forwarded call is expected: the TTL reached zero.
  server.expect_calls()
def run_gc(self, root, task_id, retain=False):
  """Run the garbage collection process against the given task_id in the
  given checkpoint root.

  root: checkpoint root directory to garbage collect.
  task_id: the task to collect.
  retain: when True, report task_id as a retained executor via the detector.
  Returns whatever ThermosGCExecutor.garbage_collect() returns.
  """
  class FakeTaskKiller(object):
    # No-op stand-in: GC must not actually kill anything in tests.
    def __init__(self, task_id, checkpoint_root):
      pass

    def kill(self):
      pass

    def lose(self):
      pass

  class FakeTaskGarbageCollector(object):
    # No-op stand-in: records nothing, erases nothing.
    def __init__(self, root):
      pass

    def erase_logs(self, task_id):
      pass

    def erase_metadata(self, task_id):
      pass

  class FastThermosGCExecutor(ThermosGCExecutor):
    # Poll aggressively so the test completes quickly.
    POLL_WAIT = Amount(1, Time.MICROSECONDS)

  if retain:
    detector = functools.partial(FakeExecutorDetector, task_id)
  else:
    detector = FakeExecutorDetector

  executor = FastThermosGCExecutor(
      checkpoint_root=root,
      task_killer=FakeTaskKiller,
      executor_detector=detector,
      task_garbage_collector=FakeTaskGarbageCollector,
      clock=ThreadedClock(time.time()))
  return executor.garbage_collect()
def test_sampler_base():
  """SamplerBase fires iterate() once per period and stops short-circuiting."""
  class CountingSampler(SamplerBase):
    def __init__(self, period, clock):
      self.count = 0
      SamplerBase.__init__(self, period, clock)

    def iterate(self):
      self.count += 1

  clock = ThreadedClock()
  sampler = CountingSampler(Amount(1, Time.SECONDS), clock=clock)
  sampler.start()
  assert clock.converge(threads=[sampler])
  clock.assert_waiting(sampler, 1)

  # Half a period: no sample yet.
  clock.tick(0.5)
  assert clock.converge(threads=[sampler])
  assert sampler.count == 0

  # The first full period elapses.
  clock.tick(0.5)
  assert clock.converge(threads=[sampler])
  assert sampler.count == 1

  # Five more periods, five more samples.
  clock.tick(5)
  assert clock.converge(threads=[sampler])
  assert sampler.count == 6

  assert not sampler.is_stopped()
  sampler.stop()
  # make sure that stopping the sampler short circuits any sampling
  clock.tick(5)
  assert clock.converge(threads=[sampler])
  assert sampler.count == 6
def test_announcer_under_abnormal_circumstances():
  """An exception from ServerSet.join is retried after exception_wait and
  the announcer recovers."""
  mock_serverset = create_autospec(spec=ServerSet, instance=True)
  mock_serverset.join = MagicMock()
  # First join attempt blows up; the retry succeeds.
  mock_serverset.join.side_effect = [
      KazooException('Whoops the ensemble is down!'),
      'member0001',
  ]
  mock_serverset.cancel = MagicMock()

  endpoint = Endpoint('localhost', 12345)
  clock = ThreadedClock(31337.0)
  announcer = Announcer(
      mock_serverset, endpoint, clock=clock, exception_wait=Amount(2, Time.SECONDS))
  announcer.start()

  try:
    # While the first join has failed, disconnected time accrues.
    clock.tick(1.0)
    assert announcer.disconnected_time() == 1.0
    # After exception_wait elapses, the retry joins successfully.
    clock.tick(2.0)
    assert announcer.disconnected_time() == 0.0, (
        'Announcer should recover after an exception thrown internally.')
    assert announcer._membership == 'member0001'
  finally:
    announcer.stop()
def test_caching(getmtime_mock):
  """CachedWeb caches a fetch on disk and re-fetches only after the TTL lapses.

  getmtime_mock: a patched os.path.getmtime, pinned to 0 so expiry is driven
  purely by the injected clock.
  """
  url = 'http://www.google.com'
  payload = b'This is google.com!'
  clock = ThreadedClock()
  getmtime_mock.return_value = 0
  opener = MockOpener(payload)
  web = CachedWeb(clock=clock, opener=opener)

  # Cold cache: the opener is hit and a cache file appears.
  assert not os.path.exists(web.translate_url(url))
  with contextlib.closing(web.open(url)) as fp:
    assert fp.read() == payload
  assert os.path.exists(web.translate_url(url))
  assert opener.opened.is_set()
  opener.clear()

  # TTL has not yet elapsed, then it does.
  assert web.expired(url, ttl=0.5) is False
  clock.tick(1)
  assert web.expired(url, ttl=0.5)

  # No TTL supplied: cache hit, no network.
  with contextlib.closing(web.open(url)) as fp:
    assert fp.read() == payload
  assert not opener.opened.is_set()

  # Elapsed TTL supplied: forces a fresh fetch.
  with contextlib.closing(web.open(url, ttl=0.5)) as fp:
    assert fp.read() == payload
  assert opener.opened.is_set(), 'expect expired url to cause http get'
def __init__(self, checkpoint_root, active_executors=None):
  """Initialize the GC-executor fake.

  checkpoint_root: directory holding task checkpoint streams.
  active_executors: optional list of executors to report as active.
    Defaults to a fresh empty list per instance — the original signature used
    a mutable default (`active_executors=[]`), so every instance constructed
    with the default shared (and could mutate) the same list object.
  """
  self._active_executors = [] if active_executors is None else active_executors
  self._kills = set()
  self._losses = set()
  self._gcs = set()
  ThermosGCExecutor.__init__(
      self,
      checkpoint_root,
      clock=ThreadedClock(time.time()),
      executor_detector=lambda: list)
def setUp(self):
  """Wire up a fake clock and an HttpSignaler whose health() pops canned results."""
  self._clock = ThreadedClock()
  self._checker = mock.Mock(spec=HttpSignaler)
  # Each health() call consumes the next queued fake result, in order.
  self.fake_health_checks = []

  def pop_next_health_check():
    return self.fake_health_checks.pop(0)

  self._checker.health = mock.Mock(spec=self._checker.health)
  self._checker.health.side_effect = pop_next_health_check
def test_ping():
  """Pings are forwarded with their TTL decremented by one per hop."""
  clock = ThreadedClock()
  server = TestPingPongServer('foo', 31337, clock=clock)

  server.ping('hello world', ttl=2)
  clock.tick(TestPingPongServer.PING_DELAY.as_(Time.SECONDS))
  server.expect_calls(('ping', 'hello world', 1))

  server.ping('hello world', ttl=3)
  clock.tick(TestPingPongServer.PING_DELAY.as_(Time.SECONDS))
  server.expect_calls(('ping', 'hello world', 2))
def test_defer():
  """defer() runs the callable after the requested delay on the fake clock."""
  clock = ThreadedClock()
  DELAY = 3
  results = Queue(maxsize=1)

  def func():
    results.put_nowait('success')

  defer(func, delay=DELAY, clock=clock)
  with Timer(clock=clock) as timer:
    # Tick past the delay so the deferred call fires.
    clock.tick(4)
    assert results.get() == 'success'
  assert timer.elapsed >= DELAY
def test_connect_timeout_using_open():
  """A zero connection timeout raises FetchError; a tiny nonzero one succeeds."""
  url = 'http://www.google.com'
  payload = b'This is google.com!'
  clock = ThreadedClock()
  opener = MockOpener(payload)
  web = CachedWeb(clock=clock, opener=opener)
  assert not os.path.exists(web.translate_url(url))

  # conn_timeout=0 cannot connect at all.
  with pytest.raises(FetchError):
    with contextlib.closing(web.open(url, conn_timeout=0)):
      pass

  # Any positive timeout lets the mock opener respond.
  with contextlib.closing(web.open(url, conn_timeout=0.01)) as fp:
    assert fp.read() == payload
def test_not_converged():
  """Two independent clocks only converge for the sleep a thread is actually in."""
  clock1 = ThreadedClock(0)
  clock2 = ThreadedClock(0)

  def run():
    # The thread sleeps on clock1 first, then clock2.
    clock1.sleep(1)
    clock2.sleep(1)

  th = threading.Thread(target=run)
  th.daemon = True
  th.start()

  # The thread is parked on clock1, so clock2 cannot converge yet.
  assert clock1.converge(threads=[th])
  clock1.assert_waiting(th, 1)
  assert clock2.converge(threads=[th], timeout=0.1) is False
  clock2.assert_not_waiting(th)

  # Release both sleeps.
  clock1.tick(1)
  clock2.tick(2)
  clock1.converge(threads=[th])
  clock2.converge(threads=[th])
  clock1.assert_not_waiting(th)
  clock2.assert_not_waiting(th)
def test_sleep_0():
  """A zero-duration sleep returns immediately without waiting for a tick."""
  clock = ThreadedClock(0)
  done = threading.Event()

  def run():
    clock.sleep(0)
    done.set()

  th = threading.Thread(target=run)
  th.daemon = True
  th.start()

  assert clock.converge(threads=[th])
  assert done.is_set()
def test_announcer_on_expiration():
  """on_expiration() forces a rejoin; the announcer survives one failed attempt."""
  joined = threading.Event()
  operations = []

  def joined_side_effect(*args, **kw):
    # A nonlocal-free counter: nested functions cannot rebind an outer int
    # via 'global', so track call count with append/len (see PEP-3104).
    operations.append(1)
    if len(operations) in (1, 3):
      joined.set()
      return 'membership %d' % len(operations)
    raise KazooException('Failed to reconnect')

  mock_serverset = create_autospec(spec=ServerSet, instance=True)
  mock_serverset.join = MagicMock()
  mock_serverset.join.side_effect = joined_side_effect
  mock_serverset.cancel = MagicMock()

  endpoint = Endpoint('localhost', 12345)
  clock = ThreadedClock(31337.0)
  announcer = Announcer(
      mock_serverset, endpoint, clock=clock, exception_wait=Amount(2, Time.SECONDS))
  announcer.start()

  try:
    joined.wait(timeout=1.0)
    assert joined.is_set()
    assert announcer._membership == 'membership 1'
    assert announcer.disconnected_time() == 0.0
    clock.tick(1.0)
    assert announcer.disconnected_time() == 0.0

    announcer.on_expiration()  # expect exception
    clock.tick(1.0)
    assert announcer.disconnected_time() == 1.0, (
        'Announcer should be disconnected on expiration.')

    # The retry (call #3) succeeds and disconnection time resets.
    clock.tick(10.0)
    assert announcer.disconnected_time() == 0.0, (
        'Announcer should not advance disconnection time when connected.')
    assert announcer._membership == 'membership 3'
  finally:
    announcer.stop()
def test_defer():
  """defer() does nothing before the delay elapses and fires exactly on tick."""
  DELAY = 3
  clock = ThreadedClock()
  results = Queue(maxsize=1)

  def func():
    results.put_nowait('success')

  defer(func, delay=DELAY, clock=clock)
  with Timer(clock=clock) as timer:
    # Before any tick the deferred call must not have run.
    with pytest.raises(Empty):
      results.get_nowait()
    clock.tick(DELAY + 1)
    assert results.get() == 'success'
  assert timer.elapsed == DELAY + 1
def test_announcer_under_normal_circumstances():
  """Disconnection time accrues only before the initial join, never after."""
  joined = threading.Event()

  def joined_side_effect(*args, **kw):
    joined.set()
    return 'membership foo'

  mock_serverset = create_autospec(spec=ServerSet, instance=True)
  mock_serverset.join = MagicMock()
  mock_serverset.join.side_effect = joined_side_effect
  mock_serverset.cancel = MagicMock()

  endpoint = Endpoint('localhost', 12345)
  clock = ThreadedClock(31337.0)
  announcer = Announcer(mock_serverset, endpoint, clock=clock)

  # Not started yet: time counts as disconnected.
  assert announcer.disconnected_time() == 0.0
  clock.tick(1.0)
  assert announcer.disconnected_time() == 1.0, (
      'Announcer should advance disconnection time when not yet initially connected.'
  )

  announcer.start()
  try:
    joined.wait(timeout=1.0)
    assert joined.is_set()
    assert announcer.disconnected_time() == 0.0
    clock.tick(1.0)
    assert announcer.disconnected_time() == 0.0, (
        'Announcer should not advance disconnection time when connected.')
    assert announcer._membership == 'membership foo'
  finally:
    announcer.stop()

  # Stopping cancels the membership and freezes disconnection accounting.
  mock_serverset.cancel.assert_called_with('membership foo')
  assert announcer.disconnected_time() == 0.0
  clock.tick(1.0)
  assert announcer.disconnected_time() == 0.0, (
      'Announcer should not advance disconnection time when stopped.')
def setup_task(self, task, root, finished=False, corrupt=False):
  """Set up the checkpoint stream for the given task in the given checkpoint
  root, optionally finished and/or with a corrupt stream.

  task: the Thermos task configuration to run.
  root: checkpoint root directory.
  finished: when True, kill the runner so the task reads as terminated.
  corrupt: when True, overwrite the checkpoint with invalid bytes.
  Returns the task_id of the created task.
  """
  class FastTaskRunner(TaskRunner):
    # Sleep briefly between coordinator polls to keep the test fast.
    COORDINATOR_INTERVAL_SLEEP = Amount(1, Time.MILLISECONDS)

  runner = FastTaskRunner(
      task=task,
      checkpoint_root=root,
      sandbox=os.path.join(root, 'sandbox', task.name().get()),
      clock=ThreadedClock(time.time()))
  with runner.control():
    # initialize checkpoint stream
    pass
  if finished:
    runner.kill()
  if corrupt:
    ckpt_file = TaskPath(root=root, tr=runner.task_id).getpath('runner_checkpoint')
    with open(ckpt_file, 'w') as f:
      f.write("definitely not a valid checkpoint stream")
  return runner.task_id
def test_with_events(num_threads):
  """All sleeping threads wake together once the clock crosses their deadline."""
  event = threading.Event()
  hits = []
  hits_before, hits_after = 0, 0
  clock = ThreadedClock(0)

  def hit_me():
    clock.sleep(0.1)
    hits.append(True)

  workers = []
  for _ in range(num_threads):
    worker = threading.Thread(target=hit_me)
    worker.daemon = True
    worker.start()
    workers.append(worker)

  clock.converge(threads=workers)
  for worker in workers:
    clock.assert_waiting(worker, 0.1)

  # Halfway there: nobody should have woken yet.
  clock.tick(0.05)
  clock.converge(threads=workers)
  hits_before += len(hits)
  with pytest.raises(AssertionError):
    clock.assert_waiting(workers[0], 234)

  # Cross the 0.1s deadline: everybody wakes.
  clock.tick(0.05)
  clock.converge(threads=workers)
  hits_after += len(hits)
  for worker in workers:
    clock.assert_not_waiting(worker)
    with pytest.raises(AssertionError):
      clock.assert_waiting(worker, 0.1)

  assert hits_before == 0
  assert hits_after == num_threads
def test_tracing_timed():
  """Nested timed() scopes record start/stop stamps and a parent/child tree."""
  sio = Compatibility.StringIO()
  clock = ThreadedClock()
  captured = []

  class PrintTraceInterceptor(Tracer):
    # Capture the finished root trace node instead of printing it.
    def print_trace(self, *args, **kw):
      captured.append(self._local.parent)

  tracer = PrintTraceInterceptor(output=sio, clock=clock, predicate=lambda v: False)
  assert not hasattr(tracer._local, 'parent')

  with tracer.timed('hello'):
    clock.tick(1.0)
    with tracer.timed('world 1'):
      clock.tick(1.0)
    with tracer.timed('world 2'):
      clock.tick(1.0)

  assert len(captured) == 1
  root = captured[0]
  assert root._start == 0
  assert root._stop == 3
  assert root.duration() == 3
  assert root.msg == 'hello'
  assert len(root.children) == 2

  first_child = root.children[0]
  assert first_child._start == 1
  assert first_child._stop == 2
  assert first_child.parent is root
  assert first_child.msg == 'world 1'

  second_child = root.children[1]
  assert second_child._start == 2
  assert second_child._stop == 3
  assert second_child.parent is root
  assert second_child.msg == 'world 2'

  # should not log if verbosity low
  assert sio.getvalue() == ''
def test_sleep_negative():
  """Negative sleep durations are rejected with IOError."""
  with pytest.raises(IOError):
    ThreadedClock(0).sleep(-1)