def test_simple_interval_scheduler(stub_broker, stub_worker, scheduler, scheduler_thread, mul, add):
    result = 0

    @remoulade.actor()
    def write_loaded_at():
        nonlocal result
        result += 1

    stub_broker.declare_actor(write_loaded_at)
    write_loaded_at.send, event_write = mock_func(write_loaded_at.send)
    mul.send, event_mul = mock_func(mul.send)

    start = time.time()

    # Run scheduler
    scheduler.schedule = [
        ScheduledJob(
            actor_name="write_loaded_at",
            interval=1,
        ),
        ScheduledJob(actor_name="mul", kwargs={"x": 1, "y": 2}, interval=3600),
    ]
    scheduler_thread.start()

    event_write.wait(10)
    event_mul.wait(10)
    stub_broker.join(mul.queue_name)
    stub_broker.join(write_loaded_at.queue_name)
    stub_worker.join()

    end = time.time()

    # should have written ~1 line per second
    assert end - start - 1 <= result <= end - start + 1

    # Get the last_queued date for this slow task; it should not change when reloading the schedule with a new config
    tasks = scheduler.get_redis_schedule().values()
    (slow_task,) = [job for job in tasks if job.actor_name == "mul"]
    last_queued = slow_task.last_queued

    assert {j.actor_name for j in tasks} == {"mul", "write_loaded_at"}

    scheduler.schedule = [
        ScheduledJob(actor_name="add", kwargs={"x": 1, "y": 2}, interval=1),
        ScheduledJob(actor_name="mul", kwargs={"x": 1, "y": 2}, interval=3600),
    ]
    scheduler.sync_config()

    tasks = scheduler.get_redis_schedule().values()

    # One item was deleted
    assert {j.actor_name for j in tasks} == {"add", "mul"}

    # The other one was not updated
    (slow_task,) = [job for job in tasks if job.actor_name == "mul"]
    assert slow_task.last_queued == last_queued
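# The mock_func helper used throughout these tests is not shown in this section. From the call
# sites it appears to wrap a callable and return the wrapper together with a threading.Event
# that is set each time the wrapped callable has run, so a test can block until a call happens.
# A minimal sketch under that assumption (the name _mock_func_sketch and its exact behaviour
# are illustrative, not the test suite's actual helper):
import functools
import threading


def _mock_func_sketch(func):
    event = threading.Event()

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        finally:
            # Signal any test waiting on this event that the call completed.
            event.set()

    return wrapper, event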
def test_started_state_message(self, stub_broker, stub_worker, state_middleware, frozen_datetime):
    state_middleware.before_process_message, event_started = mock_func(state_middleware.before_process_message)
    state_middleware.backend.get_state, event_get_state = mock_func(state_middleware.backend.get_state)

    @remoulade.actor
    def wait():
        event_get_state.wait(10)

    stub_broker.declare_actor(wait)
    msg = wait.send()

    # Wait for the message to be started
    event_started.wait(10)
    state = state_middleware.backend.get_state(msg.message_id)
    assert state.status == StateStatusesEnum.Started
    assert state.started_datetime.isoformat() == "2020-02-03T00:00:00+00:00"
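# The frozen_datetime fixture is assumed to pin the clock at 2020-02-03 (matching the
# started_datetime assertion above) and to expose a tick() method, as freezegun's
# FrozenDateTimeFactory does. A possible sketch of such a fixture, assuming freezegun is
# available (the frozen date comes from the test above; the wiring is a guess, not the
# suite's actual conftest):
import pytest
from freezegun import freeze_time


@pytest.fixture
def _frozen_datetime_sketch():
    # Freeze time for the duration of the test and yield the factory so the test can advance it.
    with freeze_time("2020-02-03") as frozen:
        yield frozen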
def test_scheduler_daily_time(stub_broker, stub_worker, scheduler, scheduler_thread, tz):
    result = 0

    @remoulade.actor
    def write_loaded_at():
        nonlocal result
        result += 1

    stub_broker.declare_actor(write_loaded_at)
    scheduler.get_redis_schedule, event_sch = mock_func(scheduler.get_redis_schedule)
    stub_broker.enqueue, event_enqueue = mock_func(stub_broker.enqueue)

    if tz:
        scheduler.schedule = [
            ScheduledJob(
                actor_name="write_loaded_at",
                daily_time=(
                    datetime.datetime.now(pytz.timezone("Europe/Paris")) + datetime.timedelta(milliseconds=100)
                ).time(),
                tz="Europe/Paris",
            )
        ]
    else:
        scheduler.schedule = [
            ScheduledJob(
                actor_name="write_loaded_at",
                daily_time=(datetime.datetime.utcnow() + datetime.timedelta(milliseconds=100)).time(),
            )
        ]

    scheduler_thread.start()

    # should not have run yet
    assert result == 0

    time.sleep(0.1)
    event_enqueue.wait(10)
    stub_broker.join(write_loaded_at.queue_name)
    stub_worker.join()

    assert result == 1

    event_sch.wait(10)
    event_sch.clear()
    event_sch.wait(10)
    stub_broker.join(write_loaded_at.queue_name)
    stub_worker.join()

    # should not rerun
    assert result == 1
def test_pipelines_expose_completion_stats(stub_broker, stub_worker, result_backend):
    # And a broker with the results middleware
    stub_broker.add_middleware(Results(backend=result_backend))

    # And an actor that waits some amount of time
    result_backend.store_results, event_result = mock_func(result_backend.store_results)
    event_count = [threading.Event() for _ in range(4)]

    @remoulade.actor(store_results=True)
    def wait(n):
        event_count[n].wait(3)
        return n + 1

    # And this actor is declared
    stub_broker.declare_actor(wait)

    # When I pipe some messages intended for that actor together and run the pipeline
    pipe = wait.message(0) | wait.message() | wait.message() | wait.message()
    pipe.run()

    # Then every time a job in the pipeline completes, the completed_count should increase
    for count in range(0, len(pipe)):
        event_count[count].set()
        event_result.wait(2)
        event_result.clear()
        assert pipe.results.completed_count == count + 1

    # Finally, completed should be true
    assert pipe.results.completed
def test_scheduler_right_weekday(stub_broker, stub_worker, scheduler, scheduler_thread):
    result = 0

    @remoulade.actor
    def write_loaded_at():
        nonlocal result
        result += 1

    stub_broker.declare_actor(write_loaded_at)
    scheduler.schedule = [
        ScheduledJob(
            actor_name="write_loaded_at",
            iso_weekday=datetime.datetime.now().isoweekday(),
        )
    ]
    write_loaded_at.send, event = mock_func(write_loaded_at.send)
    scheduler_thread.start()

    event.wait(2)
    stub_broker.join(write_loaded_at.queue_name)
    stub_worker.join()

    # Should have run
    assert result == 1
def test_scheduler_wrong_weekday(stub_broker, stub_worker, scheduler, scheduler_thread):
    result = 0

    @remoulade.actor
    def write_loaded_at():
        nonlocal result
        result += 1

    stub_broker.declare_actor(write_loaded_at)
    scheduler.schedule = [
        ScheduledJob(
            actor_name="write_loaded_at",
            # Pick a weekday that is never today; wrap 7 (Sunday) back to 1 instead of producing an invalid 8
            iso_weekday=datetime.datetime.now().isoweekday() % 7 + 1,
        )
    ]
    scheduler.get_redis_schedule, event = mock_func(scheduler.get_redis_schedule)
    scheduler_thread.start()

    event.wait(2)
    event.clear()
    event.wait(2)

    # Should not have run
    assert result == 0
def test_scheduler_new_daily_time(stub_broker, stub_worker, scheduler, scheduler_thread):
    result = 0

    @remoulade.actor
    def write_loaded_at():
        nonlocal result
        result += 1

    stub_broker.declare_actor(write_loaded_at)
    scheduler.schedule = [
        ScheduledJob(
            actor_name="write_loaded_at",
            daily_time=(datetime.datetime.utcnow() - datetime.timedelta(seconds=1)).time(),
        )
    ]
    scheduler.get_redis_schedule, event = mock_func(scheduler.get_redis_schedule)
    scheduler_thread.start()

    event.wait(3)
    event.clear()
    event.wait(2)
    stub_broker.join(write_loaded_at.queue_name)
    stub_worker.join()

    # should not have run; will run tomorrow
    assert result == 0
def test_scheduler_daily_time(stub_broker, stub_worker, scheduler, scheduler_thread, tz, frozen_datetime):
    result = 0

    @remoulade.actor
    def write_loaded_at():
        nonlocal result
        result += 1

    stub_broker.declare_actor(write_loaded_at)
    scheduler.get_redis_schedule, event_sch = mock_func(scheduler.get_redis_schedule)
    write_loaded_at.send, event_send = mock_func(write_loaded_at.send)

    scheduler.schedule = [
        ScheduledJob(
            actor_name="write_loaded_at",
            daily_time=(
                datetime.datetime.now(pytz.timezone(tz) if tz else None) + datetime.timedelta(seconds=1)
            ).time(),
            tz=tz,
        )
    ]
    scheduler_thread.start()

    # Wait for sync_config + a complete scheduler iteration
    for _ in range(3):
        event_sch.wait(10)
        event_sch.clear()

    stub_broker.join(write_loaded_at.queue_name)
    stub_worker.join()

    # should not have run yet
    assert result == 0

    frozen_datetime.tick(2)

    # Wait for the ScheduledJob to be sent
    event_send.wait(1)
    stub_broker.join(write_loaded_at.queue_name)
    stub_worker.join()

    assert result == 1

    # Wait for a complete scheduler iteration
    for _ in range(2):
        event_sch.wait(10)
        event_sch.clear()

    stub_broker.join(write_loaded_at.queue_name)
    stub_worker.join()

    # should not rerun
    assert result == 1
def test_multiple_schedulers(stub_broker, stub_worker):
    result = 0

    @remoulade.actor
    def write_loaded_at():
        nonlocal result
        result += 1

    stub_broker.declare_actor(write_loaded_at)
    schedule = [
        ScheduledJob(
            actor_name="write_loaded_at",
            interval=3600,
        )
    ]

    scheduler_list = []
    event_list = []
    thread_list = []
    for _ in range(5):
        sch = new_scheduler(stub_broker)
        sch.get_redis_schedule, event = mock_func(sch.get_redis_schedule)
        event_list.append(event)
        if not scheduler_list:
            check_redis(sch.client)
        sch.schedule = schedule
        scheduler_list.append(sch)
        t = threading.Thread(target=sch.start)
        thread_list.append(t)
        t.start()

    for _ in range(2):
        for event in event_list:
            event.wait(2)
            event.clear()

    stub_broker.join(write_loaded_at.queue_name)
    stub_worker.join()

    # the slow task should run exactly once, even though we launched 5 schedulers
    assert result == 1

    for scheduler in scheduler_list:
        scheduler.stop()

    for thread in thread_list:
        thread.join(10)
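# Stripped of the test plumbing, the pattern these scheduler tests exercise is: assign a list
# of ScheduledJob entries, run the scheduler in a background thread, and stop it when done.
# A condensed sketch using only the calls exercised above; how the scheduler instance is
# constructed is deliberately left out, since the tests obtain it through the local
# new_scheduler helper / scheduler fixture:
def _run_scheduler_sketch(scheduler):
    scheduler.schedule = [
        ScheduledJob(actor_name="write_loaded_at", interval=3600),
    ]
    thread = threading.Thread(target=scheduler.start)
    thread.start()
    try:
        ...  # the scheduler enqueues the jobs while the application runs
    finally:
        scheduler.stop()
        thread.join(10)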