def main():
    """ARTIQ master entry point: set up the event loop, databases,
    repository, scheduler and network servers, then serve forever.

    Shutdown is handled entirely through ``atexit`` callbacks, registered
    in reverse order of desired teardown.
    """
    args = get_argparser().parse_args()
    log_buffer = init_log(args)
    if os.name == "nt":
        # Windows needs the proactor loop (subprocess support).
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    # Register the bound method directly; wrapping it in a lambda adds nothing.
    atexit.register(loop.close)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit.register(lambda: loop.run_until_complete(dataset_db.stop()))

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    repository = Repository(repo_backend, device_db.get_device_db, log_worker)
    atexit.register(repository.close)
    repository.scan_async()

    # RPCs exposed to worker processes.
    worker_handlers = {
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "log": log_worker
    }
    scheduler = Scheduler(get_last_rid() + 1, worker_handlers, repo_backend)
    # Added after construction so the scheduler itself is in scope.
    worker_handlers["scheduler_submit"] = scheduler.submit
    scheduler.start()
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))

    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_repository": repository
    })
    loop.run_until_complete(server_control.start(args.bind, args.port_control))
    atexit.register(lambda: loop.run_until_complete(server_control.stop()))

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": repository.explist,
        "log": log_buffer.data
    })
    loop.run_until_complete(server_notify.start(args.bind, args.port_notify))
    atexit.register(lambda: loop.run_until_complete(server_notify.stop()))

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(args.bind, args.port_logging))
    atexit.register(lambda: loop.run_until_complete(server_logging.stop()))

    loop.run_forever()
def main():
    """ARTIQ master entry point (flat-file DB variant): set up the event
    loop, device/parameter databases, repository, scheduler and network
    servers, then serve forever.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    if os.name == "nt":
        # Windows needs the proactor loop (subprocess support).
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    # Register the bound method directly; wrapping it in a lambda adds nothing.
    atexit.register(loop.close)

    ddb = FlatFileDB(args.ddb)
    pdb = FlatFileDB(args.pdb)
    rtr = Notifier(dict())
    log = Log(1000)

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    repository = Repository(repo_backend, log.log)
    atexit.register(repository.close)
    repository.scan_async()

    # RPCs exposed to worker processes.
    worker_handlers = {
        "get_device": ddb.get,
        "get_parameter": pdb.get,
        "set_parameter": pdb.set,
        "update_rt_results": lambda mod: process_mod(rtr, mod),
        "log": log.log
    }
    scheduler = Scheduler(get_last_rid() + 1, worker_handlers, repo_backend)
    # Added after construction so the scheduler itself is in scope.
    worker_handlers["scheduler_submit"] = scheduler.submit
    scheduler.start()
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))

    server_control = Server({
        "master_ddb": ddb,
        "master_pdb": pdb,
        "master_schedule": scheduler,
        "master_repository": repository
    })
    loop.run_until_complete(server_control.start(
        args.bind, args.port_control))
    atexit.register(lambda: loop.run_until_complete(server_control.stop()))

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": ddb.data,
        "parameters": pdb.data,
        "rt_results": rtr,
        "explist": repository.explist,
        "log": log.data
    })
    loop.run_until_complete(server_notify.start(
        args.bind, args.port_notify))
    atexit.register(lambda: loop.run_until_complete(server_notify.stop()))

    loop.run_forever()
def test_flush(self):
    """Submitting with flush=True must insert a 'flushing' status step."""
    loop = self.loop
    scheduler = Scheduler(_RIDCounter(0), dict(), None)
    expid = _get_expid("EmptyExperiment")
    expected = _get_basic_steps(1, expid, 1, True)
    expected.insert(1, {"key": "status", "path": [1],
                        "value": "flushing", "action": "setitem"})
    first_preparing = asyncio.Event()
    done = asyncio.Event()
    step = 0

    def notify(mod):
        nonlocal step
        if mod == {"path": [0], "value": "preparing",
                   "key": "status", "action": "setitem"}:
            first_preparing.set()
        # Only check mods concerning RID 1 (the flushing submission).
        is_rid1 = mod["path"] == [1] or (mod["path"] == [] and mod["key"] == 1)
        if is_rid1:
            self.assertEqual(mod, expected[step])
            step += 1
            if step == len(expected):
                done.set()

    scheduler.notifier.publish = notify
    scheduler.start()
    scheduler.submit("main", expid, 0, None, False)
    loop.run_until_complete(first_preparing.wait())
    scheduler.submit("main", expid, 1, None, True)
    loop.run_until_complete(done.wait())
    loop.run_until_complete(scheduler.stop())
def test_pause(self):
    """A higher-priority empty experiment completes while a background
    experiment is already running."""
    loop = self.loop
    scheduler = Scheduler(0, _handlers, None)
    expid_bg = _get_expid("BackgroundExperiment")
    expid = _get_expid("EmptyExperiment")
    expected = _get_basic_steps(1, expid)
    background_running = asyncio.Event()
    done = asyncio.Event()
    step = 0

    def notify(mod):
        nonlocal step
        bg_running = {"path": [0], "value": "running",
                      "key": "status", "action": "setitem"}
        if mod == bg_running:
            background_running.set()
        # Only check mods concerning RID 1 (the empty experiment).
        if mod["path"] == [1] or (mod["path"] == [] and mod["key"] == 1):
            self.assertEqual(mod, expected[step])
            step += 1
            if step == len(expected):
                done.set()

    scheduler.notifier.publish = notify
    scheduler.start()
    scheduler.submit("main", expid_bg, -99, None, False)
    loop.run_until_complete(background_running.wait())
    scheduler.submit("main", expid, 0, None, False)
    loop.run_until_complete(done.wait())
    loop.run_until_complete(scheduler.stop())
def test_pause(self):
    """Requesting termination of a paused background run reports
    termination_ok and lets the run finish."""
    loop = self.loop
    termination_ok = False

    def check_termination(mod):
        nonlocal termination_ok
        self.assertEqual(
            mod, {"action": "setitem", "key": "termination_ok",
                  "value": (False, True), "path": []})
        termination_ok = True

    handlers = {"update_dataset": check_termination}
    scheduler = Scheduler(0, handlers, None)
    expid_bg = _get_expid("BackgroundExperiment")
    expid = _get_expid("EmptyExperiment")
    expected = _get_basic_steps(1, expid)
    background_running = asyncio.Event()
    empty_completed = asyncio.Event()
    background_completed = asyncio.Event()
    step = 0

    def notify(mod):
        nonlocal step
        if mod == {"path": [0], "value": "running",
                   "key": "status", "action": "setitem"}:
            background_running.set()
        elif mod == {"path": [0], "value": "deleting",
                     "key": "status", "action": "setitem"}:
            background_completed.set()
        # Only check mods concerning RID 1 (the empty experiment).
        if mod["path"] == [1] or (mod["path"] == [] and mod["key"] == 1):
            self.assertEqual(mod, expected[step])
            step += 1
            if step == len(expected):
                empty_completed.set()

    scheduler.notifier.publish = notify
    scheduler.start()
    scheduler.submit("main", expid_bg, -99, None, False)
    loop.run_until_complete(background_running.wait())
    scheduler.submit("main", expid, 0, None, False)
    loop.run_until_complete(empty_completed.wait())
    self.assertFalse(termination_ok)
    scheduler.request_termination(0)
    loop.run_until_complete(background_completed.wait())
    self.assertTrue(termination_ok)
    loop.run_until_complete(scheduler.stop())
def test_steps(self):
    """A run with a far-future due date stays pending even at high
    priority; an undated submission runs through all its steps."""
    loop = self.loop
    scheduler = Scheduler(_RIDCounter(0), dict(), None)
    expid = _get_expid("EmptyExperiment")
    expected = _get_basic_steps(1, expid)
    done = asyncio.Event()
    step = 0

    def notify(mod):
        nonlocal step
        self.assertEqual(mod, expected[step])
        step += 1
        if step == len(expected):
            done.set()

    scheduler.notifier.publish = notify
    scheduler.start()
    # Verify that a timed experiment far in the future does not
    # get run, even if it has high priority.
    late = time() + 100000
    late_entry = {"pipeline": "main", "status": "pending",
                  "priority": 99, "expid": expid, "due_date": late,
                  "flush": False, "repo_msg": None}
    expected.insert(0, {"action": "setitem", "key": 0,
                        "value": late_entry, "path": []})
    scheduler.submit("main", expid, 99, late, False)
    # This one (RID 1) gets run instead.
    scheduler.submit("main", expid, 0, None, False)
    loop.run_until_complete(done.wait())
    scheduler.notifier.publish = None
    loop.run_until_complete(scheduler.stop())
def main():
    """ARTIQ master entry point (early queue/timed scheduler variant):
    set up flat-file databases, real-time results, the scheduler and
    network servers on one event loop, then serve forever.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    ddb = FlatFileDB("ddb.pyon")
    pdb = FlatFileDB("pdb.pyon")
    simplephist = SimpleHistory(30)
    pdb.hooks.append(simplephist)
    rtr = RTResults()
    repository = Repository()
    explist = FlatFileDB("explist.pyon")

    loop = asyncio.get_event_loop()
    # Register the bound method directly; wrapping it in a lambda adds nothing.
    atexit.register(loop.close)

    def run_cb(rid, run_params):
        # Route real-time results of the starting run into its group.
        rtr.current_group = run_params["rtr_group"]
    scheduler = Scheduler(
        {
            "req_device": ddb.request,
            "req_parameter": pdb.request,
            "set_parameter": pdb.set,
            "init_rt_results": rtr.init,
            "update_rt_results": rtr.update
        },
        run_cb)
    loop.run_until_complete(scheduler.start())
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))

    server_control = Server({
        "master_ddb": ddb,
        "master_pdb": pdb,
        "master_schedule": scheduler,
        "master_repository": repository,
        "master_explist": explist
    })
    loop.run_until_complete(server_control.start(args.bind, args.port_control))
    atexit.register(lambda: loop.run_until_complete(server_control.stop()))

    server_notify = Publisher({
        "queue": scheduler.queue,
        "timed": scheduler.timed,
        "devices": ddb.data,
        "parameters": pdb.data,
        "parameters_simplehist": simplephist.history,
        "rt_results": rtr.groups,
        "explist": explist.data
    })
    loop.run_until_complete(server_notify.start(args.bind, args.port_notify))
    atexit.register(lambda: loop.run_until_complete(server_notify.stop()))

    loop.run_forever()
def test_steps(self):
    """Undated submission runs through all steps while a far-future timed
    submission stays pending despite its high priority."""
    loop = self.loop
    scheduler = Scheduler(0, dict(), None)
    expid = _get_expid("EmptyExperiment")
    expected = _get_basic_steps(1, expid)
    done = asyncio.Event()
    idx = 0

    def notify(mod):
        nonlocal idx
        self.assertEqual(mod, expected[idx])
        idx += 1
        if idx == len(expected):
            done.set()

    scheduler.notifier.publish = notify
    scheduler.start()
    # Verify that a timed experiment far in the future does not
    # get run, even if it has high priority.
    late = time() + 100000
    expected.insert(0, {
        "action": "setitem",
        "key": 0,
        "value": {"pipeline": "main", "status": "pending", "priority": 99,
                  "expid": expid, "due_date": late, "flush": False,
                  "repo_msg": None},
        "path": []
    })
    scheduler.submit("main", expid, 99, late, False)
    # This one (RID 1) gets run instead.
    scheduler.submit("main", expid, 0, None, False)
    loop.run_until_complete(done.wait())
    scheduler.notifier.publish = None
    loop.run_until_complete(scheduler.stop())
def test_flush(self):
    """A flush=True submission passes through a 'flushing' status while
    an earlier run is still preparing."""
    loop = self.loop
    scheduler = Scheduler(0, dict(), None)
    expid = _get_expid("EmptyExperiment")
    expected = _get_basic_steps(1, expid, 1, True)
    expected.insert(1, {"key": "status", "path": [1],
                        "value": "flushing", "action": "setitem"})
    first_preparing = asyncio.Event()
    done = asyncio.Event()
    idx = 0

    def notify(mod):
        nonlocal idx
        rid0_preparing = {"path": [0], "value": "preparing",
                          "key": "status", "action": "setitem"}
        if mod == rid0_preparing:
            first_preparing.set()
        # Only check mods concerning RID 1 (the flushing submission).
        if mod["path"] == [1] or (mod["path"] == [] and mod["key"] == 1):
            self.assertEqual(mod, expected[idx])
            idx += 1
            if idx == len(expected):
                done.set()

    scheduler.notifier.publish = notify
    scheduler.start()
    scheduler.submit("main", expid, 0, None, False)
    loop.run_until_complete(first_preparing.wait())
    scheduler.submit("main", expid, 1, None, True)
    loop.run_until_complete(done.wait())
    loop.run_until_complete(scheduler.stop())
def test_pause(self):
    """While a background experiment runs, a priority-0 empty experiment
    submitted afterwards completes all its steps."""
    loop = self.loop
    scheduler = Scheduler(0, _handlers, None)
    expid_bg = _get_expid("BackgroundExperiment")
    expid = _get_expid("EmptyExperiment")
    expected_steps = _get_basic_steps(1, expid)
    background_running = asyncio.Event()
    done = asyncio.Event()
    idx = 0

    def notify(mod):
        nonlocal idx
        if mod == {"path": [0], "value": "running",
                   "key": "status", "action": "setitem"}:
            background_running.set()
        # Track only RID 1 (the empty experiment).
        concerns_rid1 = (mod["path"] == [1]
                         or (mod["path"] == [] and mod["key"] == 1))
        if concerns_rid1:
            self.assertEqual(mod, expected_steps[idx])
            idx += 1
            if idx == len(expected_steps):
                done.set()

    scheduler.notifier.publish = notify
    scheduler.start()
    scheduler.submit("main", expid_bg, -99, None, False)
    loop.run_until_complete(background_running.wait())
    scheduler.submit("main", expid, 0, None, False)
    loop.run_until_complete(done.wait())
    loop.run_until_complete(scheduler.stop())
def test_close_with_active_runs(self):
    """Check scheduler exits with experiments still running"""
    loop = self.loop
    scheduler = Scheduler(_RIDCounter(0), {}, None)
    expid_bg = _get_expid("BackgroundExperiment")
    # Suppress the SystemExit backtrace when worker process is killed.
    expid_bg["log_level"] = logging.CRITICAL
    expid = _get_expid("EmptyExperiment")
    background_running = asyncio.Event()
    empty_ready = asyncio.Event()
    background_completed = asyncio.Event()

    def notify(mod):
        # Map (rid, status value) -> event to set on that exact mod.
        waiters = {
            (0, "running"): background_running,
            (0, "deleting"): background_completed,
            (1, "prepare_done"): empty_ready,
        }
        for (rid, value), event in waiters.items():
            if mod == {"path": [rid], "value": value,
                       "key": "status", "action": "setitem"}:
                event.set()

    scheduler.notifier.publish = notify
    scheduler.start()
    scheduler.submit("main", expid_bg, -99, None, False)
    loop.run_until_complete(background_running.wait())
    scheduler.submit("main", expid, 0, None, False)
    loop.run_until_complete(empty_ready.wait())
    # At this point, (at least) BackgroundExperiment is still running; make
    # sure we can stop the scheduler without hanging.
    loop.run_until_complete(scheduler.stop())
def test_close_with_active_runs(self):
    """Check scheduler exits with experiments still running"""
    loop = self.loop
    scheduler = Scheduler(_RIDCounter(0), {}, None)
    expid_bg = _get_expid("BackgroundExperiment")
    # Suppress the SystemExit backtrace when worker process is killed.
    expid_bg["log_level"] = logging.CRITICAL
    expid = _get_expid("EmptyExperiment")
    background_running = asyncio.Event()
    empty_ready = asyncio.Event()
    background_completed = asyncio.Event()

    def status_mod(rid, value):
        # The exact notifier mod emitted on a status transition.
        return {"path": [rid], "value": value,
                "key": "status", "action": "setitem"}

    def notify(mod):
        if mod == status_mod(0, "running"):
            background_running.set()
        if mod == status_mod(0, "deleting"):
            background_completed.set()
        if mod == status_mod(1, "prepare_done"):
            empty_ready.set()

    scheduler.notifier.publish = notify
    scheduler.start()
    scheduler.submit("main", expid_bg, -99, None, False)
    loop.run_until_complete(background_running.wait())
    scheduler.submit("main", expid, 0, None, False)
    loop.run_until_complete(empty_ready.wait())
    # At this point, (at least) BackgroundExperiment is still running; make
    # sure we can stop the scheduler without hanging.
    loop.run_until_complete(scheduler.stop())
def main():
    """ARTIQ master entry point: wire up databases, experiment repository,
    scheduler and network servers on a single event loop, then serve until
    interrupted.

    Teardown is driven by atexit callbacks registered in reverse of the
    desired shutdown order.
    """
    args = get_argparser().parse_args()
    log_forwarder = init_log(args)
    if os.name == "nt":
        # Windows needs the proactor loop (subprocess support).
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(loop.close)
    bind = bind_address_from_args(args)

    server_broadcast = Broadcaster()
    loop.run_until_complete(server_broadcast.start(
        bind, args.port_broadcast))
    atexit_register_coroutine(server_broadcast.stop)
    # Forward master-side log records to connected clients.
    log_forwarder.callback = (lambda msg:
        server_broadcast.broadcast("log", msg))
    def ccb_issue(service, *args, **kwargs):
        # Relay client control broadcast requests from workers to clients.
        msg = {
            "service": service,
            "args": args,
            "kwargs": kwargs
        }
        server_broadcast.broadcast("ccb", msg)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit_register_coroutine(dataset_db.stop)

    # Filled in below, after the scheduler exists; ExperimentDB and
    # Scheduler keep a reference to this (mutable) dict.
    worker_handlers = dict()

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    experiment_db = ExperimentDB(repo_backend, worker_handlers)
    atexit.register(experiment_db.close)

    scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db)
    scheduler.start()
    atexit_register_coroutine(scheduler.stop)

    # RPCs exposed to worker processes.
    worker_handlers.update({
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "scheduler_submit": scheduler.submit,
        "scheduler_delete": scheduler.delete,
        "scheduler_request_termination": scheduler.request_termination,
        "scheduler_get_status": scheduler.get_status,
        "scheduler_check_pause": scheduler.check_pause,
        "ccb_issue": ccb_issue,
    })
    experiment_db.scan_repository_async()

    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_experiment_db": experiment_db
    }, allow_parallel=True)
    loop.run_until_complete(server_control.start(
        bind, args.port_control))
    atexit_register_coroutine(server_control.stop)

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": experiment_db.explist,
        "explist_status": experiment_db.status
    })
    loop.run_until_complete(server_notify.start(
        bind, args.port_notify))
    atexit_register_coroutine(server_notify.stop)

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(
        bind, args.port_logging))
    atexit_register_coroutine(server_logging.stop)

    print("ARTIQ master is now ready.")
    loop.run_forever()
def main():
    """ARTIQ master entry point: wire up databases, experiment repository,
    scheduler and network servers on a single event loop, then serve until
    interrupted.

    Teardown is driven by atexit callbacks registered in reverse of the
    desired shutdown order.
    """
    args = get_argparser().parse_args()
    log_forwarder = init_log(args)
    if os.name == "nt":
        # Windows needs the proactor loop (subprocess support).
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(loop.close)
    bind = bind_address_from_args(args)

    server_broadcast = Broadcaster()
    loop.run_until_complete(server_broadcast.start(
        bind, args.port_broadcast))
    atexit_register_coroutine(server_broadcast.stop)
    # Forward master-side log records to connected clients.
    log_forwarder.callback = (lambda msg:
        server_broadcast.broadcast("log", msg))
    def ccb_issue(service, *args, **kwargs):
        # Relay client control broadcast requests from workers to clients.
        msg = {
            "service": service,
            "args": args,
            "kwargs": kwargs
        }
        server_broadcast.broadcast("ccb", msg)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit_register_coroutine(dataset_db.stop)

    # Filled in below, after the scheduler exists; ExperimentDB and
    # Scheduler keep a reference to this (mutable) dict.
    worker_handlers = dict()

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    experiment_db = ExperimentDB(repo_backend, worker_handlers)
    atexit.register(experiment_db.close)

    scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db)
    scheduler.start()
    atexit_register_coroutine(scheduler.stop)

    # RPCs exposed to worker processes.
    worker_handlers.update({
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "scheduler_submit": scheduler.submit,
        "scheduler_delete": scheduler.delete,
        "scheduler_request_termination": scheduler.request_termination,
        "scheduler_get_status": scheduler.get_status,
        "scheduler_check_pause": scheduler.check_pause,
        "ccb_issue": ccb_issue,
    })
    experiment_db.scan_repository_async()

    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_experiment_db": experiment_db
    }, allow_parallel=True)
    loop.run_until_complete(server_control.start(
        bind, args.port_control))
    atexit_register_coroutine(server_control.stop)

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": experiment_db.explist,
        "explist_status": experiment_db.status
    })
    loop.run_until_complete(server_notify.start(
        bind, args.port_notify))
    atexit_register_coroutine(server_notify.stop)

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(
        bind, args.port_logging))
    atexit_register_coroutine(server_logging.stop)

    logger.info("running, bound to %s", bind)
    loop.run_forever()
def main():
    """ARTIQ master entry point: wire up databases, experiment repository,
    scheduler and network servers on a single event loop, then serve until
    a termination signal arrives.

    Teardown is driven by atexit callbacks registered in reverse of the
    desired shutdown order.
    """
    args = get_argparser().parse_args()
    log_forwarder = init_log(args)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    atexit.register(loop.close)
    signal_handler = SignalHandler()
    signal_handler.setup()
    atexit.register(signal_handler.teardown)
    bind = common_args.bind_address_from_args(args)

    server_broadcast = Broadcaster()
    loop.run_until_complete(server_broadcast.start(
        bind, args.port_broadcast))
    atexit_register_coroutine(server_broadcast.stop)
    # Forward master-side log records to connected clients.
    log_forwarder.callback = (lambda msg:
        server_broadcast.broadcast("log", msg))
    def ccb_issue(service, *args, **kwargs):
        # Relay client control broadcast requests from workers to clients.
        msg = {
            "service": service,
            "args": args,
            "kwargs": kwargs
        }
        server_broadcast.broadcast("ccb", msg)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit_register_coroutine(dataset_db.stop)

    # Filled in below, after the scheduler exists; ExperimentDB and
    # Scheduler keep a reference to this (mutable) dict.
    worker_handlers = dict()

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    experiment_db = ExperimentDB(
        repo_backend, worker_handlers, args.experiment_subdir)
    atexit.register(experiment_db.close)

    scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db,
        args.log_submissions)
    scheduler.start()
    atexit_register_coroutine(scheduler.stop)

    config = MasterConfig(args.name)

    # RPCs exposed to worker processes.
    worker_handlers.update({
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "scheduler_submit": scheduler.submit,
        "scheduler_delete": scheduler.delete,
        "scheduler_request_termination": scheduler.request_termination,
        "scheduler_get_status": scheduler.get_status,
        "scheduler_check_pause": scheduler.check_pause,
        "scheduler_check_termination": scheduler.check_termination,
        "ccb_issue": ccb_issue,
    })
    experiment_db.scan_repository_async()

    server_control = RPCServer({
        "master_config": config,
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_experiment_db": experiment_db
    }, allow_parallel=True)
    loop.run_until_complete(server_control.start(
        bind, args.port_control))
    atexit_register_coroutine(server_control.stop)

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": experiment_db.explist,
        "explist_status": experiment_db.status
    })
    loop.run_until_complete(server_notify.start(
        bind, args.port_notify))
    atexit_register_coroutine(server_notify.stop)

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(
        bind, args.port_logging))
    atexit_register_coroutine(server_logging.stop)

    print("ARTIQ master is now ready.")
    # Block until SIGTERM/SIGINT; atexit callbacks then perform teardown.
    loop.run_until_complete(signal_handler.wait_terminate())
def main():
    """ARTIQ master entry point: wire up databases, experiment repository,
    scheduler and network servers on a single event loop, then serve until
    interrupted.

    Teardown is driven by atexit callbacks registered in reverse of the
    desired shutdown order.
    """
    args = get_argparser().parse_args()
    log_buffer = init_log(args)
    if os.name == "nt":
        # Windows needs the proactor loop (subprocess support).
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(loop.close)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit_register_coroutine(dataset_db.stop)

    # Filled in below, after the scheduler exists; ExperimentDB and
    # Scheduler keep a reference to this (mutable) dict.
    worker_handlers = dict()

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    experiment_db = ExperimentDB(repo_backend, worker_handlers)
    atexit.register(experiment_db.close)

    scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db)
    scheduler.start()
    atexit_register_coroutine(scheduler.stop)

    # RPCs exposed to worker processes.
    worker_handlers.update({
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "scheduler_submit": scheduler.submit,
        "scheduler_delete": scheduler.delete,
        "scheduler_request_termination": scheduler.request_termination,
        "scheduler_get_status": scheduler.get_status
    })
    experiment_db.scan_repository_async()

    bind = bind_address_from_args(args)

    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_experiment_db": experiment_db
    }, allow_parallel=True)
    loop.run_until_complete(server_control.start(
        bind, args.port_control))
    atexit_register_coroutine(server_control.stop)

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": experiment_db.explist,
        "explist_status": experiment_db.status,
        "log": log_buffer.data
    })
    loop.run_until_complete(server_notify.start(
        bind, args.port_notify))
    atexit_register_coroutine(server_notify.stop)

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(
        bind, args.port_logging))
    atexit_register_coroutine(server_logging.stop)

    logger.info("running, bound to %s", bind)
    loop.run_forever()
def test_pause(self):
    """Terminating a paused background run publishes termination_ok via
    update_dataset and allows the run to complete."""
    loop = self.loop
    termination_ok = False

    def check_termination(mod):
        nonlocal termination_ok
        expected_mod = {
            "action": "setitem",
            "key": "termination_ok",
            "value": (False, True),
            "path": []
        }
        self.assertEqual(mod, expected_mod)
        termination_ok = True

    handlers = {"update_dataset": check_termination}
    scheduler = Scheduler(0, handlers, None)
    expid_bg = _get_expid("BackgroundExperiment")
    expid = _get_expid("EmptyExperiment")
    expected_steps = _get_basic_steps(1, expid)
    background_running = asyncio.Event()
    empty_completed = asyncio.Event()
    background_completed = asyncio.Event()
    idx = 0

    def notify(mod):
        nonlocal idx
        if mod == {"path": [0], "value": "running",
                   "key": "status", "action": "setitem"}:
            background_running.set()
        if mod == {"path": [0], "value": "deleting",
                   "key": "status", "action": "setitem"}:
            background_completed.set()
        # Track only RID 1 (the empty experiment).
        if mod["path"] == [1] or (mod["path"] == [] and mod["key"] == 1):
            self.assertEqual(mod, expected_steps[idx])
            idx += 1
            if idx == len(expected_steps):
                empty_completed.set()

    scheduler.notifier.publish = notify
    scheduler.start()
    scheduler.submit("main", expid_bg, -99, None, False)
    loop.run_until_complete(background_running.wait())
    scheduler.submit("main", expid, 0, None, False)
    loop.run_until_complete(empty_completed.wait())
    self.assertFalse(termination_ok)
    scheduler.request_termination(0)
    loop.run_until_complete(background_completed.wait())
    self.assertTrue(termination_ok)
    loop.run_until_complete(scheduler.stop())
def test_pending_priority(self):
    """Check due dates take precedence over priorities when waiting to
    prepare."""
    loop = self.loop
    handlers = {}
    scheduler = Scheduler(_RIDCounter(0), handlers, None)
    handlers["scheduler_check_pause"] = scheduler.check_pause
    expid_empty = _get_expid("EmptyExperiment")
    expid_bg = _get_expid("CheckPauseBackgroundExperiment")
    # Suppress the SystemExit backtrace when worker process is killed.
    expid_bg["log_level"] = logging.CRITICAL

    high_priority = 3
    middle_priority = 2
    low_priority = 1
    late = time() + 100000
    early = time() + 1

    def pending_mod(key, expid, priority, due_date):
        # Schedule entry published when a run is submitted.
        return {
            "path": [], "action": "setitem", "key": key,
            "value": {"repo_msg": None, "priority": priority,
                      "pipeline": "main", "due_date": due_date,
                      "status": "pending", "expid": expid,
                      "flush": False}
        }

    def status_mod(rid, value):
        # Status transition published for an existing run.
        return {"path": [rid], "action": "setitem",
                "value": value, "key": "status"}

    expect = [
        pending_mod(0, expid_bg, low_priority, None),
        pending_mod(1, expid_empty, high_priority, late),
        pending_mod(2, expid_empty, middle_priority, early),
        status_mod(0, "preparing"),
        status_mod(0, "prepare_done"),
        status_mod(0, "running"),
        # The early-due-date run (RID 2) prepares before the
        # higher-priority but far-future one (RID 1).
        status_mod(2, "preparing"),
        status_mod(2, "prepare_done"),
        status_mod(0, "paused"),
        status_mod(2, "running"),
        status_mod(2, "run_done"),
        status_mod(0, "running"),
        status_mod(2, "analyzing"),
        status_mod(2, "deleting"),
        {"path": [], "action": "delitem", "key": 2},
    ]
    done = asyncio.Event()
    idx = 0

    def notify(mod):
        nonlocal idx
        self.assertEqual(mod, expect[idx])
        idx += 1
        if idx == len(expect):
            done.set()

    scheduler.notifier.publish = notify
    scheduler.start()
    scheduler.submit("main", expid_bg, low_priority)
    scheduler.submit("main", expid_empty, high_priority, late)
    scheduler.submit("main", expid_empty, middle_priority, early)
    loop.run_until_complete(done.wait())
    scheduler.notifier.publish = None
    loop.run_until_complete(scheduler.stop())
def main():
    """ARTIQ master entry point: set up the event loop, databases,
    repository, scheduler and network servers, then serve forever.

    Shutdown is handled entirely through ``atexit`` callbacks, registered
    in reverse order of desired teardown.
    """
    args = get_argparser().parse_args()
    log_buffer = init_log(args)
    if os.name == "nt":
        # Windows needs the proactor loop (subprocess support).
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    # Register the bound method directly; wrapping it in a lambda adds nothing.
    atexit.register(loop.close)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit.register(lambda: loop.run_until_complete(dataset_db.stop()))

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    repository = Repository(repo_backend, device_db.get_device_db, log_worker)
    atexit.register(repository.close)
    repository.scan_async()

    # RPCs exposed to worker processes.
    worker_handlers = {
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "log": log_worker,
    }
    scheduler = Scheduler(get_last_rid() + 1, worker_handlers, repo_backend)
    # Added after construction so the scheduler itself is in scope.
    worker_handlers["scheduler_submit"] = scheduler.submit
    scheduler.start()
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))

    server_control = RPCServer(
        {
            "master_device_db": device_db,
            "master_dataset_db": dataset_db,
            "master_schedule": scheduler,
            "master_repository": repository,
        }
    )
    loop.run_until_complete(server_control.start(args.bind, args.port_control))
    atexit.register(lambda: loop.run_until_complete(server_control.stop()))

    server_notify = Publisher(
        {
            "schedule": scheduler.notifier,
            "devices": device_db.data,
            "datasets": dataset_db.data,
            "explist": repository.explist,
            "log": log_buffer.data,
        }
    )
    loop.run_until_complete(server_notify.start(args.bind, args.port_notify))
    atexit.register(lambda: loop.run_until_complete(server_notify.stop()))

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(args.bind, args.port_logging))
    atexit.register(lambda: loop.run_until_complete(server_logging.stop()))

    loop.run_forever()