def main():
    """Master entry point: set up the event loop, databases, repository,
    scheduler and the control/notify/logging network servers, then run the
    loop forever.

    Shutdown is driven entirely by ``atexit``: handlers run in LIFO order,
    so services stop in reverse order of creation and the loop itself is
    closed last.
    """
    args = get_argparser().parse_args()
    log_buffer = init_log(args)
    if os.name == "nt":
        # Windows needs the proactor loop for subprocess support.
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    # Fix: register the bound method directly instead of wrapping the
    # zero-argument call in a needless lambda. Registered first -> runs
    # last, after all services have been stopped.
    atexit.register(loop.close)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit.register(lambda: loop.run_until_complete(dataset_db.stop()))

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    repository = Repository(repo_backend, device_db.get_device_db, log_worker)
    atexit.register(repository.close)
    repository.scan_async()

    # Handlers exposed to experiment worker processes.
    worker_handlers = {
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "log": log_worker
    }
    scheduler = Scheduler(get_last_rid() + 1, worker_handlers, repo_backend)
    # The scheduler needs worker_handlers at construction time, but workers
    # must also be able to submit experiments back to the scheduler, hence
    # the post-construction insertion.
    worker_handlers["scheduler_submit"] = scheduler.submit
    scheduler.start()
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))

    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_repository": repository
    })
    loop.run_until_complete(server_control.start(args.bind, args.port_control))
    atexit.register(lambda: loop.run_until_complete(server_control.stop()))

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": repository.explist,
        "log": log_buffer.data
    })
    loop.run_until_complete(server_notify.start(args.bind, args.port_notify))
    atexit.register(lambda: loop.run_until_complete(server_notify.stop()))

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(args.bind, args.port_logging))
    atexit.register(lambda: loop.run_until_complete(server_logging.stop()))

    loop.run_forever()
def main():
    """Master entry point (flat-file DB variant): set up the event loop,
    device/parameter databases, real-time results, repository and scheduler,
    start the control and notify servers, then run the loop forever.

    Shutdown is driven by ``atexit``: handlers run in LIFO order, so
    services stop in reverse order of creation and the loop is closed last.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    if os.name == "nt":
        # Windows needs the proactor loop for subprocess support.
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    # Fix: register the bound method directly instead of wrapping the
    # zero-argument call in a needless lambda. Registered first -> runs last.
    atexit.register(loop.close)

    ddb = FlatFileDB(args.ddb)
    pdb = FlatFileDB(args.pdb)
    rtr = Notifier(dict())
    log = Log(1000)

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    repository = Repository(repo_backend, log.log)
    atexit.register(repository.close)
    repository.scan_async()

    # Handlers exposed to experiment worker processes.
    worker_handlers = {
        "get_device": ddb.get,
        "get_parameter": pdb.get,
        "set_parameter": pdb.set,
        "update_rt_results": lambda mod: process_mod(rtr, mod),
        "log": log.log
    }
    scheduler = Scheduler(get_last_rid() + 1, worker_handlers, repo_backend)
    # Workers must be able to submit experiments back to the scheduler,
    # hence the post-construction insertion.
    worker_handlers["scheduler_submit"] = scheduler.submit
    scheduler.start()
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))

    server_control = Server({
        "master_ddb": ddb,
        "master_pdb": pdb,
        "master_schedule": scheduler,
        "master_repository": repository
    })
    loop.run_until_complete(server_control.start(
        args.bind, args.port_control))
    atexit.register(lambda: loop.run_until_complete(server_control.stop()))

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": ddb.data,
        "parameters": pdb.data,
        "rt_results": rtr,
        "explist": repository.explist,
        "log": log.data
    })
    loop.run_until_complete(server_notify.start(
        args.bind, args.port_notify))
    atexit.register(lambda: loop.run_until_complete(server_notify.stop()))

    loop.run_forever()
def main():
    """Master entry point (pyon flat-file variant): load the device,
    parameter and experiment-list databases, start the scheduler and the
    control/notify servers, then run the event loop forever.

    Shutdown is driven by ``atexit``: handlers run in LIFO order, so the
    servers and scheduler stop before the loop is closed.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    ddb = FlatFileDB("ddb.pyon")
    pdb = FlatFileDB("pdb.pyon")
    simplephist = SimpleHistory(30)
    # Record parameter changes into the rolling history.
    pdb.hooks.append(simplephist)
    rtr = RTResults()
    repository = Repository()
    explist = FlatFileDB("explist.pyon")

    loop = asyncio.get_event_loop()
    # Fix: register the bound method directly instead of wrapping the
    # zero-argument call in a needless lambda. Registered first -> runs last.
    atexit.register(loop.close)

    def run_cb(rid, run_params):
        # Route real-time results of the run that is about to start
        # into its own group.
        rtr.current_group = run_params["rtr_group"]
    scheduler = Scheduler(
        {
            "req_device": ddb.request,
            "req_parameter": pdb.request,
            "set_parameter": pdb.set,
            "init_rt_results": rtr.init,
            "update_rt_results": rtr.update
        },
        run_cb)
    loop.run_until_complete(scheduler.start())
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))

    server_control = Server({
        "master_ddb": ddb,
        "master_pdb": pdb,
        "master_schedule": scheduler,
        "master_repository": repository,
        "master_explist": explist
    })
    loop.run_until_complete(server_control.start(args.bind, args.port_control))
    atexit.register(lambda: loop.run_until_complete(server_control.stop()))

    server_notify = Publisher({
        "queue": scheduler.queue,
        "timed": scheduler.timed,
        "devices": ddb.data,
        "parameters": pdb.data,
        "parameters_simplehist": simplephist.history,
        "rt_results": rtr.groups,
        "explist": explist.data
    })
    loop.run_until_complete(server_notify.start(args.bind, args.port_notify))
    atexit.register(lambda: loop.run_until_complete(server_notify.stop()))

    loop.run_forever()
def main():
    """Master entry point (broadcast/CCB variant): set up the event loop,
    broadcaster, databases, experiment DB, scheduler and the
    control/notify/logging servers, then run the loop forever.

    NOTE(review): statement order matters throughout — atexit handlers run
    in LIFO order (so teardown mirrors setup), and ``worker_handlers`` is
    populated after the Scheduler is constructed but before the repository
    scan is started.
    """
    args = get_argparser().parse_args()
    log_forwarder = init_log(args)
    if os.name == "nt":
        # Windows: the proactor loop is required for subprocess support.
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    # Registered first -> runs last, after every service has stopped.
    atexit.register(loop.close)
    bind = bind_address_from_args(args)

    server_broadcast = Broadcaster()
    loop.run_until_complete(server_broadcast.start(
        bind, args.port_broadcast))
    atexit_register_coroutine(server_broadcast.stop)
    # Forward log records to all connected clients via the broadcaster.
    log_forwarder.callback = (lambda msg:
        server_broadcast.broadcast("log", msg))
    def ccb_issue(service, *args, **kwargs):
        # Relay a client-control-broadcast request to all clients.
        msg = {
            "service": service,
            "args": args,
            "kwargs": kwargs
        }
        server_broadcast.broadcast("ccb", msg)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit_register_coroutine(dataset_db.stop)
    worker_handlers = dict()
    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    experiment_db = ExperimentDB(repo_backend, worker_handlers)
    atexit.register(experiment_db.close)

    scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db)
    scheduler.start()
    atexit_register_coroutine(scheduler.stop)

    # Handlers exposed to experiment worker processes. The dict object was
    # handed to ExperimentDB/Scheduler above, so this in-place update is
    # visible to them.
    worker_handlers.update({
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "scheduler_submit": scheduler.submit,
        "scheduler_delete": scheduler.delete,
        "scheduler_request_termination": scheduler.request_termination,
        "scheduler_get_status": scheduler.get_status,
        "scheduler_check_pause": scheduler.check_pause,
        "ccb_issue": ccb_issue,
    })
    experiment_db.scan_repository_async()

    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_experiment_db": experiment_db
    }, allow_parallel=True)
    loop.run_until_complete(server_control.start(
        bind, args.port_control))
    atexit_register_coroutine(server_control.stop)

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": experiment_db.explist,
        "explist_status": experiment_db.status
    })
    loop.run_until_complete(server_notify.start(
        bind, args.port_notify))
    atexit_register_coroutine(server_notify.stop)

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(
        bind, args.port_logging))
    atexit_register_coroutine(server_logging.stop)

    logger.info("running, bound to %s", bind)
    loop.run_forever()
def __init__(self):
    """Load configuration, connect to the wavemeter/OSA hardware, seed the
    measurement queue with an initial reading per laser, and construct the
    "notify" (Publisher) and "control" (RPC) server interfaces.

    NOTE(review): servers are constructed here but not started; presumably
    a separate run/start method does that — verify against the caller.
    """
    self.args = args = get_argparser().parse_args()
    init_logger(args)

    self.config = load_config(args, "_server")
    self.lasers = self.config["lasers"].keys()
    for laser in self.lasers:
        # No laser is locked until a lock is explicitly established.
        self.config["lasers"][laser]["lock_ready"] = False

    # connect to hardware
    self.wlm = WLM(args.simulation)
    self.osas = OSAs(self.config["osas"], args.simulation)

    # Exposure limits and CCD count as reported by the wavemeter.
    self.exp_min = self.wlm.get_exposure_min()
    self.exp_max = self.wlm.get_exposure_max()
    self.num_ccds = self.wlm.get_num_ccds()

    if self.config["switch"]["type"] == "internal":
        # Use the wavemeter's built-in fibre switch.
        self.switch = self.wlm.get_switch()
    elif self.config["switch"]["type"] == "leoni":
        self.switch = LeoniSwitch(
            self.config["switch"]["ip"], args.simulation)
    else:
        raise ValueError("Unrecognised switch type: {}".format(
            self.config["switch"]["type"]))

    # measurement queue, processed by self.measurement_task
    self.measurement_ids = task_id_generator()
    self.measurements_queued = asyncio.Event()
    self.queue = []
    # Per-laser events used to wake tasks waiting on that laser.
    self.wake_locks = {laser: asyncio.Event() for laser in self.lasers}

    # schedule initial frequency/osa readings all lasers
    self.measurements_queued.set()
    for laser in self.lasers:
        self.queue.append({
            "laser": laser,
            "priority": 0,
            "expiry": time.time(),
            "id": next(self.measurement_ids),
            "get_osa_trace": True,
            "done": asyncio.Event()
        })

    # "notify" interface
    self.laser_db = Notifier(self.config["lasers"])
    self.freq_db = Notifier({laser: {
        "freq": None,
        "status": WLMMeasurementStatus.ERROR,
        "timestamp": 0
    } for laser in self.lasers})
    self.osa_db = Notifier({laser: {
        "trace": None,
        "timestamp": 0
    } for laser in self.lasers})
    self.server_notify = Publisher({
        "laser_db": self.laser_db,  # laser settings
        "freq_db": self.freq_db,  # most recent frequency measurements
        "osa_db": self.osa_db  # most recent osa traces
    })

    # "control" interface
    self.control_interface = ControlInterface(self)
    self.server_control = RPCServer({"control": self.control_interface},
                                    allow_parallel=True)

    self.running = False