def main():
    args = get_argparser().parse_args()
    init_logger(args)

    loop = asyncio.get_event_loop()
    atexit.register(loop.close)

    writer = DBWriter(args.baseurl_db,
                      args.user_db, args.password_db,
                      args.database, args.table)
    writer.start()
    atexit_register_coroutine(writer.stop)

    log = Log(writer)

    server = Logger()
    rpc_server = Server({"schedule_logger": server}, builtin_terminate=True)
    loop.run_until_complete(
        rpc_server.start(bind_address_from_args(args), args.port_control))
    atexit_register_coroutine(rpc_server.stop)

    reader = MasterReader(args.server_master, args.port_master,
                          args.retry_master, log)
    reader.start()
    atexit_register_coroutine(reader.stop)

    loop.run_until_complete(rpc_server.wait_terminate())

def main():
    args = get_argparser().parse_args()
    tools.init_logger(args)

    loop = asyncio.get_event_loop()
    try:
        get_logs_task = asyncio.ensure_future(
            get_logs_sim(args.core_addr) if args.simulation
            else get_logs(args.core_addr))
        try:
            server = Server({"corelog": PingTarget()}, None, True)
            loop.run_until_complete(
                server.start(tools.bind_address_from_args(args), args.port))
            try:
                loop.run_until_complete(server.wait_terminate())
            finally:
                loop.run_until_complete(server.stop())
        finally:
            get_logs_task.cancel()
            try:
                loop.run_until_complete(get_logs_task)
            except asyncio.CancelledError:
                pass
    finally:
        loop.close()

def main():
    args = get_argparser().parse_args()
    init_logger(args)

    loop = asyncio.get_event_loop()
    atexit.register(loop.close)

    writer = DBWriter(args.baseurl_db,
                      args.user_db, args.password_db,
                      args.database, args.table)
    writer.start()
    atexit_register_coroutine(writer.stop)

    filter = Filter(args.pattern_file)
    rpc_server = Server({"influxdb_filter": filter}, builtin_terminate=True)
    loop.run_until_complete(
        rpc_server.start(bind_address_from_args(args), args.port_control))
    atexit_register_coroutine(rpc_server.stop)

    reader = MasterReader(args.server_master, args.port_master,
                          args.retry_master, filter._filter, writer)
    reader.start()
    atexit_register_coroutine(reader.stop)

    loop.run_until_complete(rpc_server.wait_terminate())

def main():
    args = get_argparser().parse_args()
    log_buffer = init_log(args)
    if os.name == "nt":
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(loop.close)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit.register(lambda: loop.run_until_complete(dataset_db.stop()))

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    repository = Repository(repo_backend, device_db.get_device_db, log_worker)
    atexit.register(repository.close)
    repository.scan_async()

    worker_handlers = {
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "log": log_worker
    }
    scheduler = Scheduler(get_last_rid() + 1, worker_handlers, repo_backend)
    worker_handlers["scheduler_submit"] = scheduler.submit
    scheduler.start()
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))

    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_repository": repository
    })
    loop.run_until_complete(server_control.start(
        args.bind, args.port_control))
    atexit.register(lambda: loop.run_until_complete(server_control.stop()))

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": repository.explist,
        "log": log_buffer.data
    })
    loop.run_until_complete(server_notify.start(
        args.bind, args.port_notify))
    atexit.register(lambda: loop.run_until_complete(server_notify.stop()))

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(
        args.bind, args.port_logging))
    atexit.register(lambda: loop.run_until_complete(server_logging.stop()))

    loop.run_forever()

def main():
    args = get_argparser().parse_args()
    init_logger(args)
    if os.name == "nt":
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(loop.close)

    ddb = FlatFileDB(args.ddb)
    pdb = FlatFileDB(args.pdb)
    rtr = Notifier(dict())
    log = Log(1000)

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    repository = Repository(repo_backend, log.log)
    atexit.register(repository.close)
    repository.scan_async()

    worker_handlers = {
        "get_device": ddb.get,
        "get_parameter": pdb.get,
        "set_parameter": pdb.set,
        "update_rt_results": lambda mod: process_mod(rtr, mod),
        "log": log.log
    }
    scheduler = Scheduler(get_last_rid() + 1, worker_handlers, repo_backend)
    worker_handlers["scheduler_submit"] = scheduler.submit
    scheduler.start()
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))

    server_control = Server({
        "master_ddb": ddb,
        "master_pdb": pdb,
        "master_schedule": scheduler,
        "master_repository": repository
    })
    loop.run_until_complete(server_control.start(
        args.bind, args.port_control))
    atexit.register(lambda: loop.run_until_complete(server_control.stop()))

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": ddb.data,
        "parameters": pdb.data,
        "rt_results": rtr,
        "explist": repository.explist,
        "log": log.data
    })
    loop.run_until_complete(server_notify.start(
        args.bind, args.port_notify))
    atexit.register(lambda: loop.run_until_complete(server_notify.stop()))

    loop.run_forever()

def main():
    args = get_argparser().parse_args()
    init_logger(args)

    ddb = FlatFileDB("ddb.pyon")
    pdb = FlatFileDB("pdb.pyon")
    simplephist = SimpleHistory(30)
    pdb.hooks.append(simplephist)
    rtr = RTResults()
    repository = Repository()
    explist = FlatFileDB("explist.pyon")

    loop = asyncio.get_event_loop()
    atexit.register(loop.close)

    def run_cb(rid, run_params):
        rtr.current_group = run_params["rtr_group"]
    scheduler = Scheduler({
        "req_device": ddb.request,
        "req_parameter": pdb.request,
        "set_parameter": pdb.set,
        "init_rt_results": rtr.init,
        "update_rt_results": rtr.update
    }, run_cb)
    loop.run_until_complete(scheduler.start())
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))

    server_control = Server({
        "master_ddb": ddb,
        "master_pdb": pdb,
        "master_schedule": scheduler,
        "master_repository": repository,
        "master_explist": explist
    })
    loop.run_until_complete(server_control.start(
        args.bind, args.port_control))
    atexit.register(lambda: loop.run_until_complete(server_control.stop()))

    server_notify = Publisher({
        "queue": scheduler.queue,
        "timed": scheduler.timed,
        "devices": ddb.data,
        "parameters": pdb.data,
        "parameters_simplehist": simplephist.history,
        "rt_results": rtr.groups,
        "explist": explist.data
    })
    loop.run_until_complete(server_notify.start(
        args.bind, args.port_notify))
    atexit.register(lambda: loop.run_until_complete(server_notify.stop()))

    loop.run_forever()

def main():
    args = get_argparser().parse_args()

    root_logger = logging.getLogger()
    root_logger.setLevel(logging.NOTSET)
    source_adder = SourceFilter(
        logging.WARNING + args.quiet*10 - args.verbose*10,
        "ctlmgr({})".format(platform.node()))
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter(
        "%(levelname)s:%(source)s:%(name)s:%(message)s"))
    console_handler.addFilter(source_adder)
    root_logger.addHandler(console_handler)

    if os.name == "nt":
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(loop.close)

    logfwd = LogForwarder(args.server, args.port_logging,
                          args.retry_master)
    logfwd.addFilter(source_adder)
    root_logger.addHandler(logfwd)
    logfwd.start()
    atexit_register_coroutine(logfwd.stop)

    ctlmgr = ControllerManager(args.server, args.port_notify,
                               args.retry_master)
    ctlmgr.start()
    atexit_register_coroutine(ctlmgr.stop)

    class CtlMgrRPC:
        retry_now = ctlmgr.retry_now

    rpc_target = CtlMgrRPC()
    rpc_server = Server({"ctlmgr": rpc_target}, builtin_terminate=True)
    loop.run_until_complete(rpc_server.start(
        bind_address_from_args(args), args.port_control))
    atexit_register_coroutine(rpc_server.stop)

    loop.run_until_complete(rpc_server.wait_terminate())

def main(): args = get_argparser().parse_args() loop = asyncio.get_event_loop() try: get_logs_task = asyncio.ensure_future(get_logs(args.core_addr)) try: server = Server({"corelog": PingTarget()}, None, True) loop.run_until_complete(server.start(bind_address_from_args(args), args.port)) try: multiline_log_config(logging.DEBUG) loop.run_until_complete(server.wait_terminate()) finally: loop.run_until_complete(server.stop()) finally: get_logs_task.cancel() try: loop.run_until_complete(get_logs_task) except asyncio.CancelledError: pass finally: loop.close()
def run_simple_rpc_server(port, setup_args, interface_name, setup_interface):
    parser = ArgumentParser()
    influxdb_args(parser)
    simple_network_args(parser, port)
    verbosity_args(parser)
    if setup_args:
        setup_args(parser)
    args = parser.parse_args()

    init_logger(args)
    loop = asyncio.get_event_loop()
    atexit.register(loop.close)

    influx_pusher = influxdb_pusher_from_args(args)
    if influx_pusher:
        t = asyncio.ensure_future(influx_pusher.run())

        def stop():
            t.cancel()
            try:
                loop.run_until_complete(t)
            except asyncio.CancelledError:
                pass

        atexit.register(stop)

    interface = setup_interface(args, influx_pusher, loop)

    # Provide a default ping() method, which ARTIQ calls regularly for
    # heartbeating purposes.
    if not hasattr(interface, "ping"):
        setattr(interface, "ping", lambda: True)

    rpc_server = Server({interface_name: interface}, builtin_terminate=True)
    loop.run_until_complete(rpc_server.start(
        bind_address_from_args(args), args.port))
    atexit_register_coroutine(rpc_server.stop)

    loop.run_until_complete(rpc_server.wait_terminate())

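# A minimal usage sketch for run_simple_rpc_server(), assuming a
# hypothetical controller: ExampleInterface, example_args, the "example"
# target name and the port number 4000 are placeholders, not part of any
# real driver; only the call signature comes from the function above.
class ExampleInterface:
    def __init__(self, args, influx_pusher, loop):
        self.args = args

    def echo(self, s):
        # trivial RPC method, reachable under the "example" target
        return s


def example_args(parser):
    # add controller-specific command-line arguments here
    parser.add_argument("--device", default=None)


def example_main():
    # ExampleInterface(args, influx_pusher, loop) matches the
    # setup_interface(args, influx_pusher, loop) call made above
    run_simple_rpc_server(4000, example_args, "example", ExampleInterface)
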
def main():
    args = get_argparser().parse_args()
    init_logger(args)

    loop = asyncio.get_event_loop()
    atexit.register(loop.close)

    writer = DBWriter(args.baseurl_db,
                      args.user_db, args.password_db,
                      args.database, args.table)
    writer.start()
    atexit.register(lambda: loop.run_until_complete(writer.stop()))

    filter = Filter(args.pattern_file)
    rpc_server = Server({"influxdb_filter": filter}, builtin_terminate=True)
    loop.run_until_complete(rpc_server.start(args.bind, args.bind_port))
    atexit.register(lambda: loop.run_until_complete(rpc_server.stop()))

    reader = MasterReader(args.server_master, args.port_master,
                          args.retry_master, filter._filter, writer)
    reader.start()
    atexit.register(lambda: loop.run_until_complete(reader.stop()))

    loop.run_until_complete(rpc_server.wait_terminate())

def main(): args = get_argparser().parse_args() loop = asyncio.get_event_loop() try: get_logs_task = asyncio.ensure_future(get_logs(args.core_addr)) try: server = Server({"corelog": PingTarget()}, None, True) loop.run_until_complete( server.start(bind_address_from_args(args), args.port)) try: multiline_log_config(logging.TRACE) loop.run_until_complete(server.wait_terminate()) finally: loop.run_until_complete(server.stop()) finally: get_logs_task.cancel() try: loop.run_until_complete(get_logs_task) except asyncio.CancelledError: pass finally: loop.close()
def main(): args = get_argparser().parse_args() init_logger(args) if os.name == "nt": loop = asyncio.ProactorEventLoop() asyncio.set_event_loop(loop) else: loop = asyncio.get_event_loop() atexit.register(lambda: loop.close()) ctlmgr = ControllerManager(args.server, args.port, args.retry_master) ctlmgr.start() atexit.register(lambda: loop.run_until_complete(ctlmgr.stop())) class CtlMgrRPC: retry_now = ctlmgr.retry_now rpc_target = CtlMgrRPC() rpc_server = Server({"ctlmgr": rpc_target}, builtin_terminate=True) loop.run_until_complete(rpc_server.start(args.bind, args.bind_port)) atexit.register(lambda: loop.run_until_complete(rpc_server.stop())) loop.run_until_complete(rpc_server.wait_terminate())
def main():
    args = get_argparser().parse_args()
    log_forwarder = init_log(args)
    if os.name == "nt":
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(loop.close)

    bind = bind_address_from_args(args)

    server_broadcast = Broadcaster()
    loop.run_until_complete(server_broadcast.start(
        bind, args.port_broadcast))
    atexit_register_coroutine(server_broadcast.stop)

    log_forwarder.callback = (lambda msg:
                              server_broadcast.broadcast("log", msg))

    def ccb_issue(service, *args, **kwargs):
        msg = {
            "service": service,
            "args": args,
            "kwargs": kwargs
        }
        server_broadcast.broadcast("ccb", msg)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit_register_coroutine(dataset_db.stop)

    worker_handlers = dict()

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    experiment_db = ExperimentDB(repo_backend, worker_handlers)
    atexit.register(experiment_db.close)

    scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db)
    scheduler.start()
    atexit_register_coroutine(scheduler.stop)

    worker_handlers.update({
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "scheduler_submit": scheduler.submit,
        "scheduler_delete": scheduler.delete,
        "scheduler_request_termination": scheduler.request_termination,
        "scheduler_get_status": scheduler.get_status,
        "scheduler_check_pause": scheduler.check_pause,
        "ccb_issue": ccb_issue,
    })
    experiment_db.scan_repository_async()

    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_experiment_db": experiment_db
    }, allow_parallel=True)
    loop.run_until_complete(server_control.start(
        bind, args.port_control))
    atexit_register_coroutine(server_control.stop)

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": experiment_db.explist,
        "explist_status": experiment_db.status
    })
    loop.run_until_complete(server_notify.start(
        bind, args.port_notify))
    atexit_register_coroutine(server_notify.stop)

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(
        bind, args.port_logging))
    atexit_register_coroutine(server_logging.stop)

    logger.info("running, bound to %s", bind)
    loop.run_forever()

def main(): args = get_argparser().parse_args() log_buffer = init_log(args) if os.name == "nt": loop = asyncio.ProactorEventLoop() asyncio.set_event_loop(loop) else: loop = asyncio.get_event_loop() atexit.register(loop.close) device_db = DeviceDB(args.device_db) dataset_db = DatasetDB(args.dataset_db) dataset_db.start() atexit_register_coroutine(dataset_db.stop) worker_handlers = dict() if args.git: repo_backend = GitBackend(args.repository) else: repo_backend = FilesystemBackend(args.repository) experiment_db = ExperimentDB(repo_backend, worker_handlers) atexit.register(experiment_db.close) scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db) scheduler.start() atexit_register_coroutine(scheduler.stop) worker_handlers.update({ "get_device_db": device_db.get_device_db, "get_device": device_db.get, "get_dataset": dataset_db.get, "update_dataset": dataset_db.update, "scheduler_submit": scheduler.submit, "scheduler_delete": scheduler.delete, "scheduler_request_termination": scheduler.request_termination, "scheduler_get_status": scheduler.get_status }) experiment_db.scan_repository_async() bind = bind_address_from_args(args) server_control = RPCServer({ "master_device_db": device_db, "master_dataset_db": dataset_db, "master_schedule": scheduler, "master_experiment_db": experiment_db }, allow_parallel=True) loop.run_until_complete(server_control.start( bind, args.port_control)) atexit_register_coroutine(server_control.stop) server_notify = Publisher({ "schedule": scheduler.notifier, "devices": device_db.data, "datasets": dataset_db.data, "explist": experiment_db.explist, "explist_status": experiment_db.status, "log": log_buffer.data }) loop.run_until_complete(server_notify.start( bind, args.port_notify)) atexit_register_coroutine(server_notify.stop) server_logging = LoggingServer() loop.run_until_complete(server_logging.start( bind, args.port_logging)) atexit_register_coroutine(server_logging.stop) logger.info("running, bound to %s", bind) loop.run_forever()
def main(): args = get_argparser().parse_args() log_buffer = init_log(args) if os.name == "nt": loop = asyncio.ProactorEventLoop() asyncio.set_event_loop(loop) else: loop = asyncio.get_event_loop() atexit.register(lambda: loop.close()) device_db = DeviceDB(args.device_db) dataset_db = DatasetDB(args.dataset_db) dataset_db.start() atexit.register(lambda: loop.run_until_complete(dataset_db.stop())) if args.git: repo_backend = GitBackend(args.repository) else: repo_backend = FilesystemBackend(args.repository) repository = Repository(repo_backend, device_db.get_device_db, log_worker) atexit.register(repository.close) repository.scan_async() worker_handlers = { "get_device_db": device_db.get_device_db, "get_device": device_db.get, "get_dataset": dataset_db.get, "update_dataset": dataset_db.update, "log": log_worker, } scheduler = Scheduler(get_last_rid() + 1, worker_handlers, repo_backend) worker_handlers["scheduler_submit"] = scheduler.submit scheduler.start() atexit.register(lambda: loop.run_until_complete(scheduler.stop())) server_control = RPCServer( { "master_device_db": device_db, "master_dataset_db": dataset_db, "master_schedule": scheduler, "master_repository": repository, } ) loop.run_until_complete(server_control.start(args.bind, args.port_control)) atexit.register(lambda: loop.run_until_complete(server_control.stop())) server_notify = Publisher( { "schedule": scheduler.notifier, "devices": device_db.data, "datasets": dataset_db.data, "explist": repository.explist, "log": log_buffer.data, } ) loop.run_until_complete(server_notify.start(args.bind, args.port_notify)) atexit.register(lambda: loop.run_until_complete(server_notify.stop())) server_logging = LoggingServer() loop.run_until_complete(server_logging.start(args.bind, args.port_logging)) atexit.register(lambda: loop.run_until_complete(server_logging.stop())) loop.run_forever()
class WandServer:
    def __init__(self):
        self.args = args = get_argparser().parse_args()
        init_logger(args)

        self.config = load_config(args, "_server")
        self.lasers = self.config["lasers"].keys()
        for laser in self.lasers:
            self.config["lasers"][laser]["lock_ready"] = False

        # connect to hardware
        self.wlm = WLM(args.simulation)
        self.osas = OSAs(self.config["osas"], args.simulation)

        self.exp_min = self.wlm.get_exposure_min()
        self.exp_max = self.wlm.get_exposure_max()
        self.num_ccds = self.wlm.get_num_ccds()

        if self.config["switch"]["type"] == "internal":
            self.switch = self.wlm.get_switch()
        elif self.config["switch"]["type"] == "leoni":
            self.switch = LeoniSwitch(
                self.config["switch"]["ip"], args.simulation)
        else:
            raise ValueError("Unrecognised switch type: {}".format(
                self.config["switch"]["type"]))

        # measurement queue, processed by self.measurement_task
        self.measurement_ids = task_id_generator()
        self.measurements_queued = asyncio.Event()
        self.queue = []

        self.wake_locks = {laser: asyncio.Event() for laser in self.lasers}

        # schedule initial frequency/osa readings for all lasers
        self.measurements_queued.set()
        for laser in self.lasers:
            self.queue.append({
                "laser": laser,
                "priority": 0,
                "expiry": time.time(),
                "id": next(self.measurement_ids),
                "get_osa_trace": True,
                "done": asyncio.Event()
            })

        # "notify" interface
        self.laser_db = Notifier(self.config["lasers"])
        self.freq_db = Notifier({laser: {
            "freq": None,
            "status": WLMMeasurementStatus.ERROR,
            "timestamp": 0
        } for laser in self.lasers})
        self.osa_db = Notifier({laser: {
            "trace": None,
            "timestamp": 0
        } for laser in self.lasers})

        self.server_notify = Publisher({
            "laser_db": self.laser_db,  # laser settings
            "freq_db": self.freq_db,  # most recent frequency measurements
            "osa_db": self.osa_db  # most recent osa traces
        })

        # "control" interface
        self.control_interface = ControlInterface(self)
        self.server_control = RPCServer({"control": self.control_interface},
                                        allow_parallel=True)

        self.running = False

    def start(self):
        """ Start the server """
        self.executor = ThreadPoolExecutor(max_workers=2)

        self.loop = loop = asyncio.get_event_loop()
        atexit.register(loop.close)

        # start control server
        bind = bind_address_from_args(self.args)
        loop.run_until_complete(
            self.server_control.start(bind, self.args.port_control))
        atexit_register_coroutine(self.server_control.stop)

        # start notify server
        loop.run_until_complete(
            self.server_notify.start(bind, self.args.port_notify))
        atexit_register_coroutine(self.server_notify.stop)

        asyncio.ensure_future(self.measurement_task())
        for laser in self.lasers:
            asyncio.ensure_future(self.lock_task(laser))

        # backup of configuration file
        backup_config(self.args, "_server")
        asyncio.ensure_future(regular_config_backup(self.args, "_server"))
        atexit.register(backup_config, self.args, "_server")

        logger.info("server started")

        self.running = True
        loop.run_forever()

    async def lock_task(self, laser):
        conf = self.laser_db.raw_view[laser]

        # only try to lock lasers with a controller specified
        if not conf.get("host") or self.args.simulation:
            return

        while self.running:
            conf["lock_ready"] = False
            try:
                iface = DLPro(conf["host"],
                              target=conf.get("target", "laser1"))
            except OSError:
                logger.warning("could not connect to laser '{}', "
                               "retrying in 60s".format(laser))
                if conf["locked"]:
                    self.control_interface.unlock(laser, conf["lock_owner"])
                await asyncio.sleep(60)
                continue

            self.wake_locks[laser].set()
            conf["lock_ready"] = True

            while self.running:
                if not conf["locked"]:
                    await self.wake_locks[laser].wait()
                    self.wake_locks[laser].clear()
                    continue

                poll_time = conf["lock_poll_time"]
                locked_at = conf["locked_at"]
                timeout = conf["lock_timeout"]
                set_point = conf["lock_set_point"]
                gain = conf["lock_gain"]*poll_time
                capture_range = conf["lock_capture_range"]

                await asyncio.wait({self.wake_locks[laser].wait()},
                                   timeout=poll_time)
                self.wake_locks[laser].clear()

                if timeout is not None and \
                        time.time() > (locked_at + timeout):
                    logger.info("'{}' lock timed out".format(laser))
                    self.control_interface.unlock(laser, conf["lock_owner"])
                    await asyncio.sleep(0)
                    continue

                status, delta, _ = await self.control_interface.get_freq(
                    laser, age=0, priority=5, get_osa_trace=False,
                    blocking=True, mute=False, offset_mode=True)

                if status != WLMMeasurementStatus.OKAY:
                    continue

                f_error = delta - set_point
                V_error = f_error * gain

                if abs(f_error) > capture_range:
                    logger.warning("'{}' outside capture range".format(laser))
                    self.control_interface.unlock(laser, conf["lock_owner"])
                    await asyncio.sleep(0)
                    continue

                # don't drive the PZT too far in a single step
                V_error = min(V_error, 0.25)
                V_error = max(V_error, -0.25)

                try:
                    v_pzt = iface.get_pzt_voltage()
                    v_pzt -= V_error
                    if v_pzt > 100 or v_pzt < 25:
                        logger.warning("'{}' lock railed".format(laser))
                        self.control_interface.unlock(laser,
                                                      conf["lock_owner"])
                        await asyncio.sleep(0)
                        continue
                    iface.set_pzt_voltage(v_pzt)
                except OSError:
                    logger.warning("Connection to laser '{}' lost"
                                   .format(laser))
                    self.control_interface.unlock(laser, conf["lock_owner"])
                    await asyncio.sleep(0)
                    break

            try:
                iface.close()
            except Exception:
                pass
            finally:
                conf["lock_ready"] = False

    async def measurement_task(self):
        """ Process queued measurements """
        active_laser = ""
        while True:
            if self.queue == []:
                self.measurements_queued.clear()
                await self.measurements_queued.wait()

            # process in order of priority, followed by submission time
            priorities = [meas["priority"] for meas in self.queue]
            meas = self.queue[priorities.index(max(priorities))]

            laser = meas["laser"]
            laser_conf = self.laser_db.raw_view[laser]

            if laser != active_laser:
                self.switch.set_active_channel(laser_conf["channel"])
                # Switching is slow so we might as well take an OSA trace!
                meas["get_osa_trace"] = True
                active_laser = meas["laser"]
                await asyncio.sleep(self.config["switch"]["dead_time"])

            exposure = laser_conf["exposure"]
            for ccd, exp in enumerate(exposure):
                self.wlm.set_exposure(exposure[ccd], ccd)

            freq_measurement = self.loop.run_in_executor(
                self.executor, self.take_freq_measurement, laser,
                laser_conf["f_ref"])
            osa_measurement = self.loop.run_in_executor(
                self.executor, self.take_osa_measurement, laser,
                laser_conf["osa"], meas["get_osa_trace"])

            wlm_data, osa = await asyncio.gather(freq_measurement,
                                                 osa_measurement)
            freq, peaks = wlm_data

            self.freq_db[laser] = freq
            if meas["get_osa_trace"]:
                self.osa_db[laser] = osa

            # fast mode timeout
            if laser_conf["fast_mode"]:
                t_en = laser_conf["fast_mode_set_at"]
                if time.time() > (t_en + self.args.fast_mode_timeout):
                    self.laser_db[laser]["fast_mode"] = False
                    self.save_config_file()
                    logger.info("{} fast mode timeout".format(laser))

            # auto-exposure
            if laser_conf["auto_exposure"]:
                # copy so the comparison below can detect changes
                new_exp = list(laser_conf["exposure"])
                for ccd, peak in enumerate(peaks):
                    # don't try to find a suitable exposure for lasers that
                    # aren't on!
                    if peak < 0.05:
                        break
                    if not (0.4 < peak < 0.6):
                        exp = laser_conf["exposure"][ccd]
                        new_exp[ccd] = exp + 1 if peak < 0.4 else exp - 1
                        new_exp[ccd] = min(new_exp[ccd], self.exp_max[ccd])
                        new_exp[ccd] = max(new_exp[ccd], self.exp_min[ccd])
                if new_exp != laser_conf["exposure"]:
                    self.laser_db[laser]["exposure"] = new_exp
                    self.save_config_file()

            # check which other measurements wanted this data; iterate over
            # a copy since we remove completed entries as we go
            for task in list(self.queue):
                if task["laser"] == laser \
                        and (meas["get_osa_trace"]
                             or not task["get_osa_trace"]):
                    task["done"].set()
                    self.queue.remove(task)
                    logger.info("task {} complete".format(task["id"]))

    def take_freq_measurement(self, laser, f0):
        """ Perform a single frequency measurement """
        logger.info("Taking new frequency measurement for {}".format(laser))
        status, freq = self.wlm.get_frequency()
        freq = {
            "freq": freq,
            "status": int(status),
            "timestamp": time.time()
        }

        # make simulation data more interesting!
        if self.args.simulation:
            freq["freq"] = f0 + np.random.normal(loc=0, scale=10e6)

        peaks = [self.wlm.get_fringe_peak(ccd)
                 for ccd in range(self.num_ccds)]
        return freq, peaks

    def take_osa_measurement(self, laser, osa, get_osa_trace):
        """ Capture an OSA trace """
        if not get_osa_trace:
            return {}
        osa = {
            "trace": self.osas.get_trace(osa).tolist(),
            "timestamp": time.time()
        }
        return osa

    def save_config_file(self):
        self.config["lasers"] = self.laser_db.raw_view
        config_path, _ = get_config_path(self.args, "_server")
        pyon.store_file(config_path, self.config)

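# A minimal entry-point sketch for WandServer, assuming nothing is needed
# beyond the constructor and start() defined above; the real project may
# wrap this differently.
def main():
    WandServer().start()


if __name__ == "__main__":
    main()
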
def main():
    args = get_argparser().parse_args()
    log_forwarder = init_log(args)
    if os.name == "nt":
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(loop.close)

    bind = bind_address_from_args(args)

    server_broadcast = Broadcaster()
    loop.run_until_complete(server_broadcast.start(
        bind, args.port_broadcast))
    atexit_register_coroutine(server_broadcast.stop)

    log_forwarder.callback = (lambda msg:
                              server_broadcast.broadcast("log", msg))

    def ccb_issue(service, *args, **kwargs):
        msg = {
            "service": service,
            "args": args,
            "kwargs": kwargs
        }
        server_broadcast.broadcast("ccb", msg)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit_register_coroutine(dataset_db.stop)

    worker_handlers = dict()

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    experiment_db = ExperimentDB(repo_backend, worker_handlers)
    atexit.register(experiment_db.close)

    scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db)
    scheduler.start()
    atexit_register_coroutine(scheduler.stop)

    worker_handlers.update({
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "scheduler_submit": scheduler.submit,
        "scheduler_delete": scheduler.delete,
        "scheduler_request_termination": scheduler.request_termination,
        "scheduler_get_status": scheduler.get_status,
        "scheduler_check_pause": scheduler.check_pause,
        "ccb_issue": ccb_issue,
    })
    experiment_db.scan_repository_async()

    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_experiment_db": experiment_db
    }, allow_parallel=True)
    loop.run_until_complete(server_control.start(
        bind, args.port_control))
    atexit_register_coroutine(server_control.stop)

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": experiment_db.explist,
        "explist_status": experiment_db.status
    })
    loop.run_until_complete(server_notify.start(
        bind, args.port_notify))
    atexit_register_coroutine(server_notify.stop)

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(
        bind, args.port_logging))
    atexit_register_coroutine(server_logging.stop)

    print("ARTIQ master is now ready.")
    loop.run_forever()

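# A hedged client-side sketch: the control RPC targets registered above
# (e.g. "master_schedule") can be reached with the synchronous pc_rpc
# Client. The import path and the default port 3251 are assumptions;
# adjust them to the ARTIQ version and --port-control value in use.
from artiq.protocols.pc_rpc import Client


def print_schedule_status(host="::1", port=3251):
    schedule = Client(host, port, "master_schedule")
    try:
        # Scheduler.get_status is exposed via the "master_schedule" target
        print(schedule.get_status())
    finally:
        schedule.close_rpc()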