def setUp(self):
    """Materialize DUMMY_DDB_FILE into a temporary .py module and load it.

    The tempfile is created with delete=False so DeviceDB can re-open it
    by name; tearDown is expected to unlink it.
    """
    self.ddb_file = tempfile.NamedTemporaryFile(mode="w+", suffix=".py",
                                                delete=False)
    # equivalent to print(..., file=..., flush=True): newline + flush
    self.ddb_file.write(DUMMY_DDB_FILE + "\n")
    self.ddb_file.flush()
    self.ddb = DeviceDB(self.ddb_file.name)
def main():
    """Entry point: fetch/read an RTIO analyzer dump and print/convert it.

    At least one of -p (print decoded), -w (write VCD) or -d (write raw
    dump) must be selected; otherwise the tool exits with status 1.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    # refuse to run with no output action selected
    if (not args.print_decoded and args.write_vcd is None
            and args.write_dump is None):
        print("No action selected, use -p, -w and/or -d. See -h for help.")
        sys.exit(1)
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    try:
        if args.read_dump:
            # offline mode: load a previously saved raw dump
            with open(args.read_dump, "rb") as f:
                dump = f.read()
        else:
            # online mode: pull the dump from the core device via "comm"
            comm = device_mgr.get("comm")
            dump = comm.get_analyzer_dump()
        decoded_dump = decode_dump(dump)
        if args.print_decoded:
            print("Log channel:", decoded_dump.log_channel)
            print("DDS one-hot:", decoded_dump.dds_onehot_sel)
            for message in decoded_dump.messages:
                print(message)
        if args.write_vcd:
            # VCD conversion needs the device DB to name channels
            with open(args.write_vcd, "w") as f:
                decoded_dump_to_vcd(f, device_mgr.get_device_db(),
                                    decoded_dump)
        if args.write_dump:
            # save the raw (undecoded) dump for later offline analysis
            with open(args.write_dump, "wb") as f:
                f.write(dump)
    finally:
        device_mgr.close_devices()
def main():
    """Entry point: read/write/delete/erase core device flash storage keys.

    Dispatches on args.action; values written from --string are UTF-8
    encoded, values from --file are written as raw bytes.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    try:
        comm = device_mgr.get("core").comm
        # verify we are talking to a compatible core device first
        comm.check_system_info()
        if args.action == "read":
            value = comm.flash_storage_read(args.key)
            if not value:
                # empty result means the key is absent
                print("Key {} does not exist".format(args.key))
            else:
                print(value)
        elif args.action == "write":
            for key, value in args.string:
                comm.flash_storage_write(key, value.encode("utf-8"))
            for key, filename in args.file:
                with open(filename, "rb") as fi:
                    comm.flash_storage_write(key, fi.read())
        elif args.action == "delete":
            for key in args.key:
                comm.flash_storage_remove(key)
        elif args.action == "erase":
            comm.flash_storage_erase()
    finally:
        device_mgr.close_devices()
def main():
    """Testbench harness: run (or only compile) a kernel test case file.

    Usage: [+diag] [+compile] <testcase.py>
    +diag     suppress exit(1) on CompileError (diagnostics mode)
    +compile  compile the entrypoint kernel without running it
    The device DB is expected next to the test case as device_db.py.
    """
    # consume the optional "+diag" flag from argv (order matters:
    # flags are positional and stripped before the filename is read)
    if len(sys.argv) > 1 and sys.argv[1] == "+diag":
        del sys.argv[1]
        diag = True
    else:
        diag = False
    # consume the optional "+compile" flag
    if len(sys.argv) > 1 and sys.argv[1] == "+compile":
        del sys.argv[1]
        compile_only = True
    else:
        compile_only = False
    ddb_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
    dmgr = DeviceManager(DeviceDB(ddb_path))
    with open(sys.argv[1]) as f:
        # NOTE: exec of a local, trusted test case file — not untrusted input
        testcase_code = compile(f.read(), f.name, "exec")
        testcase_vars = {'__name__': 'testbench', 'dmgr': dmgr}
        exec(testcase_code, testcase_vars)
    try:
        core = dmgr.get("core")
        if compile_only:
            core.compile(testcase_vars["entrypoint"], (), {})
        else:
            core.run(testcase_vars["entrypoint"], (), {})
            print(core.comm.get_log())
            core.comm.clear_log()
    except CompileError as error:
        # in +diag mode the error output is left to the compiler
        # diagnostics; otherwise fail the test run
        if not diag:
            exit(1)
def run(with_file=False):
    """Run a single experiment standalone (prepare/run/analyze).

    :param with_file: forwarded to get_argparser; when True the parser
        accepts an experiment file argument.
    Results are written to HDF5 when --hdf5 is given, otherwise printed,
    and the dataset DB is persisted.
    """
    args = get_argparser(with_file).parse_args()
    init_logger(args)
    # a dummy scheduler stands in for the master's scheduler virtual device
    device_mgr = DeviceManager(DeviceDB(args.device_db),
                               virtual_devices={"scheduler":
                                                DummyScheduler()})
    dataset_db = DatasetDB(args.dataset_db)
    dataset_mgr = DatasetManager(dataset_db)
    try:
        exp_inst = _build_experiment(device_mgr, dataset_mgr, args)
        exp_inst.prepare()
        exp_inst.run()
        exp_inst.analyze()
    except CompileError as error:
        # compile errors already produced diagnostics; exit quietly
        return
    except Exception as exn:
        # surface the core-device-side traceback when present, then re-raise
        if hasattr(exn, "artiq_core_exception"):
            print(exn.artiq_core_exception, file=sys.stderr)
        raise exn
    finally:
        device_mgr.close_devices()
    if args.hdf5 is not None:
        with h5py.File(args.hdf5, "w") as f:
            dataset_mgr.write_hdf5(f)
    else:
        # no HDF5 target: dump local datasets, sorted by key, to stdout
        for k, v in sorted(dataset_mgr.local.items(), key=itemgetter(0)):
            print("{}: {}".format(k, v))
    dataset_db.save()
def setUp(self):
    """Build device/dataset DBs from artiq_root plus their managers."""
    device_db_path = os.path.join(artiq_root, "device_db.pyon")
    dataset_db_path = os.path.join(artiq_root, "dataset_db.pyon")
    self.device_db = DeviceDB(device_db_path)
    self.dataset_db = DatasetDB(dataset_db_path)
    virtual = {"scheduler": DummyScheduler()}
    self.device_mgr = DeviceManager(self.device_db,
                                    virtual_devices=virtual)
    self.dataset_mgr = DatasetManager(self.dataset_db)
def main():
    """Entry point: core device log access and flash-storage management.

    Dispatches on args.action: log, cfg-read, cfg-write, cfg-delete,
    cfg-erase. Devices are always closed on exit.
    """
    args = get_argparser().parse_args()
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    try:
        comm = device_mgr.get("comm")
        action = args.action
        if action == "log":
            print(comm.get_log())
        elif action == "cfg-read":
            value = comm.flash_storage_read(args.key)
            if value:
                print(value)
            else:
                # empty result means the key is absent
                print("Key {} does not exist".format(args.key))
        elif action == "cfg-write":
            for key, value in args.string:
                comm.flash_storage_write(key, value)
            for key, filename in args.file:
                with open(filename, "rb") as fi:
                    contents = fi.read()
                comm.flash_storage_write(key, contents)
        elif action == "cfg-delete":
            for key in args.key:
                comm.flash_storage_remove(key)
        elif action == "cfg-erase":
            comm.flash_storage_erase()
    finally:
        device_mgr.close_devices()
def main():
    """Entry point: fetch/read an RTIO analyzer dump and print/convert it.

    At least one of -p (print decoded), -w (write VCD) or -d (write raw
    dump) must be selected; otherwise the tool exits with status 1.
    """
    args = get_argparser().parse_args()
    common_args.init_logger_from_args(args)
    # refuse to run with no output action selected
    if (not args.print_decoded and args.write_vcd is None
            and args.write_dump is None):
        print("No action selected, use -p, -w and/or -d. See -h for help.")
        sys.exit(1)
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    # fix: the original never closed the device manager; wrap in
    # try/finally so devices are released on every exit path (same
    # pattern as the comm-based analyzer main elsewhere in this project)
    try:
        if args.read_dump:
            # offline mode: load a previously saved raw dump
            with open(args.read_dump, "rb") as f:
                dump = f.read()
        else:
            # online mode: pull the dump directly from the core device
            core_addr = device_mgr.get_desc("core")["arguments"]["host"]
            dump = get_analyzer_dump(core_addr)
        decoded_dump = decode_dump(dump)
        if args.print_decoded:
            print("Log channel:", decoded_dump.log_channel)
            print("DDS one-hot:", decoded_dump.dds_onehot_sel)
            for message in decoded_dump.messages:
                print(message)
        if args.write_vcd:
            # VCD conversion needs the device DB to name channels
            with open(args.write_vcd, "w") as f:
                decoded_dump_to_vcd(f, device_mgr.get_device_db(),
                                    decoded_dump,
                                    uniform_interval=args.vcd_uniform_interval)
        if args.write_dump:
            # save the raw (undecoded) dump for later offline analysis
            with open(args.write_dump, "wb") as f:
                f.write(dump)
    finally:
        device_mgr.close_devices()
def main():
    """Entry point for the master process: start DBs, scheduler, servers.

    Startup order matters: the event loop first, then the dataset DB,
    repository, scheduler, and finally the control/notify/logging
    servers. Shutdown is the reverse order via LIFO atexit handlers.
    """
    args = get_argparser().parse_args()
    log_buffer = init_log(args)
    # Windows needs the proactor loop for subprocess support
    if os.name == "nt":
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(lambda: loop.close())
    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit.register(lambda: loop.run_until_complete(dataset_db.stop()))
    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    repository = Repository(repo_backend, device_db.get_device_db,
                            log_worker)
    atexit.register(repository.close)
    repository.scan_async()
    # RPC-style handlers exposed to experiment worker processes
    worker_handlers = {
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "log": log_worker
    }
    # resume RID numbering from the last persisted run
    scheduler = Scheduler(get_last_rid() + 1, worker_handlers, repo_backend)
    worker_handlers["scheduler_submit"] = scheduler.submit
    scheduler.start()
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))
    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_repository": repository
    })
    loop.run_until_complete(server_control.start(args.bind,
                                                 args.port_control))
    atexit.register(lambda: loop.run_until_complete(server_control.stop()))
    # publish live state (schedule, devices, datasets, explist, log)
    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": repository.explist,
        "log": log_buffer.data
    })
    loop.run_until_complete(server_notify.start(args.bind,
                                                args.port_notify))
    atexit.register(lambda: loop.run_until_complete(server_notify.stop()))
    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(args.bind,
                                                 args.port_logging))
    atexit.register(lambda: loop.run_until_complete(server_logging.stop()))
    loop.run_forever()
def main():
    """Entry point: manage a core device (log, config, reboot, debug).

    The target is either --device, or the "core" entry of the device DB
    (aliases resolved). Dispatches on args.tool then args.action.
    """
    args = get_argparser().parse_args()
    common_args.init_logger_from_args(args)
    if args.device is None:
        ddb = DeviceDB(args.device_db)
        core_addr = ddb.get("core", resolve_alias=True)["arguments"]["host"]
    else:
        core_addr = args.device
    mgmt = CommMgmt(core_addr)
    # NOTE(review): mgmt is never closed here while the older mgmt tools
    # close it in a finally block — confirm whether CommMgmt needs it.
    if args.tool == "log":
        # actions are mutually exclusive subcommands, so chain with elif;
        # fixed: compare against None with "is", not "==".
        if args.action == "set_level":
            mgmt.set_log_level(args.level)
        elif args.action == "set_uart_level":
            mgmt.set_uart_log_level(args.level)
        elif args.action == "clear":
            mgmt.clear_log()
        elif args.action is None:
            # bare "log" tool: dump the current log buffer
            print(mgmt.get_log(), end="")
    elif args.tool == "config":
        if args.action == "read":
            value = mgmt.config_read(args.key)
            if not value:
                print("Key {} does not exist".format(args.key))
            else:
                print(value)
        elif args.action == "write":
            for key, value in args.string:
                mgmt.config_write(key, value.encode("utf-8"))
            for key, filename in args.file:
                with open(filename, "rb") as fi:
                    mgmt.config_write(key, fi.read())
        elif args.action == "remove":
            for key in args.key:
                mgmt.config_remove(key)
        elif args.action == "erase":
            mgmt.config_erase()
    elif args.tool == "reboot":
        mgmt.reboot()
    elif args.tool == "debug":
        if args.action == "allocator":
            mgmt.debug_allocator()
def main():
    """Run the full SinaraTester suite against ./device_db.py."""
    device_mgr = DeviceManager(DeviceDB("device_db.py"))
    try:
        # SinaraTester takes the (device_mgr, dataset_mgr, argument_mgr,
        # scheduler_defaults) managers tuple; only devices are needed here
        tester = SinaraTester((device_mgr, None, None, None))
        tester.prepare()
        tester.run()
        tester.analyze()
    finally:
        device_mgr.close_devices()
def test_no_device_db_in_file(self):
    """A device DB module that defines no ``device_db`` raises KeyError."""
    with tempfile.NamedTemporaryFile(mode="w+", suffix=".py",
                                     delete=False) as f:
        # fix: removed stray debug print(f.name) that polluted test output
        # write an (effectively) empty module: device_db is never defined
        print("", file=f, flush=True)
        with self.assertRaisesRegex(KeyError, "device_db"):
            DeviceDB(f.name)
        os.unlink(f.name)
def main():
    """Print the core device log buffer to stdout."""
    args = get_argparser().parse_args()
    init_logger(args)
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    try:
        comm = device_mgr.get("comm")
        # verify we are talking to a compatible core device first
        comm.check_system_info()
        log_text = comm.get_log()
        # the log already carries its own newlines
        print(log_text, end="")
    finally:
        device_mgr.close_devices()
def main():
    """Run a core device debug action (currently only "allocator")."""
    args = get_argparser().parse_args()
    init_logger(args)
    ddb = DeviceDB(args.device_db)
    core_addr = ddb.get("core")["arguments"]["host"]
    mgmt = CommMgmt(core_addr)
    try:
        # guard clause: bail out early when no action was given
        if args.action != "allocator":
            print("An action needs to be specified.", file=sys.stderr)
            sys.exit(1)
        mgmt.debug_allocator()
    finally:
        mgmt.close()
def test_import_same_level(self):
    """A device DB module may import another module in its own directory.

    fileA holds the dummy device DB; fileB imports it by stem and adds
    an alias, and DeviceDB(fileB) must resolve that alias to fileA's
    "core" entry.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        # make sure both files land in the same directory
        # tempfiles are cleanup together with tmpdir
        args = dict(mode="w+", suffix=".py", dir=tmpdir, delete=False)
        with tempfile.NamedTemporaryFile(
                **args) as fileA, tempfile.NamedTemporaryFile(
                **args) as fileB:
            print(DUMMY_DDB_FILE, file=fileA, flush=True)
            # fileB imports fileA by its module stem (same-level import)
            print(
                f"""
from {Path(fileA.name).stem} import device_db
device_db["new_core_alias"] = "core"
""",
                file=fileB,
                flush=True,
            )
            ddb = DeviceDB(fileB.name)
            self.assertEqual(
                ddb.get("new_core_alias", resolve_alias=True),
                DeviceDB(fileA.name).get("core"),
            )
def main():
    """Reboot the core device or hotswap its firmware image."""
    args = get_argparser().parse_args()
    init_logger(args)
    ddb = DeviceDB(args.device_db)
    mgmt = CommMgmt(ddb.get("core")["arguments"]["host"])
    try:
        if args.action == "reboot":
            mgmt.reboot()
        elif args.action == "hotswap":
            image = args.image.read()
            mgmt.hotswap(image)
        else:
            print("An action needs to be specified.", file=sys.stderr)
            sys.exit(1)
    finally:
        mgmt.close()
def main():
    """Manage core device log levels, or dump the log by default."""
    args = get_argparser().parse_args()
    init_logger(args)
    core_host = DeviceDB(args.device_db).get("core")["arguments"]["host"]
    mgmt = CommMgmt(core_host)
    try:
        action = args.action
        if action == "set_level":
            mgmt.set_log_level(args.level)
        elif action == "set_uart_level":
            mgmt.set_uart_log_level(args.level)
        elif action == "clear":
            mgmt.clear_log()
        else:
            # no action: print the current log buffer (has own newlines)
            print(mgmt.get_log(), end="")
    finally:
        mgmt.close()
def main():
    """Run a selection of SinaraTester tests (--exclude/--only/all)."""
    all_tests = SinaraTester.available_tests()
    args = get_argparser(all_tests).parse_args()
    if args.exclude is not None:
        # don't use set in order to keep the order
        excluded = args.exclude
        tests = [t for t in all_tests if t not in excluded]
    elif args.only is not None:
        tests = args.only
    else:
        tests = all_tests
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    try:
        tester = SinaraTester((device_mgr, None, None, None))
        tester.prepare()
        tester.run(tests)
        tester.analyze()
    finally:
        device_mgr.close_devices()
def main():
    """Control the core device profiler: start, stop, or save a profile."""
    args = get_argparser().parse_args()
    init_logger(args)
    core_host = DeviceDB(args.device_db).get("core")["arguments"]["host"]
    mgmt = CommMgmt(core_host)
    try:
        action = args.action
        if action == "start":
            mgmt.start_profiler(args.interval, args.hits_size,
                                args.edges_size)
        elif action == "stop":
            mgmt.stop_profiler()
        elif action == "save":
            hits, edges = mgmt.get_profile()
            compress = not args.no_compression
            writer = CallgrindWriter(args.output, args.firmware, compress)
            writer.header()
            for addr, count in hits.items():
                writer.hit(addr, count)
            for edge, count in edges.items():
                caller, callee = edge
                writer.edge(caller, callee, count)
    finally:
        mgmt.close()
def main():
    """Entry point: compile an experiment kernel to a standalone ELF.

    The experiment's run() must be a kernel and must not use RPC, since
    the result is flashed/run without a host connection.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    dataset_mgr = DatasetManager(DatasetDB(args.dataset_db))
    try:
        module = file_import(args.file, prefix="artiq_run_")
        exp = get_experiment(module, args.experiment)
        arguments = parse_arguments(args.arguments)
        argument_mgr = ProcessArgumentManager(arguments)
        exp_inst = exp((device_mgr, dataset_mgr, argument_mgr))
        if not hasattr(exp.run, "artiq_embedded"):
            raise ValueError("Experiment entry point must be a kernel")
        core_name = exp.run.artiq_embedded.core_name
        core = getattr(exp_inst, core_name)
        # no attribute writeback / RPC machinery: the ELF runs standalone
        object_map, kernel_library, _, _ = \
            core.compile(exp.run, [exp_inst], {},
                         attribute_writeback=False, print_as_rpc=False)
    except CompileError as error:
        # diagnostics already printed by the compiler; the early return
        # also guarantees object_map/kernel_library are bound below
        return
    finally:
        device_mgr.close_devices()
    if object_map.has_rpc():
        raise ValueError("Experiment must not use RPC")
    output = args.output
    if output is None:
        # default output name: experiment file with .elf extension
        basename, ext = os.path.splitext(args.file)
        output = "{}.elf".format(basename)
    with open(output, "wb") as f:
        f.write(kernel_library)
def main():
    """Entry point: compile an experiment kernel to a standalone ELF
    (legacy k_function_info kernel API).

    The experiment's run() must be a kernel and must not use RPC.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    dataset_mgr = DatasetManager(DatasetDB(args.dataset_db))
    try:
        module = file_import(args.file)
        exp = get_experiment(module, args.experiment)
        arguments = parse_arguments(args.arguments)
        exp_inst = exp(device_mgr, dataset_mgr, **arguments)
        if (not hasattr(exp.run, "k_function_info")
                or not exp.run.k_function_info):
            raise ValueError("Experiment entry point must be a kernel")
        core_name = exp.run.k_function_info.core_name
        core = getattr(exp_inst, core_name)
        # no attribute writeback: the binary runs without a host connection
        binary, rpc_map, _ = core.compile(exp.run.k_function_info.k_function,
                                          [exp_inst], {},
                                          with_attr_writeback=False)
    finally:
        device_mgr.close_devices()
    if rpc_map:
        raise ValueError("Experiment must not use RPC")
    output = args.output
    if output is None:
        # default output name: experiment file with .py swapped for .elf
        output = args.file
        if output.endswith(".py"):
            output = output[:-3]
        output += ".elf"
    with open(output, "wb") as f:
        f.write(binary)
def run(with_file=False):
    """Run a single experiment standalone (prepare/run/analyze).

    :param with_file: forwarded to get_argparser; when True the parser
        accepts an experiment file argument.
    Results are written to HDF5 when --hdf5 is given, otherwise printed,
    and the dataset DB is persisted.
    """
    args = get_argparser(with_file).parse_args()
    init_logger(args)
    # a dummy scheduler stands in for the master's scheduler virtual device
    device_mgr = DeviceManager(DeviceDB(args.device_db),
                               virtual_devices={"scheduler":
                                                DummyScheduler()})
    dataset_db = DatasetDB(args.dataset_db)
    dataset_mgr = DatasetManager(dataset_db)
    try:
        exp_inst = _build_experiment(device_mgr, dataset_mgr, args)
        exp_inst.prepare()
        exp_inst.run()
        exp_inst.analyze()
    finally:
        device_mgr.close_devices()
    if args.hdf5 is not None:
        with h5py.File(args.hdf5, "w") as f:
            dataset_mgr.write_hdf5(f)
    else:
        # no HDF5 target: dump local datasets, sorted by key, to stdout
        for k, v in sorted(dataset_mgr.local.items(), key=itemgetter(0)):
            print("{}: {}".format(k, v))
    dataset_db.save()
class TestDeviceDB(unittest.TestCase):
    """Tests for DeviceDB: lookup, aliases, rescanning, raw export."""

    def setUp(self):
        # materialize DUMMY_DDB_FILE into a temp .py module; delete=False
        # so DeviceDB can re-open it by name (tearDown unlinks it)
        self.ddb_file = tempfile.NamedTemporaryFile(mode="w+", suffix=".py",
                                                    delete=False)
        print(DUMMY_DDB_FILE, file=self.ddb_file, flush=True)
        self.ddb = DeviceDB(self.ddb_file.name)

    def tearDown(self):
        self.ddb_file.close()
        os.unlink(self.ddb_file.name)

    def test_get(self):
        # plain lookup returns the entry dict
        core = self.ddb.get("core")
        self.assertEqual(core["class"], "Core")

    def test_get_alias(self):
        # without resolve_alias, an alias entry is the target's name (a str)
        with self.assertRaises(TypeError):
            # str indexing on str
            self.ddb.get("core_alias")["class"]
        self.assertEqual(self.ddb.get("core_alias", resolve_alias=True),
                         self.ddb.get("core"))

    def test_get_unresolved_alias(self):
        # alias chain ending at a missing entry raises KeyError on it
        with self.assertRaisesRegex(KeyError, "dummy"):
            self.ddb.get("unresolved_alias", resolve_alias=True)

    def test_update(self):
        # appending to the backing file + scan() picks up new entries
        with self.assertRaises(KeyError):
            self.ddb.get("core_log")
        update = """
device_db["core_log"] = {
    "type": "controller",
    "host": "::1",
    "port": 1068,
    "command": "aqctl_corelog -p {port} --bind {bind} ::1",
}"""
        print(update, file=self.ddb_file, flush=True)
        self.ddb.scan()
        self.assertEqual(self.ddb.get("core_log")["type"], "controller")

    def test_get_ddb(self):
        # get_device_db() must equal the raw device_db dict in the module
        ddb = self.ddb.get_device_db()
        raw = file_import(self.ddb_file.name).device_db
        self.assertEqual(ddb, raw)
def main():
    """Entry point: core device management mega-dispatch (log, config,
    reboot, hotswap, profile, debug).

    Dispatches on args.tool then args.action; devices opened through the
    device manager are always closed on exit.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    # fix: parse the device DB once and share it — the original built a
    # second DeviceDB(args.device_db) just to read the core address
    ddb = DeviceDB(args.device_db)
    device_mgr = DeviceManager(ddb)
    try:
        core_addr = ddb.get("core")["arguments"]["host"]
        mgmt = CommMgmt(core_addr)
        # tools/actions are mutually exclusive subcommands, so chain with
        # elif; fixed: compare against None with "is", not "==".
        if args.tool == "log":
            if args.action == "set_level":
                mgmt.set_log_level(args.level)
            elif args.action == "set_uart_level":
                mgmt.set_uart_log_level(args.level)
            elif args.action == "clear":
                mgmt.clear_log()
            elif args.action is None:
                # bare "log" tool: dump the current log buffer
                print(mgmt.get_log(), end="")
        elif args.tool == "config":
            if args.action == "read":
                value = mgmt.config_read(args.key)
                if not value:
                    print("Key {} does not exist".format(args.key))
                else:
                    print(value)
            elif args.action == "write":
                for key, value in args.string:
                    mgmt.config_write(key, value.encode("utf-8"))
                for key, filename in args.file:
                    with open(filename, "rb") as fi:
                        mgmt.config_write(key, fi.read())
            elif args.action == "remove":
                for key in args.key:
                    mgmt.config_remove(key)
            elif args.action == "erase":
                mgmt.config_erase()
        elif args.tool == "reboot":
            mgmt.reboot()
        elif args.tool == "hotswap":
            mgmt.hotswap(args.image.read())
        elif args.tool == "profile":
            if args.action == "start":
                mgmt.start_profiler(args.interval, args.hits_size,
                                    args.edges_size)
            elif args.action == "stop":
                mgmt.stop_profiler()
            elif args.action == "save":
                hits, edges = mgmt.get_profile()
                writer = CallgrindWriter(args.output, args.firmware,
                                         "or1k-linux", args.compression,
                                         args.demangle)
                writer.header()
                for addr, count in hits.items():
                    writer.hit(addr, count)
                for (caller, callee), count in edges.items():
                    writer.edge(caller, callee, count)
        elif args.tool == "debug":
            if args.action == "allocator":
                mgmt.debug_allocator()
    finally:
        device_mgr.close_devices()
def get_device_db(self):
    """Load the device DB shipped under the artiq_root test tree."""
    path = os.path.join(artiq_root, "device_db.pyon")
    return DeviceDB(path)
def main():
    """Entry point for the master process: start DBs, scheduler, servers.

    Startup order matters: event loop and signal handling first, then the
    broadcaster, DBs, repository, scheduler, and finally the control /
    notify / logging servers. Shutdown runs in reverse via LIFO atexit
    handlers and atexit_register_coroutine.
    """
    args = get_argparser().parse_args()
    log_forwarder = init_log(args)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    atexit.register(loop.close)
    signal_handler = SignalHandler()
    signal_handler.setup()
    atexit.register(signal_handler.teardown)
    bind = common_args.bind_address_from_args(args)
    server_broadcast = Broadcaster()
    loop.run_until_complete(server_broadcast.start(
        bind, args.port_broadcast))
    atexit_register_coroutine(server_broadcast.stop)
    # forward log records to connected clients
    log_forwarder.callback = (lambda msg:
                              server_broadcast.broadcast("log", msg))

    def ccb_issue(service, *args, **kwargs):
        # relay client control broadcasts (e.g. applet requests)
        msg = {
            "service": service,
            "args": args,
            "kwargs": kwargs
        }
        server_broadcast.broadcast("ccb", msg)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit_register_coroutine(dataset_db.stop)
    # filled in below, after the scheduler exists; passed by reference
    worker_handlers = dict()
    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    experiment_db = ExperimentDB(
        repo_backend, worker_handlers, args.experiment_subdir)
    atexit.register(experiment_db.close)
    scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db,
                          args.log_submissions)
    scheduler.start()
    atexit_register_coroutine(scheduler.stop)
    config = MasterConfig(args.name)
    # RPC-style handlers exposed to experiment worker processes
    worker_handlers.update({
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "scheduler_submit": scheduler.submit,
        "scheduler_delete": scheduler.delete,
        "scheduler_request_termination": scheduler.request_termination,
        "scheduler_get_status": scheduler.get_status,
        "scheduler_check_pause": scheduler.check_pause,
        "scheduler_check_termination": scheduler.check_termination,
        "ccb_issue": ccb_issue,
    })
    experiment_db.scan_repository_async()
    server_control = RPCServer({
        "master_config": config,
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_experiment_db": experiment_db
    }, allow_parallel=True)
    loop.run_until_complete(server_control.start(
        bind, args.port_control))
    atexit_register_coroutine(server_control.stop)
    # publish live state (schedule, devices, datasets, experiment list)
    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": experiment_db.explist,
        "explist_status": experiment_db.status
    })
    loop.run_until_complete(server_notify.start(
        bind, args.port_notify))
    atexit_register_coroutine(server_notify.stop)
    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(
        bind, args.port_logging))
    atexit_register_coroutine(server_logging.stop)
    print("ARTIQ master is now ready.")
    # block until a termination signal arrives
    loop.run_until_complete(signal_handler.wait_terminate())
class ControllerCase(unittest.TestCase):
    """Base test case that starts/stops device controller subprocesses
    described in the artiq_root device DB."""

    def setUp(self):
        self.device_db = DeviceDB(os.path.join(artiq_root, "device_db.pyon"))
        self.device_mgr = DeviceManager(self.device_db)
        # name -> (device DB entry, subprocess.Popen)
        self.controllers = {}

    def tearDown(self):
        self.device_mgr.close_devices()
        # iterate over a copy: stop_controller mutates self.controllers
        for name in list(self.controllers):
            self.stop_controller(name)

    def start_controller(self, name, sleep=1):
        """Launch the controller `name` from its device DB command line.

        Skips the test when the entry is missing; sleeps to give the
        controller time to come up before the test talks to it.
        """
        if name in self.controllers:
            raise ValueError("controller `{}` already started".format(name))
        try:
            entry = self.device_db.get(name)
        except KeyError:
            raise unittest.SkipTest(
                "controller `{}` not found".format(name))
        # substitute {name}/{bind}/{port} placeholders in the command
        entry["command"] = entry["command"].format(
            name=name, bind=entry["host"], port=entry["port"])
        proc = subprocess.Popen(shlex.split(entry["command"]))
        self.controllers[name] = entry, proc
        time.sleep(sleep)

    def stop_controller(self, name, default_timeout=1):
        """Stop controller `name`, escalating: RPC terminate -> SIGTERM
        -> SIGKILL, waiting up to the entry's term_timeout at each step.

        The bookkeeping entry is removed even if every attempt fails.
        """
        desc, proc = self.controllers[name]
        t = desc.get("term_timeout", default_timeout)
        target_name = desc.get("target_name", None)
        if target_name is None:
            target_name = AutoTarget
        try:
            try:
                # polite path: ask the controller to terminate over RPC
                client = Client(desc["host"], desc["port"], target_name, t)
                try:
                    client.terminate()
                finally:
                    client.close_rpc()
                proc.wait(t)
                return
            except (socket.timeout, subprocess.TimeoutExpired):
                logger.warning("Controller %s failed to exit on request",
                               name)
            # escalation 1: SIGTERM (process may already be gone)
            try:
                proc.terminate()
            except ProcessLookupError:
                pass
            try:
                proc.wait(t)
                return
            except subprocess.TimeoutExpired:
                logger.warning("Controller %s failed to exit on terminate",
                               name)
            # escalation 2: SIGKILL
            try:
                proc.kill()
            except ProcessLookupError:
                pass
            try:
                proc.wait(t)
                return
            except subprocess.TimeoutExpired:
                logger.warning("Controller %s failed to die on kill", name)
        finally:
            del self.controllers[name]
def main():
    """Entry point for the master process: start DBs, scheduler, servers.

    Startup order matters: event loop first, then broadcaster, DBs,
    repository, scheduler, and finally the control / notify / logging
    servers. Shutdown runs in reverse via LIFO atexit handlers and
    atexit_register_coroutine.
    """
    args = get_argparser().parse_args()
    log_forwarder = init_log(args)
    # Windows needs the proactor loop for subprocess support
    if os.name == "nt":
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(loop.close)
    bind = bind_address_from_args(args)
    server_broadcast = Broadcaster()
    loop.run_until_complete(server_broadcast.start(
        bind, args.port_broadcast))
    atexit_register_coroutine(server_broadcast.stop)
    # forward log records to connected clients
    log_forwarder.callback = (lambda msg:
                              server_broadcast.broadcast("log", msg))

    def ccb_issue(service, *args, **kwargs):
        # relay client control broadcasts (e.g. applet requests)
        msg = {
            "service": service,
            "args": args,
            "kwargs": kwargs
        }
        server_broadcast.broadcast("ccb", msg)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit_register_coroutine(dataset_db.stop)
    # filled in below, after the scheduler exists; passed by reference
    worker_handlers = dict()
    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    experiment_db = ExperimentDB(repo_backend, worker_handlers)
    atexit.register(experiment_db.close)
    scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db)
    scheduler.start()
    atexit_register_coroutine(scheduler.stop)
    # RPC-style handlers exposed to experiment worker processes
    worker_handlers.update({
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "scheduler_submit": scheduler.submit,
        "scheduler_delete": scheduler.delete,
        "scheduler_request_termination": scheduler.request_termination,
        "scheduler_get_status": scheduler.get_status,
        "scheduler_check_pause": scheduler.check_pause,
        "ccb_issue": ccb_issue,
    })
    experiment_db.scan_repository_async()
    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_experiment_db": experiment_db
    }, allow_parallel=True)
    loop.run_until_complete(server_control.start(
        bind, args.port_control))
    atexit_register_coroutine(server_control.stop)
    # publish live state (schedule, devices, datasets, experiment list)
    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": experiment_db.explist,
        "explist_status": experiment_db.status
    })
    loop.run_until_complete(server_notify.start(
        bind, args.port_notify))
    atexit_register_coroutine(server_notify.stop)
    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(
        bind, args.port_logging))
    atexit_register_coroutine(server_logging.stop)
    logger.info("running, bound to %s", bind)
    loop.run_forever()
def setUp(self):
    """Create the device DB/manager and an empty controller registry."""
    ddb_path = os.path.join(artiq_root, "device_db.pyon")
    self.device_db = DeviceDB(ddb_path)
    self.device_mgr = DeviceManager(self.device_db)
    self.controllers = {}