def main():
    """Command-line tool: read/write/delete/erase keys in the core device's
    flash storage, talking to the "comm" device from the device DB."""
    args = get_argparser().parse_args()
    init_logger(args)
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    try:
        comm = device_mgr.get("comm")
        comm.check_ident()  # verify we are talking to a compatible device
        if args.action == "read":
            value = comm.flash_storage_read(args.key)
            if not value:
                # an empty result is treated as "key missing"
                print("Key {} does not exist".format(args.key))
            else:
                print(value)
        elif args.action == "write":
            # string values from the CLI are stored as UTF-8 bytes
            for key, value in args.string:
                comm.flash_storage_write(key, value.encode("utf-8"))
            # file contents are stored verbatim
            for key, filename in args.file:
                with open(filename, "rb") as fi:
                    comm.flash_storage_write(key, fi.read())
        elif args.action == "delete":
            for key in args.key:
                comm.flash_storage_remove(key)
        elif args.action == "erase":
            comm.flash_storage_erase()
    finally:
        device_mgr.close_devices()
class ExperimentCase(unittest.TestCase):
    """Base class for tests that run whole experiments against the hardware
    testbench databases rooted at ``artiq_root``."""

    def setUp(self):
        ddb_path = os.path.join(artiq_root, "device_db.pyon")
        dsdb_path = os.path.join(artiq_root, "dataset_db.pyon")
        self.device_db = DeviceDB(ddb_path)
        self.dataset_db = DatasetDB(dsdb_path)
        virtuals = {"scheduler": DummyScheduler()}
        self.device_mgr = DeviceManager(self.device_db,
                                        virtual_devices=virtuals)
        self.dataset_mgr = DatasetManager(self.dataset_db)

    def execute(self, cls, **kwargs):
        """Instantiate *cls*, run its full lifecycle, return the instance."""
        expid = {
            "file": sys.modules[cls.__module__].__file__,
            "class_name": cls.__name__,
            "arguments": kwargs,
        }
        scheduler = self.device_mgr.virtual_devices["scheduler"]
        scheduler.expid = expid
        try:
            try:
                exp = cls(self.device_mgr, self.dataset_mgr, **kwargs)
            except KeyError as e:
                # skip if ddb does not match requirements
                raise unittest.SkipTest(*e.args)
            exp.prepare()
            exp.run()
            exp.analyze()
            return exp
        finally:
            self.device_mgr.close_devices()
def setUp(self):
    """Create fresh device/dataset databases and managers for each test."""
    ddb_file = os.path.join(artiq_root, "device_db.pyon")
    dsdb_file = os.path.join(artiq_root, "dataset_db.pyon")
    self.device_db = DeviceDB(ddb_file)
    self.dataset_db = DatasetDB(dsdb_file)
    self.device_mgr = DeviceManager(
        self.device_db,
        virtual_devices={"scheduler": DummyScheduler()})
    self.dataset_mgr = DatasetManager(self.dataset_db)
class ExperimentCase(unittest.TestCase):
    """Base class for experiment tests backed by flat-file device/parameter
    databases and an in-memory result DB."""

    def setUp(self):
        ddb_file = os.path.join(artiq_root, "ddb.pyon")
        pdb_file = os.path.join(artiq_root, "pdb.pyon")
        self.ddb = FlatFileDB(ddb_file)
        self.dmgr = DeviceManager(
            self.ddb, virtual_devices={"scheduler": DummyScheduler()})
        self.pdb = FlatFileDB(pdb_file)
        self.rdb = ResultDB()

    def execute(self, cls, **kwargs):
        """Run *cls* through prepare/run/analyze; returns the instance."""
        expid = {
            "file": sys.modules[cls.__module__].__file__,
            "class_name": cls.__name__,
            "arguments": kwargs,
        }
        scheduler = self.dmgr.virtual_devices["scheduler"]
        scheduler.expid = expid
        try:
            try:
                exp = cls(self.dmgr, self.pdb, self.rdb, **kwargs)
            except KeyError as e:
                # skip if ddb does not match requirements
                raise unittest.SkipTest(*e.args)
            exp.prepare()
            exp.run()
            exp.analyze()
            return exp
        finally:
            self.dmgr.close_devices()
def main():
    """RTIO analyzer tool: obtain a dump (from a file or from the core
    device) and print it, convert it to VCD, and/or save the raw bytes.

    Fix: the DeviceManager is now always released via try/finally,
    matching the other tool variants in this codebase.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    if (not args.print_decoded
            and args.write_vcd is None and args.write_dump is None):
        print("No action selected, use -p, -w and/or -d. See -h for help.")
        sys.exit(1)
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    try:
        if args.read_dump:
            # offline mode: decode a previously saved dump
            with open(args.read_dump, "rb") as f:
                dump = f.read()
        else:
            core_addr = device_mgr.get_desc("core")["arguments"]["host"]
            dump = get_analyzer_dump(core_addr)
        decoded_dump = decode_dump(dump)
        if args.print_decoded:
            print("Log channel:", decoded_dump.log_channel)
            print("DDS one-hot:", decoded_dump.dds_onehot_sel)
            for message in decoded_dump.messages:
                print(message)
        if args.write_vcd:
            with open(args.write_vcd, "w") as f:
                decoded_dump_to_vcd(f, device_mgr.get_device_db(),
                                    decoded_dump,
                                    uniform_interval=args.vcd_uniform_interval)
        if args.write_dump:
            with open(args.write_dump, "wb") as f:
                f.write(dump)
    finally:
        device_mgr.close_devices()
def main():
    """Core device maintenance tool: show the device log and manage flash
    storage (read/write/delete/erase).

    Fix: cfg-write now encodes CLI string values to UTF-8 bytes before
    flashing, consistent with the file branch below (which writes raw
    bytes) and with the other flash-storage tools in this codebase.
    """
    args = get_argparser().parse_args()
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    try:
        comm = device_mgr.get("comm")
        if args.action == "log":
            print(comm.get_log())
        elif args.action == "cfg-read":
            value = comm.flash_storage_read(args.key)
            if not value:
                # empty read means the key is absent
                print("Key {} does not exist".format(args.key))
            else:
                print(value)
        elif args.action == "cfg-write":
            for key, value in args.string:
                # flash storage holds bytes; encode the CLI string
                comm.flash_storage_write(key, value.encode("utf-8"))
            for key, filename in args.file:
                with open(filename, "rb") as fi:
                    comm.flash_storage_write(key, fi.read())
        elif args.action == "cfg-delete":
            for key in args.key:
                comm.flash_storage_remove(key)
        elif args.action == "cfg-erase":
            comm.flash_storage_erase()
    finally:
        device_mgr.close_devices()
def main():
    """Compiler testbench runner: execute a testcase file against the
    device DB located next to it, optionally in +diag or +compile mode."""
    # consume a leading "+diag" flag: do not fail the process on
    # compilation errors (the diagnostic itself is the expected output)
    if len(sys.argv) > 1 and sys.argv[1] == "+diag":
        del sys.argv[1]
        diag = True
    else:
        diag = False
    # consume a leading "+compile" flag: compile the kernel but don't run it
    if len(sys.argv) > 1 and sys.argv[1] == "+compile":
        del sys.argv[1]
        compile_only = True
    else:
        compile_only = False
    # the device DB is expected to live next to the testcase file
    ddb_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.pyon")
    dmgr = DeviceManager(DeviceDB(ddb_path))
    with open(sys.argv[1]) as f:
        testcase_code = compile(f.read(), f.name, "exec")
        # the testcase defines "entrypoint" in this namespace
        testcase_vars = {'__name__': 'testbench', 'dmgr': dmgr}
        exec(testcase_code, testcase_vars)
    try:
        core = dmgr.get("core")
        if compile_only:
            core.compile(testcase_vars["entrypoint"], (), {})
        else:
            core.run(testcase_vars["entrypoint"], (), {})
            print(core.comm.get_log())
            core.comm.clear_log()
    except CompileError as error:
        # NOTE(review): in +diag mode the error is swallowed silently here —
        # presumably the compiler already printed the diagnostic; confirm.
        if not diag:
            exit(1)
def main():
    """Ahead-of-time compile an experiment kernel to an ELF file."""
    args = get_argparser().parse_args()
    init_logger(args)
    dmgr = DeviceManager(FlatFileDB(args.ddb))
    pdb = FlatFileDB(args.pdb)
    try:
        module = file_import(args.file)
        exp = get_experiment(module, args.experiment)
        arguments = parse_arguments(args.arguments)
        exp_inst = exp(dmgr, pdb, **arguments)
        # only kernel entry points (marked with k_function_info) can be
        # compiled standalone
        if not hasattr(exp.run, "k_function_info") or not exp.run.k_function_info:
            raise ValueError("Experiment entry point must be a kernel")
        core_name = exp.run.k_function_info.core_name
        core = getattr(exp_inst, core_name)
        binary, rpc_map, _ = core.compile(exp.run.k_function_info.k_function,
                                          [exp_inst], {},
                                          with_attr_writeback=False)
    finally:
        dmgr.close_devices()
    # a standalone binary has no host to service RPCs
    if rpc_map:
        raise ValueError("Experiment must not use RPC")
    output = args.output
    if output is None:
        # derive the output name from the input file: foo.py -> foo.elf
        output = args.file
        if output.endswith(".py"):
            output = output[:-3]
        output += ".elf"
    with open(output, "wb") as f:
        f.write(binary)
def run(with_file=False):
    """Build and execute one experiment, then persist its datasets.

    Args:
        with_file: forwarded to get_argparser(); when True the parser
            accepts an experiment file argument.

    Fixes: drop the unused ``as error`` binding on CompileError, and
    re-raise with a bare ``raise`` so the original traceback is kept
    without an extra re-raise frame.
    """
    args = get_argparser(with_file).parse_args()
    init_logger(args)
    device_mgr = DeviceManager(DeviceDB(args.device_db),
                               virtual_devices={"scheduler": DummyScheduler()})
    dataset_db = DatasetDB(args.dataset_db)
    dataset_mgr = DatasetManager(dataset_db)
    try:
        exp_inst = _build_experiment(device_mgr, dataset_mgr, args)
        exp_inst.prepare()
        exp_inst.run()
        exp_inst.analyze()
    except CompileError:
        # the compiler already printed a diagnostic; abort quietly
        return
    except Exception as exn:
        if hasattr(exn, "artiq_core_exception"):
            print(exn.artiq_core_exception, file=sys.stderr)
        raise
    finally:
        device_mgr.close_devices()
    if args.hdf5 is not None:
        with h5py.File(args.hdf5, "w") as f:
            dataset_mgr.write_hdf5(f)
    else:
        # no HDF5 target: dump local datasets to stdout, sorted by key
        for k, v in sorted(dataset_mgr.local.items(), key=itemgetter(0)):
            print("{}: {}".format(k, v))
    dataset_db.save()
def run(with_file=False):
    """Build and execute one experiment against flat-file DBs, then write
    results to HDF5 or print them."""
    args = get_argparser(with_file).parse_args()
    init_logger(args)
    dmgr = DeviceManager(FlatFileDB(args.ddb),
                         virtual_devices={"scheduler": DummyScheduler()})
    pdb = FlatFileDB(args.pdb)
    # log every parameter change made by the experiment
    pdb.hooks.append(SimpleParamLogger())
    rdb = ResultDB()
    try:
        exp_inst = _build_experiment(dmgr, pdb, rdb, args)
        exp_inst.prepare()
        exp_inst.run()
        exp_inst.analyze()
    finally:
        dmgr.close_devices()
    if args.hdf5 is not None:
        with h5py.File(args.hdf5, "w") as f:
            rdb.write_hdf5(f)
    elif rdb.rt.read or rdb.nrt:
        # merge realtime and non-realtime results and print sorted by key
        r = chain(rdb.rt.read.items(), rdb.nrt.items())
        for k, v in sorted(r, key=itemgetter(0)):
            print("{}: {}".format(k, v))
def main():
    """RTIO analyzer tool (newer logging API): obtain a dump and print it,
    convert it to VCD, and/or save the raw bytes.

    Fix: the DeviceManager is now always released via try/finally,
    matching the other tool variants in this codebase.
    """
    args = get_argparser().parse_args()
    common_args.init_logger_from_args(args)
    if (not args.print_decoded
            and args.write_vcd is None and args.write_dump is None):
        print("No action selected, use -p, -w and/or -d. See -h for help.")
        sys.exit(1)
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    try:
        if args.read_dump:
            # offline mode: decode a previously saved dump
            with open(args.read_dump, "rb") as f:
                dump = f.read()
        else:
            core_addr = device_mgr.get_desc("core")["arguments"]["host"]
            dump = get_analyzer_dump(core_addr)
        decoded_dump = decode_dump(dump)
        if args.print_decoded:
            print("Log channel:", decoded_dump.log_channel)
            print("DDS one-hot:", decoded_dump.dds_onehot_sel)
            for message in decoded_dump.messages:
                print(message)
        if args.write_vcd:
            with open(args.write_vcd, "w") as f:
                decoded_dump_to_vcd(f, device_mgr.get_device_db(),
                                    decoded_dump,
                                    uniform_interval=args.vcd_uniform_interval)
        if args.write_dump:
            with open(args.write_dump, "wb") as f:
                f.write(dump)
    finally:
        device_mgr.close_devices()
def main():
    """Read/write/delete/erase keys in the core device flash storage,
    accessed through the core device driver's comm object."""
    args = get_argparser().parse_args()
    init_logger(args)
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    try:
        comm = device_mgr.get("core").comm
        comm.check_system_info()  # validate the link before touching flash
        if args.action == "read":
            value = comm.flash_storage_read(args.key)
            if not value:
                # empty read means the key is absent
                print("Key {} does not exist".format(args.key))
            else:
                print(value)
        elif args.action == "write":
            # string values are stored as UTF-8 bytes; files verbatim
            for key, value in args.string:
                comm.flash_storage_write(key, value.encode("utf-8"))
            for key, filename in args.file:
                with open(filename, "rb") as fi:
                    comm.flash_storage_write(key, fi.read())
        elif args.action == "delete":
            for key in args.key:
                comm.flash_storage_remove(key)
        elif args.action == "erase":
            comm.flash_storage_erase()
    finally:
        device_mgr.close_devices()
class ExperimentCase(unittest.TestCase):
    """Base class for tests that run whole experiments against the
    hardware testbench databases rooted at ``artiq_root``.

    Fix: ``execute`` accepted ``*args`` but silently dropped them;
    positional arguments are now forwarded through ``create`` to the
    experiment constructor (matching the sibling ExperimentCase
    variants). ``create`` gains ``*args`` with no change for existing
    keyword-only callers.
    """

    def setUp(self):
        self.device_db = DeviceDB(os.path.join(artiq_root, "device_db.pyon"))
        self.dataset_db = DatasetDB(os.path.join(artiq_root,
                                                 "dataset_db.pyon"))
        self.device_mgr = DeviceManager(
            self.device_db,
            virtual_devices={"scheduler": DummyScheduler()})
        self.dataset_mgr = DatasetManager(self.dataset_db)

    def create(self, cls, *args, **kwargs):
        """Instantiate and prepare *cls*; skip the test if the device DB
        lacks a required entry."""
        try:
            exp = cls(self.device_mgr, self.dataset_mgr, *args, **kwargs)
            exp.prepare()
            return exp
        except KeyError as e:
            # skip if ddb does not match requirements
            raise unittest.SkipTest(*e.args)

    def execute(self, cls, *args, **kwargs):
        """Run *cls* through its full lifecycle; returns the instance."""
        expid = {
            "file": sys.modules[cls.__module__].__file__,
            "class_name": cls.__name__,
            "arguments": kwargs
        }
        self.device_mgr.virtual_devices["scheduler"].expid = expid
        try:
            exp = self.create(cls, *args, **kwargs)
            exp.run()
            exp.analyze()
            return exp
        except CompileError as error:
            # Reduce amount of text on terminal.
            raise error from None
        finally:
            self.device_mgr.close_devices()
def main():
    """Compiler testbench runner: execute a testcase file against the
    device DB located next to it, optionally in +diag or +compile mode."""
    # consume a leading "+diag" flag: do not fail the process on
    # compilation errors (the diagnostic itself is the expected output)
    if len(sys.argv) > 1 and sys.argv[1] == "+diag":
        del sys.argv[1]
        diag = True
    else:
        diag = False
    # consume a leading "+compile" flag: compile the kernel but don't run it
    if len(sys.argv) > 1 and sys.argv[1] == "+compile":
        del sys.argv[1]
        compile_only = True
    else:
        compile_only = False
    # the device DB is expected to live next to the testcase file
    ddb_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
    dmgr = DeviceManager(DeviceDB(ddb_path))
    with open(sys.argv[1]) as f:
        testcase_code = compile(f.read(), f.name, "exec")
        # the testcase defines "entrypoint" in this namespace
        testcase_vars = {'__name__': 'testbench', 'dmgr': dmgr}
        exec(testcase_code, testcase_vars)
    try:
        core = dmgr.get("core")
        if compile_only:
            core.compile(testcase_vars["entrypoint"], (), {})
        else:
            core.run(testcase_vars["entrypoint"], (), {})
            print(core.comm.get_log())
            core.comm.clear_log()
    except CompileError as error:
        # NOTE(review): in +diag mode the error is swallowed silently here —
        # presumably the compiler already printed the diagnostic; confirm.
        if not diag:
            exit(1)
def main():
    """RTIO analyzer tool: obtain a dump (from a file or from the "comm"
    device) and print it, convert it to VCD, and/or save the raw bytes."""
    args = get_argparser().parse_args()
    init_logger(args)
    if not args.print_decoded and args.write_vcd is None and args.write_dump is None:
        print("No action selected, use -p, -w and/or -d. See -h for help.")
        sys.exit(1)
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    try:
        if args.read_dump:
            # offline mode: decode a previously saved dump
            with open(args.read_dump, "rb") as f:
                dump = f.read()
        else:
            comm = device_mgr.get("comm")
            dump = comm.get_analyzer_dump()
        decoded_dump = decode_dump(dump)
        if args.print_decoded:
            print("Log channel:", decoded_dump.log_channel)
            print("DDS one-hot:", decoded_dump.dds_onehot_sel)
            for message in decoded_dump.messages:
                print(message)
        if args.write_vcd:
            with open(args.write_vcd, "w") as f:
                decoded_dump_to_vcd(f, device_mgr.get_device_db(),
                                    decoded_dump)
        if args.write_dump:
            with open(args.write_dump, "wb") as f:
                f.write(dump)
    finally:
        device_mgr.close_devices()
def main():
    """Worker process main loop: service build/prepare/run/analyze/
    write_results/examine/terminate commands received over the parent
    IPC object protocol."""
    # forward all worker output to the parent through the log machinery
    sys.stdout = sys.stderr = LogForwarder()
    start_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None
    dmgr = DeviceManager(ParentDDB,
                         virtual_devices={"scheduler": Scheduler()})
    rdb = ResultDB()
    # publish realtime results back to the parent as they are produced
    rdb.rt.publish = update_rt_results
    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.localtime()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    expf = os.path.join(obj["wd"], expid["file"])
                else:
                    expf = expid["file"]
                exp = get_exp(expf, expid["class_name"])
                dmgr.virtual_devices["scheduler"].set_run_info(
                    obj["pipeline_name"], expid, obj["priority"])
                exp_inst = exp(dmgr, ParentPDB, rdb, **expid["arguments"])
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                exp_inst.analyze()
                put_object({"action": "completed"})
            elif action == "write_results":
                f = get_hdf5_output(start_time, rid, exp.__name__)
                try:
                    rdb.write_hdf5(f)
                    if "repo_rev" in expid:
                        # store the repository revision as a fixed-length
                        # byte string dataset
                        rr = expid["repo_rev"]
                        dtype = "S{}".format(len(rr))
                        dataset = f.create_dataset("repo_rev", (), dtype)
                        dataset[()] = rr.encode()
                finally:
                    f.close()
                put_object({"action": "completed"})
            elif action == "examine":
                # inspect an experiment file without hardware access
                examine(DummyDMGR(), DummyPDB(), ResultDB(), obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    finally:
        dmgr.close_devices()
def main():
    """Run the full SinaraTester experiment against the local device_db.py."""
    mgr = DeviceManager(DeviceDB("device_db.py"))
    try:
        tester = SinaraTester((mgr, None, None, None))
        # standard experiment lifecycle, in order
        for stage in (tester.prepare, tester.run, tester.analyze):
            stage()
    finally:
        mgr.close_devices()
def main():
    """Fetch and print the core device log via the "comm" device."""
    args = get_argparser().parse_args()
    init_logger(args)
    manager = DeviceManager(DeviceDB(args.device_db))
    try:
        link = manager.get("comm")
        link.check_system_info()
        # the log text already carries its own trailing newline
        print(link.get_log(), end="")
    finally:
        manager.close_devices()
def main():
    """Fetch and print the core device log via the "comm" device."""
    args = get_argparser().parse_args()
    init_logger(args)
    manager = DeviceManager(DeviceDB(args.device_db))
    try:
        link = manager.get("comm")
        link.check_ident()
        # the log text already carries its own trailing newline
        print(link.get_log(), end="")
    finally:
        manager.close_devices()
def setUp(self):
    """Build per-test device/dataset databases and their managers."""
    device_db_file = os.path.join(artiq_root, "device_db.pyon")
    dataset_db_file = os.path.join(artiq_root, "dataset_db.pyon")
    self.device_db = DeviceDB(device_db_file)
    self.dataset_db = DatasetDB(dataset_db_file)
    self.device_mgr = DeviceManager(
        self.device_db,
        virtual_devices={"scheduler": DummyScheduler()})
    self.dataset_mgr = DatasetManager(self.dataset_db)
class ExperimentCase(unittest.TestCase):
    """Base class for tests running whole experiments against the hardware
    testbench databases rooted at ``artiq_root``."""

    def setUp(self):
        self.device_db = DeviceDB(os.path.join(artiq_root, "device_db.py"))
        self.dataset_db = DatasetDB(
            os.path.join(artiq_root, "dataset_db.pyon"))
        self.device_mgr = DeviceManager(
            self.device_db, virtual_devices={"scheduler": DummyScheduler()})
        self.dataset_mgr = DatasetManager(self.dataset_db)

    def tearDown(self):
        self.device_mgr.close_devices()

    def create(self, cls, *args, **kwargs):
        """Instantiate and prepare *cls*; skip the test if a required
        device is not available."""
        try:
            exp = cls(
                (self.device_mgr, self.dataset_mgr, None, {}), *args,
                **kwargs)
        except DeviceError as e:
            # skip if ddb does not match requirements
            raise unittest.SkipTest(
                "test device not available: `{}`".format(*e.args))
        exp.prepare()
        return exp

    def execute(self, cls, *args, **kwargs):
        """Run *cls* through create/run/analyze; returns the instance."""
        expid = {
            "file": sys.modules[cls.__module__].__file__,
            "class_name": cls.__name__,
            "arguments": dict()
        }
        self.device_mgr.virtual_devices["scheduler"].expid = expid
        try:
            exp = self.create(cls, *args, **kwargs)
            exp.run()
            exp.analyze()
            return exp
        except CompileError as error:
            # Reduce amount of text on terminal.
            raise error from None
        except Exception as exn:
            if hasattr(exn, "artiq_core_exception"):
                # append the core device exception text to the message;
                # the trailing comma rebuilds args as a one-element tuple
                exn.args = "{}\n{}".format(exn.args[0],
                                           exn.artiq_core_exception),
            raise exn
class ExperimentCase(unittest.TestCase):
    """Base class for tests running whole experiments against the hardware
    testbench databases rooted at ``artiq_root``."""

    def setUp(self):
        self.device_db = DeviceDB(os.path.join(artiq_root, "device_db.pyon"))
        self.dataset_db = DatasetDB(
            os.path.join(artiq_root, "dataset_db.pyon"))
        self.device_mgr = DeviceManager(
            self.device_db, virtual_devices={"scheduler": DummyScheduler()})
        self.dataset_mgr = DatasetManager(self.dataset_db)

    def tearDown(self):
        self.device_mgr.close_devices()

    def create(self, cls, *args, **kwargs):
        """Instantiate and prepare *cls*; skip the test if the device DB
        lacks a required entry."""
        try:
            exp = cls(
                (self.device_mgr, self.dataset_mgr, None), *args, **kwargs)
            exp.prepare()
            return exp
        except KeyError as e:
            # skip if ddb does not match requirements
            raise unittest.SkipTest(
                "device_db entry `{}` not found".format(*e.args))

    def execute(self, cls, *args, **kwargs):
        """Run *cls* through create/run/analyze; returns the instance."""
        expid = {
            "file": sys.modules[cls.__module__].__file__,
            "class_name": cls.__name__,
            "arguments": dict()
        }
        self.device_mgr.virtual_devices["scheduler"].expid = expid
        try:
            exp = self.create(cls, *args, **kwargs)
            exp.run()
            exp.analyze()
            return exp
        except CompileError as error:
            # Reduce amount of text on terminal.
            raise error from None
        except Exception as exn:
            if hasattr(exn, "artiq_core_exception"):
                # append the core device exception text to the message;
                # the trailing comma rebuilds args as a one-element tuple
                exn.args = "{}\n{}".format(exn.args[0],
                                           exn.artiq_core_exception),
            raise exn
def main():
    """Select tests per the CLI flags and run them through SinaraTester."""
    all_tests = SinaraTester.available_tests()
    args = get_argparser(all_tests).parse_args()
    if args.exclude is not None:
        # don't use set in order to keep the order
        selected = [name for name in all_tests if name not in args.exclude]
    elif args.only is not None:
        selected = args.only
    else:
        selected = all_tests
    manager = DeviceManager(DeviceDB(args.device_db))
    try:
        tester = SinaraTester((manager, None, None, None))
        tester.prepare()
        tester.run(selected)
        tester.analyze()
    finally:
        manager.close_devices()
def main():
    """RTIO analyzer tool: obtain a dump (from a file or from the "comm"
    device) and print it, convert it to VCD, and/or save the raw bytes."""
    args = get_argparser().parse_args()
    init_logger(args)
    if (not args.print_decoded
            and args.write_vcd is None and args.write_dump is None):
        print("No action selected, use -p, -w and/or -d. See -h for help.")
        sys.exit(1)
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    try:
        if args.read_dump:
            # offline mode: decode a previously saved dump
            with open(args.read_dump, "rb") as f:
                dump = f.read()
        else:
            comm = device_mgr.get("comm")
            dump = comm.get_analyzer_dump()
        decoded_dump = decode_dump(dump)
        if args.print_decoded:
            print("Log channel:", decoded_dump.log_channel)
            print("DDS one-hot:", decoded_dump.dds_onehot_sel)
            for message in decoded_dump.messages:
                print(message)
        if args.write_vcd:
            with open(args.write_vcd, "w") as f:
                decoded_dump_to_vcd(f, device_mgr.get_device_db(),
                                    decoded_dump)
        if args.write_dump:
            with open(args.write_dump, "wb") as f:
                f.write(dump)
    finally:
        device_mgr.close_devices()
def setUp(self):
    """Wire up mock databases, mock devices and a DeviceManager."""
    self.device_db = MockDeviceDB()
    self.dataset_db = MockDatasetDB()
    self.dataset_mgr = DatasetManager(self.dataset_db)
    self.ccb = unittest.mock.Mock()
    self.core = unittest.mock.Mock()
    self.scheduler = MockScheduler()
    virtual = {
        "ccb": self.ccb,
        "core": self.core,
        "scheduler": self.scheduler,
    }
    self.device_mgr = DeviceManager(self.device_db, virtual_devices=virtual)
def main():
    """Ahead-of-time compile an experiment kernel to an ELF file."""
    args = get_argparser().parse_args()
    init_logger(args)
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    dataset_mgr = DatasetManager(DatasetDB(args.dataset_db))
    try:
        module = file_import(args.file, prefix="artiq_run_")
        exp = get_experiment(module, args.experiment)
        arguments = parse_arguments(args.arguments)
        argument_mgr = ProcessArgumentManager(arguments)
        exp_inst = exp((device_mgr, dataset_mgr, argument_mgr))
        # only kernel entry points (marked artiq_embedded) can be
        # compiled standalone
        if not hasattr(exp.run, "artiq_embedded"):
            raise ValueError("Experiment entry point must be a kernel")
        core_name = exp.run.artiq_embedded.core_name
        core = getattr(exp_inst, core_name)
        object_map, kernel_library, _, _ = \
            core.compile(exp.run, [exp_inst], {},
                         attribute_writeback=False, print_as_rpc=False)
    except CompileError as error:
        # the compiler already printed a diagnostic; abort quietly
        return
    finally:
        device_mgr.close_devices()
    # a standalone binary has no host to service RPCs
    if object_map.has_rpc():
        raise ValueError("Experiment must not use RPC")
    output = args.output
    if output is None:
        # derive the output name from the input file: foo.py -> foo.elf
        basename, ext = os.path.splitext(args.file)
        output = "{}.elf".format(basename)
    with open(output, "wb") as f:
        f.write(kernel_library)
def run(with_file=False):
    """Build, execute and analyze one experiment, then persist datasets."""
    args = get_argparser(with_file).parse_args()
    init_logger(args)
    scheduler_stub = DummyScheduler()
    device_mgr = DeviceManager(DeviceDB(args.device_db),
                               virtual_devices={"scheduler": scheduler_stub})
    dataset_db = DatasetDB(args.dataset_db)
    dataset_mgr = DatasetManager(dataset_db)
    try:
        experiment = _build_experiment(device_mgr, dataset_mgr, args)
        experiment.prepare()
        experiment.run()
        experiment.analyze()
    finally:
        device_mgr.close_devices()
    if args.hdf5 is not None:
        # persist all datasets into the requested HDF5 file
        with h5py.File(args.hdf5, "w") as f:
            dataset_mgr.write_hdf5(f)
    else:
        # no HDF5 target: dump local datasets to stdout, sorted by key
        for key in sorted(dataset_mgr.local):
            print("{}: {}".format(key, dataset_mgr.local[key]))
    dataset_db.save()
def main():
    """Ahead-of-time compile an experiment kernel to an ELF file."""
    args = get_argparser().parse_args()
    init_logger(args)
    dmgr = DeviceManager(FlatFileDB(args.ddb))
    pdb = FlatFileDB(args.pdb)
    try:
        module = file_import(args.file)
        exp = get_experiment(module, args.experiment)
        arguments = parse_arguments(args.arguments)
        exp_inst = exp(dmgr, pdb, **arguments)
        # only kernel entry points (marked with k_function_info) can be
        # compiled standalone
        if (not hasattr(exp.run, "k_function_info")
                or not exp.run.k_function_info):
            raise ValueError("Experiment entry point must be a kernel")
        core_name = exp.run.k_function_info.core_name
        core = getattr(exp_inst, core_name)
        binary, rpc_map, _ = core.compile(exp.run.k_function_info.k_function,
                                          [exp_inst], {},
                                          with_attr_writeback=False)
    finally:
        dmgr.close_devices()
    # a standalone binary has no host to service RPCs
    if rpc_map:
        raise ValueError("Experiment must not use RPC")
    output = args.output
    if output is None:
        # derive the output name from the input file: foo.py -> foo.elf
        output = args.file
        if output.endswith(".py"):
            output = output[:-3]
        output += ".elf"
    with open(output, "wb") as f:
        f.write(binary)
def main():
    """Worker process main loop: service build/prepare/run/analyze/
    write_results/examine/terminate commands received over the parent
    pipe IPC."""
    global ipc
    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])
    start_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None
    repository_path = None
    device_mgr = DeviceManager(ParentDeviceDB)
    device_mgr.virtual_devices["scheduler"] = Scheduler(device_mgr)
    dataset_mgr = DatasetManager(ParentDatasetDB)
    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.localtime()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_exp(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                # results land in results/<date>/<hour>/ and the worker
                # chdirs there so experiments write relative paths
                dirname = os.path.join("results",
                                       time.strftime("%Y-%m-%d", start_time),
                                       time.strftime("%H", start_time))
                os.makedirs(dirname, exist_ok=True)
                os.chdir(dirname)
                argument_mgr = ProcessArgumentManager(expid["arguments"])
                exp_inst = exp((device_mgr, dataset_mgr, argument_mgr))
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                exp_inst.analyze()
                put_object({"action": "completed"})
            elif action == "write_results":
                filename = "{:09}-{}.h5".format(rid, exp.__name__)
                with h5py.File(filename, "w") as f:
                    dataset_mgr.write_hdf5(f.create_group("datasets"))
                    f["artiq_version"] = artiq_version
                    f["rid"] = rid
                    f["start_time"] = int(time.mktime(start_time))
                    f["expid"] = pyon.encode(expid)
                put_object({"action": "completed"})
            elif action == "examine":
                # inspect an experiment file without hardware access
                examine(ExamineDeviceMgr, ParentDatasetDB, obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except Exception as exc:
        # When we get CompileError, a more suitable diagnostic has already
        # been printed.
        if not isinstance(exc, CompileError):
            short_exc_info = type(exc).__name__
            exc_str = str(exc)
            if exc_str:
                short_exc_info += ": " + exc_str.splitlines()[0]
            lines = ["Terminating with exception ("+short_exc_info+")\n"]
            if hasattr(exc, "artiq_core_exception"):
                lines.append(str(exc.artiq_core_exception))
            if hasattr(exc, "parent_traceback"):
                # traceback was forwarded from another process; reuse it
                # instead of the (uninformative) local one
                lines += exc.parent_traceback
                lines += traceback.format_exception_only(type(exc), exc)
            logging.error("".join(lines).rstrip(),
                          exc_info=not hasattr(exc, "parent_traceback"))
        put_object({"action": "exception"})
    finally:
        device_mgr.close_devices()
        ipc.close()
def setUp(self):
    """Open the flat-file device/parameter DBs and a result DB per test."""
    ddb_file = os.path.join(artiq_root, "ddb.pyon")
    pdb_file = os.path.join(artiq_root, "pdb.pyon")
    self.ddb = FlatFileDB(ddb_file)
    self.dmgr = DeviceManager(
        self.ddb, virtual_devices={"scheduler": DummyScheduler()})
    self.pdb = FlatFileDB(pdb_file)
    self.rdb = ResultDB()
def main():
    """Worker process main loop: service build/prepare/run/analyze/
    write_results/examine/terminate commands received over the parent
    pipe IPC."""
    global ipc
    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])
    start_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None
    repository_path = None
    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={"scheduler": Scheduler()})
    dataset_mgr = DatasetManager(ParentDatasetDB)
    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.localtime()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_exp(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                exp_inst = exp(
                    device_mgr, dataset_mgr, enable_processors=True,
                    **expid["arguments"])
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                exp_inst.analyze()
                put_object({"action": "completed"})
            elif action == "write_results":
                f = get_hdf5_output(start_time, rid, exp.__name__)
                try:
                    dataset_mgr.write_hdf5(f)
                    string_to_hdf5(f, "artiq_version", artiq_version)
                    if "repo_rev" in expid:
                        string_to_hdf5(f, "repo_rev", expid["repo_rev"])
                finally:
                    f.close()
                put_object({"action": "completed"})
            elif action == "examine":
                # inspect an experiment file without hardware access
                examine(ExamineDeviceMgr, DummyDatasetMgr, obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except Exception as exc:
        # When we get CompileError, a more suitable diagnostic has already
        # been printed.
        if not isinstance(exc, CompileError):
            short_exc_info = type(exc).__name__
            exc_str = str(exc)
            if exc_str:
                short_exc_info += ": " + exc_str
            lines = ["Terminating with exception ("+short_exc_info+")\n"]
            lines += traceback.format_exception_only(type(exc), exc)
            if hasattr(exc, "parent_traceback"):
                # traceback forwarded from another process
                lines += exc.parent_traceback
            logging.error("".join(lines).rstrip(),
                          exc_info=not hasattr(exc, "parent_traceback"))
        put_object({"action": "exception"})
    finally:
        device_mgr.close_devices()
        ipc.close()
def main():
    """Worker process main loop (variant with customized result-file
    layout): service build/prepare/run/analyze/write_results/examine/
    terminate commands received over the parent pipe IPC."""
    global ipc
    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])
    start_time = None
    run_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None
    repository_path = None

    def write_results():
        # emergency result dump, used only when run() fails below
        filename = "{:09}-{}.h5".format(rid, exp.__name__)
        with h5py.File(filename, "w") as f:
            dataset_mgr.write_hdf5(f)
            f["artiq_version"] = artiq_version
            f["rid"] = rid
            f["start_time"] = start_time
            f["run_time"] = run_time
            f["expid"] = pyon.encode(expid)

    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={
                                   "scheduler": Scheduler(),
                                   "ccb": CCB()
                               })
    dataset_mgr = DatasetManager(ParentDatasetDB)
    import_cache.install_hook()
    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.time()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_experiment(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                # results are written under ~/data/<date>/ and the worker
                # chdirs there
                start_local_time = time.localtime(start_time)
                rootdir = os.path.join(os.path.expanduser("~"), "data")
                dirname = os.path.join(
                    rootdir, time.strftime("%Y-%m-%d", start_local_time))
                os.makedirs(dirname, exist_ok=True)
                os.chdir(dirname)
                argument_mgr = ProcessArgumentManager(expid["arguments"])
                exp_inst = exp((device_mgr, dataset_mgr, argument_mgr, {}))
                put_completed()
            elif action == "prepare":
                exp_inst.prepare()
                put_completed()
            elif action == "run":
                # NOTE(review): current_time/dirname are shared across
                # actions; write_results below relies on "run"/"build"
                # having executed first in this session — confirm protocol.
                current_time = datetime.datetime.now().strftime("%H%M_%S")
                run_time = time.time()
                try:
                    exp_inst.run()
                except:
                    # Only write results in run() on failure; on success wait
                    # for end of analyze stage.
                    write_results()
                    raise
                put_completed()
            elif action == "analyze":
                try:
                    exp_inst.analyze()
                except:
                    # make analyze failure non-fatal, as we may still want to
                    # write results afterwards
                    put_exception_report()
                else:
                    put_object({"action": "completed"})
            elif action == "write_results":
                if hasattr(exp_inst, "archive"):
                    # experiments can opt out of archiving
                    if not exp_inst.archive:
                        put_object({"action": "completed"})
                        continue
                path = os.path.join(dirname, exp.__name__)
                if not os.path.exists(path):
                    os.mkdir(path)
                if hasattr(exp_inst, "filename"):
                    # experiment chose its own result file name
                    filename = list(exp_inst.filename.values())[0]
                else:
                    filename = "raw-data_{}.h5".format(current_time)
                file_ = os.path.join(path, filename)
                with h5py.File(file_, "a") as f:
                    dataset_mgr.write_hdf5(f)
                    f["artiq_version"] = artiq_version
                    f["rid"] = rid
                    f["start_time"] = start_time
                    f["run_time"] = run_time
                    f["expid"] = pyon.encode(expid)
                put_object({"action": "completed"})
            elif action == "examine":
                # inspect an experiment file without hardware access
                examine(ExamineDeviceMgr, ExamineDatasetMgr, obj["file"])
                put_completed()
            elif action == "terminate":
                break
    except:
        put_exception_report()
    finally:
        device_mgr.close_devices()
        ipc.close()
class GenericControllerCase(unittest.TestCase):
    """Base class for controller integration tests: starts controller
    subprocesses from device DB entries and tears them down with an
    escalating terminate sequence (RPC -> SIGTERM -> SIGKILL)."""

    def get_device_db(self):
        # subclasses supply the device DB to test against
        raise NotImplementedError

    def setUp(self):
        self.device_db = self.get_device_db()
        self.device_mgr = DeviceManager(self.device_db)
        self.controllers = {}

    def tearDown(self):
        self.device_mgr.close_devices()
        # iterate over a copy: stop_controller mutates self.controllers
        for name in list(self.controllers):
            self.stop_controller(name)

    def start_controller(self, name, sleep=1):
        """Launch the controller *name* from its device DB entry and give
        it *sleep* seconds to come up."""
        if name in self.controllers:
            raise ValueError("controller `{}` already started".format(name))
        try:
            entry = self.device_db.get(name)
        except KeyError:
            raise unittest.SkipTest(
                "controller `{}` not found".format(name))
        # substitute name/bind/port placeholders in the command template
        entry["command"] = entry["command"].format(
            name=name, bind=entry["host"], port=entry["port"])
        proc = subprocess.Popen(shlex.split(entry["command"]))
        self.controllers[name] = entry, proc
        time.sleep(sleep)

    def stop_controller(self, name, default_timeout=1):
        """Stop controller *name*, escalating from a polite RPC terminate
        to SIGTERM and finally SIGKILL, each bounded by the timeout."""
        desc, proc = self.controllers[name]
        t = desc.get("term_timeout", default_timeout)
        target_name = desc.get("target_name", None)
        if target_name is None:
            target_name = AutoTarget
        try:
            # step 1: ask the controller to shut itself down over RPC
            try:
                client = Client(desc["host"], desc["port"], target_name, t)
                try:
                    client.terminate()
                finally:
                    client.close_rpc()
                proc.wait(t)
                return
            except (socket.timeout, subprocess.TimeoutExpired):
                logger.warning("Controller %s failed to exit on request",
                               name)
            # step 2: SIGTERM (the process may already be gone)
            try:
                proc.terminate()
            except ProcessLookupError:
                pass
            try:
                proc.wait(t)
                return
            except subprocess.TimeoutExpired:
                logger.warning("Controller %s failed to exit on terminate",
                               name)
            # step 3: SIGKILL as a last resort
            try:
                proc.kill()
            except ProcessLookupError:
                pass
            try:
                proc.wait(t)
                return
            except subprocess.TimeoutExpired:
                logger.warning("Controller %s failed to die on kill", name)
        finally:
            # always forget the controller, even if it refused to die
            del self.controllers[name]
def setUp(self):
    """Open flat-file device/parameter DBs and a fresh result DB."""
    root = artiq_root
    self.ddb = FlatFileDB(os.path.join(root, "ddb.pyon"))
    scheduler_stub = DummyScheduler()
    self.dmgr = DeviceManager(
        self.ddb, virtual_devices={"scheduler": scheduler_stub})
    self.pdb = FlatFileDB(os.path.join(root, "pdb.pyon"))
    self.rdb = ResultDB()
def main():
    """Core device management tool: log, config, reboot, hotswap,
    profiling and debug commands over a CommMgmt connection.

    Fixes: reuse the device DB already held by the DeviceManager instead
    of parsing args.device_db a second time, and use ``is None`` instead
    of ``== None``.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    device_mgr = DeviceManager(DeviceDB(args.device_db))
    try:
        core_addr = device_mgr.get_desc("core")["arguments"]["host"]
        mgmt = CommMgmt(core_addr)
        if args.tool == "log":
            if args.action == "set_level":
                mgmt.set_log_level(args.level)
            if args.action == "set_uart_level":
                mgmt.set_uart_log_level(args.level)
            if args.action == "clear":
                mgmt.clear_log()
            if args.action is None:
                # no sub-action: just dump the log
                print(mgmt.get_log(), end="")
        if args.tool == "config":
            if args.action == "read":
                value = mgmt.config_read(args.key)
                if not value:
                    print("Key {} does not exist".format(args.key))
                else:
                    print(value)
            if args.action == "write":
                for key, value in args.string:
                    # config storage holds bytes; encode the CLI string
                    mgmt.config_write(key, value.encode("utf-8"))
                for key, filename in args.file:
                    with open(filename, "rb") as fi:
                        mgmt.config_write(key, fi.read())
            if args.action == "remove":
                for key in args.key:
                    mgmt.config_remove(key)
            if args.action == "erase":
                mgmt.config_erase()
        if args.tool == "reboot":
            mgmt.reboot()
        if args.tool == "hotswap":
            mgmt.hotswap(args.image.read())
        if args.tool == "profile":
            if args.action == "start":
                mgmt.start_profiler(args.interval, args.hits_size,
                                    args.edges_size)
            elif args.action == "stop":
                mgmt.stop_profiler()
            elif args.action == "save":
                hits, edges = mgmt.get_profile()
                writer = CallgrindWriter(args.output, args.firmware,
                                         "or1k-linux", args.compression,
                                         args.demangle)
                writer.header()
                for addr, count in hits.items():
                    writer.hit(addr, count)
                for (caller, callee), count in edges.items():
                    writer.edge(caller, callee, count)
        if args.tool == "debug":
            if args.action == "allocator":
                mgmt.debug_allocator()
    finally:
        device_mgr.close_devices()
def main():
    """Worker process entry point.

    Runs a command loop over an IPC pipe to the master: each received
    object's "action" selects one experiment lifecycle step (build,
    prepare, run, analyze, write_results, examine, terminate).
    """
    global ipc
    # argv[1]: IPC pipe address; argv[2]: log level (integer).
    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])
    start_time = None
    run_time = None
    rid = None
    expid = None
    exp = None          # experiment class
    exp_inst = None     # experiment instance
    repository_path = None
    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={"scheduler": Scheduler(),
                                                "ccb": CCB()})
    dataset_mgr = DatasetManager(ParentDatasetDB)
    import_cache.install_hook()
    try:
        while True:
            # NOTE(review): get_object/put_object presumably exchange
            # serialized objects with the master over ipc — confirm.
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.time()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_exp(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                # Results go in results/<date>/<hour>/; the worker chdirs
                # there so the experiment writes output in that directory.
                start_local_time = time.localtime(start_time)
                dirname = os.path.join(
                    "results",
                    time.strftime("%Y-%m-%d", start_local_time),
                    time.strftime("%H", start_local_time))
                os.makedirs(dirname, exist_ok=True)
                os.chdir(dirname)
                argument_mgr = ProcessArgumentManager(expid["arguments"])
                exp_inst = exp((device_mgr, dataset_mgr, argument_mgr))
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                run_time = time.time()
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                try:
                    exp_inst.analyze()
                except:
                    # make analyze failure non-fatal, as we may still want to
                    # write results afterwards
                    put_exception_report()
                else:
                    put_object({"action": "completed"})
            elif action == "write_results":
                # HDF5 result file named after the run id and class name.
                filename = "{:09}-{}.h5".format(rid, exp.__name__)
                with h5py.File(filename, "w") as f:
                    dataset_mgr.write_hdf5(f)
                    f["artiq_version"] = artiq_version
                    f["rid"] = rid
                    f["start_time"] = start_time
                    f["run_time"] = run_time
                    f["expid"] = pyon.encode(expid)
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr, ExamineDatasetMgr, obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except:
        # Report any failure back to the master instead of crashing silently.
        put_exception_report()
    finally:
        device_mgr.close_devices()
        ipc.close()
def setUp(self):
    """Load the device database from artiq_root and create its manager.

    Starts each test with an empty controller table.
    """
    db_file = os.path.join(artiq_root, "device_db.pyon")
    self.device_db = DeviceDB(db_file)
    self.device_mgr = DeviceManager(self.device_db)
    # No controllers are running at the start of a test.
    self.controllers = {}
def setUp(self):
    """Obtain the device database via the subclass hook and wrap it."""
    db = self.get_device_db()
    self.device_db = db
    self.device_mgr = DeviceManager(db)
    # Populated by start_controller(); emptied again by stop_controller().
    self.controllers = {}
def main():
    """Worker process entry point (stdio-logging variant).

    Redirects stdout/stderr to the master's log, then runs the command
    loop: each received object's "action" selects one experiment
    lifecycle step (build, prepare, run, analyze, write_results,
    examine, terminate).
    """
    # All prints from the worker are forwarded to the master's log.
    sys.stdout = LogForwarder()
    sys.stderr = LogForwarder()
    # argv[1]: log level (integer).
    logging.basicConfig(level=int(sys.argv[1]))
    start_time = None
    rid = None
    expid = None
    exp = None          # experiment class
    exp_inst = None     # experiment instance
    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={"scheduler": Scheduler()})
    dataset_mgr = DatasetManager(ParentDatasetDB)
    try:
        while True:
            # NOTE(review): get_object/put_object presumably exchange
            # serialized objects with the master — confirm.
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.localtime()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    expf = os.path.join(obj["wd"], expid["file"])
                else:
                    expf = expid["file"]
                exp = get_exp(expf, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    obj["pipeline_name"], expid, obj["priority"])
                exp_inst = exp(device_mgr, dataset_mgr,
                               **expid["arguments"])
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                exp_inst.analyze()
                put_object({"action": "completed"})
            elif action == "write_results":
                f = get_hdf5_output(start_time, rid, exp.__name__)
                try:
                    dataset_mgr.write_hdf5(f)
                    if "repo_rev" in expid:
                        # Store the repository revision as a fixed-length
                        # HDF5 byte string.
                        rr = expid["repo_rev"]
                        dtype = "S{}".format(len(rr))
                        dataset = f.create_dataset("repo_rev", (), dtype)
                        dataset[()] = rr.encode()
                finally:
                    f.close()
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr(), DummyDatasetMgr(), obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except:
        # Log any failure (including the traceback) before shutting down.
        logging.error("Worker terminating with exception", exc_info=True)
    finally:
        device_mgr.close_devices()
def main():
    """Worker process entry point.

    Runs a command loop over an IPC pipe to the master: each received
    object's "action" selects one experiment lifecycle step (build,
    prepare, run, analyze, write_results, examine, terminate).
    """
    global ipc
    # argv[1]: IPC pipe address; argv[2]: log level (integer).
    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])
    start_time = None
    run_time = None
    rid = None
    expid = None
    exp = None          # experiment class
    exp_inst = None     # experiment instance
    repository_path = None
    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={
                                   "scheduler": Scheduler(),
                                   "ccb": CCB()
                               })
    dataset_mgr = DatasetManager(ParentDatasetDB)
    import_cache.install_hook()
    try:
        while True:
            # NOTE(review): get_object/put_object presumably exchange
            # serialized objects with the master over ipc — confirm.
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.time()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_exp(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                # Results go in results/<date>/<hour>/; the worker chdirs
                # there so the experiment writes output in that directory.
                start_local_time = time.localtime(start_time)
                dirname = os.path.join(
                    "results",
                    time.strftime("%Y-%m-%d", start_local_time),
                    time.strftime("%H", start_local_time))
                os.makedirs(dirname, exist_ok=True)
                os.chdir(dirname)
                argument_mgr = ProcessArgumentManager(expid["arguments"])
                exp_inst = exp((device_mgr, dataset_mgr, argument_mgr, {}))
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                run_time = time.time()
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                try:
                    exp_inst.analyze()
                except:
                    # make analyze failure non-fatal, as we may still want to
                    # write results afterwards
                    put_exception_report()
                else:
                    put_object({"action": "completed"})
            elif action == "write_results":
                # HDF5 result file named after the run id and class name.
                filename = "{:09}-{}.h5".format(rid, exp.__name__)
                with h5py.File(filename, "w") as f:
                    dataset_mgr.write_hdf5(f)
                    f["artiq_version"] = artiq_version
                    f["rid"] = rid
                    f["start_time"] = start_time
                    f["run_time"] = run_time
                    f["expid"] = pyon.encode(expid)
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr, ExamineDatasetMgr, obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except:
        # Report any failure back to the master instead of crashing silently.
        put_exception_report()
    finally:
        device_mgr.close_devices()
        ipc.close()
def main():
    """Worker process entry point (local exception-reporting variant).

    Runs a command loop over an IPC pipe to the master; on failure it
    logs a compact report and sends an "exception" action back.
    """
    global ipc
    # argv[1]: IPC pipe address; argv[2]: log level (integer).
    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])
    start_time = None
    rid = None
    expid = None
    exp = None          # experiment class
    exp_inst = None     # experiment instance
    repository_path = None
    device_mgr = DeviceManager(ParentDeviceDB)
    # The scheduler virtual device is installed after construction here
    # because it needs a reference to the device manager itself.
    device_mgr.virtual_devices["scheduler"] = Scheduler(device_mgr)
    dataset_mgr = DatasetManager(ParentDatasetDB)
    try:
        while True:
            # NOTE(review): get_object/put_object presumably exchange
            # serialized objects with the master over ipc — confirm.
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.localtime()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_exp(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                # Results go in results/<date>/<hour>/; the worker chdirs
                # there so the experiment writes output in that directory.
                dirname = os.path.join(
                    "results",
                    time.strftime("%Y-%m-%d", start_time),
                    time.strftime("%H", start_time))
                os.makedirs(dirname, exist_ok=True)
                os.chdir(dirname)
                argument_mgr = ProcessArgumentManager(expid["arguments"])
                exp_inst = exp((device_mgr, dataset_mgr, argument_mgr))
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                exp_inst.analyze()
                put_object({"action": "completed"})
            elif action == "write_results":
                # HDF5 result file named after the run id and class name;
                # datasets are stored under a dedicated "datasets" group.
                filename = "{:09}-{}.h5".format(rid, exp.__name__)
                with h5py.File(filename, "w") as f:
                    dataset_mgr.write_hdf5(f.create_group("datasets"))
                    f["artiq_version"] = artiq_version
                    f["rid"] = rid
                    f["start_time"] = int(time.mktime(start_time))
                    f["expid"] = pyon.encode(expid)
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr, ParentDatasetDB, obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except Exception as exc:
        # When we get CompileError, a more suitable diagnostic has already
        # been printed.
        if not isinstance(exc, CompileError):
            # Build a one-line summary: exception type plus the first line
            # of its message, if any.
            short_exc_info = type(exc).__name__
            exc_str = str(exc)
            if exc_str:
                short_exc_info += ": " + exc_str.splitlines()[0]
            lines = ["Terminating with exception (" + short_exc_info + ")\n"]
            if hasattr(exc, "artiq_core_exception"):
                lines.append(str(exc.artiq_core_exception))
            if hasattr(exc, "parent_traceback"):
                lines += exc.parent_traceback
            lines += traceback.format_exception_only(type(exc), exc)
            # If a parent traceback was attached, it is already in `lines`,
            # so suppress the local traceback in the log record.
            logging.error("".join(lines).rstrip(),
                          exc_info=not hasattr(exc, "parent_traceback"))
        # Always tell the master the run failed.
        put_object({"action": "exception"})
    finally:
        device_mgr.close_devices()
        ipc.close()