def run(with_file=False):
    """Run one experiment locally: build, prepare, run, analyze, then
    persist datasets and save the dataset DB.

    :param with_file: forwarded to ``get_argparser``; when True the parser
        accepts an experiment file argument.
    """
    args = get_argparser(with_file).parse_args()
    init_logger(args)
    device_mgr = DeviceManager(DeviceDB(args.device_db),
                               virtual_devices={"scheduler": DummyScheduler()})
    dataset_db = DatasetDB(args.dataset_db)
    dataset_mgr = DatasetManager(dataset_db)

    try:
        exp_inst = _build_experiment(device_mgr, dataset_mgr, args)
        exp_inst.prepare()
        exp_inst.run()
        exp_inst.analyze()
    except CompileError:
        # The compiler already printed its diagnostic; exit quietly.
        # (fix: dropped the unused `as error` binding)
        return
    except Exception as exn:
        if hasattr(exn, "artiq_core_exception"):
            # Show the core-device exception report before the host traceback.
            print(exn.artiq_core_exception, file=sys.stderr)
        # fix: bare `raise` re-raises with the original traceback intact
        # (was `raise exn`, which appends the re-raise frame)
        raise
    finally:
        device_mgr.close_devices()

    if args.hdf5 is not None:
        with h5py.File(args.hdf5, "w") as f:
            dataset_mgr.write_hdf5(f)
    else:
        # No HDF5 output requested: dump local datasets to stdout, key-sorted.
        for k, v in sorted(dataset_mgr.local.items(), key=itemgetter(0)):
            print("{}: {}".format(k, v))
    dataset_db.save()
def setUp(self):
    """Build real device/dataset DBs from files under artiq_root, plus managers."""
    device_db_file = os.path.join(artiq_root, "device_db.pyon")
    dataset_db_file = os.path.join(artiq_root, "dataset_db.pyon")
    self.device_db = DeviceDB(device_db_file)
    self.dataset_db = DatasetDB(dataset_db_file)
    virtual = {"scheduler": DummyScheduler()}
    self.device_mgr = DeviceManager(self.device_db, virtual_devices=virtual)
    self.dataset_mgr = DatasetManager(self.dataset_db)
def setUp(self):
    """Create a TestExperiment instance in this process, backed by a mock
    dataset db.

    When used from the master, the worker IPC connection would marshal
    updates between dataset_mgr and dataset_db; here everything stays local.
    """
    db = MockDatasetDB()
    self.dataset_db = db
    self.dataset_mgr = DatasetManager(db)
    managers = (None, self.dataset_mgr, None, None)
    self.exp = TestExperiment(managers)
def setUp(self):
    """Wire up mock dataset/device DBs, their managers, and mock virtual
    devices (ccb, core, scheduler)."""
    self.dataset_db = MockDatasetDB()
    self.dataset_mgr = DatasetManager(self.dataset_db)

    self.ccb = unittest.mock.Mock()
    self.core = unittest.mock.Mock()
    self.scheduler = MockScheduler()
    virtual_devices = {
        "ccb": self.ccb,
        "core": self.core,
        "scheduler": self.scheduler,
    }
    self.device_db = MockDeviceDB()
    self.device_mgr = DeviceManager(self.device_db,
                                    virtual_devices=virtual_devices)
def main():
    """Statically compile an experiment's kernel entry point to an ELF file.

    The experiment's ``run`` must be a kernel and must not use RPC, since
    the resulting ELF executes without a connected host.
    """
    args = get_argparser().parse_args()
    init_logger(args)

    device_mgr = DeviceManager(DeviceDB(args.device_db))
    dataset_mgr = DatasetManager(DatasetDB(args.dataset_db))

    try:
        module = file_import(args.file, prefix="artiq_run_")
        exp = get_experiment(module, args.experiment)
        arguments = parse_arguments(args.arguments)
        argument_mgr = ProcessArgumentManager(arguments)
        exp_inst = exp((device_mgr, dataset_mgr, argument_mgr))
        if not hasattr(exp.run, "artiq_embedded"):
            raise ValueError("Experiment entry point must be a kernel")
        core_name = exp.run.artiq_embedded.core_name
        core = getattr(exp_inst, core_name)
        # attribute_writeback is disabled: there is no host session to
        # write attributes back to when running a standalone ELF.
        object_map, kernel_library, _, _ = \
            core.compile(exp.run, [exp_inst], {},
                         attribute_writeback=False, print_as_rpc=False)
    except CompileError:
        # The compiler already printed its diagnostic; exit quietly.
        # (fix: dropped the unused `as error` binding)
        return
    finally:
        device_mgr.close_devices()

    if object_map.has_rpc():
        raise ValueError("Experiment must not use RPC")

    output = args.output
    if output is None:
        # fix: discard the unused extension component explicitly
        basename, _ = os.path.splitext(args.file)
        output = "{}.elf".format(basename)
    with open(output, "wb") as f:
        f.write(kernel_library)
def run(with_file=False):
    """Parse arguments, execute the experiment life cycle (build, prepare,
    run, analyze), then persist datasets to HDF5 or stdout and save the
    dataset DB.

    :param with_file: forwarded to ``get_argparser``.
    """
    args = get_argparser(with_file).parse_args()
    init_logger(args)

    scheduler = DummyScheduler()
    device_mgr = DeviceManager(DeviceDB(args.device_db),
                               virtual_devices={"scheduler": scheduler})
    dataset_db = DatasetDB(args.dataset_db)
    dataset_mgr = DatasetManager(dataset_db)

    try:
        experiment = _build_experiment(device_mgr, dataset_mgr, args)
        experiment.prepare()
        experiment.run()
        experiment.analyze()
    finally:
        # Devices are released even if any stage above raised.
        device_mgr.close_devices()

    if args.hdf5 is None:
        # No output file requested: print local datasets, sorted by key.
        for key, value in sorted(dataset_mgr.local.items(), key=itemgetter(0)):
            print("{}: {}".format(key, value))
    else:
        with h5py.File(args.hdf5, "w") as f:
            dataset_mgr.write_hdf5(f)
    dataset_db.save()
def main():
    """Compile an experiment kernel (legacy ``k_function_info`` API) to an
    ELF file; the experiment must not use RPC."""
    args = get_argparser().parse_args()
    init_logger(args)

    device_mgr = DeviceManager(DeviceDB(args.device_db))
    dataset_mgr = DatasetManager(DatasetDB(args.dataset_db))

    try:
        module = file_import(args.file)
        exp = get_experiment(module, args.experiment)
        arguments = parse_arguments(args.arguments)
        exp_inst = exp(device_mgr, dataset_mgr, **arguments)

        # Missing or falsy k_function_info both mean "not a kernel".
        info = getattr(exp.run, "k_function_info", None)
        if not info:
            raise ValueError("Experiment entry point must be a kernel")
        core = getattr(exp_inst, info.core_name)
        binary, rpc_map, _ = core.compile(info.k_function, [exp_inst], {},
                                          with_attr_writeback=False)
    finally:
        device_mgr.close_devices()

    if rpc_map:
        raise ValueError("Experiment must not use RPC")

    output = args.output
    if output is None:
        # Derive the output name from the input file; only a ".py" suffix
        # is stripped before appending ".elf".
        output = args.file
        if output.endswith(".py"):
            output = output[:-3]
        output += ".elf"
    with open(output, "wb") as f:
        f.write(binary)
def main():
    """Worker process entry point.

    Connects back to the master over pipe IPC (pipe name in ``sys.argv[1]``,
    log level in ``sys.argv[2]``) and services build/prepare/run/analyze/
    write_results/examine/terminate requests in a loop, replying
    ``{"action": "completed"}`` after each successful step.
    """
    global ipc
    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])

    # Run state, populated by the "build" action and reused by later actions.
    start_time = None
    run_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None
    repository_path = None

    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={
                                   "scheduler": Scheduler(),
                                   "ccb": CCB()
                               })
    dataset_mgr = DatasetManager(ParentDatasetDB)
    import_cache.install_hook()

    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.time()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_exp(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                # Results are grouped as results/<date>/<hour>/; the worker
                # chdirs there so experiment file output lands in that folder.
                start_local_time = time.localtime(start_time)
                dirname = os.path.join(
                    "results",
                    time.strftime("%Y-%m-%d", start_local_time),
                    time.strftime("%H", start_local_time))
                os.makedirs(dirname, exist_ok=True)
                os.chdir(dirname)
                argument_mgr = ProcessArgumentManager(expid["arguments"])
                exp_inst = exp((device_mgr, dataset_mgr, argument_mgr, {}))
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                run_time = time.time()
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                try:
                    exp_inst.analyze()
                except:
                    # make analyze failure non-fatal, as we may still want to
                    # write results afterwards
                    put_exception_report()
                else:
                    put_object({"action": "completed"})
            elif action == "write_results":
                filename = "{:09}-{}.h5".format(rid, exp.__name__)
                with h5py.File(filename, "w") as f:
                    dataset_mgr.write_hdf5(f)
                    f["artiq_version"] = artiq_version
                    f["rid"] = rid
                    f["start_time"] = start_time
                    f["run_time"] = run_time
                    f["expid"] = pyon.encode(expid)
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr, ExamineDatasetMgr, obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except:
        # Report any failure to the master instead of dying silently.
        put_exception_report()
    finally:
        device_mgr.close_devices()
        ipc.close()
def main():
    """Worker process entry point (older variant).

    Connects back to the master over pipe IPC (pipe name in ``sys.argv[1]``,
    log level in ``sys.argv[2]``) and services master requests in a loop.
    On error it logs a summary and replies ``{"action": "exception"}``.
    """
    global ipc
    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])

    # Run state, populated by the "build" action and reused by later actions.
    start_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None
    repository_path = None

    device_mgr = DeviceManager(ParentDeviceDB)
    device_mgr.virtual_devices["scheduler"] = Scheduler(device_mgr)
    dataset_mgr = DatasetManager(ParentDatasetDB)

    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.localtime()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_exp(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                # Results are grouped as results/<date>/<hour>/; the worker
                # chdirs there so experiment file output lands in that folder.
                dirname = os.path.join("results",
                                       time.strftime("%Y-%m-%d", start_time),
                                       time.strftime("%H", start_time))
                os.makedirs(dirname, exist_ok=True)
                os.chdir(dirname)
                argument_mgr = ProcessArgumentManager(expid["arguments"])
                exp_inst = exp((device_mgr, dataset_mgr, argument_mgr))
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                exp_inst.analyze()
                put_object({"action": "completed"})
            elif action == "write_results":
                filename = "{:09}-{}.h5".format(rid, exp.__name__)
                with h5py.File(filename, "w") as f:
                    # Datasets go into a dedicated "datasets" group; metadata
                    # lives at the file root.
                    dataset_mgr.write_hdf5(f.create_group("datasets"))
                    f["artiq_version"] = artiq_version
                    f["rid"] = rid
                    f["start_time"] = int(time.mktime(start_time))
                    f["expid"] = pyon.encode(expid)
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr, ParentDatasetDB, obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except Exception as exc:
        # When we get CompileError, a more suitable diagnostic has already
        # been printed.
        if not isinstance(exc, CompileError):
            short_exc_info = type(exc).__name__
            exc_str = str(exc)
            if exc_str:
                # Keep only the first line of the message for the summary.
                short_exc_info += ": " + exc_str.splitlines()[0]
            lines = ["Terminating with exception (" + short_exc_info + ")\n"]
            if hasattr(exc, "artiq_core_exception"):
                lines.append(str(exc.artiq_core_exception))
            if hasattr(exc, "parent_traceback"):
                lines += exc.parent_traceback
                lines += traceback.format_exception_only(type(exc), exc)
            # exc_info only when we have no parent traceback to show instead.
            logging.error("".join(lines).rstrip(),
                          exc_info=not hasattr(exc, "parent_traceback"))
        put_object({"action": "exception"})
    finally:
        device_mgr.close_devices()
        ipc.close()
def main():
    """Worker process entry point (customized results layout).

    Connects back to the master over pipe IPC (pipe name in ``sys.argv[1]``,
    log level in ``sys.argv[2]``) and services master requests in a loop.
    Results are written under ``~/data/<date>/``.
    """
    global ipc
    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])

    # Run state, populated by the "build" action and reused by later actions.
    start_time = None
    run_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None
    repository_path = None

    def write_results():
        # Emergency dump of datasets + run metadata, used when run() fails.
        filename = "{:09}-{}.h5".format(rid, exp.__name__)
        with h5py.File(filename, "w") as f:
            dataset_mgr.write_hdf5(f)
            f["artiq_version"] = artiq_version
            f["rid"] = rid
            f["start_time"] = start_time
            f["run_time"] = run_time
            f["expid"] = pyon.encode(expid)

    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={
                                   "scheduler": Scheduler(),
                                   "ccb": CCB()
                               })
    dataset_mgr = DatasetManager(ParentDatasetDB)
    import_cache.install_hook()

    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.time()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_experiment(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                # Results go under ~/data/<date>/; the worker chdirs there.
                start_local_time = time.localtime(start_time)
                rootdir = os.path.join(os.path.expanduser("~"), "data")
                dirname = os.path.join(
                    rootdir, time.strftime("%Y-%m-%d", start_local_time))
                os.makedirs(dirname, exist_ok=True)
                os.chdir(dirname)
                argument_mgr = ProcessArgumentManager(expid["arguments"])
                exp_inst = exp((device_mgr, dataset_mgr, argument_mgr, {}))
                put_completed()
            elif action == "prepare":
                exp_inst.prepare()
                put_completed()
            elif action == "run":
                current_time = datetime.datetime.now().strftime("%H%M_%S")
                run_time = time.time()
                try:
                    exp_inst.run()
                except:
                    # Only write results in run() on failure; on success wait
                    # for end of analyze stage.
                    write_results()
                    raise
                put_completed()
            elif action == "analyze":
                try:
                    exp_inst.analyze()
                except:
                    # make analyze failure non-fatal, as we may still want to
                    # write results afterwards
                    put_exception_report()
                else:
                    put_object({"action": "completed"})
            elif action == "write_results":
                # An experiment may opt out of archiving via an `archive`
                # attribute set to a falsy value.
                if hasattr(exp_inst, "archive"):
                    if not exp_inst.archive:
                        put_object({"action": "completed"})
                        continue
                # NOTE(review): `dirname` and `current_time` are set by the
                # earlier "build" and "run" actions; this relies on the master
                # always sending those actions first — confirm the protocol.
                path = os.path.join(dirname, exp.__name__)
                if not os.path.exists(path):
                    os.mkdir(path)
                if hasattr(exp_inst, "filename"):
                    # Experiment-provided filename takes precedence;
                    # presumably `filename` is a dict — verify against callers.
                    filename = list(exp_inst.filename.values())[0]
                else:
                    filename = "raw-data_{}.h5".format(current_time)
                file_ = os.path.join(path, filename)
                # Opened in append mode so an existing file is extended.
                with h5py.File(file_, "a") as f:
                    dataset_mgr.write_hdf5(f)
                    f["artiq_version"] = artiq_version
                    f["rid"] = rid
                    f["start_time"] = start_time
                    f["run_time"] = run_time
                    f["expid"] = pyon.encode(expid)
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr, ExamineDatasetMgr, obj["file"])
                put_completed()
            elif action == "terminate":
                break
    except:
        # Report any failure to the master instead of dying silently.
        put_exception_report()
    finally:
        device_mgr.close_devices()
        ipc.close()
def main():
    """Worker process entry point (oldest variant).

    Redirects stdout/stderr to the log forwarder (log level in
    ``sys.argv[1]``) and services master requests in a loop.
    """
    sys.stdout = LogForwarder()
    sys.stderr = LogForwarder()
    logging.basicConfig(level=int(sys.argv[1]))

    # Run state, populated by the "build" action and reused by later actions.
    start_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None

    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={"scheduler": Scheduler()})
    dataset_mgr = DatasetManager(ParentDatasetDB)

    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.localtime()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    expf = os.path.join(obj["wd"], expid["file"])
                else:
                    expf = expid["file"]
                exp = get_exp(expf, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    obj["pipeline_name"], expid, obj["priority"])
                exp_inst = exp(device_mgr, dataset_mgr, **expid["arguments"])
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                exp_inst.analyze()
                put_object({"action": "completed"})
            elif action == "write_results":
                f = get_hdf5_output(start_time, rid, exp.__name__)
                try:
                    dataset_mgr.write_hdf5(f)
                    if "repo_rev" in expid:
                        # Store the repository revision as a fixed-length
                        # byte string sized to the revision text.
                        rr = expid["repo_rev"]
                        dtype = "S{}".format(len(rr))
                        dataset = f.create_dataset("repo_rev", (), dtype)
                        dataset[()] = rr.encode()
                finally:
                    f.close()
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr(), DummyDatasetMgr(), obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except:
        # Log and fall through to cleanup; the master sees the worker exit.
        logging.error("Worker terminating with exception", exc_info=True)
    finally:
        device_mgr.close_devices()