def main():
    """Worker entry point: service requests from the master process.

    Reads action objects from the parent in a loop, dispatches them to the
    experiment life-cycle methods, and acknowledges each completed action.
    Devices are always released on the way out.
    """
    # Route all worker output through the log forwarder (one shared
    # forwarder instance serves both streams, as in the original).
    sys.stdout = sys.stderr = LogForwarder()

    started_at = None
    run_id = None
    exp_desc = None
    exp_cls = None
    instance = None

    manager = DeviceManager(ParentDDB,
                            virtual_devices={"scheduler": Scheduler()})
    results = ResultDB()
    results.rt.publish = update_rt_results

    try:
        while True:
            request = get_object()
            command = request["action"]

            if command == "terminate":
                break
            elif command == "build":
                started_at = time.localtime()
                run_id = request["rid"]
                exp_desc = request["expid"]
                if request["wd"] is not None:
                    # Experiment comes from the repository working directory.
                    source_path = os.path.join(request["wd"],
                                               exp_desc["file"])
                else:
                    source_path = exp_desc["file"]
                exp_cls = get_exp(source_path, exp_desc["class_name"])
                manager.virtual_devices["scheduler"].set_run_info(
                    request["pipeline_name"], exp_desc, request["priority"])
                instance = exp_cls(manager, ParentPDB, results,
                                   **exp_desc["arguments"])
            elif command == "prepare":
                instance.prepare()
            elif command == "run":
                instance.run()
            elif command == "analyze":
                instance.analyze()
            elif command == "write_results":
                output = get_hdf5_output(started_at, run_id,
                                         exp_cls.__name__)
                try:
                    results.write_hdf5(output)
                    if "repo_rev" in exp_desc:
                        rev = exp_desc["repo_rev"]
                        # Fixed-length byte-string dataset for the revision.
                        rev_dtype = "S{}".format(len(rev))
                        ds = output.create_dataset("repo_rev", (), rev_dtype)
                        ds[()] = rev.encode()
                finally:
                    output.close()
            elif command == "examine":
                examine(DummyDMGR(), DummyPDB(), ResultDB(),
                        request["file"])
            else:
                # Unknown action: original code sent no acknowledgement.
                continue

            put_object({"action": "completed"})
    finally:
        manager.close_devices()
def main():
    """Worker entry point: serve master requests over a pipe IPC channel.

    Configures logging from argv[2], connects the child end of the IPC pipe
    named by argv[1], then loops reading action objects from the master and
    dispatching them to the experiment life cycle (build/prepare/run/analyze/
    write_results/examine).  Each successful action is acknowledged with
    ``{"action": "completed"}``; any uncaught exception is reported with
    ``{"action": "exception"}``.  Devices and the IPC channel are always
    closed on exit.
    """
    global ipc

    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])

    # State accumulated across requests: "build" fills these in, later
    # actions ("run", "write_results", ...) consume them.
    start_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None
    repository_path = None

    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={"scheduler": Scheduler()})
    dataset_mgr = DatasetManager(ParentDatasetDB)

    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.localtime()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository: experiment file is relative to the
                    # checked-out working directory.
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_exp(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                exp_inst = exp(
                    device_mgr, dataset_mgr, enable_processors=True,
                    **expid["arguments"])
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                exp_inst.analyze()
                put_object({"action": "completed"})
            elif action == "write_results":
                f = get_hdf5_output(start_time, rid, exp.__name__)
                try:
                    dataset_mgr.write_hdf5(f)
                    string_to_hdf5(f, "artiq_version", artiq_version)
                    if "repo_rev" in expid:
                        string_to_hdf5(f, "repo_rev", expid["repo_rev"])
                finally:
                    # Close the HDF5 file even if a write failed.
                    f.close()
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr, DummyDatasetMgr, obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except Exception as exc:
        # When we get CompileError, a more suitable diagnostic has already
        # been printed.
        if not isinstance(exc, CompileError):
            short_exc_info = type(exc).__name__
            exc_str = str(exc)
            if exc_str:
                short_exc_info += ": " + exc_str
            lines = ["Terminating with exception ("+short_exc_info+")\n"]
            lines += traceback.format_exception_only(type(exc), exc)
            # presumably parent_traceback carries a remote (kernel-side)
            # traceback already formatted as lines — TODO confirm; when it
            # is present, suppress the local traceback in the log record.
            if hasattr(exc, "parent_traceback"):
                lines += exc.parent_traceback
            logging.error("".join(lines).rstrip(),
                          exc_info=not hasattr(exc, "parent_traceback"))
        # Notify the master that the worker died on an exception (sent for
        # CompileError too, just without the duplicate diagnostic above).
        put_object({"action": "exception"})
    finally:
        device_mgr.close_devices()
        ipc.close()
def main():
    """Worker entry point: service experiment requests from the master.

    Redirects stdout/stderr through log forwarders, configures logging from
    argv[1], then loops reading action objects and dispatching them to the
    experiment life cycle.  Each completed action is acknowledged with
    ``{"action": "completed"}``; uncaught errors are logged.  Devices are
    always released on exit.
    """
    # Forward experiment prints to the master via the logging machinery.
    sys.stdout = LogForwarder()
    sys.stderr = LogForwarder()
    logging.basicConfig(level=int(sys.argv[1]))

    # State filled in by "build" and consumed by later actions.
    start_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None

    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={"scheduler": Scheduler()})
    dataset_mgr = DatasetManager(ParentDatasetDB)

    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.localtime()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository: file path is relative to the
                    # checked-out working directory.
                    expf = os.path.join(obj["wd"], expid["file"])
                else:
                    expf = expid["file"]
                exp = get_exp(expf, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    obj["pipeline_name"], expid, obj["priority"])
                exp_inst = exp(device_mgr, dataset_mgr,
                               **expid["arguments"])
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                exp_inst.analyze()
                put_object({"action": "completed"})
            elif action == "write_results":
                f = get_hdf5_output(start_time, rid, exp.__name__)
                try:
                    dataset_mgr.write_hdf5(f)
                    if "repo_rev" in expid:
                        rr = expid["repo_rev"]
                        # Size the fixed-length string dataset from the
                        # ENCODED byte length, not the character count —
                        # they differ for non-ASCII revisions.
                        encoded = rr.encode()
                        dtype = "S{}".format(len(encoded))
                        dataset = f.create_dataset("repo_rev", (), dtype)
                        dataset[()] = encoded
                finally:
                    # Close the HDF5 file even if a write failed.
                    f.close()
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr(), DummyDatasetMgr(), obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except Exception:
        # Narrowed from a bare except: let SystemExit/KeyboardInterrupt
        # propagate instead of being silently absorbed.
        logging.error("Worker terminating with exception", exc_info=True)
    finally:
        device_mgr.close_devices()