Example #1
def run(with_file=False):
    args = get_argparser(with_file).parse_args()
    init_logger(args)

    device_mgr = DeviceManager(DeviceDB(args.device_db),
                               virtual_devices={"scheduler": DummyScheduler()})
    dataset_db = DatasetDB(args.dataset_db)
    dataset_mgr = DatasetManager(dataset_db)

    try:
        exp_inst = _build_experiment(device_mgr, dataset_mgr, args)
        exp_inst.prepare()
        exp_inst.run()
        exp_inst.analyze()
    except CompileError:
        # The compiler already printed a diagnostic; exit quietly.
        return
    except Exception as exn:
        if hasattr(exn, "artiq_core_exception"):
            print(exn.artiq_core_exception, file=sys.stderr)
        raise  # re-raise, preserving the original traceback
    finally:
        device_mgr.close_devices()

    if args.hdf5 is not None:
        with h5py.File(args.hdf5, "w") as f:
            dataset_mgr.write_hdf5(f)
    else:
        for k, v in sorted(dataset_mgr.local.items(), key=itemgetter(0)):
            print("{}: {}".format(k, v))
    dataset_db.save()
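As a hedged illustration, a minimal experiment this entry point could execute looks like the sketch below; the class name and dataset key are invented for the example, not taken from the code above.

# Hedged sketch: a minimal experiment compatible with the run() entry
# point above. Local datasets land in dataset_mgr.local, which the
# runner prints or writes to HDF5 after analyze().
from artiq.experiment import EnvExperiment


class MinimalExperiment(EnvExperiment):
    def build(self):
        pass  # no devices or arguments needed for this sketch

    def run(self):
        self.set_dataset("answer", 42)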
Example #2
def setUp(self):
    # Create an instance of TestExperiment locally in this process and a
    # mock dataset db to back it. When used from the master, the worker IPC
    # connection would marshal updates between dataset_mgr and dataset_db.
    self.dataset_db = MockDatasetDB()
    self.dataset_mgr = DatasetManager(self.dataset_db)
    self.exp = TestExperiment((None, self.dataset_mgr, None, None))
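The dataset tests near the end of this page read self.dataset_db.get(KEY) and self.dataset_db.data[KEY]["value"], so a minimal in-memory stand-in can be sketched as below; the actual MockDatasetDB in the ARTIQ test suite may differ.

# Hedged sketch of an in-memory dataset DB backing DatasetManager,
# inferred from how the tests use it: a .data dict of {"value": ...}
# entries and a get() that raises KeyError for unknown keys.
class MockDatasetDB:
    def __init__(self):
        self.data = dict()

    def get(self, key):
        return self.data[key]["value"]

    def update(self, mod):
        # Minimal handling of "setitem" mods produced by broadcasts;
        # the real implementation applies the full sync_struct mod
        # protocol.
        assert mod["action"] == "setitem"
        self.data[mod["key"]] = mod["value"]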
Example #3
def setUp(self):
    self.device_db = DeviceDB(os.path.join(artiq_root, "device_db.pyon"))
    self.dataset_db = DatasetDB(os.path.join(artiq_root,
                                             "dataset_db.pyon"))
    self.device_mgr = DeviceManager(
        self.device_db, virtual_devices={"scheduler": DummyScheduler()})
    self.dataset_mgr = DatasetManager(self.dataset_db)
Example #4
def setUp(self):
    self.dataset_db = MockDatasetDB()
    self.dataset_mgr = DatasetManager(self.dataset_db)
    self.device_db = MockDeviceDB()
    self.ccb = unittest.mock.Mock()
    self.core = unittest.mock.Mock()
    self.scheduler = MockScheduler()
    self.device_mgr = DeviceManager(self.device_db,
                                    virtual_devices={
                                        "ccb": self.ccb,
                                        "core": self.core,
                                        "scheduler": self.scheduler
                                    })
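With these managers and mocks in place, a test can instantiate an experiment the same way the newer workers below do, by passing a managers tuple to the constructor. A hedged sketch; MyExperiment is a placeholder and the empty argument dict assumes no declared arguments:

# Hedged sketch: constructing an experiment under test with the
# managers from setUp() above.
argument_mgr = ProcessArgumentManager({})
exp_inst = MyExperiment((self.device_mgr, self.dataset_mgr,
                         argument_mgr, {}))
exp_inst.prepare()
exp_inst.run()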
Example #5
def run(with_file=False):
    args = get_argparser(with_file).parse_args()
    init_logger(args)

    device_mgr = DeviceManager(DeviceDB(args.device_db),
                               virtual_devices={"scheduler": DummyScheduler()})
    dataset_db = DatasetDB(args.dataset_db)
    dataset_mgr = DatasetManager(dataset_db)

    try:
        exp_inst = _build_experiment(device_mgr, dataset_mgr, args)
        exp_inst.prepare()
        exp_inst.run()
        exp_inst.analyze()
    finally:
        device_mgr.close_devices()

    if args.hdf5 is not None:
        with h5py.File(args.hdf5, "w") as f:
            dataset_mgr.write_hdf5(f)
    else:
        for k, v in sorted(dataset_mgr.local.items(), key=itemgetter(0)):
            print("{}: {}".format(k, v))
    dataset_db.save()
Example #6
def main():
    args = get_argparser().parse_args()
    init_logger(args)

    device_mgr = DeviceManager(DeviceDB(args.device_db))
    dataset_mgr = DatasetManager(DatasetDB(args.dataset_db))

    try:
        module = file_import(args.file, prefix="artiq_run_")
        exp = get_experiment(module, args.experiment)
        arguments = parse_arguments(args.arguments)
        argument_mgr = ProcessArgumentManager(arguments)
        exp_inst = exp((device_mgr, dataset_mgr, argument_mgr))

        if not hasattr(exp.run, "artiq_embedded"):
            raise ValueError("Experiment entry point must be a kernel")
        core_name = exp.run.artiq_embedded.core_name
        core = getattr(exp_inst, core_name)

        object_map, kernel_library, _, _ = \
            core.compile(exp.run, [exp_inst], {},
                         attribute_writeback=False, print_as_rpc=False)
    except CompileError:
        # The compiler already printed a diagnostic; exit quietly.
        return
    finally:
        device_mgr.close_devices()

    if object_map.has_rpc():
        raise ValueError("Experiment must not use RPC")

    output = args.output
    if output is None:
        basename, ext = os.path.splitext(args.file)
        output = "{}.elf".format(basename)

    with open(output, "wb") as f:
        f.write(kernel_library)
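The hasattr(exp.run, "artiq_embedded") check above means run() must be a @kernel, since the decorator attaches that attribute. A minimal experiment passing the check might look like this sketch (the class name is illustrative; "core" is the conventional core device):

# Hedged sketch of an experiment whose run() satisfies the
# artiq_embedded check: @kernel marks it for core-device compilation.
from artiq.experiment import EnvExperiment, kernel


class KernelOnlyExperiment(EnvExperiment):
    def build(self):
        self.setattr_device("core")

    @kernel
    def run(self):
        self.core.reset()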
Example #7
def main():
    args = get_argparser().parse_args()
    init_logger(args)

    device_mgr = DeviceManager(DeviceDB(args.device_db))
    dataset_mgr = DatasetManager(DatasetDB(args.dataset_db))

    try:
        module = file_import(args.file)
        exp = get_experiment(module, args.experiment)
        arguments = parse_arguments(args.arguments)
        exp_inst = exp(device_mgr, dataset_mgr, **arguments)

        if (not hasattr(exp.run, "k_function_info")
                or not exp.run.k_function_info):
            raise ValueError("Experiment entry point must be a kernel")
        core_name = exp.run.k_function_info.core_name
        core = getattr(exp_inst, core_name)

        binary, rpc_map, _ = core.compile(exp.run.k_function_info.k_function,
                                          [exp_inst], {},
                                          with_attr_writeback=False)
    finally:
        device_mgr.close_devices()

    if rpc_map:
        raise ValueError("Experiment must not use RPC")

    output = args.output
    if output is None:
        output = args.file
        if output.endswith(".py"):
            output = output[:-3]
        output += ".elf"
    with open(output, "wb") as f:
        f.write(binary)
Example #8
def main():
    sys.stdout = LogForwarder()
    sys.stderr = LogForwarder()
    logging.basicConfig(level=int(sys.argv[1]))

    start_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None

    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={"scheduler": Scheduler()})
    dataset_mgr = DatasetManager(ParentDatasetDB)

    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.localtime()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    expf = os.path.join(obj["wd"], expid["file"])
                else:
                    expf = expid["file"]
                exp = get_exp(expf, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    obj["pipeline_name"], expid, obj["priority"])
                exp_inst = exp(device_mgr, dataset_mgr, **expid["arguments"])
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                exp_inst.analyze()
                put_object({"action": "completed"})
            elif action == "write_results":
                f = get_hdf5_output(start_time, rid, exp.__name__)
                try:
                    dataset_mgr.write_hdf5(f)
                    if "repo_rev" in expid:
                        rr = expid["repo_rev"]
                        dtype = "S{}".format(len(rr))
                        dataset = f.create_dataset("repo_rev", (), dtype)
                        dataset[()] = rr.encode()
                finally:
                    f.close()
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr(), DummyDatasetMgr(), obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except:
        logging.error("Worker terminating with exception", exc_info=True)
    finally:
        device_mgr.close_devices()
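get_object and put_object are the worker's side of the master IPC. In this older worker the real stdin/stdout carry one pyon-encoded object per line, which is why sys.stdout was first replaced with a LogForwarder. A plausible sketch of the helpers:

# Hedged sketch of the IPC helpers assumed above; the actual ARTIQ
# worker may differ in detail.
def get_object():
    line = sys.__stdin__.readline()
    return pyon.decode(line)

def put_object(obj):
    sys.__stdout__.write(pyon.encode(obj))
    sys.__stdout__.write("\n")
    sys.__stdout__.flush()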
Example #9
def main():
    global ipc

    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])

    start_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None
    repository_path = None

    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={"scheduler": Scheduler()})
    dataset_mgr = DatasetManager(ParentDatasetDB)

    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.localtime()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_exp(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                exp_inst = exp(
                    device_mgr, dataset_mgr, enable_processors=True,
                    **expid["arguments"])
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                exp_inst.analyze()
                put_object({"action": "completed"})
            elif action == "write_results":
                f = get_hdf5_output(start_time, rid, exp.__name__)
                try:
                    dataset_mgr.write_hdf5(f)
                    string_to_hdf5(f, "artiq_version", artiq_version)
                    if "repo_rev" in expid:
                        string_to_hdf5(f, "repo_rev", expid["repo_rev"])
                finally:
                    f.close()
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr, DummyDatasetMgr, obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except Exception as exc:
        # When we get CompileError, a more suitable diagnostic has already
        # been printed.
        if not isinstance(exc, CompileError):
            short_exc_info = type(exc).__name__
            exc_str = str(exc)
            if exc_str:
                short_exc_info += ": " + exc_str
            lines = ["Terminating with exception ("+short_exc_info+")\n"]
            lines += traceback.format_exception_only(type(exc), exc)
            if hasattr(exc, "parent_traceback"):
                lines += exc.parent_traceback
            logging.error("".join(lines).rstrip(),
                          exc_info=not hasattr(exc, "parent_traceback"))
        put_object({"action": "exception"})
    finally:
        device_mgr.close_devices()
        ipc.close()
Example #10
def main():
    global ipc

    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])

    start_time = None
    run_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None
    repository_path = None

    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={
                                   "scheduler": Scheduler(),
                                   "ccb": CCB()
                               })
    dataset_mgr = DatasetManager(ParentDatasetDB)

    import_cache.install_hook()

    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.time()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_exp(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                start_local_time = time.localtime(start_time)
                dirname = os.path.join(
                    "results", time.strftime("%Y-%m-%d", start_local_time),
                    time.strftime("%H", start_local_time))
                os.makedirs(dirname, exist_ok=True)
                os.chdir(dirname)
                argument_mgr = ProcessArgumentManager(expid["arguments"])
                exp_inst = exp((device_mgr, dataset_mgr, argument_mgr, {}))
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                run_time = time.time()
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                try:
                    exp_inst.analyze()
                except:
                    # make analyze failure non-fatal, as we may still want to
                    # write results afterwards
                    put_exception_report()
                else:
                    put_object({"action": "completed"})
            elif action == "write_results":
                filename = "{:09}-{}.h5".format(rid, exp.__name__)
                with h5py.File(filename, "w") as f:
                    dataset_mgr.write_hdf5(f)
                    f["artiq_version"] = artiq_version
                    f["rid"] = rid
                    f["start_time"] = start_time
                    f["run_time"] = run_time
                    f["expid"] = pyon.encode(expid)
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr, ExamineDatasetMgr, obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except:
        put_exception_report()
    finally:
        device_mgr.close_devices()
        ipc.close()
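put_exception_report factors out the inline handler seen in the previous example: format the active exception (CompileError diagnostics were already printed by the compiler), log it, and notify the master. A plausible sketch:

# Hedged sketch of put_exception_report, modeled on the inline handler
# in the previous example; the actual helper may differ.
def put_exception_report():
    exc = sys.exc_info()[1]
    if not isinstance(exc, CompileError):
        short_exc_info = type(exc).__name__
        exc_str = str(exc)
        if exc_str:
            short_exc_info += ": " + exc_str.splitlines()[0]
        lines = ["Terminating with exception (" + short_exc_info + ")\n"]
        lines += traceback.format_exception_only(type(exc), exc)
        if hasattr(exc, "parent_traceback"):
            lines += exc.parent_traceback
        logging.error("".join(lines).rstrip(),
                      exc_info=not hasattr(exc, "parent_traceback"))
    put_object({"action": "exception"})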
Example #11
def main():
    global ipc

    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])

    start_time = None
    run_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None
    repository_path = None

    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={"scheduler": Scheduler(),
                                                "ccb": CCB()})
    dataset_mgr = DatasetManager(ParentDatasetDB)

    import_cache.install_hook()

    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.time()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_exp(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                start_local_time = time.localtime(start_time)
                dirname = os.path.join("results",
                                   time.strftime("%Y-%m-%d", start_local_time),
                                   time.strftime("%H", start_local_time))
                os.makedirs(dirname, exist_ok=True)
                os.chdir(dirname)
                argument_mgr = ProcessArgumentManager(expid["arguments"])
                exp_inst = exp((device_mgr, dataset_mgr, argument_mgr))
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                run_time = time.time()
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                try:
                    exp_inst.analyze()
                except:
                    # make analyze failure non-fatal, as we may still want to
                    # write results afterwards
                    put_exception_report()
                else:
                    put_object({"action": "completed"})
            elif action == "write_results":
                filename = "{:09}-{}.h5".format(rid, exp.__name__)
                with h5py.File(filename, "w") as f:
                    dataset_mgr.write_hdf5(f)
                    f["artiq_version"] = artiq_version
                    f["rid"] = rid
                    f["start_time"] = start_time
                    f["run_time"] = run_time
                    f["expid"] = pyon.encode(expid)
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr, ExamineDatasetMgr, obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except:
        put_exception_report()
    finally:
        device_mgr.close_devices()
        ipc.close()
Example #12
def main():
    global ipc

    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])

    start_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None
    repository_path = None

    device_mgr = DeviceManager(ParentDeviceDB)
    device_mgr.virtual_devices["scheduler"] = Scheduler(device_mgr)
    dataset_mgr = DatasetManager(ParentDatasetDB)

    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.localtime()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_exp(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                dirname = os.path.join("results",
                                       time.strftime("%Y-%m-%d", start_time),
                                       time.strftime("%H", start_time))
                os.makedirs(dirname, exist_ok=True)
                os.chdir(dirname)
                argument_mgr = ProcessArgumentManager(expid["arguments"])
                exp_inst = exp((device_mgr, dataset_mgr, argument_mgr))
                put_object({"action": "completed"})
            elif action == "prepare":
                exp_inst.prepare()
                put_object({"action": "completed"})
            elif action == "run":
                exp_inst.run()
                put_object({"action": "completed"})
            elif action == "analyze":
                exp_inst.analyze()
                put_object({"action": "completed"})
            elif action == "write_results":
                filename = "{:09}-{}.h5".format(rid, exp.__name__)
                with h5py.File(filename, "w") as f:
                    dataset_mgr.write_hdf5(f.create_group("datasets"))
                    f["artiq_version"] = artiq_version
                    f["rid"] = rid
                    f["start_time"] = int(time.mktime(start_time))
                    f["expid"] = pyon.encode(expid)
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr, ParentDatasetDB, obj["file"])
                put_object({"action": "completed"})
            elif action == "terminate":
                break
    except Exception as exc:
        # When we get CompileError, a more suitable diagnostic has already
        # been printed.
        if not isinstance(exc, CompileError):
            short_exc_info = type(exc).__name__
            exc_str = str(exc)
            if exc_str:
                short_exc_info += ": " + exc_str.splitlines()[0]
            lines = ["Terminating with exception (" + short_exc_info + ")\n"]
            if hasattr(exc, "artiq_core_exception"):
                lines.append(str(exc.artiq_core_exception))
            if hasattr(exc, "parent_traceback"):
                lines += exc.parent_traceback
                lines += traceback.format_exception_only(type(exc), exc)
            logging.error("".join(lines).rstrip(),
                          exc_info=not hasattr(exc, "parent_traceback"))
        put_object({"action": "exception"})
    finally:
        device_mgr.close_devices()
        ipc.close()
Example #13
class ExperimentDatasetCase(unittest.TestCase):
    def setUp(self):
        # Create an instance of TestExperiment locally in this process and a
        # mock dataset db to back it. When used from the master, the worker IPC
        # connection would marshal updates between dataset_mgr and dataset_db.
        self.dataset_db = MockDatasetDB()
        self.dataset_mgr = DatasetManager(self.dataset_db)
        self.exp = TestExperiment((None, self.dataset_mgr, None, None))

    def test_set_local(self):
        with self.assertRaises(KeyError):
            self.exp.get(KEY)

        for i in range(2):
            self.exp.set(KEY, i)
            self.assertEqual(self.exp.get(KEY), i)
            with self.assertRaises(KeyError):
                self.dataset_db.get(KEY)

    def test_set_broadcast(self):
        with self.assertRaises(KeyError):
            self.exp.get(KEY)

        self.exp.set(KEY, 0, broadcast=True)
        self.assertEqual(self.exp.get(KEY), 0)
        self.assertEqual(self.dataset_db.get(KEY), 0)

        self.exp.set(KEY, 1, broadcast=False)
        self.assertEqual(self.exp.get(KEY), 1)
        with self.assertRaises(KeyError):
            self.dataset_db.get(KEY)

    def test_append_local(self):
        self.exp.set(KEY, [])
        self.exp.append(KEY, 0)
        self.assertEqual(self.exp.get(KEY), [0])
        self.exp.append(KEY, 1)
        self.assertEqual(self.exp.get(KEY), [0, 1])

    def test_append_broadcast(self):
        self.exp.set(KEY, [], broadcast=True)
        self.exp.append(KEY, 0)
        self.assertEqual(self.dataset_db.data[KEY]["value"], [0])
        self.exp.append(KEY, 1)
        self.assertEqual(self.dataset_db.data[KEY]["value"], [0, 1])

    def test_append_array(self):
        for broadcast in (True, False):
            self.exp.set(KEY, [], broadcast=broadcast)
            self.exp.append(KEY, [])
            self.exp.append(KEY, [])
            self.assertEqual(self.exp.get(KEY), [[], []])

    def test_append_scalar_fails(self):
        for broadcast in (True, False):
            with self.assertRaises(AttributeError):
                self.exp.set(KEY, 0, broadcast=broadcast)
                self.exp.append(KEY, 1)

    def test_append_nonexistent_fails(self):
        with self.assertRaises(KeyError):
            self.exp.append(KEY, 0)

    def test_write_hdf5_options(self):
        data = np.random.randint(0, 1024, 1024)
        self.exp.set(
            KEY,
            data,
            hdf5_options=dict(
                compression="gzip",
                compression_opts=6,
                shuffle=True,
                fletcher32=True
            ),
        )

        with h5py.File("test.h5", "a", "core", backing_store=False) as f:
            self.dataset_mgr.write_hdf5(f)

            self.assertTrue(np.array_equal(f["datasets"][KEY][()], data))
            self.assertEqual(f["datasets"][KEY].compression, "gzip")
            self.assertEqual(f["datasets"][KEY].compression_opts, 6)
            self.assertTrue(f["datasets"][KEY].shuffle)
            self.assertTrue(f["datasets"][KEY].fletcher32)

    def test_write_hdf5_no_options(self):
        data = np.random.randint(0, 1024, 1024)
        self.exp.set(KEY, data)

        with h5py.File("test.h5", "a", "core", backing_store=False) as f:
            self.dataset_mgr.write_hdf5(f)
            self.assertTrue(np.array_equal(f["datasets"][KEY][()], data))
            self.assertIsNone(f["datasets"][KEY].compression)

    def test_write_hdf5_invalid_type(self):
        class CustomType:
            def __init__(self, x):
                self.x = x

        self.exp.set(KEY, CustomType(42))

        with h5py.File("test.h5", "w", "core", backing_store=False) as f:
            with self.assertRaisesRegex(TypeError, "CustomType"):
                self.dataset_mgr.write_hdf5(f)
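These tests assume a TestExperiment exposing thin set/get/append wrappers over the HasEnvironment dataset API. A sketch consistent with the calls above; the real fixture in the ARTIQ test suite may differ:

# Hedged sketch of the TestExperiment fixture the tests rely on.
from artiq.experiment import EnvExperiment


class TestExperiment(EnvExperiment):
    def build(self):
        pass

    def run(self):
        pass

    def set(self, key, value, **kwargs):
        # Forwards broadcast= and hdf5_options= as used in the tests.
        self.set_dataset(key, value, **kwargs)

    def get(self, key):
        return self.get_dataset(key)

    def append(self, key, value):
        self.append_to_dataset(key, value)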
Example #14
def main():
    global ipc

    multiline_log_config(level=int(sys.argv[2]))
    ipc = pipe_ipc.ChildComm(sys.argv[1])

    start_time = None
    run_time = None
    rid = None
    expid = None
    exp = None
    exp_inst = None
    repository_path = None

    def write_results():
        filename = "{:09}-{}.h5".format(rid, exp.__name__)
        with h5py.File(filename, "w") as f:
            dataset_mgr.write_hdf5(f)
            f["artiq_version"] = artiq_version
            f["rid"] = rid
            f["start_time"] = start_time
            f["run_time"] = run_time
            f["expid"] = pyon.encode(expid)

    device_mgr = DeviceManager(ParentDeviceDB,
                               virtual_devices={
                                   "scheduler": Scheduler(),
                                   "ccb": CCB()
                               })
    dataset_mgr = DatasetManager(ParentDatasetDB)

    import_cache.install_hook()

    try:
        while True:
            obj = get_object()
            action = obj["action"]
            if action == "build":
                start_time = time.time()
                rid = obj["rid"]
                expid = obj["expid"]
                if obj["wd"] is not None:
                    # Using repository
                    experiment_file = os.path.join(obj["wd"], expid["file"])
                    repository_path = obj["wd"]
                else:
                    experiment_file = expid["file"]
                    repository_path = None
                setup_diagnostics(experiment_file, repository_path)
                exp = get_experiment(experiment_file, expid["class_name"])
                device_mgr.virtual_devices["scheduler"].set_run_info(
                    rid, obj["pipeline_name"], expid, obj["priority"])
                start_local_time = time.localtime(start_time)
                rootdir = os.path.join(os.path.expanduser("~"), "data")
                dirname = os.path.join(
                    rootdir, time.strftime("%Y-%m-%d", start_local_time))
                os.makedirs(dirname, exist_ok=True)
                os.chdir(dirname)
                argument_mgr = ProcessArgumentManager(expid["arguments"])
                exp_inst = exp((device_mgr, dataset_mgr, argument_mgr, {}))
                put_completed()
            elif action == "prepare":
                exp_inst.prepare()
                put_completed()
            elif action == "run":
                current_time = datetime.datetime.now().strftime("%H%M_%S")
                run_time = time.time()
                try:
                    exp_inst.run()
                except:
                    # Only write results in run() on failure; on success wait
                    # for end of analyze stage.
                    write_results()
                    raise
                put_completed()
            elif action == "analyze":
                try:
                    exp_inst.analyze()
                except:
                    # make analyze failure non-fatal, as we may still want to
                    # write results afterwards
                    put_exception_report()
                else:
                    put_object({"action": "completed"})
            elif action == "write_results":
                if hasattr(exp_inst, "archive"):
                    if not exp_inst.archive:
                        put_object({"action": "completed"})
                        continue
                path = os.path.join(dirname, exp.__name__)
                if not os.path.exists(path):
                    os.mkdir(path)
                if hasattr(exp_inst, "filename"):
                    filename = list(exp_inst.filename.values())[0]
                else:
                    filename = "raw-data_{}.h5".format(current_time)
                file_ = os.path.join(path, filename)
                with h5py.File(file_, "a") as f:
                    dataset_mgr.write_hdf5(f)
                    f["artiq_version"] = artiq_version
                    f["rid"] = rid
                    f["start_time"] = start_time
                    f["run_time"] = run_time
                    f["expid"] = pyon.encode(expid)
                put_object({"action": "completed"})
            elif action == "examine":
                examine(ExamineDeviceMgr, ExamineDatasetMgr, obj["file"])
                put_completed()
            elif action == "terminate":
                break
    except:
        put_exception_report()
    finally:
        device_mgr.close_devices()
        ipc.close()