Example #1
def run(with_file=False):
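    """Build one experiment from the command-line arguments and run its
    prepare/run/analyze phases locally, then write the resulting datasets
    to HDF5 (if requested) or print them to stdout."""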
    args = get_argparser(with_file).parse_args()
    init_logger(args)

    device_mgr = DeviceManager(DeviceDB(args.device_db),
                               virtual_devices={"scheduler": DummyScheduler()})
    dataset_db = DatasetDB(args.dataset_db)
    dataset_mgr = DatasetManager(dataset_db)

    try:
        exp_inst = _build_experiment(device_mgr, dataset_mgr, args)
        exp_inst.prepare()
        exp_inst.run()
        exp_inst.analyze()
    except CompileError:
        # Abort quietly on compilation errors (no Python traceback).
        return
    except Exception as exn:
        # Print the core-device exception report, if any, then re-raise.
        if hasattr(exn, "artiq_core_exception"):
            print(exn.artiq_core_exception, file=sys.stderr)
        raise
    finally:
        device_mgr.close_devices()

    if args.hdf5 is not None:
        with h5py.File(args.hdf5, "w") as f:
            dataset_mgr.write_hdf5(f)
    else:
        for k, v in sorted(dataset_mgr.local.items(), key=itemgetter(0)):
            print("{}: {}".format(k, v))
    dataset_db.save()
Example #2
def main():
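    """Run the master: serve the device/dataset databases, experiment
    repository and scheduler over RPC, publish change notifications, and
    accept remote log records until the event loop is stopped."""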
    args = get_argparser().parse_args()
    log_buffer = init_log(args)
    if os.name == "nt":
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(loop.close)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit.register(lambda: loop.run_until_complete(dataset_db.stop()))

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    repository = Repository(repo_backend, device_db.get_device_db, log_worker)
    atexit.register(repository.close)
    repository.scan_async()

    worker_handlers = {
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "log": log_worker
    }
    scheduler = Scheduler(get_last_rid() + 1, worker_handlers, repo_backend)
    worker_handlers["scheduler_submit"] = scheduler.submit
    scheduler.start()
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))

    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_repository": repository
    })
    loop.run_until_complete(server_control.start(args.bind, args.port_control))
    atexit.register(lambda: loop.run_until_complete(server_control.stop()))

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": repository.explist,
        "log": log_buffer.data
    })
    loop.run_until_complete(server_notify.start(args.bind, args.port_notify))
    atexit.register(lambda: loop.run_until_complete(server_notify.stop()))

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(args.bind, args.port_logging))
    atexit.register(lambda: loop.run_until_complete(server_logging.stop()))

    loop.run_forever()
Example #3
    def setUp(self):
        """Open the device and dataset databases under artiq_root and
        wrap them in the managers used by the tests."""
        self.device_db = DeviceDB(os.path.join(artiq_root, "device_db.pyon"))
        self.dataset_db = DatasetDB(os.path.join(artiq_root,
                                                 "dataset_db.pyon"))
        self.device_mgr = DeviceManager(
            self.device_db, virtual_devices={"scheduler": DummyScheduler()})
        self.dataset_mgr = DatasetManager(self.dataset_db)
Example #4
def run(with_file=False):
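    """Build and run one experiment locally.

    Same flow as Example #1, but without the explicit CompileError and
    core-device exception handling around the experiment phases.
    """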
    args = get_argparser(with_file).parse_args()
    init_logger(args)

    device_mgr = DeviceManager(DeviceDB(args.device_db),
                               virtual_devices={"scheduler": DummyScheduler()})
    dataset_db = DatasetDB(args.dataset_db)
    dataset_mgr = DatasetManager(dataset_db)

    try:
        exp_inst = _build_experiment(device_mgr, dataset_mgr, args)
        exp_inst.prepare()
        exp_inst.run()
        exp_inst.analyze()
    finally:
        device_mgr.close_devices()

    if args.hdf5 is not None:
        with h5py.File(args.hdf5, "w") as f:
            dataset_mgr.write_hdf5(f)
    else:
        for k, v in sorted(dataset_mgr.local.items(), key=itemgetter(0)):
            print("{}: {}".format(k, v))
    dataset_db.save()
Example #5
def main():
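    """Compile an experiment's run() kernel ahead of time and write the
    resulting kernel library to an ELF file without executing it."""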
    args = get_argparser().parse_args()
    init_logger(args)

    device_mgr = DeviceManager(DeviceDB(args.device_db))
    dataset_mgr = DatasetManager(DatasetDB(args.dataset_db))

    try:
        module = file_import(args.file, prefix="artiq_run_")
        exp = get_experiment(module, args.experiment)
        arguments = parse_arguments(args.arguments)
        argument_mgr = ProcessArgumentManager(arguments)
        exp_inst = exp((device_mgr, dataset_mgr, argument_mgr))

        if not hasattr(exp.run, "artiq_embedded"):
            raise ValueError("Experiment entry point must be a kernel")
        core_name = exp.run.artiq_embedded.core_name
        core = getattr(exp_inst, core_name)

        object_map, kernel_library, _, _ = \
            core.compile(exp.run, [exp_inst], {},
                         attribute_writeback=False, print_as_rpc=False)
    except CompileError:
        # Abort quietly on compilation errors (no Python traceback).
        return
    finally:
        device_mgr.close_devices()

    if object_map.has_rpc():
        raise ValueError("Experiment must not use RPC")

    output = args.output
    if output is None:
        basename, _ = os.path.splitext(args.file)
        output = "{}.elf".format(basename)

    with open(output, "wb") as f:
        f.write(kernel_library)
Example #6
def main():
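    """Compile an experiment's run() kernel to an ELF file through the
    k_function_info interface, rejecting experiments that use RPC."""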
    args = get_argparser().parse_args()
    init_logger(args)

    device_mgr = DeviceManager(DeviceDB(args.device_db))
    dataset_mgr = DatasetManager(DatasetDB(args.dataset_db))

    try:
        module = file_import(args.file)
        exp = get_experiment(module, args.experiment)
        arguments = parse_arguments(args.arguments)
        exp_inst = exp(device_mgr, dataset_mgr, **arguments)

        if (not hasattr(exp.run, "k_function_info")
                or not exp.run.k_function_info):
            raise ValueError("Experiment entry point must be a kernel")
        core_name = exp.run.k_function_info.core_name
        core = getattr(exp_inst, core_name)

        binary, rpc_map, _ = core.compile(exp.run.k_function_info.k_function,
                                          [exp_inst], {},
                                          with_attr_writeback=False)
    finally:
        device_mgr.close_devices()

    if rpc_map:
        raise ValueError("Experiment must not use RPC")

    output = args.output
    if output is None:
        output = args.file
        if output.endswith(".py"):
            output = output[:-3]
        output += ".elf"
    with open(output, "wb") as f:
        f.write(binary)
Example #7
def main():
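    """Run the master with a Broadcaster that forwards log records and
    CCB messages alongside the RPC, notifier and logging servers."""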
    args = get_argparser().parse_args()
    log_forwarder = init_log(args)
    if os.name == "nt":
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(loop.close)
    bind = bind_address_from_args(args)

    server_broadcast = Broadcaster()
    loop.run_until_complete(server_broadcast.start(
        bind, args.port_broadcast))
    atexit_register_coroutine(server_broadcast.stop)

    log_forwarder.callback = (lambda msg:
        server_broadcast.broadcast("log", msg))
    def ccb_issue(service, *args, **kwargs):
        msg = {
            "service": service,
            "args": args,
            "kwargs": kwargs
        }
        server_broadcast.broadcast("ccb", msg)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit_register_coroutine(dataset_db.stop)
    worker_handlers = dict()

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    experiment_db = ExperimentDB(repo_backend, worker_handlers)
    atexit.register(experiment_db.close)

    scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db)
    scheduler.start()
    atexit_register_coroutine(scheduler.stop)

    worker_handlers.update({
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "scheduler_submit": scheduler.submit,
        "scheduler_delete": scheduler.delete,
        "scheduler_request_termination": scheduler.request_termination,
        "scheduler_get_status": scheduler.get_status,
        "scheduler_check_pause": scheduler.check_pause,
        "ccb_issue": ccb_issue,
    })
    experiment_db.scan_repository_async()

    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_experiment_db": experiment_db
    }, allow_parallel=True)
    loop.run_until_complete(server_control.start(
        bind, args.port_control))
    atexit_register_coroutine(server_control.stop)

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": experiment_db.explist,
        "explist_status": experiment_db.status
    })
    loop.run_until_complete(server_notify.start(
        bind, args.port_notify))
    atexit_register_coroutine(server_notify.stop)

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(
        bind, args.port_logging))
    atexit_register_coroutine(server_logging.stop)

    logger.info("running, bound to %s", bind)
    loop.run_forever()
Example #8
def main():
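    """Run the master, publishing the in-memory log buffer through the
    notifier instead of broadcasting log records."""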
    args = get_argparser().parse_args()
    log_buffer = init_log(args)
    if os.name == "nt":
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(loop.close)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit_register_coroutine(dataset_db.stop)
    worker_handlers = dict()

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    experiment_db = ExperimentDB(repo_backend, worker_handlers)
    atexit.register(experiment_db.close)

    scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db)
    scheduler.start()
    atexit_register_coroutine(scheduler.stop)

    worker_handlers.update({
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "scheduler_submit": scheduler.submit,
        "scheduler_delete": scheduler.delete,
        "scheduler_request_termination": scheduler.request_termination,
        "scheduler_get_status": scheduler.get_status        
    })
    experiment_db.scan_repository_async()

    bind = bind_address_from_args(args)

    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_experiment_db": experiment_db
    }, allow_parallel=True)
    loop.run_until_complete(server_control.start(
        bind, args.port_control))
    atexit_register_coroutine(server_control.stop)

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": experiment_db.explist,
        "explist_status": experiment_db.status,
        "log": log_buffer.data
    })
    loop.run_until_complete(server_notify.start(
        bind, args.port_notify))
    atexit_register_coroutine(server_notify.stop)

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(
        bind, args.port_logging))
    atexit_register_coroutine(server_logging.stop)

    logger.info("running, bound to %s", bind)
    loop.run_forever()
Example #9
def main():
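    """Run the master with Broadcaster-based log forwarding, printing a
    readiness message before entering the event loop."""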
    args = get_argparser().parse_args()
    log_forwarder = init_log(args)
    if os.name == "nt":
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(loop.close)
    bind = bind_address_from_args(args)

    server_broadcast = Broadcaster()
    loop.run_until_complete(server_broadcast.start(
        bind, args.port_broadcast))
    atexit_register_coroutine(server_broadcast.stop)

    log_forwarder.callback = (lambda msg:
        server_broadcast.broadcast("log", msg))
    def ccb_issue(service, *args, **kwargs):
        msg = {
            "service": service,
            "args": args,
            "kwargs": kwargs
        }
        server_broadcast.broadcast("ccb", msg)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit_register_coroutine(dataset_db.stop)
    worker_handlers = dict()

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    experiment_db = ExperimentDB(repo_backend, worker_handlers)
    atexit.register(experiment_db.close)

    scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db)
    scheduler.start()
    atexit_register_coroutine(scheduler.stop)

    worker_handlers.update({
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "scheduler_submit": scheduler.submit,
        "scheduler_delete": scheduler.delete,
        "scheduler_request_termination": scheduler.request_termination,
        "scheduler_get_status": scheduler.get_status,
        "scheduler_check_pause": scheduler.check_pause,
        "ccb_issue": ccb_issue,
    })
    experiment_db.scan_repository_async()

    server_control = RPCServer({
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_experiment_db": experiment_db
    }, allow_parallel=True)
    loop.run_until_complete(server_control.start(
        bind, args.port_control))
    atexit_register_coroutine(server_control.stop)

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": experiment_db.explist,
        "explist_status": experiment_db.status
    })
    loop.run_until_complete(server_notify.start(
        bind, args.port_notify))
    atexit_register_coroutine(server_notify.stop)

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(
        bind, args.port_logging))
    atexit_register_coroutine(server_logging.stop)

    print("ARTIQ master is now ready.")
    loop.run_forever()
Example #10
def main():
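    """Run the master; the same entry point as Example #2, differing
    only in formatting."""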
    args = get_argparser().parse_args()
    log_buffer = init_log(args)
    if os.name == "nt":
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(loop.close)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit.register(lambda: loop.run_until_complete(dataset_db.stop()))

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    repository = Repository(repo_backend, device_db.get_device_db, log_worker)
    atexit.register(repository.close)
    repository.scan_async()

    worker_handlers = {
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "log": log_worker,
    }
    scheduler = Scheduler(get_last_rid() + 1, worker_handlers, repo_backend)
    worker_handlers["scheduler_submit"] = scheduler.submit
    scheduler.start()
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))

    server_control = RPCServer(
        {
            "master_device_db": device_db,
            "master_dataset_db": dataset_db,
            "master_schedule": scheduler,
            "master_repository": repository,
        }
    )
    loop.run_until_complete(server_control.start(args.bind, args.port_control))
    atexit.register(lambda: loop.run_until_complete(server_control.stop()))

    server_notify = Publisher(
        {
            "schedule": scheduler.notifier,
            "devices": device_db.data,
            "datasets": dataset_db.data,
            "explist": repository.explist,
            "log": log_buffer.data,
        }
    )
    loop.run_until_complete(server_notify.start(args.bind, args.port_notify))
    atexit.register(lambda: loop.run_until_complete(server_notify.stop()))

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(args.bind, args.port_logging))
    atexit.register(lambda: loop.run_until_complete(server_logging.stop()))

    loop.run_forever()
Example #11
def main():
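    """Run the master with explicit event-loop creation and a
    SignalHandler, exposing MasterConfig over RPC and shutting down
    cleanly when a termination signal arrives."""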
    args = get_argparser().parse_args()
    log_forwarder = init_log(args)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    atexit.register(loop.close)
    signal_handler = SignalHandler()
    signal_handler.setup()
    atexit.register(signal_handler.teardown)
    bind = common_args.bind_address_from_args(args)

    server_broadcast = Broadcaster()
    loop.run_until_complete(server_broadcast.start(
        bind, args.port_broadcast))
    atexit_register_coroutine(server_broadcast.stop)

    log_forwarder.callback = (lambda msg:
        server_broadcast.broadcast("log", msg))
    def ccb_issue(service, *args, **kwargs):
        msg = {
            "service": service,
            "args": args,
            "kwargs": kwargs
        }
        server_broadcast.broadcast("ccb", msg)

    device_db = DeviceDB(args.device_db)
    dataset_db = DatasetDB(args.dataset_db)
    dataset_db.start()
    atexit_register_coroutine(dataset_db.stop)
    worker_handlers = dict()

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    experiment_db = ExperimentDB(
        repo_backend, worker_handlers, args.experiment_subdir)
    atexit.register(experiment_db.close)

    scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db,
                          args.log_submissions)
    scheduler.start()
    atexit_register_coroutine(scheduler.stop)

    config = MasterConfig(args.name)

    worker_handlers.update({
        "get_device_db": device_db.get_device_db,
        "get_device": device_db.get,
        "get_dataset": dataset_db.get,
        "update_dataset": dataset_db.update,
        "scheduler_submit": scheduler.submit,
        "scheduler_delete": scheduler.delete,
        "scheduler_request_termination": scheduler.request_termination,
        "scheduler_get_status": scheduler.get_status,
        "scheduler_check_pause": scheduler.check_pause,
        "scheduler_check_termination": scheduler.check_termination,
        "ccb_issue": ccb_issue,
    })
    experiment_db.scan_repository_async()

    server_control = RPCServer({
        "master_config": config,
        "master_device_db": device_db,
        "master_dataset_db": dataset_db,
        "master_schedule": scheduler,
        "master_experiment_db": experiment_db
    }, allow_parallel=True)
    loop.run_until_complete(server_control.start(
        bind, args.port_control))
    atexit_register_coroutine(server_control.stop)

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": device_db.data,
        "datasets": dataset_db.data,
        "explist": experiment_db.explist,
        "explist_status": experiment_db.status
    })
    loop.run_until_complete(server_notify.start(
        bind, args.port_notify))
    atexit_register_coroutine(server_notify.stop)

    server_logging = LoggingServer()
    loop.run_until_complete(server_logging.start(
        bind, args.port_logging))
    atexit_register_coroutine(server_logging.stop)

    print("ARTIQ master is now ready.")
    loop.run_until_complete(signal_handler.wait_terminate())
Example #12
class TestDatasetDB(unittest.TestCase):
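    """Tests for DatasetDB: persistence format, HDF5 options, updates,
    deletion, and persist-flag propagation across set() calls."""
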
    def setUp(self):
        # create an empty dataset persistence file
        self.persist_file = tempfile.NamedTemporaryFile(mode="w+")
        print("{}", file=self.persist_file, flush=True)

        self.ddb = DatasetDB(self.persist_file.name)

        self.ddb.set(KEY1, DATA, persist=True)
        self.ddb.set(KEY2, DATA, persist=True, hdf5_options=dict(compression=COMP))
        self.ddb.set(KEY3, DATA, hdf5_options=dict(shuffle=True))

        self.save_ddb_to_disk()

    def save_ddb_to_disk(self):
        self.ddb.save()
        self.persist_file.flush()

    def load_ddb_from_disk(self):
        return pyon.load_file(self.persist_file.name)

    def test_persist_format(self):
        data = pyon.load_file(self.persist_file.name)

        for key in [KEY1, KEY2]:
            self.assertTrue(data[key]["persist"])
            self.assertEqual(data[key]["value"], DATA)

        self.assertEqual(data[KEY2]["hdf5_options"]["compression"], COMP)
        self.assertEqual(data[KEY1]["hdf5_options"], dict())

    def test_only_persist_marked_datasets(self):
        data = self.load_ddb_from_disk()

        with self.assertRaises(KeyError):
            data[KEY3]

    def test_memory_format(self):
        ds = self.ddb.get(KEY2)
        self.assertTrue(ds["persist"])
        self.assertEqual(ds["value"], DATA)
        self.assertEqual(ds["hdf5_options"]["compression"], COMP)

        ds = self.ddb.get(KEY3)
        self.assertFalse(ds["persist"])
        self.assertEqual(ds["value"], DATA)
        self.assertTrue(ds["hdf5_options"]["shuffle"])

    def test_delete(self):
        self.ddb.delete(KEY1)
        self.save_ddb_to_disk()

        data = self.load_ddb_from_disk()

        with self.assertRaises(KeyError):
            data[KEY1]

        self.assertTrue(data[KEY2]["persist"])

    def test_update(self):
        self.assertFalse(self.ddb.get(KEY3)["persist"])

        mod = {
            "action": "setitem",
            "path": [KEY3],
            "key": "persist",
            "value": True,
        }

        self.ddb.update(mod)
        self.assertTrue(self.ddb.get(KEY3)["persist"])

    def test_update_hdf5_options(self):
        with self.assertRaises(KeyError):
            self.ddb.get(KEY1)["hdf5_options"]["shuffle"]

        mod = {
            "action": "setitem",
            "path": [KEY1, "hdf5_options"],
            "key": "shuffle",
            "value": False,
        }

        self.ddb.update(mod)
        self.assertFalse(self.ddb.get(KEY1)["hdf5_options"]["shuffle"])

    def test_reset_copies_persist(self):
        self.assertTrue(self.ddb.get(KEY1)["persist"])
        self.ddb.set(KEY1, DATA)
        self.assertTrue(self.ddb.get(KEY1)["persist"])

        self.assertFalse(self.ddb.get(KEY3)["persist"])
        self.ddb.set(KEY3, DATA)
        self.assertFalse(self.ddb.get(KEY3)["persist"])

        self.ddb.set(KEY3, DATA, persist=True)
        self.assertTrue(self.ddb.get(KEY3)["persist"])