def main():
    """CLI entry point: build a spline program from command-line
    expressions and upload it to a PDQ2 device over RPC.

    ``--times``/``--voltages`` are evaluated as Python expressions; the
    resulting knots are optionally spline-interpolated and packed into a
    single segment placed at frame ``--frame`` of the program.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    dev = Client(args.server, args.port, "pdq2")
    freq = dev.get_freq()
    # NOTE(review): eval() of CLI-supplied expressions is acceptable for a
    # trusted local tool only -- never expose these args to untrusted input.
    times = np.around(eval(args.times, globals(), {}) * freq)
    voltages = eval(args.voltages, globals(), dict(t=times / freq))
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement and produces the same dtype.
    dt = np.diff(times.astype(int))
    if args.order:
        # Derivatives at the knots (spalde) give the spline coefficients
        # the device needs for smooth interpolation.
        tck = interpolate.splrep(times, voltages, k=args.order, s=0)
        u = interpolate.spalde(times, tck)
    else:
        # Zeroth order: each knot contributes only a constant amplitude.
        u = voltages[:, None]
    segment = []
    for dti, ui in zip(dt, u):
        segment.append({
            "duration": int(dti),
            "channel_data": [{
                "bias": {
                    "amplitude": [float(uij) for uij in ui]
                }
            }]
        })
    # Pad with empty frames so our segment lands at index args.frame.
    program = [[] for i in range(args.frame)]
    program.append(segment)
    dev.park()
    dev.program(program, [args.channel])
    dev.unpark()
    dev.cmd("TRIGGER", args.free)
def main():
    """Entry point: render master state or dispatch a mutating action."""
    args = get_argparser().parse_args()
    action = args.action.replace("-", "_")
    if action == "show":
        # Map each displayable object to the callable that renders it.
        show_handlers = {
            "schedule": lambda: _show_dict(args, "schedule", _show_schedule),
            "log": lambda: _show_log(args),
            "devices": lambda: _show_dict(args, "devices", _show_devices),
            "datasets": lambda: _show_dict(args, "datasets", _show_datasets),
        }
        handler = show_handlers.get(args.what)
        if handler is None:
            print("Unknown object to show, use -h to list valid names.")
            sys.exit(1)
        handler()
    else:
        port = args.port if args.port is not None else 3251
        # Every mutating action talks to one specific master RPC target.
        target_name = {
            "submit": "master_schedule",
            "delete": "master_schedule",
            "set_dataset": "master_dataset_db",
            "del_dataset": "master_dataset_db",
            "scan_devices": "master_device_db",
            "scan_repository": "master_repository",
        }[action]
        remote = Client(args.server, port, target_name)
        try:
            # Handlers follow the _action_<name> naming convention.
            globals()["_action_" + action](remote, args)
        finally:
            remote.close_rpc()
def save_and_send_to_rcg(self, x, y, name):
    """Lazily create the HDF5 results file, register the scan in the
    shared scan list, then push (x, y) to the RCG plotter.

    Plotting is best-effort: connection or plot failures return silently
    so data taking is never interrupted by a missing plotter.
    """
    if self.timestamp is None:
        # First call: create the results file and record scan metadata.
        self.timestamp = datetime.now().strftime("%H%M_%S")
        self.filename = self.timestamp + ".h5"
        with h5.File(self.filename, "a") as f:
            datagrp = f.create_group("scan_data")
            datagrp.attrs["plot_show"] = self.RCG_TAB
            f.create_dataset("time", data=[], maxshape=(None,))
            params = f.create_group("parameters")
            for collection in self.p.keys():
                collectiongrp = params.create_group(collection)
                for key, val in self.p[collection].items():
                    collectiongrp.create_dataset(key, data=str(val))
        # Append this scan to the running index of scans for the day.
        with open("../scan_list", "a+") as csvfile:
            csvwriter = csv.writer(csvfile, delimiter=",")
            csvwriter.writerow([self.timestamp, type(self).__name__,
                                os.path.join(self.dir, self.filename)])
    if self.rcg is None:
        try:
            self.rcg = Client("::1", 3286, "rcg")
        except Exception:
            # Was a bare "except:", which would also swallow
            # KeyboardInterrupt/SystemExit; Exception keeps the
            # best-effort intent without trapping exits.
            return
    try:
        self.rcg.plot(x, y, tab_name=self.RCG_TAB,
                      plot_title=self.timestamp + " - " + name, append=True,
                      file_=os.path.join(os.getcwd(), self.filename))
    except Exception:
        return
def main():
    """Generic RPC tool entry: list targets/methods or call one method."""
    args = get_argparser().parse_args()
    remote = Client(args.server, args.port, None)
    targets, description = remote.get_rpc_id()
    if args.action != "list-targets":
        if args.target is None:
            # Without an explicit target we can only proceed when the
            # remote advertises exactly one.
            if len(targets) > 1:
                print("Remote server has several targets, please supply "
                      "one with -t")
                sys.exit(1)
            args.target = targets[0]
        remote.select_rpc_target(args.target)
    if args.action == "list-targets":
        list_targets(targets, description)
    elif args.action == "list-methods":
        list_methods(remote)
    elif args.action == "call":
        call_method(remote, args.method, args.args)
    else:
        print("Unrecognized action: {}".format(args.action))
def main():
    """CLI entry: either display master state or run a mutating action."""
    args = get_argparser().parse_args()
    action = args.action.replace("-", "_")
    if action == "show":
        # Dispatch table for the read-only "show" sub-commands.
        displays = {
            "queue": lambda: _show_list(args, "queue", _show_queue),
            "timed": lambda: _show_dict(args, "timed", _show_timed),
            "devices": lambda: _show_dict(args, "devices", _show_devices),
            "parameters": lambda: _show_dict(args, "parameters",
                                             _show_parameters),
        }
        show = displays.get(args.what)
        if show is None:
            print("Unknown object to show, use -h to list valid names.")
            sys.exit(1)
        show()
        return
    port = 3251 if args.port is None else args.port
    # Mutating actions are routed to the matching master RPC target.
    rpc_targets = {
        "submit": "master_schedule",
        "cancel": "master_schedule",
        "set_device": "master_ddb",
        "del_device": "master_ddb",
        "set_parameter": "master_pdb",
        "del_parameter": "master_pdb",
    }
    remote = Client(args.server, port, rpc_targets[action])
    try:
        globals()["_action_" + action](remote, args)
    finally:
        remote.close_rpc()
def main():
    """Entry point: show master state (incl. CCB) or dispatch an action."""
    args = get_argparser().parse_args()
    action = args.action.replace("-", "_")
    if action == "show":
        if args.what == "schedule":
            _show_dict(args, "schedule", _show_schedule)
        elif args.what == "log":
            _show_log(args)
        elif args.what == "ccb":
            _show_ccb(args)
        elif args.what == "devices":
            _show_dict(args, "devices", _show_devices)
        elif args.what == "datasets":
            _show_dict(args, "datasets", _show_datasets)
        else:
            # Was a bare "raise ValueError" with no message -- useless on
            # the command line. Keep the exception type (callers may rely
            # on it) but say what went wrong.
            raise ValueError(
                "Unknown object to show: {}".format(args.what))
    else:
        port = 3251 if args.port is None else args.port
        # Each mutating action maps onto one master RPC target.
        target_name = {
            "submit": "master_schedule",
            "delete": "master_schedule",
            "set_dataset": "master_dataset_db",
            "del_dataset": "master_dataset_db",
            "scan_devices": "master_device_db",
            "scan_repository": "master_experiment_db",
            "ls": "master_experiment_db"
        }[action]
        remote = Client(args.server, port, target_name)
        try:
            globals()["_action_" + action](remote, args)
        finally:
            remote.close_rpc()
def main():
    """Dispatch the parsed command-line action for the master client."""
    args = get_argparser().parse_args()
    action = args.action.replace("-", "_")
    if action != "show":
        # Mutating actions go over RPC to the matching master target.
        rpc_targets = {
            "submit": "master_schedule",
            "delete": "master_schedule",
            "set_dataset": "master_dataset_db",
            "del_dataset": "master_dataset_db",
            "scan_devices": "master_device_db",
            "scan_repository": "master_experiment_db",
            "ls": "master_experiment_db"
        }
        port = 3251 if args.port is None else args.port
        remote = Client(args.server, port, rpc_targets[action])
        try:
            globals()["_action_" + action](remote, args)
        finally:
            remote.close_rpc()
        return
    # Read-only "show" sub-commands.
    if args.what == "schedule":
        _show_dict(args, "schedule", _show_schedule)
    elif args.what == "log":
        _show_log(args)
    elif args.what == "devices":
        _show_dict(args, "devices", _show_devices)
    elif args.what == "datasets":
        _show_dict(args, "datasets", _show_datasets)
    else:
        print("Unknown object to show, use -h to list valid names.")
        sys.exit(1)
def main():
    """Query an RPC server for its identification and print it."""
    args = get_argparser().parse_args()
    rpc_conn = Client(args.server, args.port, None)
    try:
        names, id_params = rpc_conn.get_rpc_id()
    finally:
        # Release the connection whether or not the query succeeded.
        rpc_conn.close_rpc()
    print("Target(s): " + ", ".join(names))
    if id_params is not None:
        print("Parameters: " + id_params)
def run(self):
    """Stream a growing sine trace to the RCG plotter, one point per 0.5 s."""
    self.c = Client("::1", 3286, "rcg")
    self.set_dataset("x", [])
    self.set_dataset("y", [])
    step = 0
    while True:
        self.append_to_dataset("x", step)
        self.append_to_dataset("y", np.sin(2 * np.pi / 10 * step))
        # Re-read the full datasets so the plot always shows every point.
        self.send_to_rcg(self.get_dataset("x"), self.get_dataset("y"))
        time.sleep(.5)
        step += 1
def main():
    """RPC tool entry point; the target is selected automatically."""
    args = get_argparser().parse_args()
    remote = Client(args.server, args.port, None)
    targets, description = remote.get_rpc_id()
    if args.action == "list-targets":
        # Listing targets requires no selected target.
        list_targets(targets, description)
        return
    remote.select_rpc_target(AutoTarget)
    if args.action == "list-methods":
        list_methods(remote)
    elif args.action == "call":
        call_method(remote, args.method, args.args)
    else:
        print("Unrecognized action: {}".format(args.action))
class FloppingF(EnvExperiment):
    """Demo experiment: feeds a slowly growing sine curve to the RCG
    plotter, appending one point every half second, forever."""

    def run(self):
        self.c = Client("::1", 3286, "rcg")
        self.set_dataset("x", [])
        self.set_dataset("y", [])
        step = 0
        while True:
            self.append_to_dataset("x", step)
            self.append_to_dataset("y", np.sin(2 * np.pi / 10 * step))
            # Re-read the full datasets so every point is (re)plotted.
            self.send_to_rcg(self.get_dataset("x"), self.get_dataset("y"))
            time.sleep(.5)
            step += 1

    @rpc(flags={"async"})
    def send_to_rcg(self, x, y):
        # Fire-and-forget: plotting must not stall the acquisition loop.
        self.c.plot(x, y)
def main():
    """Read or set the attenuation of a remote Lab Brick attenuator."""
    args = get_argparser().parse_args()
    lda = Client(args.server, args.port, "lda")
    try:
        if args.attenuation is None:
            # No value supplied: report the current setting instead.
            print("Current attenuation: {}".format(lda.get_attenuation()))
        else:
            lda.set_attenuation(args.attenuation)
    finally:
        lda.close_rpc()
def stop_controller(self, name, default_timeout=1):
    """Stop the named controller, escalating until it dies.

    Escalation order: polite RPC ``terminate()`` -> POSIX terminate ->
    kill, waiting up to the controller's ``term_timeout`` (or
    *default_timeout*) after each step. The controller is removed from
    ``self.controllers`` no matter how (or whether) it exited.
    """
    desc, proc = self.controllers[name]
    t = desc.get("term_timeout", default_timeout)
    target_name = desc.get("target_name", None)
    if target_name is None:
        target_name = AutoTarget
    try:
        # Step 1: ask nicely over RPC, then wait for the process to exit.
        try:
            client = Client(desc["host"], desc["port"], target_name, t)
            try:
                client.terminate()
            finally:
                # Always drop the RPC connection, even if terminate() fails.
                client.close_rpc()
            proc.wait(t)
            return
        except (socket.timeout, subprocess.TimeoutExpired):
            logger.warning("Controller %s failed to exit on request", name)
        # Step 2: OS-level terminate (SIGTERM). The process may already be
        # gone, hence the ProcessLookupError guard.
        try:
            proc.terminate()
        except ProcessLookupError:
            pass
        try:
            proc.wait(t)
            return
        except subprocess.TimeoutExpired:
            logger.warning("Controller %s failed to exit on terminate",
                           name)
        # Step 3: last resort, SIGKILL.
        try:
            proc.kill()
        except ProcessLookupError:
            pass
        try:
            proc.wait(t)
            return
        except subprocess.TimeoutExpired:
            logger.warning("Controller %s failed to die on kill", name)
    finally:
        # Forget the controller regardless of the outcome above.
        del self.controllers[name]
def remote(args):
    """Display a remote camera's image stream in a BeamDisplay window.

    Control traffic (serial-number query) goes over the RPC client;
    image frames arrive on a separate ZMQ socket that is polled from a
    Qt timer every 50 ms.
    """
    ### Remote operation ###
    camera = Client(args.server, args.artiq_port)
    sn = camera.get_serial_no()
    b = BeamDisplay(camera)
    ctx = zmq.Context()
    sock = zmq_setup(ctx, args.server, args.zmq_port)

    @QtCore.pyqtSlot()
    def qt_update():
        try:
            im = sock.recv_pyobj()
        except zmq.error.Again as e:
            # No frame available yet (non-blocking recv); poll again on
            # the next timer tick.
            pass
        else:
            b.queue_image(im)

    # Parent the timer to the display so it shares the window's lifetime.
    timer = QtCore.QTimer(b)
    timer.timeout.connect(qt_update)
    timer.start(50)  # timeout ms
    title = b.windowTitle() + " (sn: {} @ host: {}, artiq: {}, zmq: {})".format(
        sn, args.server, args.artiq_port, args.zmq_port)
    b.setWindowTitle(title)
def prepare(self):
    """Host-side setup: fetch parametervault parameters, cache DDS CW
    settings, open plotter connections and allocate result arrays."""
    # Grab parametervault params:
    cxn = labrad.connect()
    p = cxn.parametervault
    collections = p.get_collections()
    # Takes over 1 second to do this. We should move away from using
    # labrad units in registry. Would be nice if parametervault was not a
    # labrad server.
    D = dict()
    # locals() is captured so unit names already in scope ("ms", "us", ...)
    # can be looked up as multipliers below -- TODO confirm these unit
    # constants are in scope at this point.
    L = locals()
    for collection in collections:
        d = dict()
        names = p.get_parameter_names(collection)
        for name in names:
            try:
                param = p.get_parameter([collection, name])
                try:
                    units = param.units
                    if units == "":
                        param = param[units]
                    else:
                        # Strip the labrad unit and rescale by the
                        # like-named ARTIQ unit constant.
                        param = param[units] * L[units]
                except AttributeError:
                    # Parameter has no units attribute: keep it as-is.
                    pass
                except KeyError:
                    # Unit with no ARTIQ equivalent (dBm, deg): just strip.
                    if (units == "dBm" or
                            units == "deg" or
                            units == ""):
                        param = param[units]
                d[name] = param
            except:  # broken parameter
                continue
        D[collection] = d
    self.p = edict(D)
    cxn.disconnect()
    # Grab cw parameters:
    # Because parameters are grabbed in prepare stage, loaded dds cw
    # parameters may not be the most current.
    self.dds_list = list()
    self.freq_list = list()
    self.amp_list = list()
    self.att_list = list()
    self.state_list = list()
    # settings[1] appears to be (freq_MHz, amplitude, state, attenuation)
    # -- TODO confirm against the parametervault schema.
    for key, settings in self.p.dds_cw_parameters.items():
        self.dds_list.append(getattr(self, "dds_" + key))
        self.freq_list.append(float(settings[1][0]) * 1e6)
        self.amp_list.append(float(settings[1][1]))
        self.att_list.append(float(settings[1][3]))
        self.state_list.append(bool(float(settings[1][2])))
    # Try to make rcg/hist connection; both are optional (best-effort).
    try:
        self.rcg = Client("::1", 3286, "rcg")
    except:
        self.rcg = None
    try:
        self.pmt_hist = Client("::1", 3287, "pmt_histogram")
    except:
        self.pmt_hist = None
    # Make scan object for repeating the experiment
    N = int(self.p.StateReadout.repeat_each_measurement)
    self.N = N
    # Create datasets: x is the scan axis, y1/y2 hold N repeats per point.
    M = len(self.scan)
    self.set_dataset("x", np.full(M, np.nan), broadcast=True)
    self.set_dataset("y1", np.full((M, N), np.nan), broadcast=True)
    self.set_dataset("y2", np.full((M, N), np.nan), broadcast=True)
    # Pre-generated gaussian noise added to the simulated readout.
    A = np.full((M, N), np.nan)
    for x in np.nditer(A, op_flags=["readwrite"]):
        x[...] = np.random.normal(0, .1)
    self.rand = A
    self.setattr_dataset("x")
    self.setattr_dataset("y1")
    self.setattr_dataset("y2")
    self.yfull1 = np.full(M, np.nan)
    self.yfull2 = np.full(M, np.nan)
    # Simulated per-shot counts for the histogram applet.
    self.hist_counts = np.full((M, N), np.nan)
    for row in range(M):
        for col in range(N):
            self.hist_counts[row][col] = np.random.normal(10, 5)
    # Tab For Plotting
    self.RCG_TAB = "Rabi"
    # Setup for saving data: per-day directory named after the experiment;
    # note the process-wide chdir so relative paths below resolve there.
    self.timestamp = None
    self.dir = os.path.join(os.path.expanduser("~"), "data",
                            datetime.now().strftime("%Y-%m-%d"),
                            type(self).__name__)
    os.makedirs(self.dir, exist_ok=True)
    os.chdir(self.dir)
def analyze(self):
    """Average the acquired camera frames and push the result to the
    camera reference-image dock.

    The dock's RPC connection is now closed on every path (the original
    leaked it if the reshape or the plot call raised), and the camera is
    closed on both the success and the failure path.
    """
    camera_dock = Client("::1", 3288, "camera_reference_image")
    try:
        try:
            timeout_in_seconds = 60
            acquired_images = self.camera.get_acquired_data(
                timeout_in_seconds)
        except Exception as e:
            # Boundary handler: log, re-enable the dock's button and bail
            # out without raising into the scheduler.
            logger.error(e)
            logger.error("Camera acquisition timed out")
            camera_dock.enable_button()
            self.close_camera()
            return
        image_region = self.image_region
        # image_region appears to be (hbin, vbin, hstart, hend, vstart,
        # vend) -- TODO confirm against the camera driver.
        x_pixels = int(
            (image_region[3] - image_region[2] + 1) / image_region[0])
        y_pixels = int(
            (image_region[5] - image_region[4] + 1) / image_region[1])
        images = np.reshape(acquired_images, (self.N, y_pixels, x_pixels))
        image = np.average(images, axis=0)
        self.close_camera()
        camera_dock.plot(image, image_region)
        camera_dock.enable_button()
    finally:
        # Guarantee the RPC connection is released on every path.
        camera_dock.close_rpc()
def main():
    """Demo/diagnostic CLI for a PDQ2 arbitrary waveform generator.

    Optionally resets the device, configures clocking (DCM doubles the
    50 MHz base clock to 100 MHz), uploads one of three programs (demo
    pattern, bit pattern, or a user-supplied spline), then restarts and
    arms the device and can plot the expected interpolation to a file.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    dev = Client(args.server, args.port, "pdq2")
    dev.init()
    if args.reset:
        dev.write(b"\x00\x00")  # flush any escape
        dev.cmd("RESET", True)
        time.sleep(.1)
    # Hold the device quiescent while reprogramming.
    dev.cmd("START", False)
    dev.cmd("ARM", False)
    dev.cmd("DCM", args.dcm)
    freq = 100e6 if args.dcm else 50e6
    dev.set_freq(freq)
    num_channels = dev.get_num_channels()
    num_frames = dev.get_num_frames()
    # NOTE(review): eval() of CLI-supplied expressions -- acceptable for a
    # trusted local tool, never expose to untrusted input.
    times = eval(args.times, globals(), {})
    voltages = eval(args.voltages, globals(), dict(t=times))
    if args.demo:
        # Fill every frame of every channel with an offset copy of the
        # waveform so the traces can be told apart on a scope.
        for ch, channel in enumerate(dev.channels):
            entry = []
            for fr in range(dev.channels[0].num_frames):
                vi = .1*fr + ch + voltages
                entry.append(channel.segment(times, vi, order=args.order,
                                             end=False, aux=args.aux))
                pi = 2*np.pi*(-.5 + .01*fr + .1*ch + 0*voltages)
                fi = 10e6*times/times[-1]
                # Second segment with phase/frequency data; its return
                # value is deliberately unused -- presumably it appends to
                # the channel's internal segment list. TODO confirm.
                channel.segment(2*times, voltages, pi, fi, trigger=False,
                                silence=True, aux=args.aux)
            dev.write_channel(channel, entry)
    elif args.bit:
        v = [-1, 0, -1]
        # for i in range(15):
        #     v.extend([(1 << i) - 1, 1 << i])
        # Scale to the device's full-scale output range.
        v = np.array(v)*dev.channels[0].max_out/dev.channels[0].max_val
        t = np.arange(len(v))
        for channel in dev.channels:
            s = channel.segment(t, v, order=0, shift=15, stop=False,
                                trigger=False)
            # Reuse the same segment in every frame of the channel.
            dev.write_channel(channel, [s for i in range(channel.num_frames)])
    else:
        c = dev.channels[args.channel]
        # Frame table: only the requested frame carries a segment.
        # (local "map" shadows the builtin -- kept for fidelity)
        map = [None] * c.num_frames
        map[args.frame] = c.segment(times, voltages, order=args.order,
                                    aux=args.aux)
        dev.write_channel(c, map)
    dev.cmd("START", True)
    dev.cmd("ARM", not args.disarm)
    dev.cmd("TRIGGER", args.free)
    if args.plot:
        from matplotlib import pyplot as plt
        fig, ax = plt.subplots()
        ax.plot(times, voltages, "xk", label="points")
        if args.order > 0:
            # Reproduce the device-side spline on the host for comparison.
            spline = interpolate.splrep(times, voltages, k=args.order)
            ttimes = np.arange(0, times[-1], 1/freq)
            vvoltages = interpolate.splev(ttimes, spline)
            ax.plot(ttimes, vvoltages, ",b", label="interpolation")
        fig.savefig(args.plot)
def main():
    """Demo/diagnostic CLI for a PDQ2 arbitrary waveform generator.

    Optionally resets the device, configures clocking (DCM doubles the
    50 MHz base clock to 100 MHz), uploads one of three programs (demo
    pattern, bit pattern, or a user-supplied spline), then restarts and
    arms the device and can plot the expected interpolation to a file.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    dev = Client(args.server, args.port, "pdq2")
    dev.init()
    if args.reset:
        dev.write(b"\x00\x00")  # flush any escape
        dev.cmd("RESET", True)
        time.sleep(.1)
    # Hold the device quiescent while reprogramming.
    dev.cmd("START", False)
    dev.cmd("ARM", False)
    dev.cmd("DCM", args.dcm)
    freq = 100e6 if args.dcm else 50e6
    dev.set_freq(freq)
    num_channels = dev.get_num_channels()
    num_frames = dev.get_num_frames()
    # NOTE(review): eval() of CLI-supplied expressions -- acceptable for a
    # trusted local tool, never expose to untrusted input.
    times = eval(args.times, globals(), {})
    voltages = eval(args.voltages, globals(), dict(t=times))
    if args.demo:
        # Fill every frame of every channel with an offset copy of the
        # waveform so the traces can be told apart on a scope.
        for ch, channel in enumerate(dev.channels):
            entry = []
            for fr in range(dev.channels[0].num_frames):
                vi = .1 * fr + ch + voltages
                entry.append(
                    channel.segment(times, vi, order=args.order, end=False,
                                    aux=args.aux))
                pi = 2 * np.pi * (-.5 + .01 * fr + .1 * ch + 0 * voltages)
                fi = 10e6 * times / times[-1]
                # Second segment with phase/frequency data; its return
                # value is deliberately unused -- presumably it appends to
                # the channel's internal segment list. TODO confirm.
                channel.segment(2 * times, voltages, pi, fi, trigger=False,
                                silence=True, aux=args.aux)
            dev.write_channel(channel, entry)
    elif args.bit:
        v = [-1, 0, -1]
        # for i in range(15):
        #     v.extend([(1 << i) - 1, 1 << i])
        # Scale to the device's full-scale output range.
        v = np.array(v) * dev.channels[0].max_out / dev.channels[0].max_val
        t = np.arange(len(v))
        for channel in dev.channels:
            s = channel.segment(t, v, order=0, shift=15, stop=False,
                                trigger=False)
            # Reuse the same segment in every frame of the channel.
            dev.write_channel(channel,
                              [s for i in range(channel.num_frames)])
    else:
        c = dev.channels[args.channel]
        # Frame table: only the requested frame carries a segment.
        # (local "map" shadows the builtin -- kept for fidelity)
        map = [None] * c.num_frames
        map[args.frame] = c.segment(times, voltages, order=args.order,
                                    aux=args.aux)
        dev.write_channel(c, map)
    dev.cmd("START", True)
    dev.cmd("ARM", not args.disarm)
    dev.cmd("TRIGGER", args.free)
    if args.plot:
        from matplotlib import pyplot as plt
        fig, ax = plt.subplots()
        ax.plot(times, voltages, "xk", label="points")
        if args.order > 0:
            # Reproduce the device-side spline on the host for comparison.
            spline = interpolate.splrep(times, voltages, k=args.order)
            ttimes = np.arange(0, times[-1], 1 / freq)
            vvoltages = interpolate.splev(ttimes, spline)
            ax.plot(ttimes, vvoltages, ",b", label="interpolation")
        fig.savefig(args.plot)
def main():
    """Older PDQ2 CLI built on the raw write_cmd/frame interface.

    Optionally resets the device and switches the DCM clock, uploads a
    demo pattern, a bit-test pattern, or the user-supplied spline, then
    re-enables and optionally arms/triggers the device.
    """
    args = get_argparser().parse_args()
    dev = Client(args.server, args.port, "pdq2")
    dev.init()
    if args.reset:
        dev.flush_escape()
        dev.write_cmd("RESET_EN")
        time.sleep(.1)
    # Tri-state DCM flag: truthy enables 100 MHz, exactly 0 disables
    # (50 MHz), None (the remaining case) leaves the clock untouched.
    if args.dcm:
        dev.write_cmd("DCM_EN")
        dev.set_freq(100e6)
    elif args.dcm == 0:
        dev.write_cmd("DCM_DIS")
        dev.set_freq(50e6)
    dev.write_cmd("START_DIS")
    num_channels = dev.get_num_channels()
    num_frames = dev.get_num_frames()
    # NOTE(review): eval() of CLI-supplied expressions -- acceptable for a
    # trusted local tool, never expose to untrusted input.
    times = eval(args.times, globals(), {})
    voltages = eval(args.voltages, globals(), dict(t=times))
    if args.demo:
        # FIXME
        # Out-of-range channel/frame selects all channels/frames.
        channels = [args.channel] if args.channel < num_channels \
            else range(num_channels)
        frames = [args.frame] if args.frame < num_frames \
            else range(num_frames)
        for channel in channels:
            f = []
            for frame in frames:
                # Offset each frame/channel so traces are distinguishable.
                vi = .1 * frame + channel + voltages
                pi = 2 * np.pi * (.01 * frame + .1 * channel + 0 * voltages)
                fi = 10e6 * times / times[-1]
                f.append(b"".join([
                    dev.frame(times, vi, order=args.order, end=False),
                    dev.frame(2 * times, voltages, pi, fi, trigger=False),
                    # dev.frame(2*times, 0*vi+.1, 0*pi, 0*fi+1e6),
                    # dev.frame(times, 0*vi, order=args.order, silence=True),
                ]))
            # Channels map onto (board, dac) pairs.
            board, dac = divmod(channel, dev.num_dacs)
            dev.write_data(dev.add_mem_header(board, dac,
                                              dev.map_frames(f)))
    elif args.bit:
        # Bit-pattern test: step through the DAC bits one at a time.
        # (local "map" shadows the builtin -- kept for fidelity)
        map = [0] * num_frames
        t = np.arange(2 * 16) * 1.
        v = [-1, 0, -1]
        for i in range(15):
            vi = 1 << i
            v.extend([vi - 1, vi])
        v = np.array(v) * dev.get_max_out() / (1 << 15)
        # Truncated to the first three samples for this test.
        t, v = t[:3], v[:3]
        # print(t, v)
        for channel in range(num_channels):
            dev.multi_frame([(t, v)], channel=channel, order=0, map=map,
                            shift=15, stop=False, trigger=False)
    else:
        # Normal path: one spline segment in the requested frame slot.
        tv = [(times, voltages)]
        map = [None] * num_frames
        map[args.frame] = 0
        dev.multi_frame(tv, channel=args.channel, order=args.order, map=map)
    dev.write_cmd("START_EN")
    if not args.disarm:
        dev.write_cmd("ARM_EN")
    if args.free:
        dev.write_cmd("TRIGGER_EN")
    if args.plot:
        from matplotlib import pyplot as plt
        fig, ax0 = plt.subplots()
        ax0.plot(times, voltages, "xk", label="points")
        if args.order:
            # Reproduce the device-side spline on the host for comparison.
            spline = interpolate.splrep(times, voltages, k=args.order)
            ttimes = np.arange(0, times[-1], 1 / dev.get_freq())
            vvoltages = interpolate.splev(ttimes, spline)
            ax0.plot(ttimes, vvoltages, ",b", label="interpolation")
        fig.savefig(args.plot)
def main():
    """Poll wavemeter servers for laser frequencies and push the readings
    to InfluxDB, forever.

    Fixes over the original: the "measurement error" log line was missing
    its format argument, and both ``client`` and ``influx`` could be
    unbound in their ``finally`` blocks if the constructor raised,
    turning a connection error into a NameError.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    servers = {
        idx: {
            "host": ip,
            "notify": 3250,
            "control": 3251
        }
        for idx, ip in enumerate(args.server)
    }
    while True:
        measurements = []
        for _, server in servers.items():
            client = None
            try:
                client = RPCClient(server["host"], server["control"])
                lasers = client.get_laser_db()
                for laser in lasers:
                    meas = client.get_freq(laser, age=args.poll_time,
                                           priority=3, get_osa_trace=False,
                                           blocking=True, mute=False,
                                           offset_mode=False)
                    status, freq, _ = meas
                    if status != WLMMeasurementStatus.OKAY:
                        # Original logged the bare "{}" placeholder.
                        logger.info("{}: measurement error".format(laser))
                        continue
                    f_ref = lasers[laser]["f_ref"]
                    delta = freq - f_ref
                    measurements.append({
                        "measurement": laser,
                        "fields": {
                            "freq": freq,
                            "f_ref": f_ref,
                            "detuning": delta
                        }
                    })
                    logger.info("{}: freq {} THz, f_ref {} THz, "
                                "detuning {} MHz".format(laser, freq,
                                                         f_ref, delta))
            except OSError:
                logger.warning("Error querying server {}".format(server))
            finally:
                # client stays None when the connection itself failed.
                if client is not None:
                    client.close_rpc()
        if measurements == []:
            time.sleep(args.poll_time)
            continue
        # Construct outside try so a failed constructor cannot reach the
        # finally with an unbound name.
        influx = influxdb.InfluxDBClient(host="10.255.6.4",
                                         database=args.database,
                                         username="******",
                                         password="******")
        try:
            influx.write_points(measurements)
        finally:
            influx.close()
        time.sleep(args.poll_time)
def main():
    """Start the ARTIQ dashboard: connect to the master, build the Qt UI,
    restore saved state and run the event loop until exit is requested."""
    # initialize application
    args = get_argparser().parse_args()
    widget_log_handler = log.init_log(args, "dashboard")
    if args.db_file is None:
        # Per-master state file so dashboards pointed at different
        # masters do not clobber each other's layouts.
        args.db_file = os.path.join(
            get_user_config_dir(),
            "artiq_dashboard_{server}_{port}.pyon".format(
                server=args.server.replace(":", "."),
                port=args.port_notify))
    app = QtWidgets.QApplication(["ARTIQ Dashboard"])
    loop = QEventLoop(app)
    asyncio.set_event_loop(loop)
    atexit.register(loop.close)
    smgr = state.StateManager(args.db_file)

    # create connections to master
    rpc_clients = dict()
    for target in "schedule", "experiment_db", "dataset_db":
        client = AsyncioClient()
        loop.run_until_complete(
            client.connect_rpc(args.server, args.port_control,
                               "master_" + target))
        atexit.register(client.close_rpc)
        rpc_clients[target] = client

    # Synchronous one-shot query: the server name is needed before the
    # main window is built.
    config = Client(args.server, args.port_control, "master_config")
    try:
        server_name = config.get_name()
    finally:
        config.close_rpc()

    disconnect_reported = False

    def report_disconnect():
        # Report the lost connection once, however many clients notice.
        nonlocal disconnect_reported
        if not disconnect_reported:
            logging.error("connection to master lost, "
                          "restart dashboard to reconnect")
            disconnect_reported = True

    sub_clients = dict()
    for notifier_name, modelf in (("explist", explorer.Model),
                                  ("explist_status",
                                   explorer.StatusUpdater),
                                  ("datasets", datasets.Model),
                                  ("schedule", schedule.Model)):
        subscriber = ModelSubscriber(notifier_name, modelf,
                                     report_disconnect)
        loop.run_until_complete(
            subscriber.connect(args.server, args.port_notify))
        atexit_register_coroutine(subscriber.close)
        sub_clients[notifier_name] = subscriber

    broadcast_clients = dict()
    for target in "log", "ccb":
        client = Receiver(target, [], report_disconnect)
        loop.run_until_complete(
            client.connect(args.server, args.port_broadcast))
        atexit_register_coroutine(client.close)
        broadcast_clients[target] = client

    # initialize main window
    main_window = MainWindow(
        args.server if server_name is None else server_name)
    smgr.register(main_window)
    mdi_area = MdiArea()
    mdi_area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
    mdi_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
    main_window.setCentralWidget(mdi_area)

    # create UI components
    expmgr = experiments.ExperimentManager(main_window,
                                           sub_clients["explist"],
                                           sub_clients["schedule"],
                                           rpc_clients["schedule"],
                                           rpc_clients["experiment_db"])
    smgr.register(expmgr)
    d_shortcuts = shortcuts.ShortcutsDock(main_window, expmgr)
    smgr.register(d_shortcuts)
    d_explorer = explorer.ExplorerDock(expmgr, d_shortcuts,
                                       sub_clients["explist"],
                                       sub_clients["explist_status"],
                                       rpc_clients["schedule"],
                                       rpc_clients["experiment_db"])
    smgr.register(d_explorer)
    d_datasets = datasets.DatasetsDock(sub_clients["datasets"],
                                       rpc_clients["dataset_db"])
    smgr.register(d_datasets)
    d_applets = applets_ccb.AppletsCCBDock(main_window,
                                           sub_clients["datasets"])
    atexit_register_coroutine(d_applets.stop)
    smgr.register(d_applets)
    broadcast_clients["ccb"].notify_cbs.append(d_applets.ccb_notify)
    d_ttl_dds = moninj.MonInj()
    loop.run_until_complete(d_ttl_dds.start(args.server, args.port_notify))
    atexit_register_coroutine(d_ttl_dds.stop)
    d_schedule = schedule.ScheduleDock(rpc_clients["schedule"],
                                       sub_clients["schedule"])
    smgr.register(d_schedule)
    logmgr = log.LogDockManager(main_window)
    smgr.register(logmgr)
    broadcast_clients["log"].notify_cbs.append(logmgr.append_message)
    widget_log_handler.callback = logmgr.append_message

    # lay out docks
    right_docks = [
        d_explorer, d_shortcuts, d_ttl_dds.ttl_dock, d_ttl_dds.dds_dock,
        d_ttl_dds.dac_dock, d_datasets, d_applets
    ]
    main_window.addDockWidget(QtCore.Qt.RightDockWidgetArea,
                              right_docks[0])
    # Tabify neighbours pairwise so all right-side docks share one area.
    for d1, d2 in zip(right_docks, right_docks[1:]):
        main_window.tabifyDockWidget(d1, d2)
    main_window.addDockWidget(QtCore.Qt.BottomDockWidgetArea, d_schedule)

    # load/initialize state
    if os.name == "nt":
        # HACK: show the main window before creating applets.
        # Otherwise, the windows of those applets that are in detached
        # QDockWidgets fail to be embedded.
        main_window.show()
    smgr.load()
    smgr.start()
    atexit_register_coroutine(smgr.stop)

    # create first log dock if not already in state
    d_log0 = logmgr.first_log_dock()
    if d_log0 is not None:
        main_window.tabifyDockWidget(d_schedule, d_log0)

    if server_name is not None:
        server_description = server_name + " ({})".format(args.server)
    else:
        server_description = args.server
    logging.info("ARTIQ dashboard %s connected to %s",
                 artiq_version, server_description)

    # run
    main_window.show()
    loop.run_until_complete(main_window.exit_request.wait())
def main():
    """CLI entry point: build a spline program from command-line
    expressions and upload it to a PDQ2 device over RPC.

    ``--times``/``--voltages`` are evaluated as Python expressions; the
    resulting knots are optionally spline-interpolated and packed into a
    single segment placed at frame ``--frame`` of the program.
    """
    args = get_argparser().parse_args()
    init_logger(args)
    dev = Client(args.server, args.port, "pdq2")
    freq = dev.get_freq()
    # NOTE(review): eval() of CLI-supplied expressions is acceptable for a
    # trusted local tool only -- never expose these args to untrusted input.
    times = np.around(eval(args.times, globals(), {}) * freq)
    voltages = eval(args.voltages, globals(), dict(t=times / freq))
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement and produces the same dtype.
    dt = np.diff(times.astype(int))
    if args.order:
        # Derivatives at the knots (spalde) give the spline coefficients
        # the device needs for smooth interpolation.
        tck = interpolate.splrep(times, voltages, k=args.order, s=0)
        u = interpolate.spalde(times, tck)
    else:
        # Zeroth order: each knot contributes only a constant amplitude.
        u = voltages[:, None]
    segment = []
    for dti, ui in zip(dt, u):
        segment.append({
            "duration": int(dti),
            "channel_data": [{
                "bias": {
                    "amplitude": [float(uij) for uij in ui]
                }
            }]
        })
    # Pad with empty frames so our segment lands at index args.frame.
    program = [[] for i in range(args.frame)]
    program.append(segment)
    dev.park()
    dev.program(program, [args.channel])
    dev.unpark()
    dev.cmd("TRIGGER", args.free)
def main():
    """Start the ARTIQ dashboard: connect to the master, build the Qt UI,
    restore saved state and run the event loop until exit is requested."""
    # initialize application
    args = get_argparser().parse_args()
    widget_log_handler = log.init_log(args, "dashboard")
    if args.db_file is None:
        # Per-master state file so dashboards pointed at different
        # masters do not clobber each other's layouts.
        args.db_file = os.path.join(
            get_user_config_dir(),
            "artiq_dashboard_{server}_{port}.pyon".format(
                server=args.server.replace(":", "."),
                port=args.port_notify))
    app = QtWidgets.QApplication(["ARTIQ Dashboard"])
    loop = QEventLoop(app)
    asyncio.set_event_loop(loop)
    atexit.register(loop.close)
    smgr = state.StateManager(args.db_file)

    # create connections to master
    rpc_clients = dict()
    for target in "schedule", "experiment_db", "dataset_db":
        client = AsyncioClient()
        loop.run_until_complete(client.connect_rpc(
            args.server, args.port_control, "master_" + target))
        atexit.register(client.close_rpc)
        rpc_clients[target] = client

    # Synchronous one-shot query: the server name is needed before the
    # main window is built.
    config = Client(args.server, args.port_control, "master_config")
    try:
        server_name = config.get_name()
    finally:
        config.close_rpc()

    disconnect_reported = False

    def report_disconnect():
        # Report the lost connection once, however many clients notice.
        nonlocal disconnect_reported
        if not disconnect_reported:
            logging.error("connection to master lost, "
                          "restart dashboard to reconnect")
            disconnect_reported = True

    sub_clients = dict()
    for notifier_name, modelf in (("explist", explorer.Model),
                                  ("explist_status",
                                   explorer.StatusUpdater),
                                  ("datasets", datasets.Model),
                                  ("schedule", schedule.Model)):
        subscriber = ModelSubscriber(notifier_name, modelf,
                                     report_disconnect)
        loop.run_until_complete(subscriber.connect(
            args.server, args.port_notify))
        atexit_register_coroutine(subscriber.close)
        sub_clients[notifier_name] = subscriber

    broadcast_clients = dict()
    for target in "log", "ccb":
        client = Receiver(target, [], report_disconnect)
        loop.run_until_complete(client.connect(
            args.server, args.port_broadcast))
        atexit_register_coroutine(client.close)
        broadcast_clients[target] = client

    # initialize main window
    main_window = MainWindow(args.server
                             if server_name is None else server_name)
    smgr.register(main_window)
    mdi_area = MdiArea()
    mdi_area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
    mdi_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
    main_window.setCentralWidget(mdi_area)

    # create UI components
    expmgr = experiments.ExperimentManager(main_window,
                                           sub_clients["explist"],
                                           sub_clients["schedule"],
                                           rpc_clients["schedule"],
                                           rpc_clients["experiment_db"])
    smgr.register(expmgr)
    d_shortcuts = shortcuts.ShortcutsDock(main_window, expmgr)
    smgr.register(d_shortcuts)
    d_explorer = explorer.ExplorerDock(expmgr, d_shortcuts,
                                       sub_clients["explist"],
                                       sub_clients["explist_status"],
                                       rpc_clients["schedule"],
                                       rpc_clients["experiment_db"])
    smgr.register(d_explorer)
    d_datasets = datasets.DatasetsDock(sub_clients["datasets"],
                                       rpc_clients["dataset_db"])
    smgr.register(d_datasets)
    d_applets = applets_ccb.AppletsCCBDock(main_window,
                                           sub_clients["datasets"])
    atexit_register_coroutine(d_applets.stop)
    smgr.register(d_applets)
    broadcast_clients["ccb"].notify_cbs.append(d_applets.ccb_notify)
    d_ttl_dds = moninj.MonInj()
    loop.run_until_complete(d_ttl_dds.start(args.server, args.port_notify))
    atexit_register_coroutine(d_ttl_dds.stop)
    d_schedule = schedule.ScheduleDock(
        rpc_clients["schedule"], sub_clients["schedule"])
    smgr.register(d_schedule)
    logmgr = log.LogDockManager(main_window)
    smgr.register(logmgr)
    broadcast_clients["log"].notify_cbs.append(logmgr.append_message)
    widget_log_handler.callback = logmgr.append_message

    # lay out docks
    right_docks = [
        d_explorer, d_shortcuts, d_ttl_dds.ttl_dock, d_ttl_dds.dds_dock,
        d_ttl_dds.dac_dock, d_datasets, d_applets
    ]
    main_window.addDockWidget(QtCore.Qt.RightDockWidgetArea,
                              right_docks[0])
    # Tabify neighbours pairwise so all right-side docks share one area.
    for d1, d2 in zip(right_docks, right_docks[1:]):
        main_window.tabifyDockWidget(d1, d2)
    main_window.addDockWidget(QtCore.Qt.BottomDockWidgetArea, d_schedule)

    # load/initialize state
    if os.name == "nt":
        # HACK: show the main window before creating applets.
        # Otherwise, the windows of those applets that are in detached
        # QDockWidgets fail to be embedded.
        main_window.show()
    smgr.load()
    smgr.start()
    atexit_register_coroutine(smgr.stop)

    # create first log dock if not already in state
    d_log0 = logmgr.first_log_dock()
    if d_log0 is not None:
        main_window.tabifyDockWidget(d_schedule, d_log0)

    if server_name is not None:
        server_description = server_name + " ({})".format(args.server)
    else:
        server_description = args.server
    logging.info("ARTIQ dashboard %s connected to %s",
                 artiq_version, server_description)

    # run
    main_window.show()
    loop.run_until_complete(main_window.exit_request.wait())
class scanTest(EnvExperiment):
    """Test experiment: scans a dummy parameter, records noisy sin/cos
    "readout" values, streams them to the RCG plotter and a histogram
    applet, and saves results into per-day HDF5 files.

    NOTE(review): reconstructed from collapsed source -- the grouping of
    a few statements (e.g. the per-step sleep) should be confirmed
    against the original formatting.
    """

    def build(self):
        """Declare devices and arguments (runs at construction time)."""
        self.setattr_device("core")
        self.setattr_device("scheduler")
        self.setattr_device("LTriggerIN")
        self.setattr_argument("scan",
                              scan.Scannable(
                                  default=scan.RangeScan(0, 1, 100)))
        # Load all AD9910 and AD9912 DDS channels specified in device_db:
        for key, val in self.get_device_db().items():
            if isinstance(val, dict) and "class" in val:
                if val["class"] == "AD9910" or val["class"] == "AD9912":
                    setattr(self, "dds_" + key, self.get_device(key))
        self.cpld_list = [self.get_device("urukul{}_cpld".format(i))
                          for i in range(3)]

    def prepare(self):
        """Host-side setup: fetch parametervault parameters, cache DDS CW
        settings, open plotter connections, allocate result arrays."""
        # Grab parametervault params:
        cxn = labrad.connect()
        p = cxn.parametervault
        collections = p.get_collections()
        # Takes over 1 second to do this. We should move away from using
        # labrad units in registry. Would be nice if parametervault was
        # not a labrad server.
        D = dict()
        # locals() captured so unit names in scope ("ms", "us", ...) can
        # serve as multipliers below -- TODO confirm they are in scope.
        L = locals()
        for collection in collections:
            d = dict()
            names = p.get_parameter_names(collection)
            for name in names:
                try:
                    param = p.get_parameter([collection, name])
                    try:
                        units = param.units
                        if units == "":
                            param = param[units]
                        else:
                            # Strip the labrad unit, rescale by the
                            # like-named ARTIQ unit constant.
                            param = param[units] * L[units]
                    except AttributeError:
                        # No units attribute: keep the value as-is.
                        pass
                    except KeyError:
                        # Unit with no ARTIQ equivalent: just strip it.
                        if (units == "dBm" or
                                units == "deg" or
                                units == ""):
                            param = param[units]
                    d[name] = param
                except:  # broken parameter
                    continue
            D[collection] = d
        self.p = edict(D)
        cxn.disconnect()
        # Grab cw parameters:
        # Because parameters are grabbed in prepare stage, loaded dds cw
        # parameters may not be the most current.
        self.dds_list = list()
        self.freq_list = list()
        self.amp_list = list()
        self.att_list = list()
        self.state_list = list()
        # settings[1] appears to be (freq_MHz, amplitude, state,
        # attenuation) -- TODO confirm against the parametervault schema.
        for key, settings in self.p.dds_cw_parameters.items():
            self.dds_list.append(getattr(self, "dds_" + key))
            self.freq_list.append(float(settings[1][0]) * 1e6)
            self.amp_list.append(float(settings[1][1]))
            self.att_list.append(float(settings[1][3]))
            self.state_list.append(bool(float(settings[1][2])))
        # Try to make rcg/hist connection; both are optional.
        try:
            self.rcg = Client("::1", 3286, "rcg")
        except:
            self.rcg = None
        try:
            self.pmt_hist = Client("::1", 3287, "pmt_histogram")
        except:
            self.pmt_hist = None
        # Make scan object for repeating the experiment
        N = int(self.p.StateReadout.repeat_each_measurement)
        self.N = N
        # Create datasets: x is the scan axis, y1/y2 hold N repeats each.
        M = len(self.scan)
        self.set_dataset("x", np.full(M, np.nan), broadcast=True)
        self.set_dataset("y1", np.full((M, N), np.nan), broadcast=True)
        self.set_dataset("y2", np.full((M, N), np.nan), broadcast=True)
        # Pre-generated gaussian noise added to the simulated readout.
        A = np.full((M, N), np.nan)
        for x in np.nditer(A, op_flags=["readwrite"]):
            x[...] = np.random.normal(0, .1)
        self.rand = A
        self.setattr_dataset("x")
        self.setattr_dataset("y1")
        self.setattr_dataset("y2")
        self.yfull1 = np.full(M, np.nan)
        self.yfull2 = np.full(M, np.nan)
        # Simulated per-shot counts for the histogram applet.
        self.hist_counts = np.full((M, N), np.nan)
        for row in range(M):
            for col in range(N):
                self.hist_counts[row][col] = np.random.normal(10, 5)
        # Tab For Plotting
        self.RCG_TAB = "Rabi"
        # Setup for saving data: per-day directory named after the
        # experiment; note the process-wide chdir so relative paths below
        # resolve there.
        self.timestamp = None
        self.dir = os.path.join(os.path.expanduser("~"), "data",
                                datetime.now().strftime("%Y-%m-%d"),
                                type(self).__name__)
        os.makedirs(self.dir, exist_ok=True)
        os.chdir(self.dir)

    def run(self):
        """Scan loop: simulate N readouts per point, stream averages to
        the plotter, and save results in batches of five points."""
        if self.p.line_trigger_settings.enabled:
            # Wait for a mains-line trigger, delayed by the configured
            # offset (plus the 16 ms gate window).
            offset = float(self.p.line_trigger_settings.offset_duration)
            offset = self.core.seconds_to_mu((16 + offset)*ms)
            self.line_trigger(offset)
        else:
            self.core.reset()
        for i, step in enumerate(self.scan):
            for j in range(self.N):
                # Simulated readout: noisy sin^2/cos^2 of the scan value.
                xval = step
                y1val = np.sin(2*np.pi * xval)**2 + self.rand[i, j]
                y2val = np.cos(2*np.pi * xval)**2 + self.rand[i, j]
                self.record_result("y1", (i, j), y1val)
                self.record_result("y2", (i, j), y2val)
            self.record_result("x", i, xval)
            # Per-point averages over the N repeats.
            dp = sum(self.get_dataset("y1")[i]) / self.N
            self.yfull1[i] = dp
            dp1 = sum(self.get_dataset("y2")[i] / self.N)
            self.yfull2[i] = dp1
            self.save_and_send_to_rcg(self.get_dataset("x"), self.yfull1,
                                      "yfull1")
            self.save_and_send_to_rcg(self.get_dataset("x"), self.yfull2,
                                      "yfull2")
            # Persist to HDF5 in batches of five scan points.
            if (i + 1) % 5 == 0:
                self.save_result("x", self.get_dataset("x")[i - 4:i + 1],
                                 xdata=True)
                self.save_result("yfull1", self.yfull1[i - 4:i + 1])
                self.save_result("yfull2", self.yfull2[i - 4:i + 1])
                self.send_to_hist(self.hist_counts[i-5:i].flatten())
            time.sleep(0.5)
        else:
            # for/else: runs after a completed scan to flush the
            # remainder that did not fill a batch of five.
            rem = (i + 1) % 5
            self.save_result("x", self.get_dataset("x")[-rem:], xdata=True)
            self.save_result("yfull1", self.yfull1[-rem:])
            self.save_result("yfull2", self.yfull2[-rem:])
        self.reset_cw_settings(self.dds_list, self.freq_list,
                               self.amp_list, self.state_list,
                               self.att_list)

    @rpc(flags={"async"})
    def save_and_send_to_rcg(self, x, y, name):
        """Lazily create the HDF5 results file, register the scan, then
        push (x, y) to the RCG plotter (best-effort)."""
        if self.timestamp is None:
            # First call: create the results file and record metadata.
            self.timestamp = datetime.now().strftime("%H%M_%S")
            self.filename = self.timestamp + ".h5"
            with h5.File(self.filename, "a") as f:
                datagrp = f.create_group("scan_data")
                datagrp.attrs["plot_show"] = self.RCG_TAB
                f.create_dataset("time", data=[], maxshape=(None,))
                params = f.create_group("parameters")
                for collection in self.p.keys():
                    collectiongrp = params.create_group(collection)
                    for key, val in self.p[collection].items():
                        collectiongrp.create_dataset(key, data=str(val))
            # Append this scan to the running index of scans.
            with open("../scan_list", "a+") as csvfile:
                csvwriter = csv.writer(csvfile, delimiter=",")
                csvwriter.writerow([self.timestamp, type(self).__name__,
                                    os.path.join(self.dir,
                                                 self.filename)])
        if self.rcg is None:
            try:
                self.rcg = Client("::1", 3286, "rcg")
            except:
                return
        try:
            self.rcg.plot(x, y, tab_name=self.RCG_TAB,
                          plot_title=self.timestamp + " - " + name,
                          append=True,
                          file_=os.path.join(os.getcwd(), self.filename))
        except:
            return

    @kernel
    def reset_cw_settings(self, dds_list, freq_list, amp_list,
                          state_list, att_list):
        # Return the CW settings to what they were when prepare
        # stage was run
        self.core.reset()
        for cpld in self.cpld_list:
            cpld.init()
        with parallel:
            for i in range(len(dds_list)):
                dds_list[i].init()
                dds_list[i].set(freq_list[i], amplitude=amp_list[i])
                dds_list[i].set_att(att_list[i]*dB)
                if state_list[i]:
                    dds_list[i].sw.on()
                else:
                    dds_list[i].sw.off()

    @kernel
    def line_trigger(self, offset):
        # Phase lock to mains
        self.core.reset()
        t_gate = self.LTriggerIN.gate_rising(16*ms)
        trigger_time = self.LTriggerIN.timestamp_mu(t_gate)
        # Resume the timeline a fixed offset after the mains edge.
        at_mu(trigger_time + offset)

    @rpc(flags={"async"})
    def record_result(self, dataset, idx, val):
        """Async host-side dataset mutation (safe to call from kernels)."""
        self.mutate_dataset(dataset, idx, val)

    @rpc(flags={"async"})
    def save_result(self, dataset, data, xdata=False):
        """Append *data* to the named resizable HDF5 dataset, creating it
        (and tagging the x axis) on first use."""
        with h5.File(self.filename, "a") as f:
            datagrp = f["scan_data"]
            try:
                datagrp[dataset]
            except KeyError:
                # First batch: create the dataset as unlimited-length.
                data = datagrp.create_dataset(dataset, data=data,
                                              maxshape=(None,))
                if xdata:
                    data.attrs["x-axis"] = True
                return
            # Subsequent batches: grow the dataset and write at the end.
            datagrp[dataset].resize(
                datagrp[dataset].shape[0] + data.shape[0], axis=0)
            datagrp[dataset][-data.shape[0]:] = data

    @rpc(flags={"async"})
    def send_to_hist(self, data):
        """Forward raw counts to the PMT histogram applet."""
        self.pmt_hist.plot(data)

    def analyze(self):
        # Is this necessary?
        # Best-effort teardown of the optional plotter connections.
        try:
            self.rcg.close_rpc()
            self.pmt_hist.close_rpc()
        except:
            pass