def add_gate_label(plots, id):
    """Annotate plot item(s) with the labels and voltages of active MDAC gates.

    Reads the station snapshot of the given dataset, collects every MDAC
    channel whose voltage is numerically non-zero and writes them as a text
    box anchored to the bottom-right corner of each plot item.
    """
    dataset = load_by_id(id)
    snapshot = json.loads(dataset.get_metadata('snapshot'))
    channels = snapshot['station']['instruments']['mdac']['submodules']

    lines = []
    for num in range(1, 65):
        voltage_info = channels['ch{num:02d}'.format(num=num)]['parameters']['voltage']
        voltage = voltage_info['value']
        # Skip channels sitting at (numerically) zero volts.
        if abs(voltage) > 1e-6:
            lines.append('{}: {:+.4f}'.format(voltage_info['label'], voltage))

    if isinstance(plots, pyplot.PlotWindow):
        targets = plots.items
    elif isinstance(plots, pyplot.PlotItem):
        targets = (plots,)
    else:
        raise TypeError("Either pass a window, or a PlotItem in a window")

    for target in targets:
        if not isinstance(target, pyplot.PlotItem):
            print("Item is a {}".format(type(target)))
            continue
        box = target.textbox('<br>'.join(lines))
        box.anchor('br')
        box.offset = (-10, -50)
def correct_label(
    dataid: int,
    db_name: str,
    new_stage: str,
    new_quality: int,
    db_folder: Optional[str] = None,
) -> bool:
    """Overwrite the tuning-stage and quality labels of a dataset.

    All known stage labels are reset to 0, the new stage is set to 1 and
    the overall "good" flag is set to the new quality.

    Args:
        dataid: qcodes run id of the dataset to relabel.
        db_name: database file name (".db" is appended if missing).
        new_stage: one of the stages in LABELS.
        new_quality: 1 for good, 0 for poor.
        db_folder: folder containing the database; defaults to
            nt.config["db_folder"].

    Returns:
        True if relabelling succeeded, False if new_stage is unknown.
    """
    if db_folder is None:
        db_folder = nt.config["db_folder"]
    if db_name[-2:] != "db":
        db_name += ".db"
    # Bug fix: previously nt.config["db_folder"] was joined here
    # unconditionally, silently ignoring an explicitly passed db_folder.
    db_path = os.path.join(db_folder, db_name)
    qc.config["core"]["db_location"] = db_path

    ds = load_by_id(dataid)
    if new_stage not in LABELS:
        logger.error("Wrong tuning stage. Leaving old label.")
        return False

    new_label = dict.fromkeys(LABELS, 0)
    new_label[new_stage] = 1
    new_label["good"] = int(new_quality)
    for label, value in new_label.items():
        ds.add_metadata(label, value)
    return True
def save_predicted_category(self):
    """Write the current quality prediction into the dataset's nanotune metadata."""
    dataset = load_by_id(self.current_id)
    metadata = json.loads(dataset.get_metadata(nt.meta_tag))
    metadata["predicted_category"] = int(self.current_quality)
    dataset.add_metadata(nt.meta_tag, json.dumps(metadata))
def test_dataset_property_getters(nt_dataset_pinchoff, tmp_path):
    """Dataset properties must mirror the underlying qcodes dataset and be read-only."""
    ds = Dataset(1, db_name="temp.db", db_folder=str(tmp_path))
    assert ds._normalization_constants == ds.normalization_constants

    # Both readout channels share the same fitted values apart from
    # amplitude and slope.
    common = {
        "low_signal": 0,
        "high_signal": 1,
        "residuals": 0.5,
        "offset": 50,
        "transition_signal": 0.5,
        "low_voltage": -0.06,
        "high_voltage": -0.03,
        "transition_voltage": -0.05,
    }
    expected_features = {
        "dc_current": dict(common, amplitude=0.6, slope=1000),
        "dc_sensor": dict(common, amplitude=0.5, slope=800),
    }
    assert ds.features == expected_features

    assert ds._snapshot == ds.snapshot
    qc_ds = load_by_id(1)
    assert ds.snapshot == json.loads(qc_ds.get_metadata("snapshot"))
    nt_metadata = json.loads(qc_ds.get_metadata(nt.meta_tag))
    assert ds.nt_metadata == nt_metadata

    # All of these are read-only properties.
    for attr in ("snapshot", "normalization_constants", "features"):
        with pytest.raises(AttributeError):
            setattr(ds, attr, {})

    qc_ds = load_by_id(1)
    assert ds.features == nt_metadata["features"]
def test_pinchofffit_features_property(nt_dataset_pinchoff, tmp_path):
    """Accessing PinchoffFit.features computes features and persists them to metadata."""
    fit = PinchoffFit(1, "temp.db", db_folder=str(tmp_path))
    assert not fit._features

    computed = fit.features
    assert computed

    stored = json.loads(load_by_id(1).get_metadata(nt.meta_tag))
    assert stored["features"] == computed
def test_coulomboscillationfit_features(nt_dataset_coulomboscillation, tmp_path):
    """Accessing CoulombOscillationFit.features computes features and persists them."""
    fit = CoulombOscillationFit(1, "temp.db", db_folder=str(tmp_path))
    assert not fit._features

    computed = fit.features
    assert computed

    stored = json.loads(load_by_id(1).get_metadata(nt.meta_tag))
    assert stored["features"] == computed
def save_features(self) -> None:
    """Merge this fit's features into the dataset's nanotune metadata.

    Metadata entries other than "features" are preserved; if no nanotune
    metadata exists yet (or it cannot be parsed), a fresh dictionary is
    created.
    """
    nt.set_database(self.db_name, db_folder=self.db_folder)
    ds = load_by_id(self.qc_run_id)
    try:
        nt_meta = json.loads(ds.get_metadata(nt.meta_tag))
    except (RuntimeError, TypeError):
        # No (valid) nanotune metadata on this dataset yet -- start fresh.
        # (The exception object was previously bound but never used.)
        nt_meta = {}
    nt_meta["features"] = self.features
    ds.add_metadata(nt.meta_tag, json.dumps(nt_meta))
def get_param_values(
    qc_run_id: int,
    db_name: str,
    db_folder: Optional[str] = None,
    return_meta_add_on: Optional[bool] = False,
) -> Tuple[List[List[str]], List[List[str]]]:
    """Compile device parameter values (and optionally metadata) of a dataset.

    Args:
        qc_run_id: qcodes run id of the dataset.
        db_name: database file holding the dataset.
        db_folder: folder containing the database; defaults to
            nt.config["db_folder"].
        return_meta_add_on: if True, also compile the fitted features and
            append extra metadata rows (db name, guid, elapsed time, ...)
            to the parameter table.

    Returns:
        Tuple of (parameter table, feature table); each table is a list of
        [name, value] rows starting with a header row. The feature table
        is empty unless return_meta_add_on is True.
    """
    if db_folder is None:
        db_folder = nt.config["db_folder"]
    # Bug fix: db_folder was previously computed but never passed on, so
    # the database was always looked up in the default folder.
    nt.set_database(db_name, db_folder=db_folder)
    ds = load_by_id(qc_run_id)

    nt_metadata = json.loads(ds.get_metadata(nt.meta_tag))
    snapshot = json.loads(ds.get_metadata("snapshot"))["station"]
    device_name = nt_metadata["device_name"]
    device_snap = snapshot["instruments"][device_name]

    param_values = [["Parameter", "Value"]]
    for submod in device_snap["submodules"].keys():
        params = device_snap["submodules"][submod]["parameters"]
        # Gates expose "dc_voltage"; other submodules expose "state".
        try:
            gate_val = params["dc_voltage"]["value"]
        except KeyError:
            gate_val = params["state"]["value"]
        param_values.append([submod, gate_val])

    features: List[List[str]] = []
    if return_meta_add_on:
        features = [["Feature", "Value"]]
        param_values.append(["db_name", db_name])
        param_values.append(["guid", ds.guid])
        for name, v in nt_metadata.items():
            if name == "elapsed_time" and v is not None:
                m, s = divmod(v, 60)
                h, m = divmod(m, 60)
                v = "{}h {}min {}s".format(h, m, s)
                param_values.append([name, v])
            elif name == "features":
                for fname, fval in v.items():
                    features.append([fname, fval])
            elif name == "normalization_constants":
                param_values.append(["dc normalization", str(v["dc_current"])])
                param_values.append(["rf normalization", str(v["rf"])])
            else:
                # isinstance instead of type(...) == list.
                if isinstance(v, list):
                    param_values.append([name, *v])
                else:
                    param_values.append([name, v])
    return param_values, features
def determine_regime(
    self,
    segment_ids: List[int],
) -> Dict[int, int]:
    """Predict the dot regime category of each charge-diagram segment.

    Runs the 'singledot', 'doubledot' and 'dotregime' classifiers on every
    segment and combines their predictions into one category per segment.
    The category is also written back into each segment's nanotune
    metadata under "predicted_category".

    Args:
        segment_ids: qcodes run ids of the segments, stored in the segment
            database named in self.data_settings.

    Returns:
        Mapping from segment data id to predicted category (a
        DOT_LABLE_MAPPING value).
    """
    seg_db_name = self.data_settings['segment_db_name']
    seg_db_folder = self.data_settings['segment_db_folder']
    with nt.switch_database(seg_db_name, seg_db_folder):
        segment_regimes = {}
        for data_id in segment_ids:
            # Each predict() presumably returns a sequence of per-model
            # votes; any() treats a segment as good if at least one vote
            # is positive. TODO confirm against classifier implementation.
            goodsingle = self.classifiers['singledot'].predict(
                data_id, seg_db_name, db_folder=seg_db_folder)
            gooddouble = self.classifiers['doubledot'].predict(
                data_id, seg_db_name, db_folder=seg_db_folder)
            dotregime = self.classifiers['dotregime'].predict(
                data_id, seg_db_name, db_folder=seg_db_folder)
            if any(goodsingle) and any(gooddouble):
                # both good single and doubledot
                # category = DOT_LABLE_MAPPING['doubledot'][1]
                # NOTE(review): the commented-out line above suggests this
                # tie used to resolve to doubledot -- confirm singledot is
                # the intended winner.
                category = DOT_LABLE_MAPPING["singledot"][1]
            else:
                # check if one is good and whether they contradict the
                # dot regime prediction
                if any(goodsingle) and not any(gooddouble):
                    # check if regime clf suggests a single dot as well
                    # dotregime = 0 => good single
                    if not any(dotregime):
                        category = DOT_LABLE_MAPPING["singledot"][1]
                    else:
                        category = DOT_LABLE_MAPPING["bothpoor"][0]
                elif not any(goodsingle) and any(gooddouble):
                    # NOTE(review): plain truthiness of `dotregime` is used
                    # here while other branches use any(dotregime) --
                    # confirm this asymmetry is intended.
                    if dotregime:
                        category = DOT_LABLE_MAPPING["doubledot"][1]
                    else:
                        category = DOT_LABLE_MAPPING["bothpoor"][0]
                elif not any(goodsingle) and not any(gooddouble):
                    category = DOT_LABLE_MAPPING["bothpoor"][0]
                else:
                    # NOTE(review): this branch is logically unreachable
                    # given the checks above; if it were ever taken,
                    # `category` would be unbound below.
                    logger.error(
                        "ChargeDiagram.check_quality: Unable to " +
                        "assign dot quality. Unknown combination " +
                        "of single and doubledot predictions.")
            segment_regimes[data_id] = category
            # save predicted category to metadata
            ds = load_by_id(data_id)
            nt_metadata = json.loads(ds.get_metadata(nt.meta_tag))
            nt_metadata["predicted_category"] = category
            ds.add_metadata(nt.meta_tag, json.dumps(nt_metadata))
    return segment_regimes
def from_qcodes_dataset(self):
    """Populate this instance from its underlying qcodes run."""
    source = load_by_id(self.qc_run_id)

    self.exp_id = source.exp_id
    self.guid = source.guid
    self.qc_parameters = source.get_parameters()
    # Raw measurement data is held as an xarray dataset.
    self.raw_data = source.to_xarray_dataset()

    self._load_metadata_from_qcodes(source)
    self._prep_qcodes_data()
def get_data_ids(
    self,
    start_over: bool = False,
) -> Tuple[List[int], List[int], int]:
    """Collect labelled and unlabelled dataset ids of the current database.

    Args:
        start_over: if True, ignore any existing labels and treat every
            dataset as unlabelled. Also forced to True when the database
            has no label columns yet.

    Returns:
        Tuple of (unlabelled ids, labelled ids, total number of datasets).
    """
    unlabelled_ids: List[int] = []
    labelled_ids: List[int] = []

    print("getting datasets")
    last_id = nt.get_last_dataid(self.db_name, db_folder=self.db_folder)
    # Bug fix: range(1, last_id) excluded the last dataset; include it.
    all_ids = list(range(1, last_id + 1))
    print("len(all_ids): " + str(len(all_ids)))

    if not start_over:
        # Make sure the database has nanotune label columns. If reading a
        # label fails, nothing has been labelled yet -- start over.
        try:
            ds = load_by_id(1)
            ds.get_metadata("good")
        except OperationalError:
            logger.warning(
                "No nanotune_label column found in current database. "
                "Probably because no data has been labelled yet. "
                "Hence starting over.")
            start_over = True

    print("start_over: " + str(start_over))
    if start_over:
        unlabelled_ids = all_ids
        labelled_ids = []
    else:
        unlabelled_ids = nt.get_unlabelled_ids(self.db_name)
        # Set lookup avoids quadratic membership tests.
        unlabelled_set = set(unlabelled_ids)
        labelled_ids = [x for x in all_ids if x not in unlabelled_set]

    return unlabelled_ids, labelled_ids, len(all_ids)
def print_label(
    dataid: int,
    db_name: str,
    db_folder: Optional[str] = None,
    plot_data: Optional[bool] = True,
) -> None:
    """Print the stage and quality labels of a dataset, optionally plotting it."""
    if db_folder is None:
        db_folder = nt.config["db_folder"]

    ds = load_by_id(dataid)
    if plot_data:
        plot_by_id(dataid)

    print("dataset {} in {}: ".format(dataid, db_name))
    quality_mapping = {1: "good", 0: "poor"}
    for label in LABELS:
        # "good" is the quality flag itself, not a stage label.
        if label == "good":
            continue
        if int(ds.get_metadata(label)) == 1:
            quality = quality_mapping[int(ds.get_metadata("good"))]
            print("{} {}.".format(quality, label))
def save_labels(self) -> None:
    """Persist the checked label buttons as metadata on the current dataset.

    Refuses to save (and warns the user) if no quality has been selected;
    otherwise writes all labels, clears the form and advances to the next
    dataset.
    """
    for button in self._buttons:
        if button.objectName() == label_bad:
            continue
        self.current_label[button.objectName()] = int(button.isChecked())

    if self._quality_group.checkedId() == -1:
        # No quality chosen -- block the save and notify the user.
        msg = 'Please choose quality. \n \n Either "Good" or '
        msg += '"' + label_bad + '"' + " has"
        msg += " to be selected."
        qtw.QMessageBox.warning(self, "Cannot save label.", msg,
                                qtw.QMessageBox.Ok)
        return

    ds = load_by_id(self.current_id)
    for label, value in self.current_label.items():
        ds.add_metadata(label, value)
    self.clear()
    self.next()
def save_segmented_data_return_info(
    self,
    segment_db_name: str,
    segment_db_folder: Optional[str] = None,
) -> Dict[int, Dict[str, Dict[str, Tuple[float, float]]]]:
    """Save each mesh in a new dataset in given databases.

    Args:
        segment_db_name: name of the database to save segments into.
        segment_db_folder: its folder; defaults to nt.config["db_folder"].

    Returns:
        segment_info = {
            data_id: {
                readout_method: {'range_x': (), 'range_y': ()}
            }
        }
    """
    if segment_db_folder is None:
        segment_db_folder = nt.config["db_folder"]

    if not self.segmented_data:
        self.prepare_segmented_data(use_raw_data=True)
    # Create the target database and a matching experiment on first use.
    if not os.path.isfile(os.path.join(segment_db_folder, segment_db_name)):
        ds = load_by_id(self.qc_run_id)
        nt.new_database(segment_db_name, db_folder=segment_db_folder)
        qc.new_experiment(f'segmented_{ds.exp_name}',
                          sample_name=ds.sample_name)

    original_params = self.qc_parameters
    segment_info: Dict[int, Dict[str, Dict[str, Tuple[float, float]]]] = {}

    with nt.switch_database(segment_db_name, segment_db_folder):
        for segment in self.segmented_data:
            meas = Measurement()
            # First two original parameters are the setpoint axes.
            for axis_param in original_params[:2]:
                meas.register_custom_parameter(
                    axis_param.name,
                    label=axis_param.label,
                    unit=axis_param.unit,
                    paramtype="array",
                )

            result: List[List[Tuple[str, np.ndarray]]] = []
            ranges: Dict[str, Dict[str, Tuple[float, float]]] = {}
            m_params = [str(it) for it in list(segment.data_vars)]
            for ip, param_name in enumerate(m_params):
                coord_names = list(segment.coords)
                x_crd_name = coord_names[0]
                y_crd_name = coord_names[1]

                voltage_x = segment[param_name][x_crd_name].values
                voltage_y = segment[param_name][y_crd_name].values
                signal = segment[param_name].values

                ranges[param_name] = {
                    "range_x": (np.min(voltage_x), np.max(voltage_x)),
                    "range_y": (np.min(voltage_y), np.max(voltage_y)),
                }

                setpoints = self.raw_data[param_name].depends_on
                # Bug fix: the unit previously came from
                # original_params[1] for every measured parameter instead
                # of from the parameter being registered.
                meas.register_custom_parameter(
                    original_params[ip + 2].name,
                    label=original_params[ip + 2].label,
                    unit=original_params[ip + 2].unit,
                    paramtype="array",
                    setpoints=setpoints,
                )
                v_x_grid, v_y_grid = np.meshgrid(voltage_x, voltage_y)
                result.append([(setpoints[0], v_x_grid),
                               (setpoints[1], v_y_grid),
                               (param_name, signal.T)])

            with meas.run() as datasaver:
                for r_i in range(len(self.readout_methods)):
                    datasaver.add_result(*result[r_i])
                datasaver.dataset.add_metadata(
                    "snapshot", json.dumps(self.snapshot)
                )
                datasaver.dataset.add_metadata(
                    nt.meta_tag, json.dumps(self.nt_metadata)
                )
                datasaver.dataset.add_metadata(
                    "original_guid", json.dumps(self.guid)
                )
                logger.debug(
                    "New dataset created and populated.\n"
                    + "database: " + str(segment_db_name)
                    + "ID: " + str(datasaver.run_id)
                )
            segment_info[datasaver.run_id] = ranges

    return segment_info
def _save_to_db(
    self,
    parameters: Sequence[Parameter],
    setpoints: Sequence[Sequence[float]],
    data: np.ndarray,
    nt_label: Sequence[str],
    quality: int = 1,
    write_period: int = 10,
) -> Union[None, int]:
    """Save data to database. Returns run id.

    Args:
        parameters: swept qcodes parameters; one for a 1D sweep, two for 2D.
        setpoints: setpoint values, one sequence per swept parameter.
        data: measured signal; 1D array for one parameter, 2D for two.
        nt_label: nanotune stage labels to set to 1; entries must be in LABELS.
        quality: value written to the "good" label (1 good, 0 poor).
        write_period: qcodes Measurement write period in seconds.

    Returns:
        The qcodes run id, or None for unsupported sweep dimensionality.

    Raises:
        ValueError: if an entry of nt_label is not a known label.
    """
    nt.set_database(self.db_name, self.db_folder)
    if len(parameters) not in [1, 2]:
        logger.error("Only 1D and 2D sweeps supported right now.")
        return None

    meas = Measurement()
    # Bug fix: write_period was accepted but never applied.
    meas.write_period = write_period

    if len(parameters) == 1:
        meas.register_parameter(parameters[0])
        meas.register_parameter(self.dummy_lockin.R,
                                setpoints=(parameters[0],))
        with meas.run() as datasaver:
            for x_indx, x_val in enumerate(setpoints[0]):
                parameters[0](x_val)
                datasaver.add_result((parameters[0], x_val),
                                     (self.dummy_lockin.R, data[x_indx]))
        dataid = datasaver.run_id
    else:
        meas.register_parameter(parameters[0])
        meas.register_parameter(parameters[1])
        meas.register_parameter(self.dummy_lockin.R,
                                setpoints=(parameters[0], parameters[1]))
        with meas.run() as datasaver:
            for x_indx, x_val in enumerate(setpoints[0]):
                parameters[0](x_val)
                for y_indx, y_val in enumerate(setpoints[1]):
                    parameters[1](y_val)
                    datasaver.add_result(
                        (parameters[0], x_val),
                        (parameters[1], y_val),
                        (self.dummy_lockin.R, data[x_indx, y_indx]),
                    )
        dataid = datasaver.run_id

    ds = load_by_id(dataid)
    # Bug fix: placeholder values were typing.Any, which json.dumps cannot
    # serialize; use None (-> JSON null) for fields not filled in below.
    meta_add_on = dict.fromkeys(nt.config["core"]["meta_fields"], None)
    meta_add_on["device_name"] = self.name
    meta_add_on["normalization_constants"] = dict.fromkeys(
        ["dc_current", "rf"], (0, 1))
    ds.add_metadata(nt.meta_tag, json.dumps(meta_add_on))

    current_label = dict.fromkeys(LABELS, 0)
    for label in nt_label:
        if label is not None:
            if label not in LABELS:
                logger.error("CapacitanceModel: Invalid label.")
                print(label)
                raise ValueError
            current_label[label] = 1
    current_label["good"] = quality
    for label, value in current_label.items():
        ds.add_metadata(label, value)
    return dataid