Code example #1
from contextlib import contextmanager

@contextmanager  # the yield inside try/finally implies use as a context manager
def switch_database(temp_db_name: str, temp_db_folder: str):
    """Temporarily switch to another nanotune database; restore the original on exit."""
    original_db, original_db_folder = nt.get_database()
    nt.set_database(temp_db_name, db_folder=temp_db_folder)
    try:
        yield
    finally:
        nt.set_database(original_db, db_folder=original_db_folder)
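A minimal usage sketch (the database name, folder and measurement routine below are illustrative, not part of nanotune):

# Everything saved inside this block goes to the temporary database; the
# previously active database is restored afterwards, even if an error occurs.
with switch_database("scratch.db", "/tmp/nanotune_scratch"):
    run_id = take_test_measurement()  # hypothetical measurement routine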
Code example #2
    def save_features(self) -> None:
        """Save this dataset's features into its nanotune metadata entry."""
        nt.set_database(self.db_name, db_folder=self.db_folder)
        ds = load_by_id(self.qc_run_id)
        try:
            nt_meta = json.loads(ds.get_metadata(nt.meta_tag))
        except (RuntimeError, TypeError):
            nt_meta = {}
        nt_meta["features"] = self.features
        ds.add_metadata(nt.meta_tag, json.dumps(nt_meta))
Code example #3
def get_param_values(
    qc_run_id: int,
    db_name: str,
    db_folder: Optional[str] = None,
    return_meta_add_on: Optional[bool] = False,
) -> Tuple[List[List[str]], List[List[str]]]:
    """"""

    if db_folder is None:
        db_folder = nt.config["db_folder"]

    nt.set_database(db_name, db_folder=db_folder)  # pass db_folder so the resolved default is actually used
    ds = load_by_id(qc_run_id)

    nt_metadata = json.loads(ds.get_metadata(nt.meta_tag))
    snapshot = json.loads(ds.get_metadata("snapshot"))["station"]

    device_name = nt_metadata["device_name"]
    device_snap = snapshot["instruments"][device_name]
    submods = device_snap["submodules"].keys()
    param_values = [["Parameter", "Value"]]
    for submod in submods:
        gate_val = device_snap["submodules"][submod]["parameters"]
        try:
            gate_val = gate_val["dc_voltage"]["value"]
        except KeyError:
            gate_val = gate_val["state"]["value"]
        param_values.append([submod, gate_val])

    features = []

    if return_meta_add_on:
        features = [["Feature", "Value"]]

        param_values.append(["db_name", db_name])
        param_values.append(["guid", ds.guid])
        for name, v in nt_metadata.items():
            if name == "elapsed_time" and v is not None:
                m, s = divmod(v, 60)
                h, m = divmod(m, 60)
                v = "{}h {}min {}s".format(h, m, s)
                param_values.append([name, v])
            elif name == "features":
                for fname, fval in v.items():
                    features.append([fname, fval])
            elif name == "normalization_constants":
                param_values.append(["dc normalization", str(v["dc_current"])])
                param_values.append(["rf normalization", str(v["rf"])])
            else:
                if isinstance(v, list):
                    param_values.append([name, *v])
                else:
                    param_values.append([name, v])

    return param_values, features
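A hedged usage sketch (the run ID and database name are illustrative):

# Print the parameter table and, since return_meta_add_on=True, the feature table.
param_rows, feature_rows = get_param_values(42, "device.db", return_meta_add_on=True)
for row in param_rows + feature_rows:
    print(*row)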
Code example #4
def get_last_dataid(
    db_name: str,
    db_folder: Optional[str] = None,
) -> int:
    """
    Return last 'global' dataid for given database. It is this ID that is used
    in plot_by_id
    """
    if db_folder is None:
        db_folder = nt.config["db_folder"]
    if db_name[-2:] != "db":
        db_name += ".db"

    nt.set_database(db_name, db_folder=db_folder)
    last_index = 0
    for experiment in experiments():
        last_index += experiment.last_counter

    return last_index
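Usage sketch (the database name is illustrative):

# The returned index can be passed directly to qcodes' plot_by_id.
last_id = get_last_dataid("device_characterization.db")
print(f"Most recent run id: {last_id}")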
Code example #5
    def __init__(
        self,
        qc_run_id: int,
        db_name: Optional[str] = None,
        db_folder: Optional[str] = None,
    ) -> None:

        if db_folder is None:
            db_folder = nt.config["db_folder"]
        self.db_folder = db_folder

        if db_name is None:
            self.db_name, _ = nt.get_database()
        else:
            nt.set_database(db_name, db_folder=db_folder)
            self.db_name = db_name

        self.qc_run_id = qc_run_id
        self._snapshot: Dict[str, Any] = {}
        self._nt_metadata: Dict[str, Any] = {}
        self._normalization_constants: Dict[str, List[float]] = {}

        self.exp_id: int
        self.guid: str
        self.qc_parameters: List[qc.Parameter]
        self.label: List[str]
        self.dimensions: Dict[str, int] = {}
        self.readout_methods: Dict[str, str] = {}

        self.raw_data: xr.Dataset = xr.Dataset()
        self.data: xr.Dataset = xr.Dataset()
        self.power_spectrum: xr.Dataset = xr.Dataset()
        self.filtered_data: xr.Dataset = xr.Dataset()

        self.from_qcodes_dataset()
        self.prepare_filtered_data()
        self.compute_power_spectrum()
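A hedged construction sketch, assuming this is the __init__ of nanotune's Dataset class (as the call in code example #7 suggests); the run ID and database name are illustrative:

# Constructing the wrapper triggers from_qcodes_dataset, prepare_filtered_data
# and compute_power_spectrum, so the xarray attributes below are populated.
ds = Dataset(qc_run_id=101, db_name="device.db")
print(ds.dimensions, ds.readout_methods)
print(ds.data)
print(ds.power_spectrum)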
Code example #6
    def _save_to_db(
        self,
        parameters: Sequence[Parameter],
        setpoints: Sequence[Sequence[float]],
        data: np.ndarray,
        nt_label: Sequence[str],
        quality: int = 1,
        write_period: int = 10,
    ) -> Union[None, int]:
        """ Save data to database. Returns run id. """

        nt.set_database(self.db_name, self.db_folder)

        if len(parameters) not in [1, 2]:
            logger.error("Only 1D and 2D sweeps supported right now.")
            return None

        meas = Measurement()

        if len(parameters) == 1:
            meas.register_parameter(parameters[0])
            meas.register_parameter(self.dummy_lockin.R,
                                    setpoints=(parameters[0], ))

            with meas.run() as datasaver:
                for x_indx, x_val in enumerate(setpoints[0]):
                    parameters[0](x_val)
                    datasaver.add_result((parameters[0], x_val),
                                         (self.dummy_lockin.R, data[x_indx]))

                dataid = datasaver.run_id

        if len(parameters) == 2:
            meas.register_parameter(parameters[0])
            meas.register_parameter(parameters[1])
            meas.register_parameter(self.dummy_lockin.R,
                                    setpoints=(parameters[0], parameters[1]))

            with meas.run() as datasaver:
                for x_indx, x_val in enumerate(setpoints[0]):
                    parameters[0](x_val)
                    for y_indx, y_val in enumerate(setpoints[1]):
                        parameters[1](y_val)
                        # qdot.voltage_nodes[2].v(x_val)
                        # qdot.voltage_nodes[4].v(y_val)
                        datasaver.add_result(
                            (parameters[0], x_val),
                            (parameters[1], y_val),
                            (self.dummy_lockin.R, data[x_indx, y_indx]),
                        )

                dataid = datasaver.run_id

        ds = load_by_id(dataid)

        meta_add_on = dict.fromkeys(nt.config["core"]["meta_fields"], Any)
        meta_add_on["device_name"] = self.name
        nm = dict.fromkeys(["dc_current", "rf"], (0, 1))
        meta_add_on["normalization_constants"] = nm

        ds.add_metadata(nt.meta_tag, json.dumps(meta_add_on))

        current_label = dict.fromkeys(LABELS, 0)
        for label in nt_label:
            if label is not None:
                if label not in LABELS:
                    logger.error("CapacitanceModel: invalid label %s.", label)
                    raise ValueError(f"Invalid label: {label}")
                current_label[label] = 1
                current_label["good"] = quality

        # print('data id {} current label: {} '.format(dataid, current_label ))
        for label, value in current_label.items():
            ds.add_metadata(label, value)

        return dataid
Code example #7
def export_data(
    category: str,
    db_names: List[str],
    stages: List[str],
    skip_ids: Optional[Dict[str, List[int]]] = None,
    quality: Optional[int] = None,
    filename: Optional[str] = None,
    db_folder: Optional[str] = None,
) -> None:
    """"""
    assert isinstance(db_names, list)
    assert isinstance(stages, list)

    if db_folder is None:
        db_folder = nt.config["db_folder"]

    if category in ["pinchoff", "clmboscs"]:
        dim = 1
    elif category in [
            "dotregime",
            "singledot",
            "doubledot",
            "clmbdiam",
            "outerbarriers",
    ]:
        dim = 2
    else:
        logger.error(
            "Unknown category '%s'. Please update utils/export_data.py "
            "with the dimensions of this data.", category)
        raise ValueError(f"Unknown data category: {category}")

    shape = tuple(nt.config["core"]["standard_shapes"][str(dim)])
    condensed_data_all = np.empty(
        (len(nt.config["core"]["data_types"]), 0, np.prod(shape)))

    relevant_ids: Dict[str, List[int]] = {}
    for db_name in db_names:
        relevant_ids[db_name] = []
        nt.set_database(db_name, db_folder)
        for stage in stages:
            try:
                if quality is None:
                    relevant_ids[db_name] += nt.get_dataIDs(
                        db_name, stage, db_folder=db_folder)
                else:
                    relevant_ids[db_name] += nt.get_dataIDs(
                        db_name, stage, quality=quality, db_folder=db_folder)
            except Exception as e:
                logger.error("""Unable to load relevant ids
                in {}""".format(db_name))
                logger.error(e)
                break

    labels_exp = []

    for db_name, dataids in relevant_ids.items():
        nt.set_database(db_name, db_folder)
        skip_us = []
        if skip_ids is not None:
            try:
                skip_us = skip_ids[db_name]
            except KeyError:
                logger.warning("No data IDs to skip in {}.".format(db_name))

        for d_id in dataids:
            if d_id not in skip_us:
                df = Dataset(d_id, db_name, db_folder=db_folder)

                condensed_data = prep_data(df, category)
                condensed_data_all = np.append(condensed_data_all,
                                               condensed_data[0],
                                               axis=1)
                new_label = export_label(df.label, df.quality, category)
                labels_exp.append(new_label)

    n = list(condensed_data_all.shape)
    n[-1] += 1

    data_w_labels = np.zeros(n)
    data_w_labels[:, :, -1] = labels_exp
    data_w_labels[:, :, :-1] = condensed_data_all

    if filename is None:
        filename = "_".join(stages)
    path = os.path.join(db_folder, filename)
    np.save(path, data_w_labels)
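A hedged call sketch (the database names, stage and skipped run IDs are illustrative):

# Condense good (quality=1) pinch-off traces from two databases into a single
# .npy file, skipping two known-bad runs in the first database.
export_data(
    "pinchoff",
    ["device1.db", "device2.db"],
    ["pinchoff"],
    skip_ids={"device1.db": [12, 13]},
    quality=1,
    filename="pinchoff_training_data",
)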
Code example #8
    def _take_data(self, qc_measurement_parameters: List[qc.Parameter]) -> int:
        """
        It will always sweep the same gates and measure the same parameter
        TO DO: Implement smart way of sampling measurement points
        """
        meas = QC_Measurement()
        output = []
        output_dict: Dict[str, Optional[float]] = {}
        gate_parameters = []
        n_points_true = [0, 0]
        gates_to_sweep = self.setpoint_settings['gates_to_sweep']

        nt.set_database(self.data_settings['db_name'],
                        db_folder=self.data_settings['db_folder'])

        nt_meta = self._prepare_nt_metadata()

        with self.set_up_gates_for_measurement():
            for gate in gates_to_sweep:
                meas.register_parameter(gate.dc_voltage)
                gate_parameters.append(gate.dc_voltage)

            for m_param in qc_measurement_parameters:
                _flush_buffers(m_param)
                meas.register_parameter(m_param, setpoints=gate_parameters)
                output.append([m_param, None])
                output_dict[m_param.full_name] = None

            start_time = time.time()
            done = False

            with meas.run() as datasaver:
                # Save some important metadata before we start measuring
                datasaver.dataset.add_metadata(nt.meta_tag, json.dumps(nt_meta))

                for set_point0 in self.current_setpoints[0]:
                    gates_to_sweep[0].dc_voltage(set_point0)
                    self.do_at_outer_setpoint(set_point0)
                    n_points_true[0] += 1

                    if len(gates_to_sweep) == 2:
                        gates_to_sweep[1].use_ramp(True)
                        start_voltage = self.current_setpoints[1][0]

                        gates_to_sweep[1].dc_voltage(start_voltage)
                        gates_to_sweep[1].use_ramp(False)

                        for set_point1 in self.current_setpoints[1]:
                            gates_to_sweep[1].dc_voltage(set_point1)
                            n_points_true[1] += 1
                            m_params = qc_measurement_parameters
                            for p, parameter in enumerate(m_params):
                                value = parameter.get()
                                output[p][1] = value
                                output_dict[parameter.full_name] = value

                            paramx = gates_to_sweep[0].dc_voltage.full_name
                            paramy = gates_to_sweep[1].dc_voltage.full_name
                            datasaver.add_result(
                                (paramx, set_point0),
                                (paramy, set_point1),
                                *output, # type: ignore
                            )
                            done = self.finish_early(output_dict)  # type: ignore
                            if done:
                                break
                    else:
                        m_params = qc_measurement_parameters
                        for p, parameter in enumerate(m_params):
                            value = parameter.get()
                            output[p][1] = value
                            output_dict[parameter.full_name] = value

                        paramx = gates_to_sweep[0].dc_voltage.full_name
                        datasaver.add_result(
                            (paramx, set_point0), *output # type: ignore
                        )
                        done = self.finish_early(output_dict)  # type: ignore
                    if done:
                        break

                elapsed_time = time.time() - start_time
                minutes, seconds = divmod(elapsed_time, 60)
                msg = "Elapsed time to take data: {:.0f} min, {:.2f} sec."
                logger.info(msg.format(minutes, seconds))

                # Add last bits of info to metadata
                nt_meta["n_points"] = n_points_true
                nt_meta["elapsed_time"] = round(float(elapsed_time), 2)

                datasaver.dataset.add_metadata(nt.meta_tag, json.dumps(nt_meta))

        return datasaver.run_id
Code example #9
File: tuner.py  Project: oobeya-space/nanotune
    def __init__(
        self,
        name: str,
        data_settings: Dict[str, Any],
        classifiers: Dict[str, Classifier],
        setpoint_settings: Dict[str, Any],
        fit_options: Optional[Dict[str, Dict[str, Any]]] = None,
    ) -> None:
        super().__init__(name)

        self.classifiers = classifiers

        assert 'db_name' in data_settings.keys()
        if 'db_folder' in data_settings.keys():
            nt.set_database(data_settings['db_name'],
                            db_folder=data_settings['db_folder'])
        else:
            nt.set_database(data_settings['db_name'])

        if data_settings.get('qc_experiment_id') is None:
            try:
                self.qcodes_experiment = load_last_experiment()
            except ValueError:
                logger.warning(
                    'No qcodes experiment found. Starting a new '
                    'one called "automated_tuning", with an unknown sample.')
                self.qcodes_experiment = new_experiment("automated_tuning",
                                                        sample_name="unknown")
            exp_id = self.qcodes_experiment.exp_id
            data_settings['qc_experiment_id'] = exp_id

        self._data_settings = data_settings
        super().add_parameter(
            name="data_settings",
            label="data_settings",
            docstring="",
            set_cmd=self.update_data_settings,
            get_cmd=self.get_data_settings,
            initial_value=data_settings,
            vals=vals.Dict(),
        )
        if fit_options is None or not fit_options:
            fit_options = {
                key: {}
                for key in nt.config['core']['implemented_fits']
            }

        self._fit_options = fit_options
        super().add_parameter(
            name="fit_options",
            label="fit_options",
            docstring="",
            set_cmd=self.set_fit_options,
            get_cmd=self.get_fit_options,
            initial_value=fit_options,
            vals=vals.Dict(),
        )

        super().add_parameter(
            name="setpoint_settings",
            label="setpoint_settings",
            docstring="options for setpoint determination",
            set_cmd=None,
            get_cmd=None,
            initial_value=setpoint_settings,
            vals=vals.Dict(),
        )
Code example #10
    def __init__(
        self,
        experiment_id: Optional[int] = None,
        db_folder: Optional[str] = None,
        db_name: Optional[str] = None,
        start_over: bool = False,
        figure_fontsize: int = 8,
    ) -> None:
        """"""
        if db_folder is None:
            db_folder = nt.config["db_folder"]

        LABELS = list(dict(nt.config["core"]["labels"]).keys())

        if db_name is None:
            logger.warning("Labelling default main database.")
            db_name = nt.config["main_db"]
        nt.set_database(db_name)
        self.db_name = db_name

        self.db_folder = db_folder

        # print(qc.config['core']['db_location'])
        matplotlib.rc("font", size=figure_fontsize)
        super(LabellingTool, self).__init__()

        self.current_label = dict.fromkeys(LABELS, 0)
        self.experiment_id = experiment_id

        if self.experiment_id is None:
            logger.error("Please select an experiment. Labelling entire " +
                         " database is not supported yet.")
            raise NotImplementedError
            # all_experiments = experiments()
            # for e in all_experiments:
            # self.experiment = e
            # (self._iterator_list,
            #  self.labelled_ids,
            #  self.n_total) = self.get_data_ids(start_over)
        else:
            try:
                self.experiment = load_experiment(self.experiment_id)

                (
                    self._iterator_list,
                    self.labelled_ids,
                    self.n_total,
                ) = self.get_data_ids(start_over)

                self._id_iterator = iter(self._iterator_list)
                try:
                    self.current_id = self._id_iterator.__next__()
                except StopIteration:
                    logger.warning("All data of this experiment is already " +
                                   "labelled")
                    raise

            except ValueError:
                msg = "Unable to load experiment."
                # ee = experiments()
                # for e in ee:
                #     msg += e.name + '\n'
                qtw.QMessageBox.warning(self,
                                        "Error instantiating LabellingTool.",
                                        msg, qtw.QMessageBox.Ok)
            except IndexError:
                msg = "Did not find any unlabelled data in experiment "
                msg += self.experiment.name + "."
                qtw.QMessageBox.warning(self,
                                        "Error instantiating LabellingTool.",
                                        msg, qtw.QMessageBox.Ok)

        self._main_widget = qtw.QWidget(self)
        self.setCentralWidget(self._main_widget)

        self.initUI()
        self.show()