def second_empty_temp_db(tmp_path):
    """Yield after creating a second empty database file ('temp2.db') in tmp_path.

    Resets the module-level experiment counter before creating the database and
    forces a garbage-collection pass on teardown so lingering connections to the
    temporary database are released.
    """
    global n_experiments
    n_experiments = 0
    try:
        nt.new_database("temp2.db", tmp_path)
        yield
    finally:
        # Teardown: drop any dangling references to the temp db.
        gc.collect()
def empty_db_different_folder(tmp_path):
    """Yield after creating 'temp2.db' inside a freshly made 'test' subfolder of tmp_path.

    Resets the module-level experiment counter first; on teardown a
    garbage-collection pass releases any lingering database connections.
    """
    global n_experiments
    n_experiments = 0
    try:
        sub_folder = os.path.join(str(tmp_path), "test")
        os.mkdir(sub_folder)
        nt.new_database("temp2.db", sub_folder)
        yield
    finally:
        # Teardown: drop any dangling references to the temp db.
        gc.collect()
def test_database_creation_and_init(tmp_path):
    """Check that new_database creates the db file and initialises label columns."""
    # Fix: local was named `db_folder` but it holds the path of the db *file*.
    db_path = os.path.join(tmp_path, "temp.db")
    assert not os.path.exists(db_path)

    nt.new_database("temp.db", str(tmp_path))
    assert os.path.exists(db_path)

    # Make sure label columns are created. Following lines should not
    # raise an exception.
    ids = get_dataIDs("temp.db", "pinchoff", db_folder=tmp_path)
    assert ids == []
    ids = get_dataIDs("temp.db", "good", db_folder=tmp_path)
    assert ids == []
def save_segmented_data_return_info(
    self,
    segment_db_name: str,
    segment_db_folder: Optional[str] = None,
) -> Dict[int, Dict[str, Dict[str, Tuple[float, float]]]]:
    """Save each mesh segment as a new dataset in the given database.

    Args:
        segment_db_name: name of the database file to save segments into.
        segment_db_folder: folder containing that database; defaults to
            nt.config["db_folder"].

    Returns:
        segment_info = {
            data_id: {
                readout_method: {'range_x': (min, max), 'range_y': (min, max)}
            }
        }
    """
    if segment_db_folder is None:
        segment_db_folder = nt.config["db_folder"]

    if not self.segmented_data:
        self.prepare_segmented_data(use_raw_data=True)

    # Create the target database (and a matching experiment) on first use.
    if not os.path.isfile(os.path.join(segment_db_folder, segment_db_name)):
        ds = load_by_id(self.qc_run_id)
        nt.new_database(segment_db_name, db_folder=segment_db_folder)
        qc.new_experiment(f'segmented_{ds.exp_name}',
                          sample_name=ds.sample_name)

    original_params = self.qc_parameters
    segment_info: Dict[int, Dict[str, Dict[str, Tuple[float, float]]]] = {}

    with nt.switch_database(segment_db_name, segment_db_folder):
        for segment in self.segmented_data:
            meas = Measurement()
            # Setpoint parameters (the first two original parameters).
            meas.register_custom_parameter(
                original_params[0].name,
                label=original_params[0].label,
                unit=original_params[0].unit,
                paramtype="array",
            )
            meas.register_custom_parameter(
                original_params[1].name,
                label=original_params[1].label,
                unit=original_params[1].unit,
                paramtype="array",
            )
            result: List[List[Tuple[str, np.ndarray]]] = []
            ranges: Dict[str, Dict[str, Tuple[float, float]]] = {}
            m_params = [str(it) for it in list(segment.data_vars)]
            for ip, param_name in enumerate(m_params):
                coord_names = list(segment.coords)
                x_crd_name = coord_names[0]
                y_crd_name = coord_names[1]

                voltage_x = segment[param_name][x_crd_name].values
                voltage_y = segment[param_name][y_crd_name].values
                signal = segment[param_name].values

                range_x = (np.min(voltage_x), np.max(voltage_x))
                range_y = (np.min(voltage_y), np.max(voltage_y))
                ranges[param_name] = {}
                ranges[param_name]["range_x"] = range_x
                ranges[param_name]["range_y"] = range_y

                setpoints = self.raw_data[param_name].depends_on
                meas.register_custom_parameter(
                    original_params[ip + 2].name,
                    label=original_params[ip + 2].label,
                    # Bug fix: was original_params[1].unit, attaching the
                    # second setpoint's unit to every readout parameter.
                    unit=original_params[ip + 2].unit,
                    paramtype="array",
                    setpoints=setpoints,
                )
                v_x_grid, v_y_grid = np.meshgrid(voltage_x, voltage_y)
                result.append([(setpoints[0], v_x_grid),
                               (setpoints[1], v_y_grid),
                               (param_name, signal.T)])

            with meas.run() as datasaver:
                for r_i in range(len(self.readout_methods)):
                    datasaver.add_result(*result[r_i])

                datasaver.dataset.add_metadata(
                    "snapshot", json.dumps(self.snapshot)
                )
                datasaver.dataset.add_metadata(
                    nt.meta_tag, json.dumps(self.nt_metadata)
                )
                datasaver.dataset.add_metadata(
                    "original_guid", json.dumps(self.guid)
                )
                logger.debug(
                    "New dataset created and populated.\n"
                    + "database: " + str(segment_db_name)
                    # Bug fix: separator was missing, producing e.g.
                    # "database: temp2.dbID: 3".
                    + ", ID: " + str(datasaver.run_id)
                )
                segment_info[datasaver.run_id] = ranges

    return segment_info