def test_loop_writing_2D(self):
    # pass
    station = Station()
    MockPar = MockParabola(name='Loop_writing_test_2D')
    station.add_component(MockPar)
    loop = Loop(MockPar.x[-100:100:20]).loop(MockPar.y[-50:50:10]).each(
        MockPar.skewed_parabola)
    data1 = loop.run(name='MockLoop_hdf5_test', formatter=self.formatter)
    data2 = DataSet(location=data1.location, formatter=self.formatter)
    data2.read()
    for key in data2.arrays.keys():
        self.checkArraysEqual(data2.arrays[key], data1.arrays[key])

    metadata_equal, err_msg = compare_dictionaries(
        data1.metadata, data2.metadata,
        'original_metadata', 'loaded_metadata')
    self.assertTrue(metadata_equal, msg='\n' + err_msg)

    self.formatter.close_file(data1)
    self.formatter.close_file(data2)
def test_reading_into_existing_data_array(self):
    data = DataSet1D(location=self.loc_provider, name='test_read_existing')
    # closing before file is written should not raise error
    self.formatter.write(data)

    data2 = DataSet(location=data.location, formatter=self.formatter)
    d_array = DataArray(name='dummy',
                        array_id='x_set',  # existing array id in data
                        label='bla', unit='a.u.', is_setpoint=False,
                        set_arrays=(), preset_data=np.zeros(5))
    data2.add_array(d_array)

    # test if d_array refers to same as array x_set in dataset
    self.assertTrue(d_array is data2.arrays['x_set'])

    data2.read()
    # test if reading did not overwrite dataarray
    self.assertTrue(d_array is data2.arrays['x_set'])
    # testing if data was correctly updated into dataset
    self.checkArraysEqual(data2.arrays['x_set'], data.arrays['x_set'])
    self.checkArraysEqual(data2.arrays['y'], data.arrays['y'])

    self.formatter.close_file(data)
    self.formatter.close_file(data2)
def _slice_dataset(dataset: DataSet, slice_objects: Sequence[slice],
                   output_parameter_name: Optional[str],
                   copy_metadata: bool, verbose: int = 0):
    """ Slice the measurement array of a dataset and adjust the setpoint arrays accordingly """
    zarray = dataset.default_parameter_array()
    if output_parameter_name is None:
        output_parameter_name = zarray.name

    set_arrays = zarray.set_arrays
    yarray = set_arrays[0]

    scan_dimension = dataset_dimension(dataset)
    is_1d_dataset = scan_dimension == 1
    is_2d_dataset = scan_dimension == 2

    if verbose:
        print(f'slice_dataset: dimension {scan_dimension} slice_objects {slice_objects}')

    if is_1d_dataset:
        signal_window = zarray[tuple(slice_objects)]
        dataset_window = qtt.data.makeDataSet1Dplain(
            yarray.name, yarray[slice_objects[0]],
            yname=output_parameter_name, y=signal_window,
            xunit=yarray.unit, yunit=zarray.unit)
    elif is_2d_dataset:
        xarray = set_arrays[1]
        signal_window = zarray[tuple(slice_objects)]
        dataset_window = qtt.data.makeDataSet2Dplain(
            xarray.name, xarray[0][slice_objects[1]],
            yarray.name, yarray[slice_objects[0]],
            zname=output_parameter_name, z=signal_window,
            xunit=xarray.unit, yunit=yarray.unit, zunit=zarray.unit)
    else:
        # note: the message was missing its f-string prefix in the original
        raise NotImplementedError(
            f'slicing a multi-dimensional dataset of dimension {scan_dimension} is not supported')

    if copy_metadata:
        dataset_window.metadata = copy.deepcopy(dataset.metadata)
    return dataset_window
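# Illustration of the slicing mechanics used by _slice_dataset, in isolation
# (plain numpy, no qtt needed): a tuple of slice objects indexes the
# measurement array, and the individual slices also cut the setpoint arrays.
import numpy as np

z = np.arange(20).reshape(4, 5)                # stand-in for the measurement array
slice_objects = (slice(1, 3), slice(0, 5, 2))  # rows 1..2, every other column
print(z[slice_objects].shape)                  # (2, 3)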
def average_probability(data_set, location, NewIO, formatter, qubit_num=1):
    # use the first 2D setpoint array to derive a location for the new dataset
    for parameter in data_set.arrays:
        if len(data_set.arrays[parameter].ndarray.shape) == 2 and parameter.endswith('set'):
            data_set_new = DataSet(location=location + '_average_probability_data_' + parameter,
                                   io=NewIO, formatter=formatter)
    # average every 2D array over its first axis and store it in the new dataset
    for parameter in data_set.arrays:
        if len(data_set.arrays[parameter].ndarray.shape) == 2:
            data = deepcopy(data_set.arrays[parameter].ndarray)
            data = np.average(data, axis=0)
            is_setpoint = data_set.arrays[parameter].is_setpoint
            name = data_set.arrays[parameter].name
            array_id = data_set.arrays[parameter].array_id
            data_set_new.add_array(
                DataArray(preset_data=data, name=name,
                          array_id=array_id, is_setpoint=is_setpoint))
    return data_set_new
def test_xarray_example_conversion(self):
    times = pd.date_range("2000-01-01", "2000-01-31", name="time")
    shape = (31, 3)
    xarray_dataset = xr.Dataset(
        {"tmin": (("time", "location"), np.random.rand(*shape)),
         "tmax": (("time", "location"), np.random.rand(*shape))},
        {"time": times, "location": ["IA", "IN", "IL"]})

    qd = DataSet.from_xarray(xarray_dataset)
    xarray_dataset2 = qd.to_xarray()

    self.assertEqual(qd.default_parameter_array().shape,
                     xarray_dataset.tmin.shape)
    self.assertEqual(list(xarray_dataset.coords.keys()),
                     list(xarray_dataset2.coords.keys()))
    self.assertEqual(list(xarray_dataset.data_vars.keys()),
                     list(xarray_dataset2.data_vars.keys()))
    self.assertEqual(xarray_dataset.tmin.shape, xarray_dataset2.tmin.shape)
def slice_dataset(dataset: DataSet, window: Sequence[float], axis: int = 0,
                  verbose: int = 0, copy_metadata: bool = False,
                  output_parameter_name=None) -> DataSet:
    """ Given a dataset and a window for the horizontal axis, return the dataset with the selected window

    Args:
        dataset: Dataset to be sliced
        window: Specification of the window to be selected
        axis: Axis used for slicing
        verbose: Verbosity level
        copy_metadata: If True then copy the metadata of the input dataset
        output_parameter_name: Name of the output array

    Returns:
        Dataset with sliced data
    """
    zarray = dataset.default_parameter_array()
    if output_parameter_name is None:
        output_parameter_name = zarray.name

    set_arrays = zarray.set_arrays
    yarray = set_arrays[0]

    scan_dimension = dataset_dimension(dataset)
    is_1d_dataset = scan_dimension == 1

    if is_1d_dataset:
        if axis != 0:
            raise AssertionError('for a 1D dataset axis should be 0')
    else:
        xarray = set_arrays[1]

    slice_objects = [slice(0, size) for jj, size in enumerate(zarray.shape)]

    if axis == 0:
        slice_array = yarray
        start_idx = int(np.floor(np.interp(window[0], slice_array.ndarray,
                                           np.arange(slice_array.ndarray.size))))
        end_idx = int(np.interp(window[1], slice_array.ndarray,
                                np.arange(slice_array.ndarray.size)))
        slice_objects[0] = slice(start_idx, end_idx)
    else:
        slice_array = xarray
        start_idx = int(np.floor(np.interp(window[0], slice_array.ndarray[0],
                                           np.arange(slice_array.ndarray[0].size))))
        end_idx = int(np.interp(window[1], slice_array.ndarray[0],
                                np.arange(slice_array.ndarray[0].size)))
        slice_objects[1] = slice(start_idx, end_idx)

    return _slice_dataset(dataset, tuple(slice_objects), output_parameter_name,
                          copy_metadata=copy_metadata, verbose=0)
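# Minimal usage sketch for slice_dataset (assumes qtt is installed;
# makeDataSet2Dplain is the same helper used by _slice_dataset above):
# cut a window along the vertical (first) axis of a 2D dataset.
import numpy as np
import qtt.data

ds = qtt.data.makeDataSet2Dplain('x', np.arange(8), 'y', np.arange(10),
                                 zname='signal', z=np.random.rand(10, 8))
ds_window = slice_dataset(ds, window=[2., 6.], axis=0)
print(ds_window.default_parameter_array().shape)  # (4, 8): y rows 2..5 kept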
def convert_to_probability(data_set, location, NewIO, formatter, threshold,
                           qubit_num=1, repetition=100):
    # use the first 2D setpoint array to derive a location for the new dataset
    for parameter in data_set.arrays:
        if len(data_set.arrays[parameter].ndarray.shape) == 2 and parameter.endswith('set'):
            data_set_new = DataSet(location=location + '_average_probability_' + parameter,
                                   io=NewIO, formatter=formatter)

    # data_set = convert_to_01_state(data_set, threshold, qubit_num, repetition, name, unit, sweep_array)
    qubit_data_array = []
    set_array = []
    for parameter in data_set.arrays:
        data_array = data_set.arrays[parameter].ndarray
        dimension_1 = data_array.shape[0]
        arrayid = data_set.arrays[parameter].array_id
        if parameter.endswith('set'):  # or data_set.arrays[parameter].is_setpoint
            set_array.append(
                DataArray(preset_data=data_array, name=parameter,
                          array_id=arrayid, is_setpoint=True))
        elif not parameter.endswith('set'):
            dimension_2 = data_array.shape[1]
            probability_data = np.ndarray(shape=(dimension_1, dimension_2))
            for k in range(dimension_1):
                for l in range(dimension_2):
                    probability_data[k][l] = np.average(data_array[k][l])
            qubit_data_array.append(
                DataArray(preset_data=probability_data, name=parameter,
                          array_id=arrayid, is_setpoint=False))

    for array in set_array:
        data_set_new.add_array(array)
    for q in range(qubit_num):
        data_set_new.add_array(qubit_data_array[q])
    return data_set_new
def seperate_data(data_set, location, NewIO, formatter, qubit_num=1,
                  repetition=100, sweep_arrays=None, sweep_names=None):
    # This function separates the raw data for each experiment (appended to
    # the same sequence) into different data files. This makes plotting and
    # data handling easier.
    start = 0
    end = 0
    seperated_data = []
    for count, array in enumerate(sweep_arrays):
        end = start + len(sweep_arrays[count]) - 1
        seperated_data.append(
            DataSet(location=location + '_' + sweep_names[count] + '_set',
                    io=NewIO, formatter=formatter))
        for parameter in data_set.arrays:
            if parameter.endswith('set') and data_set.arrays[parameter].ndarray.ndim > 1:
                name = sweep_names[count] + '_set'
            else:
                name = parameter
            if data_set.arrays[parameter].ndarray.ndim > 1:
                seperated_data[count].add_array(
                    DataArray(preset_data=data_set.arrays[parameter][:, start:end],
                              name=name, array_id=name, is_setpoint=True))
            else:
                seperated_data[count].add_array(
                    DataArray(preset_data=data_set.arrays[parameter],
                              name=name, array_id=name, is_setpoint=True))
        start = end + 1
    return seperated_data
def analyse_RTS(dataset: DataSet, fig: int = 1) -> dict:
    time = default_setpoint_array(dataset)
    rtsdata = np.array(dataset.default_parameter_array())
    num_bins = 40
    counts, bins = np.histogram(rtsdata, bins=num_bins)
    bincentres = np.array([(bins[i] + bins[i + 1]) / 2
                           for i in range(0, len(bins) - 1)])
    par_fit, result_dict = fit_double_gaussian(bincentres, counts)
    split = result_dict['split']

    plt.figure(fig)
    plt.clf()
    plt.subplot(1, 2, 1)
    plt.plot(time[:10000], rtsdata[:10000], '.', label='signal')
    plt.xlabel('Time')
    plt.title('Selection of points')
    plt.subplot(1, 2, 2)
    _plot_rts_histogram(rtsdata, num_bins, par_fit, split, 'Histogram')

    return {}
def analyse_polarization_line(dataset: DataSet, fig: int = 1, verbose=0) -> dict:
    """ Analyse dataset with polarization line """
    if verbose:
        print('analyse_polarization_line: dataset: %s' % dataset.location)
    signal = dataset.default_parameter_array()
    delta = default_setpoint_array(dataset, signal.name)

    lever_arm = 80
    delta_uev = np.array(delta) * lever_arm
    signal = qtt.algorithms.generic.smoothImage(signal)

    kb = scipy.constants.physical_constants['Boltzmann constant in eV/K'][0] * 1e6
    kT = 75e-3 * kb  # effective electron temperature in ueV

    par_fit, initial_parameters, results = fit_pol_all(delta_uev, signal, kT)
    plot_polarization_fit(delta_uev, signal, results, fig)
    return {}
def test_multifile(self):
    formatter = GNUPlotFormat()
    location = self.locations[1]
    data = DataSetCombined(location)

    formatter.write(data, data.io, data.location)

    filex, filexy = files_combined()

    with open(location + '/x_set.dat') as f:
        self.assertEqual(f.read(), filex)
    with open(location + '/x_set_y_set.dat') as f:
        self.assertEqual(f.read(), filexy)

    data2 = DataSet(location=location)
    formatter.read(data2)

    for array_id in ('x_set', 'y1', 'y2', 'y_set', 'z1', 'z2'):
        self.checkArraysEqual(data2.arrays[array_id], data.arrays[array_id])
def resample_dataset(dataset: DataSet, sample_rate: Tuple[int],
                     copy_metadata: bool = False,
                     output_parameter_name: Optional[str] = None) -> DataSet:
    """ Given a dataset, resample the measurement array

    Args:
        dataset: Dataset to be resampled
        sample_rate: Tuple with the sample rate for each axis. Each rate must be a positive integer
        copy_metadata: If True then copy the metadata of the input dataset
        output_parameter_name: Name of the output array

    Returns:
        Dataset with resampled data
    """
    zarray = dataset.default_parameter_array()
    if output_parameter_name is None:
        output_parameter_name = zarray.name

    slice_objects = tuple(slice(0, size, sample_rate[jj])
                          for jj, size in enumerate(zarray.shape))
    return _slice_dataset(dataset, slice_objects, output_parameter_name,
                          copy_metadata=copy_metadata, verbose=0)
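# Minimal usage sketch for resample_dataset (same qtt assumption as above):
# keep every second row and every second column of a 2D dataset.
import numpy as np
import qtt.data

ds = qtt.data.makeDataSet2Dplain('x', np.arange(8), 'y', np.arange(6),
                                 zname='signal', z=np.random.rand(6, 8))
ds_small = resample_dataset(ds, sample_rate=(2, 2))
print(ds_small.default_parameter_array().shape)  # (3, 4)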
def test_xarray_conversions(self):
    qd = DataSet1D(name="TestNewData_test_xarray_conversions")
    xarray_data_set = qcodes_dataset_to_xarray_dataset(qd)
    qd_transformed = xarray_dataset_to_qcodes_dataset(xarray_data_set)

    m = qd.default_parameter_array()
    mt = qd_transformed.default_parameter_array()
    for key in ["name", "unit"]:
        self.assertEqual(getattr(m, key), getattr(mt, key))

    qd2 = DataSet2D(name="TestNewData_test_xarray_conversions")
    xarray_data_set = qcodes_dataset_to_xarray_dataset(qd2)
    qd2_transformed = xarray_dataset_to_qcodes_dataset(xarray_data_set)

    m = qd2.default_parameter_array()
    mt = qd2_transformed.default_parameter_array()
    for key in ["name", "unit"]:
        self.assertEqual(getattr(m, key), getattr(mt, key))

    xds = qd.to_xarray()
    qds = DataSet.from_xarray(xds)
def average_multirow_dataset(dataset: DataSet, number_of_repetitions: int,
                             new_values=None, parameter_name: str = 'signal',
                             output_parameter_name: str = 'signal') -> DataSet:
    """ Calculate the averaged signal from a 2D dataset with repeated rows

    Args:
        dataset: Dataset containing the data to be averaged
        number_of_repetitions: Number of rows over which to average
        new_values: Optional new values for the averaged axis
        parameter_name: Name of data array to process
        output_parameter_name: Name of output array

    Returns:
        Averaged dataset
    """
    zarray = dataset.default_parameter_array(parameter_name)
    set_arrays = zarray.set_arrays
    xarray = set_arrays[1]
    yarray = set_arrays[0]
    if new_values is None:
        number_of_blocks = int(zarray.shape[0] / number_of_repetitions)
        new_values = np.linspace(yarray[0], yarray[-1], number_of_blocks)

    data = zarray
    ncolumns = data.shape[1]
    averaged_signal = data.transpose().reshape(-1, number_of_repetitions).mean(1).reshape(ncolumns, -1).transpose()

    dataset_averaged = qtt.data.makeDataSet2Dplain(
        xarray.name, xarray[0], yarray.name, new_values,
        zname=output_parameter_name, z=averaged_signal,
        xunit=xarray.unit, yunit=yarray.unit, zunit=zarray.unit)
    return dataset_averaged
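# Usage sketch for average_multirow_dataset (same qtt assumption as above):
# a 2D scan where every setpoint row was measured 4 times in a row is
# averaged down to one row per setpoint.
import numpy as np
import qtt.data

repeats = 4
ds = qtt.data.makeDataSet2Dplain('x', np.arange(5), 'y', np.arange(3 * repeats),
                                 zname='signal', z=np.random.rand(3 * repeats, 5))
ds_avg = average_multirow_dataset(ds, number_of_repetitions=repeats)
print(ds_avg.default_parameter_array().shape)  # (3, 5)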
def plot_dataset(dataset: DataSet, parameter_names: Optional[list] = None,
                 fig: Optional[int] = 1) -> None:
    """ Plot a dataset to a matplotlib figure window

    Args:
        dataset: DataSet to be plotted
        parameter_names: List of arrays to be plotted
        fig: Specification of the matplotlib figure window
    """
    if parameter_names is None:
        parameter_names = [dataset.default_parameter_name()]

    if parameter_names == 'all':
        parameter_names = [name for name in dataset.arrays.keys()
                           if not dataset.arrays[name].is_setpoint]

    default_array = dataset.default_parameter_array()

    if fig:
        plt.figure(fig)
        plt.clf()

        if len(default_array.shape) >= 2:
            if len(parameter_names) > 1:
                arrays = [dataset.default_parameter_array(parameter_name)
                          for parameter_name in parameter_names]
                plot_handle = MatPlot(*arrays, num=fig)
            else:
                plot_handle = MatPlot(
                    dataset.default_parameter_array(parameter_names[0]), num=fig)
        else:
            for idx, parameter_name in enumerate(parameter_names):
                if idx == 0:
                    plot_handle = MatPlot(
                        dataset.default_parameter_array(parameter_name), num=fig)
                else:
                    plot_handle.add(dataset.default_parameter_array(parameter_name))
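# Usage sketch for plot_dataset (same qtt assumption as above; requires a
# matplotlib backend): plot a simple 1D dataset into figure window 1.
import numpy as np
import qtt.data

x = np.linspace(0, 2 * np.pi, 50)
ds = qtt.data.makeDataSet1Dplain('x', x, yname='signal', y=np.sin(x))
plot_dataset(ds, parameter_names=['signal'], fig=1)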
def plot_dataset(dataset: DataSet, scanjob, save=True) -> None:
    """ Plot a dataset to a matplotlib figure window

    Args:
        dataset: DataSet to be plotted
        scanjob: scanjob of the measurement
        save: Select if you want to save the plots
    """
    parameter_names = [name for name in dataset.arrays.keys()
                       if not dataset.arrays[name].is_setpoint]
    default_array = dataset.default_parameter_array()

    # path for saving (Windows-style separators, as in the original)
    base_loc = dataset.default_io.base_location
    folder = '\\' + dataset.location + '\\'
    label = str(scanjob.get('dataset_label'))
    path = base_loc + folder + label

    # 2D plots
    if len(default_array.shape) >= 2:
        for idx, parameter_name in enumerate(parameter_names):
            plot_handle = MatPlot(dataset.arrays[parameter_name], num=idx)
            plot_handle.rescale_axis()
            if save:
                plt.savefig(path + str(idx) + '.png')
    # 1D plots
    else:
        for idx, parameter_name in enumerate(parameter_names):
            plot_handle = MatPlot(dataset.arrays[parameter_name], num=idx)
            plot_handle.rescale_axis()
            if save:
                plt.savefig(path + str(idx) + '.png')
def convert_to_01_state(data_set, threshold, qubit_num=1, repetition=100):
    # data_set = convert_to_ordered_data(data_set, qubit_num, repetition, name, unit, sweep_array)
    # note: seg_size, new_location, NewIO and formatter are module-level
    # globals in the original script
    qubit_data_array = []
    set_array = []
    for parameter in data_set.arrays:
        data_array = data_set.arrays[parameter].ndarray
        dimension_1 = data_array.shape[0]
        array_id = data_set.arrays[parameter].array_id
        if parameter.endswith('set'):  # or data_set.arrays[parameter].is_setpoint
            set_array.append(
                DataArray(preset_data=data_array, name=parameter,
                          array_id=array_id, is_setpoint=True))
        elif not parameter.endswith('set'):
            dimension_2 = data_array.shape[1]
            data = np.ndarray(shape=(dimension_1, dimension_2, repetition))
            for k in range(dimension_1):
                for l in range(dimension_2):
                    for j in range(repetition):
                        # a repetition is scored 1 when the trace minimum
                        # drops below the threshold
                        data[k][l][j] = 1 if np.min(
                            data_array[k][l][j * seg_size:(j + 1) * seg_size]) <= threshold else 0
            qubit_data_array.append(
                DataArray(preset_data=data, name=parameter,
                          array_id=array_id, is_setpoint=False))

    data_set_new = DataSet(location=new_location + '_01_state',
                           io=NewIO, formatter=formatter)
    for array in set_array:
        data_set_new.add_array(array)
    for q in range(qubit_num):
        data_set_new.add_array(qubit_data_array[q])
    return data_set_new
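# The thresholding rule of convert_to_01_state, in isolation: a repetition is
# scored 1 when the minimum of its seg_size-long readout trace drops below the
# threshold (seg_size is a module-level global in the original script; the
# values below are made up for illustration).
import numpy as np

seg_size = 50
threshold = -0.5
trace = 0.1 * np.random.randn(seg_size)
trace[20] = -1.0                                # a blip below threshold
state = 1 if np.min(trace) <= threshold else 0
print(state)                                    # 1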
def majority_vote(data_set, threshold, qubit_num=1, repetition=100,
                  name='frequency', unit='GHz', sweep_array=None, average=False):
    data_set = convert_to_01_state(data_set, threshold, qubit_num, repetition,
                                   name, unit, sweep_array)
    set_array = []
    for parameter in data_set.arrays:
        data_array = data_set.arrays[parameter].ndarray
        dimension_1 = data_array.shape[0]
        arrayid = data_set.arrays[parameter].array_id
        if parameter.endswith('set'):  # or data_set.arrays[parameter].is_setpoint
            set_array.append(
                DataArray(preset_data=data_array, name=parameter,
                          array_id=arrayid, is_setpoint=True))

    dimension_2 = len(sweep_array) if sweep_array is not None else 2
    # dimension_1 = 5
    vote_data = np.ndarray(shape=(dimension_1, dimension_2, repetition))
    average_vote_data = np.ndarray(shape=(dimension_1, dimension_2))
    name = 'vote'
    arrayid = 'vote'
    for k in range(dimension_1):
        for l in range(dimension_2):
            for repe in range(repetition):
                voter = np.array([
                    data_set.digitizerqubit_1[k][l][repe],
                    data_set.digitizerqubit_2[k][l][repe],
                    data_set.digitizerqubit_3[k][l][repe],
                ])
                # two-out-of-three majority over the qubit readout bits
                vote_data[k][l][repe] = 1 if np.sum(voter) >= 2 else 0
            if average:
                average_vote_data[k][l] = np.average(vote_data[k][l])
                print('average: ', average_vote_data[k][l])

    data = vote_data if not average else average_vote_data
    vote_data_array = DataArray(preset_data=data, name=name,
                                array_id=arrayid, is_setpoint=False)

    data_set_new = DataSet(location=new_location, io=NewIO, formatter=formatter)
    for array in set_array:
        data_set_new.add_array(array)
    data_set_new.add_array(vote_data_array)
    return data_set_new
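# The core of majority_vote, in isolation: a two-out-of-three majority over
# the three qubit readout bits, vectorized with plain numpy on toy data.
import numpy as np

votes = np.array([[1, 1, 0],    # two voters high  -> 1
                  [0, 1, 0],    # one voter high   -> 0
                  [1, 1, 1]])   # all voters high  -> 1
majority = (votes.sum(axis=1) >= 2).astype(int)
print(majority)                 # [1 0 1]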
def convert_to_01_state(data_set, threshold, loop_num, qubit_num,
                        name='frequency', unit='GHz', sweep_array=None):
    data_set = convert_to_ordered_data(data_set, loop_num, qubit_num,
                                       name, unit, sweep_array)
    qubit_data_array = []
    for parameter in data_set.arrays:
        data_array = data_set.arrays[parameter]
        dimension_1 = data_array.shape[0]
        arrayid = data_set.arrays[parameter].array_id
        if parameter[-3:] == 'set':  # or data_set.arrays[parameter].is_setpoint
            if len(data_array.shape) == 1:
                set_array1 = DataArray(preset_data=data_array.ndarray,
                                       name=parameter, array_id=arrayid,
                                       is_setpoint=True)
            elif len(data_array.shape) == 2 and not parameter.startswith('index'):
                set_array2 = DataArray(preset_data=data_array.ndarray,
                                       name=parameter, array_id=arrayid,
                                       is_setpoint=True)
        elif parameter[-3:] != 'set':
            seg_num = int(data_set.arrays[parameter].shape[1] / seg_size)
            data = np.ndarray(shape=(dimension_1, seg_num))
            setpara = np.ndarray(shape=(dimension_1, seg_num))
            for k in range(dimension_1):
                for j in range(seg_num):
                    setpara[k][j] = j
                    for i in range(seg_size):
                        if data_array.ndarray[k][j * seg_size + i] <= threshold:
                            data[k][j] = 1
                            break
                        if i == seg_size - 1:
                            data[k][j] = 0
            set_array3 = DataArray(preset_data=setpara, name=name,
                                   array_id=name + '_set', is_setpoint=True)
            qubit_data_array.append(
                DataArray(preset_data=data, name=parameter,
                          array_id=arrayid, is_setpoint=False))

    data_set_new = DataSet(location=new_location, io=NewIO, formatter=formatter)
    data_set_new.add_array(set_array1)
    data_set_new.add_array(set_array2)
    if loop_num > 1:
        data_set_new.add_array(set_array3)
    for q in range(qubit_num):
        data_set_new.add_array(qubit_data_array[q])
    return data_set_new
def convert_to_probability(data_set, threshold, loop_num, qubit_num=1,
                           name='frequency', unit='GHz', sweep_array=None):
    data_set = convert_to_01_state(data_set, threshold, loop_num, qubit_num,
                                   name, unit, sweep_array)
    qubit_data_array = []
    for parameter in data_set.arrays:
        data_array = data_set.arrays[parameter]
        dimension_1 = data_array.shape[0]
        arrayid = data_set.arrays[parameter].array_id
        if parameter[-3:] == 'set':  # or data_set.arrays[parameter].is_setpoint
            if len(data_array.shape) == 1:
                set_array1 = DataArray(preset_data=data_array.ndarray,
                                       name=parameter, array_id=arrayid,
                                       is_setpoint=True)
            if len(data_array.shape) == 2 and not parameter.startswith('index'):
                set_array2 = DataArray(preset_data=data_array.ndarray,
                                       name=parameter, array_id=arrayid,
                                       is_setpoint=True)
        elif parameter[-3:] != 'set':
            seg_num = int(data_set.arrays[parameter].shape[1])
            data = np.ndarray(shape=(dimension_1, loop_num))
            setpara = np.ndarray(shape=(dimension_1, loop_num))
            for k in range(dimension_1):
                # data_k = []
                # setpara_k = []
                state = np.ndarray(shape=(loop_num, int(seg_num / loop_num)))
                for i in range(seg_num):
                    loop = i % loop_num
                    sweep = i // loop_num
                    state[loop][sweep] = data_array.ndarray[k][i]
                for j in range(loop_num):
                    setpara[k][j] = j
                    probability = np.average(state[j])
                    data[k][j] = probability
                if loop_num > 1 and sweep_array is not None:
                    setpara[k] = sweep_array
            set_array3 = DataArray(preset_data=setpara, name=name,
                                   array_id=name + '_set', is_setpoint=True)
            # if loop_num == 1:
            #     data = data.T[0]
            qubit_data_array.append(
                DataArray(preset_data=data, name=parameter,
                          array_id=arrayid, is_setpoint=False))

    data_set_new = DataSet(location=new_location, io=NewIO, formatter=formatter)
    data_set_new.add_array(set_array1)
    data_set_new.add_array(set_array2)
    if loop_num > 1:
        data_set_new.add_array(set_array3)
    for q in range(qubit_num):
        data_set_new.add_array(qubit_data_array[q])
    return data_set_new
def convert_to_ordered_data(data_set, qubit_num=1, repetition=100,
                            name='frequency', unit='GHz', sweep_array=None):
    # note: seg_size, new_location, NewIO and formatter are module-level
    # globals in the original script
    qubit_data_array = []
    set_array = []
    for parameter in data_set.arrays:
        data_array = data_set.arrays[parameter].ndarray
        dimension_1 = data_array.shape[0]
        array_name = parameter
        array_id = data_set.arrays[parameter].array_id
        if parameter.endswith('set'):
            if data_array.ndim == 2 and parameter.startswith('index'):
                dimension_2 = int(data_array.shape[-1] / 2 / (repetition + 1) / seg_size / qubit_num)
                sweep_array = sweep_array if sweep_array is not None else np.linspace(
                    0, dimension_2 - 1, dimension_2)
                data_array = np.array([sweep_array for k in range(dimension_1)])
                array_name = name + '_set'
                array_id = name + '_set'
            if data_array.ndim != 3 or not parameter.startswith('index'):
                set_array.append(
                    DataArray(preset_data=data_array, name=array_name,
                              array_id=array_id, is_setpoint=True))
        elif not parameter.endswith('set') and data_array.ndim == 2:
            data_num = int(data_array.shape[-1] / 2 / (repetition + 1) * repetition)
            qubit_data_num = int(data_num / qubit_num)
            dimension_2 = int(data_array.shape[-1] / 2 / (repetition + 1) / seg_size / qubit_num)
            qubit_data = np.ndarray(shape=(qubit_num, dimension_1, dimension_2,
                                           int(qubit_data_num / dimension_2)))
            for k in range(dimension_1):
                raw_data = data_array[k][::2]
                raw_marker = data_array[k][1::2]
                for seg in range(seg_size * qubit_num * dimension_2):
                    if raw_marker[seg] > 0.2:  # a better threshold ???
                        break
                data = raw_data[seg:data_num + seg]
                print('seg', seg)
                data_reshape = data.reshape(int(data_num / seg_size), seg_size)
                print('data_shape', data_reshape.shape)
                for l in range(dimension_2):
                    for q in range(qubit_num):
                        qubit_data[q][k][l] = data_reshape[
                            qubit_num * l + q::dimension_2 * qubit_num].reshape(
                            seg_size * repetition, )
                        n = 2 if q == 0 else q
                        if q >= 2:
                            n = q + 1
                        qubit_data_array.append(
                            DataArray(preset_data=qubit_data[q],
                                      name=parameter + 'qubit_%d' % (n),
                                      array_id=array_id + 'qubit_%d' % (n),
                                      is_setpoint=False))
        elif not parameter.endswith('set') and data_array.ndim == 3:
            data_num = int(data_array.shape[-1] / 2 / (repetition + 1) * repetition)
            qubit_data_num = int(data_num / qubit_num)
            dimension_2 = data_array.shape[1]
            print('qubit_num, dimension_1, dimension_2, int(qubit_data_num)',
                  qubit_num, dimension_1, dimension_2, int(qubit_data_num))
            qubit_data = np.ndarray(shape=(qubit_num, dimension_1, dimension_2,
                                           int(qubit_data_num)))
            for k in range(dimension_1):
                for l in range(dimension_2):
                    raw_data = data_array[k][l][::2]
                    raw_marker = data_array[k][l][1::2]
                    for seg in range(seg_size * qubit_num):
                        if raw_marker[seg] > 0.2:  # a better threshold ???
                            break
                    # here data contains data from both qubit1 and qubit2
                    data = raw_data[seg:data_num + seg]
                    for q in range(qubit_num):
                        data_reshape = data.reshape(int(data_num / seg_size), seg_size)
                        qubit_data[q][k][l] = data_reshape[q::qubit_num].reshape(
                            seg_size * repetition, )
                        n = 2 if q == 0 else q
                        qubit_data_array.append(
                            DataArray(preset_data=qubit_data[q],
                                      name=parameter + 'qubit_%d' % (n),
                                      array_id=array_id + 'qubit_%d' % (n),
                                      is_setpoint=False))

    data_set_new = DataSet(location=new_location + '_ordered_raw_data',
                           io=NewIO, formatter=formatter)
    for array in set_array:
        data_set_new.add_array(array)
    for q in range(qubit_num):
        data_set_new.add_array(qubit_data_array[q])
    return data_set_new
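# The marker-alignment step of convert_to_ordered_data, in isolation: the
# interleaved channel is split into data (even samples) and marker (odd
# samples), and the data is aligned to the first marker sample above the
# ad-hoc 0.2 threshold used above. Plain numpy, toy values.
import numpy as np

interleaved = np.zeros(40)
interleaved[1::2][7:] = 1.0             # marker goes high at sample 7
raw_data = interleaved[::2]
raw_marker = interleaved[1::2]
seg = int(np.argmax(raw_marker > 0.2))  # first marker sample above threshold
print(seg)                              # 7
aligned = raw_data[seg:seg + 10]        # data aligned to the marker edge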
def _parse_1d_dataset(dataset: DataSet) -> tuple:
    y_data = np.array(dataset.default_parameter_array())
    x_data = np.array(qtt.data.default_setpoint_array(dataset))
    return x_data, y_data
]
arrays4 = [data1, data2, data3]

data_set_2 = new_data(arrays=arrays3, location=None,
                      loc_record={'name': 'T1', 'label': 'Vread_sweep'},
                      io=NewIO)
data_set_2.save_metadata()

test_location = '2017-08-18/20-40-19_T1_Vread_sweep'
data_set_3 = DataSet(location=test_location, io=NewIO)
data_set_3.read()

AWGpara_array = data_set_3.arrays['AWGpara_set'].ndarray
index0_array = data_set_3.arrays['index0_set'].ndarray
digitizer_array = data_set_3.arrays['digitizer_digitizer'].ndarray

# print('loop.data_set: %s' % LP.data_set)
# data = LP.run()
def test_from_server(self, gdm_mock):
    mock_dm = MockDataManager()
    gdm_mock.return_value = mock_dm
    mock_dm.location = 'Mars'
    mock_dm.live_data = MockLive()

    # wrong location or False location - converts to local
    data = DataSet(location='Jupiter', data_manager=True,
                   mode=DataMode.PULL_FROM_SERVER)
    self.assertEqual(data.mode, DataMode.LOCAL)

    data = DataSet(location=False, data_manager=True,
                   mode=DataMode.PULL_FROM_SERVER)
    self.assertEqual(data.mode, DataMode.LOCAL)

    # location matching the server - stays in server mode
    data = DataSet(location='Mars', data_manager=True,
                   mode=DataMode.PULL_FROM_SERVER, formatter=MockFormatter())
    self.assertEqual(data.mode, DataMode.PULL_FROM_SERVER)
    self.assertEqual(data.arrays, MockLive.arrays)

    # cannot write except in LOCAL mode
    with self.assertRaises(RuntimeError):
        data.write()

    # cannot finalize in PULL_FROM_SERVER mode
    with self.assertRaises(RuntimeError):
        data.finalize()

    # now test when the server says it's not there anymore
    mock_dm.location = 'Saturn'
    data.sync()
    self.assertEqual(data.mode, DataMode.LOCAL)
    self.assertEqual(data.has_read_data, True)

    # now it's LOCAL so we *can* write
    data.write()
    self.assertEqual(data.has_written_data, True)

    # location=False: write, read and sync are no-ops
    data.has_read_data = False
    data.has_written_data = False
    data.location = False
    data.write()
    data.read()
    data.sync()
    self.assertEqual(data.has_read_data, False)
    self.assertEqual(data.has_written_data, False)
def dataset_dimension(dataset: DataSet) -> int:
    """ Return the dimension of a DataSet """
    return len(dataset.default_parameter_array().set_arrays)
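# Quick check of dataset_dimension (same qtt assumption as above): a plain 1D
# dataset has one setpoint array, a 2D dataset has two.
import numpy as np
import qtt.data

ds1 = qtt.data.makeDataSet1Dplain('x', np.arange(10), yname='signal',
                                  y=np.zeros(10))
ds2 = qtt.data.makeDataSet2Dplain('x', np.arange(4), 'y', np.arange(3),
                                  zname='signal', z=np.zeros((3, 4)))
print(dataset_dimension(ds1), dataset_dimension(ds2))  # 1 2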
def test_to_server(self, gdm_mock):
    mock_dm = MockDataManager()
    mock_dm.needs_restart = True
    gdm_mock.return_value = mock_dm

    data = DataSet(location='Venus', data_manager=True,
                   mode=DataMode.PUSH_TO_SERVER)
    self.assertEqual(mock_dm.needs_restart, False, data)
    self.assertEqual(mock_dm.data_set, data)
    self.assertEqual(data.data_manager, mock_dm)
    self.assertEqual(data.mode, DataMode.PUSH_TO_SERVER)

    # cannot write except in LOCAL mode
    with self.assertRaises(RuntimeError):
        data.write()

    # now do what the DataServer does with this DataSet: init_on_server
    # fails until there is an array
    with self.assertRaises(RuntimeError):
        data.init_on_server()

    data.add_array(MockArray())
    data.init_on_server()
    self.assertEqual(data.noise.ready, True)

    # we can only add a given array_id once
    with self.assertRaises(ValueError):
        data.add_array(MockArray())
def convert_to_ordered_data(data_set, loop_num, qubit_num,
                            name='frequency', unit='GHz', sweep_array=None):
    # Dimension = '1D'
    # note: repetition, seg_size, new_location, NewIO and formatter are
    # module-level globals in the original script
    for parameter in data_set.arrays:
        data_array = data_set.arrays[parameter]
        dimension_1 = data_array.shape[0]
        arrayid = data_set.arrays[parameter].array_id
        if parameter.endswith('set'):
            if data_array.ndarray.ndim == 1:
                set_array1 = DataArray(preset_data=data_array.ndarray,
                                       name=parameter, array_id=arrayid,
                                       is_setpoint=True)
            elif data_array.ndarray.ndim == 2 and not parameter.startswith('index'):
                set_array2 = DataArray(preset_data=data_array.ndarray,
                                       name=parameter, array_id=arrayid,
                                       is_setpoint=True)
        elif not parameter.endswith('set'):
            data_num = int(data_set.arrays[parameter].shape[1] / 2 / (repetition + 1) * repetition)
            qubit_data_num = int(data_num / qubit_num)
            data = np.ndarray(shape=(dimension_1, data_num))
            marker = np.ndarray(shape=(dimension_1, data_num))
            setpara = np.ndarray(shape=(dimension_1, qubit_data_num))
            qubit_data = np.ndarray(shape=(qubit_num, dimension_1, qubit_data_num))
            qubit_data_array = []
            for k in range(dimension_1):
                raw_data = data_array[k][::2]
                raw_marker = data_array[k][1::2]
                for seg in range(seg_size * loop_num):
                    if raw_marker[seg] > 0.1:  # a better threshold ???
                        break
                data[k] = raw_data[seg:data_num + seg]
                marker[k] = raw_marker[seg:data_num + seg]
                if sweep_array is None:
                    setpara[k] = np.linspace(0, data_num - 1, qubit_data_num)
                else:
                    sa = np.vstack([np.repeat(sweep_array, int(seg_size), axis=0)] * repetition)
                    setpara[k] = sa.reshape(sa.size, )
                if qubit_num > 1:
                    data_reshape = data[k].reshape(int(data_num / seg_size), seg_size)
                    for q in range(qubit_num):
                        qubit_data[q][k] = np.append(np.array([]),
                                                     data_reshape[q::qubit_num])
                elif qubit_num == 1:
                    qubit_data[0][k] = data[k]
            set_array3 = DataArray(preset_data=setpara, name=name,
                                   array_id=name + '_set', is_setpoint=True)
            for q in range(qubit_num):
                qubit_data_array.append(
                    DataArray(preset_data=qubit_data[q],
                              name=parameter + 'qubit_%d' % (q + 1),
                              array_id=arrayid + 'qubit_%d' % (q + 1),
                              is_setpoint=False))

    data_set_new = DataSet(location=new_location, io=NewIO, formatter=formatter)
    data_set_new.add_array(set_array1)
    data_set_new.add_array(set_array2)
    if loop_num != 1:
        data_set_new.add_array(set_array3)
    for q in range(qubit_num):
        data_set_new.add_array(qubit_data_array[q])
    # data_set_new.add_array(data_array4)
    return data_set_new
def plot_dataset(dataset: DataSet, win: pyplot.PlotWindow = None):
    """
    Plot the given dataset.

    If a window is given, the dataset will be appended into the window,
    attempting to match each PlotItem to the correct dataset using the
    title. If a matching plot is not found, a new plot will be inserted.

    Args:
        dataset (qcodes.DataSet): The qcodes dataset to plot.
        win (Union[pyplot.PlotWindow, None]): The window to plot into, or
            None if we should create a new window.
    """
    if win is None:
        win = pyplot.PlotWindow(title='ID: {}'.format(dataset.run_id))
        appending = False
    elif isinstance(win, pyplot.PlotWindow):
        appending = True
    else:
        raise TypeError(
            f"Unexpected type for win. Expected pyplot.PlotWindow, got {type(win)}.")

    # plot each dependent parameter in the data
    data = dataset.to_pandas_dataframe_dict()
    for param in data:
        param = dataset.paramspecs[param]
        dep_params = [dataset.paramspecs[p] for p in param._depends_on]

        if len(dep_params) == 1:
            plot = None
            if appending:
                plot = find_plot_by_paramspec(win, dep_params[0], param)
                if plot is not None:  # guard added: find may return None, in which case a new plot is inserted below
                    plot.plot_title += f" (id: {dataset.run_id})"
                    if not plot.left_axis.checkParamspec(param):
                        raise ValueError(
                            f"Left axis label/units incompatible. "
                            f"Got: {param}, expecting: {plot.left_axis.label}, {plot.left_axis.units}.")
                    if not plot.bot_axis.checkParamspec(dep_params[0]):
                        raise ValueError(
                            f"Bottom axis label/units incompatible. "
                            f"Got: {dep_params[0]}, expecting: {plot.bot_axis.label}, {plot.bot_axis.units}.")
            if plot is None:
                plot = win.addPlot()
                plot.plot_title = (
                    f"{dep_params[0].name} ({dep_params[0].label}) "
                    f"v.<br>{param.name} ({param.label}) "
                    f"(id: {dataset.run_id})")

            c_data = data[param.name]
            if c_data.isna().all(axis=None):
                # no data in plot
                continue
            add_line_plot(plot, c_data, x=dep_params[0], y=param)
        elif len(dep_params) == 2:
            plot = None
            if appending:
                plot = find_plot_by_paramspec(win, dep_params[0], dep_params[1])
                if plot is not None:  # guard added, as above
                    plot.plot_title += f" (id: {dataset.run_id})"
                    if not plot.left_axis.checkParamspec(dep_params[1]):
                        raise ValueError(
                            f"Left axis label/units incompatible. "
                            f"Got: {dep_params[1]}, expecting: {plot.left_axis.label}, {plot.left_axis.units}.")
                    if not plot.bot_axis.checkParamspec(dep_params[0]):
                        raise ValueError(
                            f"Bottom axis label/units incompatible. "
                            f"Got: {dep_params[0]}, expecting: {plot.bot_axis.label}, {plot.bot_axis.units}.")
                    histogram = plot.items[0].histogram
                    if not histogram.axis.checkParamspec(param):
                        raise ValueError(
                            f"Color axis label/units incompatible. "
                            f"Got: {param}, expecting: {histogram.axis.label}, {histogram.axis.units}.")
            if plot is None:
                plot = win.addPlot()
                plot.plot_title = (
                    f"{dep_params[0].name} ({dep_params[0].label}) "
                    f"v.<br>{dep_params[1].name} ({dep_params[1].label}) "
                    f"(id: {dataset.run_id})")

            c_data = data[param.name].unstack().droplevel(0, axis=1)
            if c_data.isna().all(axis=None):
                # no data in plot
                continue
            add_image_plot(plot, c_data, x=dep_params[0], y=dep_params[1], z=param)
        else:
            raise ValueError(
                "Invalid number of dimensions in dataset. Can only plot 1D or 2D traces.")
    return win
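# Usage sketch (assumes a qcodes v2 dataset with run_id/paramspecs and the
# pyplot wrapper imported above; the dataset variables are hypothetical):
#
#     win = plot_dataset(my_dataset)           # open a new plot window
#     plot_dataset(another_dataset, win=win)   # append matching traces to it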