def save(self, HH_data=None):
    reps = self.adwin_var('adwin_lt1', 'completed_reps')
    self.save_adwin_data('adwin_lt1', 'data', [
        'CR_preselect',
        'CR_probe',
        'completed_reps',
        'noof_starts',
        ('CR_hist_time_out', ADWINLT1_MAX_RED_HIST_CTS),
        ('CR_hist_all', ADWINLT1_MAX_RED_HIST_CTS),
        ('repump_hist_time_out', ADWINLT1_MAX_REPUMP_HIST_CTS),
        ('repump_hist_all', ADWINLT1_MAX_REPUMP_HIST_CTS),
        ('CR_after', reps),
        ('statistics', ADWINLT1_MAX_STAT),
        ('SSRO1_results', reps),
        ('SSRO2_results', reps),
        # ('PLU_Bell_states', reps), we took that out for now (oct 7, 2013)
        ('CR_before', reps),
        ('CR_probe_timer', reps),
        ])

    reps = self.adwin_var('adwin_lt1', 'completed_reps')
    self.save_adwin_data('adwin_lt2', 'data', [
        'completed_reps',
        'total_CR_counts',
        ('CR_before', reps),
        ('CR_after', reps),
        ('CR_hist', ADWINLT2_MAX_CR_HIST_CTS),
        ('CR_hist_time_out', ADWINLT2_MAX_CR_HIST_CTS),
        ('repump_hist_time_out', ADWINLT2_MAX_REPUMP_HIST_CTS),
        ('repump_hist_all', ADWINLT2_MAX_REPUMP_HIST_CTS),
        ('SSRO_lt2_data', reps),
        ('statistics', ADWINLT2_MAX_STAT),
        ])

    # store the measurement parameters of both setups as attributes
    # of their own groups in the HDF5 file
    params_lt1 = self.params_lt1.to_dict()
    lt1_grp = h5.DataGroup("lt1_params", self.h5data, base=self.h5base)
    for k in params_lt1:
        lt1_grp.group.attrs[k] = self.params_lt1[k]
    self.h5data.flush()

    params_lt2 = self.params_lt2.to_dict()
    lt2_grp = h5.DataGroup("lt2_params", self.h5data, base=self.h5base)
    for k in params_lt2:
        lt2_grp.group.attrs[k] = self.params_lt2[k]
    self.h5data.flush()

    if HH_data is not None:
        self.h5data['HH_data'] = HH_data
        self.h5data.flush()

    self.save_params()
def save_adwin_data(self, name, variables):
    grp = h5.DataGroup(name, self.h5data, base=self.h5base)

    for v in variables:
        # entries are either a plain variable name or a (name, length) tuple
        vname = v if isinstance(v, str) else v[0]
        data = self.adwin_var(v)
        if data is not None:
            grp.add(vname, data=data)

    # save all parameters in each group (could change per run!)
    self.save_params(grp=grp.group)

    # then save all specific adwin params, overwriting other params
    # if they occur twice
    adwinparams = self.adwin_process_params.to_dict()
    for k in adwinparams:
        grp.group.attrs[k] = adwinparams[k]

    self.h5data.flush()
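# The loop above passes either a bare variable name or a (name, length) tuple
# to self.adwin_var. A plausible (hypothetical, not confirmed by this code)
# reading is that the tuple form truncates the fetched ADwin array to the
# number of valid entries, e.g. completed_reps. A minimal standalone sketch of
# that idea, with fetch_full_array standing in for the real ADwin interface:

def adwin_var_sketch(fetch_full_array, v):
    # v is either 'varname' or ('varname', length)
    if isinstance(v, str):
        return fetch_full_array(v)
    vname, length = v
    data = fetch_full_array(vname)
    # keep only the first `length` entries of the (fixed-size) ADwin array
    return None if data is None else data[:length]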
def measure(self):
    x = np.linspace(0, self.params['xmax'], self.params['xpts'])
    y = np.linspace(0, self.params['ymax'], self.params['ypts'])
    z = np.zeros((self.params['xpts'], self.params['ypts']))

    for i, xval in enumerate(x):
        print 'linesweep %d / %d ...' % (i + 1, self.params['xpts'])
        for j, yval in enumerate(y):
            qt.msleep(0.01)
            z[i, j] = xval * yval

    # save the data into the pre-created group.
    # note the passed meta-data (optional).
    # you can have a look at the data with HDFView
    # (you can get it from hdfgroup.com)
    grp = h5.DataGroup('xy-scan', self.h5data, base=self.h5base)
    grp.add('x', data=x, unit='um', note='somewhat inaccurate')
    grp.add('y', data=y, unit='um')
    grp.add('z', data=z, unit='counts per second', dimensions='1=x, 2=y')

    return
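# Aside from HDFView, the xy-scan group written above can also be inspected
# from Python with h5py directly. A minimal sketch, assuming the data ended up
# in a file called 'xy_scan.hdf5' with the group at '/xy-scan' (both the file
# name and the group path are illustrative; the real location depends on the
# measurement name and base path):

import h5py

with h5py.File('xy_scan.hdf5', 'r') as f:
    grp = f['xy-scan']
    x = grp['x'][...]
    z = grp['z'][...]
    # metadata keywords such as unit/note presumably end up as HDF5
    # attributes; list whatever attributes are actually there:
    print dict(grp['x'].attrs)
    print z.shape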
# fits into numpy arrays can be stuffed in there.
dat['/my first group'].attrs['description'] = 'an utterly pointless group'
dat['/my first group'].attrs['yo mama'] = 'probably fat'
dat['/my first group/my first subgroup/an array'].\
    attrs['unit'] = 'TT'
dat['/my first group/my first subgroup/an array'].\
    attrs['ridiculously large magnetic fields'] = True

# don't forget closing! (ends up unreadable otherwise)
dat.close()

### A simple approach to group several data sets (e.g., an N-d matrix plus
### the N axes) into a group, somewhat automated (not too many features yet)
dat = h5.HDF5Data(name='data_number_two')
grp = h5.DataGroup('my_data', dat,
                   description='pretty useless',
                   taken_by='some student')  # arbitrary metadata as kw

# register some data dimensions
grp.add_coordinate('lab temperature', unit='deg C')  # arbitrary metadata as kw
grp.add_coordinate('lab air pressure', unit='hPa')
grp.add_value('overnight lab volume increase', unit='l')

# set data (setting requires the dimensions to be set up)
# with this group class we can re-set existing arrays; normally hdf5 requires
# deletion and re-creation; like this we can keep the meta data.
grp['lab temperature'] = np.arange(25)
grp['lab air pressure'] = np.arange(25) + 1000
grp['overnight lab volume increase'] = np.random.rand(25, 25)

print dat['/my_data/lab temperature']
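# For contrast with the re-settable DataGroup arrays above: with plain h5py,
# replacing a dataset means deleting and re-creating it, and the attributes on
# the old dataset are lost unless they are copied over by hand. A minimal
# sketch of that manual dance (file and dataset names are illustrative):

import h5py
import numpy as np

with h5py.File('plain_h5py_demo.hdf5', 'w') as f:
    ds = f.create_dataset('temperature', data=np.arange(25))
    ds.attrs['unit'] = 'deg C'

    # re-setting the data: save the attributes, delete, re-create, restore
    old_attrs = dict(f['temperature'].attrs)
    del f['temperature']
    ds = f.create_dataset('temperature', data=np.arange(25) + 5)
    for k, v in old_attrs.items():
        ds.attrs[k] = v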
def save_2D_data(self, timename='t (ns)', sweepname='sweep_value', ret=False):
    grp = h5.DataGroup('p7889_raw_data', self.h5data, base=self.h5base)
    self.save_params(grp=grp.group)

    x, yint, z = self._get_p7889_data()

    #### convert x to bins.... because this is what we see on the
    y = self.params['sweep_pts']
    xx, yy = np.meshgrid(x, y)

    # dat = h5.HDF5Data(name=self.name)
    grp.add_coordinate(name='t (ns)', data=x, dtype='f8')
    grp.add_coordinate(name=self.params['sweep_name'], data=y)
    grp.add_value(name='Counts', data=z)
    # m2.save_instrument_settings_file(grp.group)

    # do post-processing
    grp1 = h5.DataGroup('processed data', self.h5data, base=self.h5base)

    # find the indices of the evaluation ROI; note that this relies on the
    # ROI boundaries coinciding exactly with time-bin values in x
    if self.params['Eval_ROI_start'] < np.amin(x):
        startind = [0]
    else:
        startind = np.where(x == self.params['Eval_ROI_start'])

    if self.params['Eval_ROI_end'] > np.amax(x):
        endind = [x.size - 1]
    else:
        endind = np.where(x == self.params['Eval_ROI_end'])

    # strip the count array of points in time that lie outside of the
    # specified ROI
    # print x
    # print startind, endind
    sliced = z[:, startind[0]:endind[0]]
    # print sliced

    # sum the counts within the ROI for every sweep point
    summation = np.zeros(self.params['pts'], dtype=int)
    for i, row in enumerate(sliced):
        summation[i] = np.sum(row)

    grp1.add_coordinate(name=self.params['sweep_name'], data=y)
    grp1.add_value(name='Counts in ROI', data=summation)

    plt = qt.plot(name=self.mprefix + self.name, clear=True)
    plt.add(y, summation, 'rO', yerr=np.sqrt(summation))
    plt.add(y, summation, 'r-')
    plt.set_plottitle(self.name)
    plt.set_legend(False)
    plt.set_xlabel(self.params['sweep_name'])
    plt.set_ylabel('counts')
    plt.save_png(self.datafolder + '\\' + self.name + '.png')

    self.h5data.flush()

    if ret:
        return x, y, z
    else:
        self.h5data.flush()
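# Note on the ROI lookup in save_2D_data above: np.where(x == ...) only finds
# an index when the ROI boundary matches a time-bin value exactly. A more
# forgiving alternative (a sketch of an alternative, not what the method above
# does) is to locate the nearest bin indices with np.searchsorted:

import numpy as np

def roi_indices_sketch(x, roi_start, roi_end):
    # x is assumed to be sorted (the time axis of the histogram)
    start = np.searchsorted(x, roi_start, side='left')
    end = np.searchsorted(x, roi_end, side='right')
    # searchsorted already returns indices in [0, x.size]; clipping is
    # just belt-and-braces
    return max(start, 0), min(end, x.size)

# usage: start, end = roi_indices_sketch(x, ROI_start, ROI_end)
#        counts_in_roi = z[:, start:end].sum(axis=1)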