def load_peaks(self):
    # Load the manually sampled peaks (begin times b1..b12, end times e1..e12)
    # from the Peak_Sampling table, fit each one, build their average and refit
    # it to obtain the standard peak-shape parameters for this shot/channel.
    sz = 5
    tsmin = np.asarray(db.retrieve(
        'b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12',
        'Peak_Sampling',
        'Shot = ' + str(self.par['shot']) +
        ' AND Channel = ' + str(self.par['channel']))) * us
    tsmax = np.asarray(db.retrieve(
        'e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12',
        'Peak_Sampling',
        'Shot = ' + str(self.par['shot']) +
        ' AND Channel = ' + str(self.par['channel']))) * us
    Vtotal = np.zeros(
        int(sz * (self.par['rise_time'] + self.par['decay_time']) / self.dt))
    for i in range(len(tsmin)):
        tmin = tsmin[i]
        tmax = tsmax[i]
        Vps = self.Vps
        td = self.td
        # slice of the raw trace covering this sample peak
        sl = fu.get_window_slice(tmin, td, tmax)
        Vt = Vps[sl] / Vps.max()
        ts = td[0:Vt.shape[0]] - td[0]
        F, alpha, beta, H, offset, x_0 = self.fit_shape(ts, Vt)
        pl.figure()
        pl.plot(ts, Vt, '.', label='Peak %d Normalized' % i)
        pl.plot(ts, F.func(ts), color='b', label='fit_line')
        pl.legend()
        pl.axis('tight')
        # remove the fitted offset and amplitude, then add the peak to the
        # running sum, aligned on its maximum
        Vt = (Vt - offset) / H
        shift = Vt.argmax() - int(sz * (self.par['rise_time'] / self.dt))
        for j in range(len(Vtotal)):
            k = j + shift
            if 0 <= k < len(Vt):    # skip samples outside the peak window
                Vtotal[j] += Vt[k]
    ttotal = td[0:Vtotal.shape[0]] - td[0]
    Vtotal = Vtotal / len(tsmin)
    # fit the averaged peak shape
    F, alpha, beta, H, offset, x0 = self.fit_shape(ttotal, Vtotal)
    pl.figure()
    pl.plot(ttotal, Vtotal, '.', label='Average of peaks')
    pl.plot(ttotal, F.func(ttotal), color='b', label='fit_line')
    pl.legend()
    pl.axis('tight')
    sig = fu.peak(F.xpl - x0, alpha, beta)[0]
    self.par['decay_time'] = 1. / alpha
    self.par['rise_time'] = 1. / beta
    self.par['sig'] = sig
    # store the fitted shape parameters back in the database (in seconds)
    db.writetodb(
        'decay_time = ' + str(self.par['decay_time'] / us) +
        ', rise_time = ' + str(self.par['rise_time'] / us),
        'Peak_Sampling',
        'Shot = ' + str(self.par['shot']) +
        ' AND Channel = ' + str(self.par['channel']))
    db.writetodb(
        'sig = ' + str(self.par['sig'] / us),
        'Raw_Fitting',
        'Shot = ' + str(self.par['shot']) +
        ' AND Channel = ' + str(self.par['channel']))
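# Hypothetical usage sketch (not part of the original module): `ra` stands for an
# instance of the analysis class that owns load_peaks(), created elsewhere for the
# shot/channel of interest with its raw trace (self.td, self.Vps) already loaded.
#
#   ra.load_peaks()                      # fit the sampled peaks and their average
#   print(ra.par['rise_time'] / us,      # fitted shape parameters, converted back
#         ra.par['decay_time'] / us,     # from internal time units before printing
#         ra.par['sig'] / us)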
def plot_data(channels):
    # Overlay the fitted rates of the selected channels for the current shot.
    for i, ch in enumerate(channels):
        # time slice limits used by the Raw_Fitting step for this channel
        (dtmin, dtmax) = db.retrieve('dtmin, dtmax', 'Raw_Fitting',
                                     'Shot = ' + str(shot) +
                                     ' AND Channel = ' + str(ch))
        # rate results written by the Rate_Plotting step
        pf_name = ('../Analysis_Results/' + str(shot) +
                   '/Rate_Plotting/rate_results_4_' + str(shot) +
                   '_{0:5.3f}_{1:5.3f}_{2:d}.npz'.format(dtmin, dtmax, ch))
        try:
            d = np.load(pf_name)
        except IOError:
            print("cannot open : ", pf_name, " skipping")
            continue
        t = d['t']
        Ap = d['Ap']
        dAp = d['dAp']
        # total signal
        B.plot_exp(t, Ap, dAp, color=colors[ch], ecolor='grey',
                   label='Ch {}'.format(ch), capsize=0.)
    B.pl.xlabel('t [s]')
    B.pl.ylabel('Rate (p)')
    B.pl.title('Shot : ' + str(shot))
    B.pl.legend(loc='upper right')
    B.pl.show()
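# Hypothetical usage sketch (assumes `shot`, `db`, `colors` and the plotting module
# `B` are set up as in the main script below):
#
#   channels = [0, 1, 2, 3]      # illustrative channel list
#   plot_data(channels)          # overlays the rate Ap(t) of each channel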
#----------------------------------------------------------------------
# get parameters from the database

def TruFal(a):
    # convert the 'True'/'False' strings stored in the database to booleans
    if a == 'True':
        return True
    elif a == 'False':
        return False

# Default setting is that Em depends only on psirel
(use_all_variables, ) = db.retrieve('use_all_variables', 'Combined_Rates',
                                    'Shot = ' + str(shot))
use_all_variables = TruFal(use_all_variables)

# calculate rates
(calc_rate, ) = db.retrieve('calc_rate', 'Combined_Rates',
                            'Shot = ' + str(shot))
calc_rate = TruFal(calc_rate)

# read the model
(model, ) = db.retrieve('model', 'Combined_Rates', 'Shot = ' + str(shot))

# set the initial values for the model chosen
if model == 'pow':
    # simple power law
    alpha = 2.e5
# define the emissivity model
#----------------------------------------------------------------------
view_files = []
mag_axis = []
det_exp = []
ch_exp = []
R_exp = []
dR_exp = []
R_exp_n = []
dR_exp_n = []

# get data
(view_dir, ) = db.retrieve('view_dir', 'Combined_Rates', 'Shot = ' + str(shot))
(view_name, ) = db.retrieve('view_names', 'Combined_Rates', 'Shot = ' + str(shot))
(channels_str, ) = db.retrieve('Channels', 'Combined_Rates', 'Shot = ' + str(shot))
v_chan_f = list(map(int, channels_str.split(',')))

# NOTE: v_chan is hard coded here; the channel list read from the database
# (v_chan_f above) is overwritten below.
v_chan = [[0, 1, 2, 3, 4, 5]]
view_names = []
view_names.append(view_name)
#v_chan.append(v_chan_f)
v_chan_f = np.array(v_chan).flatten()
def read_database_par():
    # Nested helper (called below): read the analysis interval limits and fit
    # parameters for this shot/channel from the Raw_Fitting table; `self`,
    # `shot` and `channel` come from the enclosing scope.  If the database does
    # not contain parameters for the selected shot and channel, they are copied
    # from another channel or even another shot (see the fallbacks below).
    shot = self.shot
    wheredb = self.wheredb
    (self.par['exp_dir'],
     self.par['exp_file']) = db.retrieve('Folder, File_Name', 'Shot_List',
                                         'Shot = ' + str(shot))
    (self.par['dtmin'], self.par['dtmax']) = np.asarray(
        db.retrieve('dtmin, dtmax', 'Raw_Fitting', wheredb)) * us
    # read other parameters
    (self.par['poly_order'],
     self.par['n_peaks_to_fit']) = db.retrieve(
         'poly_order, n_peaks_to_fit', 'Raw_Fitting', wheredb)
    (self.par['add_pulser'],
     self.par['pulser_rate'],
     self.par['P_amp']) = db.retrieve('add_pulser, pulser_rate, P_amp',
                                      'Raw_Fitting', wheredb)
    (self.par['use_threshold'],
     self.par['Vth'],
     self.par['Vstep']) = db.retrieve('use_threshold, Vth, Vstep',
                                      'Raw_Fitting', wheredb)
    # load parameters for finding peaks
    (self.par['n_sig_low'],
     self.par['n_sig_high'],
     self.par['n_sig_boundary']) = db.retrieve(
         'n_sig_low, n_sig_high, n_sig_boundary', 'Raw_Fitting',
         wheredb)  # n_sig_high not used anywhere
    self.par['sig'] = db.retrieve('sig', 'Raw_Fitting', wheredb)[0] * us
    # read peak shape parameters from the Peak_Sampling table
    (decay_time, rise_time) = db.retrieve('decay_time, rise_time',
                                          'Peak_Sampling', wheredb)
    self.par['decay_time'] = decay_time * us  # converted to microseconds
    self.par['rise_time'] = rise_time * us    # converted to microseconds
    #self.par['position'] = position*us       # converted to microseconds

# -------- load parameters -------
try:
    read_database_par()
except Exception:
    print("Couldn't read parameters for Shot %d Channel %d" % (shot, channel))
    # first try to copy the parameters from the previous channel of this shot
    shot_cp = self.par['shot']
    ch_cp = self.par['channel'] - 1
    wheredb_cp = ('Shot = ' + str(shot_cp) + ' AND Channel = ' + str(ch_cp))
    try:
        db.copyrow('Raw_Fitting', wheredb_cp,
                   'Shot = ' + str(shot) + ', Channel = ' + str(channel))
        db.copyrow('Peak_Sampling', wheredb_cp,
                   'Shot = ' + str(shot) + ', Channel = ' + str(channel))
        db.copyrow('Rates_Plotting', wheredb_cp,
                   'Shot = ' + str(shot) + ', Channel = ' + str(channel))
        read_database_par()
        print('Copied parameters from previous channel')
    except Exception:
        print("Couldn't copy parameters from previous channel, "
              "will try the previous shot!")
        # then try to copy from the previous shot in the Shot_List table
        try:
            shot_cp = db.prevshot(self.par['shot'])  # previous shot
            ch_cp = self.par['channel']              # same channel
            wheredb_cp = ('Shot = ' + str(shot_cp) +
                          ' AND Channel = ' + str(ch_cp))
            db.copyrow('Raw_Fitting', wheredb_cp,
                       'Shot = ' + str(shot) + ', Channel = ' + str(channel))
            db.copyrow('Peak_Sampling', wheredb_cp,
                       'Shot = ' + str(shot) + ', Channel = ' + str(channel))
            db.copyrow('Rates_Plotting', wheredb_cp,
                       'Shot = ' + str(shot) + ', Channel = ' + str(channel))
            read_database_par()
            print('Copied parameters from previous shot.')
        except Exception:
            print("Couldn't copy parameters from previous shot. "
                  "Input parameters manually in DB")
            return

# ------------ assign class variables -------------
# order for background fit and set vary codes variables
self.var['vary_codes_bkg'] = (self.par['poly_order'] + 1) * [1]
self.var['bkg_len'] = len(self.var['vary_codes_bkg'])
self.var['peak_num'] = 1       # running number for peak selection
self.var['data_plot'] = None
# assign directories for results (edit later for good tree structure!!)
self.var['res_dir'] = ('../Analysis_Results/' + str(shot) + '/Raw_Fitting/')
print('Analysis results will be placed in: ', self.var['res_dir'])

# --------------------------------
# ######## Load raw data #########
f = h5py.File(self.par['exp_dir'] + self.par['exp_file'], 'r')
data_root = 'wfm_group0/traces/trace' + str(self.par['channel']) + '/'
print("-----------------------Getting data------------------------")

# load time information
t0 = f[data_root + 'x-axis'].attrs['start'] * us
dt = f[data_root + 'x-axis'].attrs['increment'] * us

# load the scale coefficients used to convert raw counts to volts
scale = f[data_root + 'y-axis/scale_coef'][()]

# get the y dataset length
nall = f[data_root + 'y-axis/data_vector/data'].shape[0]

# make time array based on number of points in y data
tall = t0 + dt * np.arange(nall, dtype=float)

# data window for analysis (indices in all data array)
#tds = fu.get_window_slice(self.par['dtmin'], tall, self.par['dtmax'])

# get the y dataset (measured data)
if convert:
    ydata = f[data_root + 'y-axis/data_vector/data'][()].astype('int16')
else:
    ydata = f[data_root + 'y-axis/data_vector/data'][()]

# calculate voltage for dataset
V = scale[0] + scale[1] * ydata
print("-----------------------Data loaded-------------------------")

# save data for future use
self.td = tall   # time data (microseconds) for the full record
self.Vps = V     # voltage data
self.dt = dt     # time step (microseconds)

# testing of fitting pulser
# self.td = self.td[0:1000000]
# self.Vps = np.zeros_like(self.td)

# add pulser to data if the add_pulser parameter is set to True
if self.par['add_pulser'] == 'True':
    self.add_pulser()
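# Minimal standalone sketch (illustrative only, not part of the original class) of
# reading the same 'wfm_group0' waveform layout used above; the function name
# read_gage_trace is hypothetical.
def read_gage_trace(h5_path, channel):
    """Return (t, V) for one digitizer channel in the file's native units."""
    import h5py
    import numpy as np
    with h5py.File(h5_path, 'r') as fh:
        root = 'wfm_group0/traces/trace' + str(channel) + '/'
        t0 = fh[root + 'x-axis'].attrs['start']          # start time
        dt = fh[root + 'x-axis'].attrs['increment']      # sampling interval
        scale = fh[root + 'y-axis/scale_coef'][()]       # counts -> volts coefficients
        ydata = fh[root + 'y-axis/data_vector/data'][()]
        t = t0 + dt * np.arange(ydata.shape[0], dtype=float)
        V = scale[0] + scale[1] * ydata
    return t, V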
B.pl.show()
# all done

# get the shot number from the command line
parser = AG.ArgumentParser()
parser.add_argument("Shot", nargs='?', type=int, help="Shot number",
                    default=29975)
args = parser.parse_args()
shot = args.Shot

# get the list of channels to combine for this shot
(channels_str,) = db.retrieve('Channels', 'Combined_Rates',
                              'Shot = ' + str(shot))
channels = list(map(int, channels_str.split(',')))
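# Example invocation (the script file name is hypothetical):
#
#   python plot_combined_rates.py 29975
#
# which selects shot 29975 and reads its channel list from the Combined_Rates table.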
def __init__(self, shot, channel, ifile):
    # frequently used WHERE clause to retrieve data from the database for this
    # shot and channel
    wheredb = 'Shot = ' + str(shot) + ' AND Channel = ' + str(channel)

    self.par = {}
    self.var = {}
    self.par['shot'] = shot
    self.par['channel'] = channel

    (time_slice_width, ) = db.retrieve('time_slice_width', 'Rates_Plotting',
                                       wheredb)
    self.par['time_slice_width'] = time_slice_width

    # histogram settings
    (h_min, h_max, h_bins) = db.retrieve('h_min, h_max, h_bins',
                                         'Rates_Plotting', wheredb)
    self.par['h_min'] = h_min
    self.par['h_max'] = h_max
    self.par['h_bins'] = int(h_bins)

    (draw_p, draw_t, draw_sum) = db.retrieve('draw_p, draw_t, draw_sum',
                                             'Rates_Plotting', wheredb)
    self.par['draw_p'] = draw_p
    self.par['draw_t'] = draw_t
    self.par['draw_sum'] = draw_sum

    # plotting limits
    (p_min, p_max, t_min, t_max, pul_min, pul_max) = db.retrieve(
        'p_min, p_max, t_min, t_max, pul_min, pul_max',
        'Rates_Plotting', wheredb)
    self.par['p_min'] = p_min
    self.par['p_max'] = p_max
    self.par['t_min'] = t_min
    self.par['t_max'] = t_max
    self.par['pulser_min'] = pul_min
    self.par['pulser_max'] = pul_max

    # initial fit values
    (A_init, sig_init, sig_ratio) = db.retrieve('A_init, sig_init, sig_ratio',
                                                'Rates_Plotting', wheredb)
    self.par['A_init'] = A_init
    self.par['sig_init'] = sig_init
    self.par['sig_ratio'] = sig_ratio

    (t_offset, ) = db.retrieve('t_offset', 'Shot_List', 'Shot = ' + str(shot))
    self.par['t_offset'] = t_offset

    (dtmin, dtmax) = np.asarray(
        db.retrieve('dtmin, dtmax', 'Raw_Fitting', wheredb)) * us
    self.par['dtmin'] = dtmin
    self.par['dtmax'] = dtmax

    (add_pulser, ) = db.retrieve('add_pulser', 'Raw_Fitting', wheredb)
    self.par['add_pulser'] = add_pulser

    # ------------ assign class variables -------------
    # input fit-result file and output rate-result file
    self.var['f_name'] = ifile
    self.var['of_name'] = ('../Analysis_Results/' + str(shot) +
                           '/Rate_Plotting/' + 'rate_results_4_' + str(shot) +
                           '_{0:5.3f}_{1:5.3f}_{2:d}.npz'.format(
                               dtmin / us, dtmax / us, channel))
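# Hypothetical usage sketch: the class this __init__ belongs to is created per
# shot/channel with the fit-result file written by the Raw_Fitting step; the class
# name RatePlotting and the exact file path below are illustrative only.
#
#   ifile = ('../Analysis_Results/' + str(shot) + '/Raw_Fitting/' +
#            'fit_results_4_' + str(shot) +
#            '_{0:5.3f}_{1:5.3f}_{2:d}.npz'.format(dtmin / us, dtmax / us, channel))
#   rp = RatePlotting(shot, channel, ifile)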
# steps directory for this shot
step_dir = './Analysis_Results/' + str(shot) + '/Step_Dir/'

# plotting limits
(A_min, A_max) = db.retrieve('A_min, A_max', 'Combined_Rates',
                             'Shot = ' + str(shot))
(t_min_pl, t_max_pl) = db.retrieve('t_min, t_max', 'Combined_Rates',
                                   'Shot = ' + str(shot))

# the rate and normalized-rate plot limits reuse the amplitude limits
R_exp_min = A_min
R_exp_max = A_max
R_norm_min = A_min
R_norm_max = A_max

# normalization scale
y_scale = 1.