def test_fitter(num_points):
    """Smoke-test ``fitter`` on purely random inputs.

    Builds a synthetic wavelength grid of ``num_points`` samples, a random
    data vector scaled to [0, 10), a flat error vector of 5.0, and a stack
    of 100 random model spectra, then hands everything to ``fitter``.
    Nothing is returned — this only checks that the call runs.
    """
    wavelengths = np.arange(num_points)
    spectrum = np.random.rand(num_points) * 10.0
    errors = np.full(num_points, 5.0)
    model_stack = np.zeros((100, num_points))
    for row in range(model_stack.shape[0]):
        model_stack[row, :] = np.random.rand(num_points) * 10.0
    fitter(wavelengths, spectrum, errors, model_stack)
def fit(self,config=False):
    '''
    Run the fit over all configured emission processes.

    Parameters
    ----------
    config : bool, optional
        Passed straight through to ``fitter.fit`` — presumably selects an
        alternative fit configuration; TODO confirm against fitter.fitter.

    Returns
    -------
    tuple
        The fitter result ``fitFunc`` (whose first element, ``fitFunc[0]``,
        is indexed below as the per-process final parameter values)
        extended with two extra elements: ``fineGammaY``, a list of finely
        sampled model y-arrays, and ``fineGammaX``, the matching x-arrays.
    '''
    # pp cross-section tables, loaded from the path configured on self.
    ppcross=crss.crossSection(self.ppPath)
    print 'Setting up fitting routine using:'
    print 'evaluating processes %s specs %s with initial parmeters %s'%(self.processes,self.parentspec,self.inPara)
    # Delegate the actual minimisation to the external fitter module.
    modelfit=fitter.fitter(self.parentspec,self.inPara,self.processes,self.data,self.source,self.confDict,ppcross)
    fitFunc=modelfit.fit(config)
    #print 'return from fitter in sedfitter: ',fitFunc
    '''
    Produce finer sampled model functions compared to the fit functions, these
    are used for nice plots instead of the data point sampled graphs that are
    for test purposes and for the fit
    '''
    fineGammaY=[]  # finely sampled model values, one entry per output spectrum
    fineGammaX=[]  # matching x-axis arrays (duplicated 4x for 'pp', see below)
    # counter indexes both self.parentspec and fitFunc[0]; a 'pp' process
    # consumes 4 consecutive slots — NOTE(review): confirm parentspec is laid
    # out this way, otherwise the [counter] lookup drifts after a 'pp' entry.
    counter=0
    for p in self.processes:
        finalmodel=mod.model(self.parentspec[counter],p,self.data,self.source,ppcross)
        # 300 sampling points for the plotting-quality curves.
        finalmodel.set_final_gamma_prec(300)
        if p=='pp':
            '''
            general pp interaction returns secondary emission processes
            (Bremsstrahlung and IC) as separated spectra in addition to the
            pp spectrum and the total spectrum
            '''
            fineResults=finalmodel.model_val(fitFunc[0][counter])
            fineGammaY.append(fineResults[0][0])#total spec
            fineGammaY.append(fineResults[0][1])# only pp
            fineGammaY.append(fineResults[0][2])# only bremsstrahlung
            fineGammaY.append(fineResults[0][3])# only IC
            # All four spectra share one x sampling, so append it four times.
            fineGammaX.append(fineResults[1])
            fineGammaX.append(fineResults[1])
            fineGammaX.append(fineResults[1])
            fineGammaX.append(fineResults[1])
            counter+=4
        else:
            fineResults=finalmodel.model_val(fitFunc[0][counter])
            fineGammaY.append(fineResults[0])
            fineGammaX.append(fineResults[1])
            counter+=1
    print self.print_fit_results(fitFunc)
    return fitFunc+(fineGammaY,fineGammaX)
def fit(name, selected_branches, fit_params, results, outputfile):
    """Run an unbinned BToKEE mass fit and record the fitted yields.

    Parameters
    ----------
    name : str
        Label for this selection; used in the plot file name and as the
        suffix of every ``results`` key.
    selected_branches : sequence
        Values for the single ``BToKEE_fit_mass`` branch of the temporary
        tree handed to the fitter.
    fit_params : dict
        Fitter configuration; must contain ``'mvaCut'`` (encoded into the
        file name) and is otherwise passed wholesale to ``init_fit_data``.
    results : dict of lists
        Accumulator; the fitted quantities are appended in place.
    outputfile : str
        Base name for the output PDF.

    Returns
    -------
    (dict, str)
        The updated ``results`` and the plot file name.
    """
    branches = np.array(selected_branches, dtype=[('BToKEE_fit_mass', 'f4')])
    tree = array2tree(branches)
    # Dots are replaced by dashes over the whole prefix (including
    # ``outputfile`` itself) before the '.pdf' extension is appended.
    outputname = outputfile + '_{0}_mva_{1:.3f}'.format(name, fit_params['mvaCut']).replace('.','-') + '.pdf'

    b_fitter = fitter()
    b_fitter.init_fit_data(**fit_params)
    output = b_fitter.fit(tree, outputname)

    # Copy each fitted quantity into its per-selection accumulator list
    # (previously seven hand-duplicated append statements).
    for key in ('Stot', 'StotErr', 'S', 'SErr', 'B', 'BErr', 'exp_alpha'):
        results['{}_{}'.format(key, name)].append(output[key])
    # Approximate significance S / sqrt(S + B).
    results['SNR_{}'.format(name)].append(output['S']/np.sqrt(output['S'] + output['B']))
    return results, outputname
# Command-line options for the theta fit driver (parser is created above).
parser.add_option('--analysis_file', type='string',action='store',default='analysis', dest='analysis_file', help='Path to theta analysis script')
parser.add_option('--out_name', type='string',action='store',default='result', dest='out_name', help='Name of output file that will have all the plots and stuff in it')
(options, args) = parser.parse_args()
#set up the input and output files and run name
input_file_path = './'
# Templates file: ensure a '.root' extension.
templates_filename = input_file_path+options.templates_file
if not templates_filename.endswith('.root') :
    templates_filename+='.root'
analysis_file_path = './'
# Theta analysis script: ensure a '.py' extension.
analysis_filename = analysis_file_path+options.analysis_file
if not analysis_filename.endswith('.py') :
    analysis_filename+='.py'
# Output ROOT file: ensure a '.root' extension.
output_name = options.out_name
if not output_name.endswith('.root') :
    output_name += '.root'
# For the default run name, make it unique by appending a timestamp.
runname = options.run_name
if runname == 'new_run' :
    runname+='_'+strftime('%Y-%m-%d_%X')
#make a new fitter
fit_obj = fitter(runname,templates_filename,output_name)
#build the template file
fit_obj.makeTemplateFile(output_name)
#fit the thing
fit_obj.fit(analysis_filename)
#make comparison plots, saving PDFs alongside the ROOT output
save_pdfs = True
fit_obj.makeComparisonPlots(save_pdfs)
#more plots go here or whatever
#clean up after yourself
del fit_obj
def test_fitter_mocks():
    """Fit mock spectra against an SSP template grid.

    Loads a grid of SSP model spectra (all metallicity/age combinations
    below) from hard-coded local FITS files, builds a noisy mock data
    spectrum from a CSP FITS file, runs ``fitter`` on the normalised data,
    and plots mass estimates against fit probabilities.  Paths are
    machine-specific (/Users/david/...), so this only runs on that setup.
    """
    import pyfits
    import random
    import os
    # Template grid: metallicities and ages of the model library.
    metal_used = ['z001','z002','z004','z0001.bhb','z0001.rhb','z10m4.bhb','z10m4.rhb']
    age_used = [ '3M','3_5eM','4M','4_5eM','5M','5_5eM','6M','6_5eM',\
                 '7M','7_5eM','8M','8_5eM','9M','9_5eM',\
                 '10M','15M','20M','25M','30M','35M','40M','45M','50M','55M',\
                 '60M','65M','70M','75M','80M','85M','90M','95M',\
                 '100M','200M','300M','400M','500M','600M','700M','800M','900M',\
                 '1G','1G_500M','2G','3G','4G','5G','6G','7G','8G','9G',\
                 '10G','11G','12G','13G','14G','15G']
    sigma_use = '100'
    model_used_array = ['MILES_UVextended']
    signal_to_noise = 20.0
    initial_model_flux = []   # unit-normalised model spectra, one per template
    save_norm_models = []     # the normalisation (flux sum) of each template
    on = True
    # Load every template; each is trimmed to [100:-20] and normalised to
    # unit total flux before being stacked into the model matrix.
    for j in range(len(metal_used)):
        for i in range(len(age_used)):
            input_data = metal_used[j]+'/CONVERTED_'+age_used[i]+'_log'
            file_open = '/Users/david/Downloads/ss/'+input_data+'.fits'
            test_file = os.path.isfile(file_open)
            if test_file == False:
                print "CANNOT FIND FILE"
                print file_open
                trust_flag = 0
                null_output_structure = {'flux':[0,0],'error':[0,0],'wavelength':[0,0],\
                    'redshift':0,'sigma_in':0,'eline_correction':0,'trust_flag':0,\
                    'out_correction':1}
            else:
                hdus = pyfits.open(file_open)
                # Reconstruct the log-wavelength axis from the FITS header.
                crval1 = hdus[0].header['CRVAL1']
                cdelt = hdus[0].header['CD1_1']
                naxis = hdus[0].header['NAXIS1']
                crpix=0.
                crval=crval1-(crpix*cdelt)
                initial_data_wavelength = 10**(np.arange(naxis)*cdelt+crval)
                flux_int = hdus[0].data
                initial_data_flux_list = []
                initial_data_error_list = []
                bad_flags = np.zeros(len(initial_data_wavelength))
                bad_flags = bad_flags + 1
                save_norm_models.append(np.sum(flux_int[100:-20]))
                initial_model_flux.append(np.asarray(flux_int[100:-20])/np.sum(flux_int[100:-20]))
    # CSP test: build the mock data spectrum from a composite stellar
    # population file, perturbed by Gaussian noise at the given S/N.
    file_open='/Users/david/Downloads/log_miles_kr_tau_1_dust_no_age_5.fits'
    print file_open
    test_file = os.path.isfile(file_open)
    if test_file == False:
        trust_flag = 0
        null_output_structure = {'flux':[0,0],'error':[0,0],'wavelength':[0,0],\
            'redshift':0,'sigma_in':0,'eline_correction':0,'trust_flag':0,\
            'out_correction':1}
    else:
        hdus = pyfits.open(file_open)
        data_in = hdus[1].data
        initial_data_wavelength = 10**np.asarray(data_in['wavelength'])[0]
        flux_int = np.asarray(data_in['flux'])
        signal_to_noise = 20.0 # typical BOSS x 10
        initial_data_flux_list = []
        initial_data_error_list = []
        for i in range(len(flux_int[0])):
            random.seed()
            initial_data_flux_list.append(random.gauss(flux_int[0][i],flux_int[0][i]/signal_to_noise))
            initial_data_error_list.append(flux_int[0][i] / signal_to_noise)
        initial_data_flux = np.asarray(initial_data_flux_list)[100:-20]
        initial_data_error= np.asarray(initial_data_error_list)[100:-20]
    # MULTI-SSP test (disabled): sum a handful of z002 templates instead of
    # using the CSP file as the mock data.
    # initial_data_flux = np.zeros(len(initial_model_flux[0]))
    # initial_data_error = np.zeros(len(initial_model_flux[0]))
    # for j in range(len(metal_used)):
    #     for i in range(len(age_used)):
    #         if metal_used[j] == 'z002' and age_used[i] in ['10G','1G','5G','7G','500M']:
    #             input_data = metal_used[j]+'/CONVERTED_'+age_used[i]+'_log'
    #             file_open = '/Users/david/Downloads/kr/'+input_data+'.fits'
    #             test_file = os.path.isfile(file_open)
    #             if test_file == False:
    #                 print "CANNOT FIND FILE"
    #                 print file_open
    #                 trust_flag = 0
    #                 null_output_structure = {'flux':[0,0],'error':[0,0],'wavelength':[0,0],\
    #                     'redshift':0,'sigma_in':0,'eline_correction':0,'trust_flag':0,\
    #                     'out_correction':1}
    #             else:
    #                 hdus = pyfits.open(file_open)
    #                 crval1 = hdus[0].header['CRVAL1']
    #                 cdelt = hdus[0].header['CD1_1']
    #                 naxis = hdus[0].header['NAXIS1']
    #                 crpix=0.
    #                 crval=crval1-(crpix*cdelt)
    #                 initial_data_wavelength = 10**(np.arange(naxis)*cdelt+crval)
    #                 flux_int = hdus[0].data
    #                 initial_data_flux_list = []
    #                 initial_data_error_list = []
    #                 bad_flags = np.zeros(len(initial_data_wavelength))
    #                 bad_flags = bad_flags + 1
    #                 # for i in range(len(flux_int)):
    #                 #     random.seed()
    #                 #     initial_data_flux_list.append(random.gauss(flux_int[i],flux_int[i]/signal_to_noise))
    #                 #     initial_data_error_list.append(flux_int[i]/signal_to_noise)
    #                 # if on:
    #                 #     initial_data_flux = np.asarray(initial_data_flux_list)
    #                 #     initial_data_error= np.asarray(initial_data_error_list)
    #                 #     on = False
    #                 # else:
    #                 #     print np.shape(np.asarray(initial_data_flux_list))
    #                 #     print np.shape(initial_data_flux)
    #                 initial_data_flux = initial_data_flux + flux_int[100:-20]
    # NOTE(review): this overwrites the error array computed in the CSP
    # branch above with flux/S/N — looks intentional but confirm.
    initial_data_error= initial_data_flux/signal_to_noise
    models = np.asarray(initial_model_flux)
    norm_models = np.asarray(save_norm_models)
    # Per-template factor converting fitted weights back to masses.
    norm_factor = 1.0/(norm_models/np.sum(initial_data_flux))
    # Normalise data and errors to unit total flux before fitting.
    error = initial_data_error/np.sum(initial_data_flux)
    data = initial_data_flux/np.sum(initial_data_flux)
    # plt.plot(data)
    # plt.plot(error)
    # plt.show()
    weights,chis,branch = fitter(initial_data_wavelength[100:-20],data,error,models)
    best_sol = np.argmin(chis)
    mass_estimate = np.dot(norm_factor,weights[best_sol])
    all_masses = np.dot(norm_factor,weights.T)
    ind_sort = np.argsort(chis)
    # plt.plot(all_masses,chis,'o')
    # plt.show()
    # Tracer()()
    # Convert chis to probs:
    # assume normal distro with best fit = mean
    # hence variance is sqrt(2*mean)
    min_chis = np.min(chis)
    var_derive = 2*min_chis
    prob = np.exp((min_chis-chis)/2/var_derive)
    # NOTE(review): 'walks' and 'pal' are not defined anywhere in this
    # function — this line will raise NameError; likely leftover debug code.
    sns.tsplot(walks, ci=100, color=pal)
    plt.plot(all_masses,prob,'o')
    plt.show()
from electronResponse import electronResponse
from gammaResponse import gamma
from fitter import fitter


def _main():
    """Construct the fitter, run the fit, and display the result plots."""
    model_fitter = fitter()
    model_fitter.fit()
    model_fitter.plot()


if __name__ == "__main__":
    # Earlier ad-hoc checks of electronResponse / gamma predictions were
    # scratch code; the entry point just runs the full fit.
    _main()