def __init__(self, sn_name=None):
    """Build a SALT2-based Hubble diagram from the sugar data set.

    Loads the SALT2 parameters (mb, X1, C) and their full covariance,
    optionally restricted to a subset of supernovae, then delegates to
    Hubble_diagram.__init__ and builds the diagram.

    Parameters
    ----------
    sn_name : iterable of str, optional
        Names of the supernovae to keep.  If None (default), every
        supernova present in the sugar SALT2 data is used.
    """
    lds = sugar.load_data_sugar()
    lds.load_salt2_data()

    # Selection mask.  (The original initialised the all-True mask
    # identically in both branches; one init + optional filtering
    # is equivalent.)
    Filtre = N.array([True] * len(lds.sn_name))
    if sn_name is not None:
        for sn in range(len(lds.sn_name)):
            if lds.sn_name[sn] not in sn_name:
                Filtre[sn] = False

    self.sn_name = lds.sn_name[Filtre]

    # data holds the standardisation parameters (X1, C); cov is the
    # per-SN 3x3 covariance of (mb, X1, C), filled symmetrically.
    cov = N.zeros((N.sum(Filtre), 3, 3))
    data = N.zeros((N.sum(Filtre), 2))
    data[:, 0] = lds.X1[Filtre]
    data[:, 1] = lds.C[Filtre]
    cov[:, 0, 0] = (lds.mb_err**2)[Filtre]
    cov[:, 1, 1] = (lds.X1_err**2)[Filtre]
    cov[:, 2, 2] = (lds.C_err**2)[Filtre]
    cov[:, 1, 2] = lds.X1_C_cov[Filtre]
    cov[:, 2, 1] = lds.X1_C_cov[Filtre]
    cov[:, 0, 1] = lds.X1_mb_cov[Filtre]
    cov[:, 1, 0] = lds.X1_mb_cov[Filtre]
    cov[:, 0, 2] = lds.C_mb_cov[Filtre]
    cov[:, 2, 0] = lds.C_mb_cov[Filtre]

    Hubble_diagram.__init__(self, lds.mb[Filtre], data, cov,
                            lds.zhelio[Filtre], lds.zcmb[Filtre],
                            lds.zerr[Filtre])
    self.Make_hubble_diagram()
def run_emfa_analysis(path_input, path_output, sigma_clipping=False):
    """Run the EM factor analysis on the spectral indicators at maximum.

    Loads the sugar spectral indicators (with their errors), feeds them
    to emfa_si_analysis and writes the result to 'emfa_output.pkl'
    inside path_output.

    path_input: str, directory holding the sugar input data.
    path_output: str, directory where the pickle is written.
    sigma_clipping: bool, forwarded to the EM-FA fit.
    """
    dataset = sugar.load_data_sugar(path_input=path_input)
    dataset.load_spectral_indicator_at_max()

    analysis = emfa_si_analysis(dataset.spectral_indicators,
                                dataset.spectral_indicators_error,
                                dataset.sn_name, missing_data=True)

    pkl_file = os.path.join(path_output, 'emfa_output.pkl')
    analysis.emfa(pkl_file, sigma_clipping=sigma_clipping,
                  chi2emfa=True, bic=False)
def load_spectra_max(self):
    """Load the spectra at maximum light for the selected supernovae.

    Fills self.y, self.var, self.wavelength and the per-SN covariance
    matrices self.covy.  Each covariance is the diagonal spectral
    variance plus a fully-correlated redshift term (0.001 in z is
    presumably a peculiar-velocity floor -- TODO confirm).
    """
    lds = sugar.load_data_sugar(path_input=self.path_input)
    lds.load_spectra_at_max()
    lds.load_salt2_data()

    # Keep only the supernovae listed in self.sn_name.
    self.FILTRE = np.array([name in self.sn_name for name in lds.sn_name])

    self.y = lds.spectra_at_max[self.FILTRE]
    self.var = lds.spectra_at_max_variance[self.FILTRE]
    self.wavelength = lds.spectra_at_max_wavelength[0]

    zcmb = lds.zcmb[self.FILTRE]
    zerr = lds.zerr[self.FILTRE]
    nbin = len(self.wavelength)
    self.covy = np.zeros((len(self.var), nbin, nbin))
    for i in range(len(self.covy)):
        # Magnitude error induced by the redshift uncertainty,
        # identical for every wavelength bin (hence the rank-1 term).
        dmz = (5. / np.log(10)) * np.sqrt(zerr[i]**2 + 0.001**2) / zcmb[i]
        self.covy[i] = np.eye(nbin) * self.var[i] + dmz**2 * np.ones((nbin, nbin))
def __init__(self, path_input = path + '/data_input/'):
    """Prepare the per-wavelength-bin containers used by the GP fits.

    Loads the full sugar spectral data set and creates the (initially
    empty) lists that later hold, per bin: magnitudes, their errors,
    phases, the mean function and its phase/wavelength grids.

    path_input: str, directory holding the sugar input data.
    """
    self.path_input = path_input
    self.lds = sugar.load_data_sugar(path_input=path_input)
    self.lds.load_spectra()
    self.sn_name = self.lds.sn_name
    # Wavelength grid taken from the first spectrum of the first SN.
    self.wavelength = self.lds.spectra_wavelength[self.sn_name[0]]['0']
    # Containers filled later by the bin-loading methods.
    self.y = []
    self.y_err = []
    self.time = []
    self.mean = []
    self.mean_time = []
    # NOTE(review): attribute keeps its historical misspelling
    # ("wavelegth") -- renaming would break external users.
    self.mean_wavelegth = []
    self.diff = []
def plot_snia_interpolation(sn_name):
    """Plot one example of SNIa Gaussian-process interpolation.

    Runs one GP per wavelength bin (hyperparameters read from
    gp_info.dat), predicts at the observed phases of sn_name, and
    overlays the predictions on the observed spectra.

    sn_name: str, name of the supernova to draw.

    Raises IOError if the gp_info.dat file is missing.
    """
    gp_file = path + '/data_output/gaussian_process/gp_info.dat'
    try:
        gp_output = np.loadtxt(gp_file, comments='#')
    except (IOError, ValueError):
        # BUG FIX: the original handler was a bare string expression --
        # the message was discarded, the wrong exception type (ValueError)
        # was caught for a missing file, and gp_output stayed undefined,
        # causing a NameError below.  Fail loudly with the real path.
        raise IOError('%s does not exist' % gp_file)

    lds = sugar.load_data_sugar()
    lds.load_spectra()
    gp_interp = np.zeros((len(lds.spectra_phases[sn_name].keys()),
                          len(lds.spectra_wavelength[sn_name]['0'])))

    ldbg = sugar.load_data_bin_gp()
    ldbg.build_difference_mean()
    diff = ldbg.diff
    ind_sn = list(ldbg.sn_name).index(sn_name)

    # One GP regression per wavelength bin; predictions are gathered
    # column-wise into gp_interp (rows = phases, cols = wavelength bins).
    for i in range(len(gp_interp[0])):
        ldbg.load_data_bin(i)
        ldbg.load_mean_bin(i, average=True)
        gpr = cosmogp.gaussian_process_nobject(ldbg.y, ldbg.time,
                                               kernel='RBF1D',
                                               y_err=ldbg.y_err, diff=diff,
                                               Mean_Y=ldbg.mean,
                                               Time_mean=ldbg.mean_time,
                                               substract_mean=False)
        gpr.nugget = 0.03
        gpr.hyperparameters = [gp_output[i, 1], gp_output[i, 2]]
        gpr.get_prediction(new_binning=ldbg.time[ind_sn], COV=True,
                           svd_method=False)
        gp_interp[:, i] = gpr.Prediction[ind_sn]

    plt.figure(figsize=(8, 12))
    plt.subplots_adjust(top=0.98, bottom=0.08, left=0.08, right=0.85,
                        hspace=0.0)
    CST = 19.2  # magnitude offset, incremented to stack the spectra
    y_label_position = []
    y_label = []
    for i in range(len(lds.spectra_phases[sn_name].keys())):
        if i == 0:
            # Label only the first pair so the legend has one entry each.
            plt.plot(lds.spectra_wavelength[sn_name]['%i' % (i)],
                     lds.spectra[sn_name]['%i' % (i)] + CST,
                     'r', linewidth=4, label=sn_name)
            plt.plot(lds.spectra_wavelength[sn_name]['%i' % (i)],
                     gp_interp[i] + CST, 'b', linewidth=2,
                     label='gaussian process interpolation')
        else:
            plt.plot(lds.spectra_wavelength[sn_name]['%i' % (i)],
                     lds.spectra[sn_name]['%i' % (i)] + CST,
                     'r', linewidth=4)
            plt.plot(lds.spectra_wavelength[sn_name]['%i' % (i)],
                     gp_interp[i] + CST, 'b', linewidth=2)
        y_label_position.append(gp_interp[i, -1] + CST)
        y_label.append('%.2f days' % (ldbg.time[ind_sn][i]))
        CST += 1

    plt.legend(loc=4)
    plt.ylim(-0.5, 14.2)
    plt.gca().invert_yaxis()
    xlim = [3300, 8600]
    ax1 = plt.gca()
    # Secondary y-axis labelled with the phase of each spectrum.
    ax2 = plt.gca().twinx()
    ax2.set_yticks(y_label_position)
    ax2.set_yticklabels(y_label, fontsize=20)
    ax2.set_ylim(-0.5, 14.2)
    ax2.invert_yaxis()
    ax1.set_xlim(xlim[0], xlim[1])
    ax1.set_xlabel('wavelength [$\AA$]', fontsize=20)
    ax1.set_ylabel('mag AB + cst.', fontsize=20)
def plot_pf_corr_factor_salt2(self, split=5):
    """
    Plot the weighted correlations between factor-analysis components
    and the SALT2 parameters (X1, C, Hubble residual).

    Draws a matrix of correlation ellipses: rows are the first `split`
    FA components q_i, columns are X1, C and Delta mu_B; ellipse color
    encodes the correlation significance (in sigma), and each cell is
    annotated with the Pearson coefficient.

    split: int, number of FA components to draw.
    """
    # Project the normalised spectral indicators onto the FA basis.
    data = self.si_norm
    err = self.si_norm_err
    new_base = sugar.passage(data, err, self.vec, sub_space=10)
    new_err = sugar.passage_error(err, self.vec, sub_space=10, return_std=True)
    new_base = new_base[:, :split]
    new_err = new_err[:, :split]
    #new_base[:,0] *= -1
    #new_base[:,1] *= -1
    data_sugar = sugar.load_data_sugar()
    data_sugar.load_salt2_data()
    # Hubble residual: observed mb minus the distance modulus.
    delta_mu = copy.deepcopy(data_sugar.mb)
    for i in range(len(data_sugar.zhelio)):
        delta_mu[i] -= sugar.distance_modulus(data_sugar.zhelio[i], data_sugar.zcmb[i])
    # `data`/`err` are reused here for the SALT2 quantities, restricted
    # to the same supernova selection (self.filtre).
    data = np.array([data_sugar.X1, data_sugar.C, delta_mu]).T[self.filtre]
    err = np.array(
        [data_sugar.X1_err, data_sugar.C_err, data_sugar.mb_err]).T[self.filtre]
    self.data = data
    self.err = err
    self.new = new_base
    self.new_err = new_err
    nsil = [r'$X_1$', r'$C$', r'$\Delta \mu_B$']
    # Per-component correlation coefficients (and their errors) with
    # each SALT2 quantity, keyed 'corr_vec<i>' / 'corr_vec<i>e'.
    dic_corr_vec = {}
    dic_corr_vece = {}
    neff = []
    X = []
    Y = []
    for i in range(len(new_base[0])):
        dic_corr_vec.update({'corr_vec%i' % (i): np.zeros(len(nsil))})
        dic_corr_vece.update({'corr_vec%ie' % (i): np.zeros(len(nsil))})
        neff.append([])
        X.append([])
        Y.append([])
    for j in range(len(new_base[0])):
        for i in range(len(nsil)):
            print j, i
            dic_corr_vec['corr_vec%i' % (j)][i], dic_corr_vece[
                'corr_vec%ie' % (j)][i] = Statistics.correlation_weighted(
                    data[:, i], new_base[:, j], error=True, symmetric=True)
            # Weights are all ones, so neff reduces to the sample size.
            neff[j].append(
                Statistics.neff_weighted(1. / (np.ones_like(data[:, i]))))
            X[j].append(data[:, i])
            Y[j].append(new_base[:, j])
    cmap = plt.matplotlib.cm.get_cmap('Blues', 9)
    fig = plt.figure(figsize=(5, 5), dpi=100)
    ax = fig.add_axes([0.05, 0.07, 0.9, 0.85])
    #plt.subplots_adjust(top=0.5,bottom=0.2,left=0.1,right=1.1,hspace=0.0)
    xstart, xplus, ystart = 0.1, 0.37, 1.01
    # Significances above the last boundary are shown in red.
    cmap.set_over('r')
    bounds = [0, 1, 2, 3, 4, 5]
    norm = plt.matplotlib.colors.BoundaryNorm(bounds, cmap.N)
    ylabels = []
    corrs = []
    Ticks = []
    for j in range(len(new_base[0])):
        corrs.append(dic_corr_vec['corr_vec%i' % (j)])
        ylabels.append(r'$q_{%i}$' % (j + 1))
        Ticks.append(4 - j)  # rows are drawn top-down from y=4
    for i, corr in enumerate(corrs):
        # Correlation significance in sigma, then normalised to [0,1]
        # for the colormap lookup.
        sig = np.array([
            Statistics.correlation_significance(np.abs(c), n, sigma=True)
            for c, n in zip(corr, neff[i])
        ])
        Sig = copy.deepcopy(sig)
        sig /= bounds[-1]
        cols = cmap(sig)
        # 2x2 covariance matrices whose off-diagonal encodes rho, used
        # to draw the correlation ellipses.
        mat = [[[0.25, rho * 0.25], [rho * 0.25, 0.25]] for rho in corr]
        MPL.errorellipses(ax, range(1, len(nsil) + 1), [4 - i] * len(corr),
                          mat, color=cols, alpha=1, **{'ec': 'k'})
        for j, c in enumerate(corr):
            # Scatter of the raw points, rescaled into the cell.
            x = (X[i][j] - np.min(X[i][j])) / np.max(X[i][j] - np.min(X[i][j])) - 0.5
            y = (Y[i][j] - np.min(Y[i][j])) / np.max(Y[i][j] - np.min(Y[i][j])) - 0.5
            x += (j + 1)
            y += -np.mean(y) + (4 - i)
            # NOTE(review): esty/isort/lkwargs are computed but never
            # used -- a loess overlay appears to have been disabled.
            esty = loess(x, y)
            isort = np.argsort(x)
            lkwargs = SP_set_kwargs({}, 'loess', c='b', alpha=0.7, ls='-', lw=1)
            # White annotation on the darkest (most significant) cells.
            if Sig[j] > 4 and Sig[j] < 5:
                if c < 0.9:
                    ax.annotate(
                        '%.2f' % c,
                        (j + 1, 4 - i),
                        color='w',
                        ha='center',
                        va='center',
                    )
                else:
                    ax.annotate(
                        '%.2f' % c,
                        (j + 1, 4 - i),
                        color='w',
                        fontsize=9,
                        ha='center',
                        va='center',
                    )
            else:
                ax.annotate(
                    '%.2f' % c,
                    (j + 1, 4 - i),
                    ha='center',
                    va='center',
                )
    # Column headers along the top of the axes.
    x = xstart
    toto = 1  # NOTE(review): incremented but never read
    for leg in nsil:
        ax.annotate(leg, (x, ystart), xycoords='axes fraction',
                    size='large', ha='left', va='bottom')
        toto += 1
        x += xplus
    ax.set_xticks([])
    ax.set_yticks(Ticks)
    ax.set_yticklabels(ylabels, size='xx-large', rotation=90)
    ax.set_ylim(ymin=4.4 - len(new_base[0]), ymax=4.6)
    ax.set_xlim(xmin=0.4, xmax=len(nsil) + 0.6)
    ax.set_aspect('equal', adjustable='box-forced', anchor='C')
    # Invisible image provides the mappable for the colorbar below.
    im = ax.imshow([[0, 5]], cmap=cmap, extent=None, origin='upper',
                   interpolation='none', visible=False)
    cax, kw = plt.matplotlib.colorbar.make_axes(ax,
                                                orientation='horizontal',
                                                pad=0.02)
    cb = plt.matplotlib.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm,
                                              boundaries=bounds + [9],
                                              extend='max', ticks=bounds,
                                              spacing='proportional',
                                              orientation='horizontal')
    cb.set_label('Pearson correlation coefficient significance ($\sigma$)',
                 fontsize=14)
def plot_spectrum_corrected(self,No_corrected=True):
    """
    Plot observed spectra and SUGAR-corrected residuals, and the wRMS.

    Top panel: optionally the observed spectra, and the residuals after
    subtracting the intrinsic components (alpha . q), the mean offset
    and the extinction term (Av * extinction law), colored by Av.
    Bottom panel: weighted RMS per wavelength bin before and after
    correction.

    No_corrected: bool, if True also draw the uncorrected spectra and
    the uncorrected wRMS curve.
    """
    Mag_all_sn=copy.deepcopy(self.Mag_no_corrected)
    Mag_all_sn_var = N.zeros_like(Mag_all_sn)
    wRMS = N.zeros(len(self.X))
    wRMS_no_correct = N.zeros(len(self.X))
    for Bin in range(len(self.X)):
        # Subtract the SUGAR model: intrinsic part + grey offset, plus
        # extinction only when a (positive) Rv was fitted.
        if self.Rv>0:
            Mag_all_sn[:,Bin]-=(N.dot(self.alpha[Bin],self.data.T))+self.trans+(self.Av*Astro.Extinction.extinctionLaw(self.X[Bin],Rv=self.Rv))
        else:
            Mag_all_sn[:,Bin]-=(N.dot(self.alpha[Bin],self.data.T))+self.trans
        # Residual variance: measurement + reconstruction + dispersion.
        Mag_all_sn_var[:,Bin] = self.Y_err[:,Bin]**2 + self.Y_build_error[:,Bin]**2 + self.disp_matrix[Bin,Bin]
        wRMS[Bin] = sugar.comp_rms(Mag_all_sn[:,Bin]-self.M0[Bin], 'francis', err=False, variance=Mag_all_sn_var[:,Bin])
        # Uncorrected wRMS is taken around the weighted mean spectrum.
        wRMS_no_correct[Bin] = sugar.comp_rms(self.Mag_no_corrected[:,Bin] - N.average(self.Mag_no_corrected[:,Bin],weights=1./self.Y_err[:,Bin]**2), 'francis', err=False, variance=self.Y_err[:,Bin]**2)
    self.MAG_CORRECTED= Mag_all_sn
    self.WRMS_CORR = wRMS
    self.WRMS_NO_CORR = wRMS_no_correct
    # Sort indices by Av (only the sorted tuples are kept here).
    indice = N.linspace(0,len(self.Av)-1,len(self.Av)).astype(int)
    Av, indice = zip(*sorted(zip(self.Av, indice)))
    colors = P.cm.coolwarm(self.Av)
    Grey_scatter = N.zeros(len(self.Av))  # NOTE(review): never filled (code commented out below)
    #P.figure(42)
    fig = P.figure(1,figsize=(12,12))
    P.subplots_adjust(left=0.06, bottom=0.06, right=0.99, top=0.9,hspace=0.001)
    CST_MANU = 0
    for sn in range(len(self.sn_name)):
        P.subplot(2,1,1)
        # NOTE(review): the if/else branches below are identical --
        # presumably a per-SN label was once set in the sn==0 branch.
        if No_corrected:
            if sn==0:
                P.plot(self.X,self.Mag_no_corrected[sn]+16.7,color=colors[sn],linewidth=3,zorder=self.Av[sn])
            else:
                P.plot(self.X,self.Mag_no_corrected[sn]+16.7,color=colors[sn],linewidth=3,zorder=self.Av[sn])
        if sn==0:
            P.plot(self.X,Mag_all_sn[sn]-self.M0+N.mean(self.M0)+22.5,color=colors[sn],linewidth=3,alpha=0.5,zorder=self.Av[sn])
        else:
            P.plot(self.X,Mag_all_sn[sn]-self.M0+N.mean(self.M0)+22.5,color=colors[sn],linewidth=3,alpha=0.5,zorder=self.Av[sn])
        #P.figure(42)
        #P.plot(self.X,Mag_all_sn[sn]-self.M0+N.mean(self.M0)+22.5+CST_MANU,color=colors[sn],linewidth=3,alpha=1,zorder=self.Av[sn])
        #Grey_scatter[sn] = N.average(Mag_all_sn[sn]-self.M0, weights = 1./Mag_all_sn_var[sn])
        #CST_MANU+=0.2
        #P.figure(1)
        #P.subplot(2,1,1)
    P.text(6500,-3.7,'Observed spectra',fontsize=20)
    P.text(6000,2.5,'Corrected residuals ($q_1$, $q_2$, $q_3$, $A_{\lambda_0}$)',fontsize=20)
    # Off-screen scatter (x,y = Av+2500 lies outside xlim) whose only
    # purpose is to provide the mappable for the Av colorbar.
    scat = P.scatter(self.Av+2500,self.Av+2500,c=self.Av,cmap=P.cm.coolwarm)
    ax_cbar1 = fig.add_axes([0.08, 0.92, 0.88, 0.025])
    P.subplot(2,1,1)
    cb = P.colorbar(scat, cax=ax_cbar1, orientation='horizontal')
    cb.set_label('$A_{\lambda_0}$',fontsize=20, labelpad=-67)
    P.ylabel('Mag AB + cst',fontsize=20)
    P.ylim(-5,7)
    # NOTE(review): placeholder tick labels ('toto'/'pouet') at
    # positions outside xlim -- effectively hides the x ticks.
    P.xticks([2500.,9500.],['toto','pouet'])
    P.xlim(self.X[0]-60,self.X[-1]+60)
    #P.legend(loc=4)
    P.gca().invert_yaxis()
    #STD=N.std(Mag_all_sn,axis=0)
    #STD_no_correct=N.std(self.Mag_no_corrected,axis=0)
    P.subplot(2,1,2)
    if No_corrected:
        P.plot(self.X,wRMS_no_correct,'r',linewidth=3,label=r'Observed wRMS, average between $[6360\AA,6600\AA]$ = %.2f mag' %(N.mean(wRMS_no_correct[self.floor_filter])))
    P.plot(self.X,wRMS,'b',linewidth=3,label=r'Corrected wRMS, average between $[6360\AA,6600\AA]$ = %.2f mag' %(N.mean(wRMS[self.floor_filter])))
    P.plot(self.X,N.zeros(len(self.X)),'k')
    P.ylabel('wRMS (mag)',fontsize=20)
    P.xlabel('wavelength [$\AA$]',fontsize=20)
    P.xlim(self.X[0]-60,self.X[-1]+60)
    P.ylim(0.0,0.62)
    P.legend()
    #P.figure(42)
    #P.gca().invert_yaxis()
    #P.ylabel('residuals + cst.',fontsize=20)
    #P.xlabel('wavelength [$\AA$]',fontsize=20)
    # NOTE(review): the zhelio lookup below is collected but not used
    # anywhere later in this method.
    A = sugar.load_data_sugar()
    A.load_salt2_data()
    zhelio = []
    for sn in range(len(self.sn_name)):
        for sn2 in range(len(A.sn_name)):
            if A.sn_name[sn2] == self.sn_name[sn]:
                zhelio.append(A.zhelio[sn2])
    zhelio = N.array(zhelio)
def comp_mean(phase_min=-12, phase_max=48, draw=False):
    """Compute a smoothed average SNIa spectral time series for the GP.

    Bins every supernova's spectra on a common (phase, wavelength)
    grid (one bin every 2 days), averages them with inverse-variance
    weights, smooths along phase with a Savitzky-Golay filter, and
    writes 'average_for_gp.dat' with one 'phase wavelength mag' line
    per grid point.

    Parameters
    ----------
    phase_min, phase_max : float
        Phase range of the grid, in days.
    draw : bool
        If True, show control plots of the binning and the averages.
    """
    slds = sugar.load_data_sugar()
    slds.load_spectra()
    # list() so indexing works on Python 3 (dict views are not indexable).
    sn_name = list(slds.spectra.keys())
    wave = slds.spectra_wavelength[sn_name[0]]['0']
    number_bin_wavelength = len(slds.spectra_wavelength[sn_name[0]]['0'])
    # // keeps the bin count an int on both Python 2 and 3
    # (the original int(...)/2 became a float under Python 3).
    spectra_bin_phase = np.linspace(phase_min, phase_max,
                                    int(phase_max - phase_min) // 2 + 1)
    number_bin_phases = len(spectra_bin_phase)

    hist_spectra = np.zeros(
        (len(sn_name), number_bin_wavelength * number_bin_phases))
    hist_spectra_weights = np.zeros(
        (len(sn_name), number_bin_wavelength * number_bin_phases))
    wavelength = np.zeros(number_bin_wavelength * number_bin_phases)
    phases = np.zeros(number_bin_wavelength * number_bin_phases)

    # Index maps between "spectra ordering" (phase-major) and
    # "light-curve ordering" (wavelength-major) of the flat grid.
    reorder_spec_to_lc = np.arange(
        number_bin_wavelength * number_bin_phases).reshape(
            number_bin_phases, number_bin_wavelength).T.reshape(-1)
    reorder_lc_to_spec = np.arange(
        number_bin_wavelength * number_bin_phases).reshape(
            number_bin_wavelength, number_bin_phases).T.reshape(-1)

    for sn in range(len(sn_name)):
        # print() call works on both Python 2 and 3.
        print('%s %i/%i' % (sn_name[sn], sn + 1, len(sn_name)))
        for t in range(len(spectra_bin_phase)):
            for key in slds.spectra_phases[sn_name[sn]].keys():
                # A spectrum contributes to a phase bin when it lies
                # within 1 day of the bin center.
                if abs(spectra_bin_phase[t] -
                       slds.spectra_phases[sn_name[sn]][key]) < 1:
                    sl = slice(number_bin_wavelength * t,
                               number_bin_wavelength * (t + 1))
                    hist_spectra[sn, sl] = slds.spectra[sn_name[sn]][key]
                    hist_spectra_weights[sn, sl] = \
                        1. / slds.spectra_variance[sn_name[sn]][key]
                    wavelength[sl] = wave
                    phases[sl] = spectra_bin_phase[t]

    average_function_spectra = np.average(hist_spectra,
                                          weights=hist_spectra_weights,
                                          axis=0)
    average_function_light_curve = average_function_spectra[reorder_spec_to_lc]
    wavelength = wavelength[reorder_spec_to_lc]
    phases = phases[reorder_spec_to_lc]

    # Smooth each wavelength bin's light curve along phase.
    average_function_light_curve_smooth = np.zeros_like(
        average_function_light_curve)
    for i in range(number_bin_wavelength):
        print(i)
        sl = slice(number_bin_phases * i, number_bin_phases * (i + 1))
        average_function_light_curve_smooth[sl] = savgol_filter(
            average_function_light_curve[sl], 15, 2)
    average_function_spectra_smooth = average_function_light_curve_smooth[
        reorder_lc_to_spec]

    # Context manager guarantees the file is closed even on error
    # (the original left the handle open if a write failed).
    with open('average_for_gp.dat', 'w') as fichier:
        for i in range(len(wavelength)):
            fichier.write('%.5f %.5f %.5f \n' %
                          ((phases[i], wavelength[i],
                            average_function_light_curve_smooth[i])))

    if draw:
        import pylab as plt
        plt.figure()
        plt.imshow(hist_spectra, interpolation='nearest', aspect='auto',
                   cmap=plt.cm.Greys)
        plt.figure()
        plt.imshow(hist_spectra_weights, interpolation='nearest',
                   aspect='auto', cmap=plt.cm.Greys_r)
        plt.figure()
        plt.plot(average_function_light_curve)
        plt.plot(average_function_light_curve_smooth)
        plt.plot(average_function_spectra + 5)
        plt.plot(average_function_spectra_smooth + 5)
        plt.gca().invert_yaxis()
        plt.show()