Example #1
#Imports assumed throughout these examples; the Ensemble/FisherAnalysis API used here
#looks like lenstools', so the import paths below are assumptions to adjust to your package/version
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from lenstools import Ensemble
from lenstools.statistics.constraints import FisherAnalysis

def test_selfChi2():

    ens = Ensemble.read(
        "Data/all/Om0.295_Ol0.705_w-1.878_ns0.960_si0.100/subfield1/sigma05/power_spectrum.npy"
    )
    chi2 = ens.selfChi2()
    assert chi2.shape[0] == ens.data.shape[0]

    #Plot histogram
    fig, ax = plt.subplots()
    n, bins, patch = ax.hist(chi2,
                             bins=50,
                             density=True,  #matplotlib's old 'normed' keyword was removed
                             histtype="stepfilled",
                             alpha=0.5)

    #Compare to the chi2 distribution with ens.data.shape[1] degrees of freedom
    ax.plot(bins, stats.chi2.pdf(bins, ens.data.shape[1]))

    #Labels
    ax.set_xlabel(r"$\chi^2$")
    ax.set_ylabel(r"$P(\chi^2)$")

    #Save figure
    fig.savefig("self_chi2.png")
Example #2
def test_save_and_load():

    conv_ensemble.save("ensemble_saved.npy")
    conv_ensemble.save("ensemble_saved", format="matlab", appendmat=True)
    conv_ensemble_new = Ensemble.read("ensemble_saved.npy")

    assert conv_ensemble_new.num_realizations == conv_ensemble.num_realizations
    assert conv_ensemble_new.data.shape == conv_ensemble.data.shape
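The test above relies on a module-level conv_ensemble fixture that the snippet does not show. A minimal sketch of such a fixture, assuming the same Ensemble.read interface used in the other examples and a hypothetical sample file on disk:

#Hypothetical fixture for test_save_and_load: any small ensemble read from disk will do
conv_ensemble = Ensemble.read("Data/ensemble_sample.npy")  #hypothetical path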
Example #4
def confusionMatrix(descriptor,measurement_list,measurement_covariance):

	#Instantiate a FisherAnalysis instance
	analysis = FisherAnalysis()

	#Populate with the models
	for measurement in measurement_list:
		ens = Ensemble.read(measurement.savename(descriptor))
		analysis.add_model(measurement.model.squeeze(with_ns=True),ens.mean())

	#Compute the covariance matrix
	covariance = Ensemble.read(measurement_covariance.savename(descriptor)).covariance()

	#################################################################################
	#####Now we are ready to compute the confusion matrix, for each parameter########
	#################################################################################

	#Allocate space for confusion matrix
	confusion_matrix = np.zeros((4,3,3))

	#Find where the variations of each model parameter are located
	locations = analysis.where()

	#Cycle over parameters
	for n in range(4):

		l0 = analysis._fiducial
		l1,l2 = locations[n]

		print("[+] n={0}, fiducial: {1}, variation 1: {2}, variation 2:{3}".format(n,analysis.parameter_set[l0],analysis.parameter_set[l1],analysis.parameter_set[l2]))

		for i,m in enumerate([l0,l1,l2]):
		
			#Load the ensemble
			ens = Ensemble.read(measurement_list[m].savename(descriptor))
			
			#Classify the realizations of this model: this fills one row of the confusion matrix
			confusion_matrix[n,i] = analysis.classify(ens.data,covariance,labels=[l0,l1,l2],confusion=True)


	#Return the confusion matrix for the selected descriptor
	return confusion_matrix
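A hypothetical usage sketch: confusionMatrix returns a (4,3,3) array, one 3x3 matrix per varied parameter, with entry [i,j] assumed to give the fraction of realizations of model i classified as model j (as suggested by the classify call with confusion=True). The descriptor, measurement_list and measurement_covariance objects come from the surrounding analysis code and are not defined here.

#Hypothetical call; inspect the correct-classification fractions on the diagonal
cm = confusionMatrix(descriptor, measurement_list, measurement_covariance)
for n in range(cm.shape[0]):
	print("Parameter {0}: correct classification fractions = {1}".format(n, np.diag(cm[n])))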
Example #5
def test_pca():

    pca_ensemble = Ensemble.read("Data/ensemble_pca.npy")
    pca = pca_ensemble.principalComponents()
    assert len(pca.explained_variance_) == pca_ensemble.data.shape[1]

    fig, ax = plt.subplots(1, 2, figsize=(16, 8))
    ax[0].plot(pca.explained_variance_)
    ax[1].plot(pca.explained_variance_.cumsum())
    ax[0].set_xlabel(r"$n$")
    ax[1].set_xlabel(r"$n$")
    ax[0].set_ylabel(r"$\lambda_n$")
    ax[1].set_ylabel(r"$\sum^n\lambda_n$")

    fig.savefig("pca.png")
Example #8
def pbBias(cmd_args,feature_name="convergence_power_s0_nb100",title="Power spectrum",kappa_models=("Born",),callback=None,variation_idx=(0,),bootstrap_size=1000,resample=1000,return_results=False,fontsize=22):
	
	#Initialize plot
	fig,ax = plt.subplots(len(variation_idx),3,figsize=(24,8*len(variation_idx)))
	ax = np.atleast_2d(ax)

	##################
	#Load in the data#
	##################

	#Observation
	bootstrap_mean = lambda e: e.values.mean(0)
	feature_ray = Ensemble.read(os.path.join(fiducial["c0"].getMapSet("kappa").home,feature_name+".npy"),callback_loader=callback).bootstrap(bootstrap_mean,bootstrap_size=bootstrap_size,resample=resample,seed=0)

	#Containers for cosmological model
	modelFeatures = dict()
	for mf in kappa_models:
		modelFeatures[mf] = dict()

	parameters = dict()

	for model in models:
		parameters[model.cosmo_id] = np.array([model.cosmology.Om0,model.cosmology.w0,model.cosmology.sigma8])
		for mf in kappa_models:

			try:
				modelFeatures[mf][model.cosmo_id] = Ensemble.read(os.path.join(model["c0"].getMapSet("kappa"+mf).home,feature_name+".npy"),callback_loader=callback)
			except IOError:
				pass

	#Fit each model
	for mf in kappa_models:

		#Select the features measured with this kappa model
		features = modelFeatures[mf]

		###############################
		#Compute the covariance matrix#
		###############################

		features_covariance = features[fiducial.cosmo_id].cov()

		################################################
		#Load in the feature to fit, bootstrap the mean#
		################################################
	
		feature_born = features[fiducial.cosmo_id].bootstrap(bootstrap_mean,bootstrap_size=bootstrap_size,resample=resample,seed=0)

		for nv,v in enumerate(variation_idx):

			###############################
			#Initialize the FisherAnalysis#
			###############################

			ftr = np.array([features[m.cosmo_id].values.mean(0) for m in [fiducial] + variations[v]])
			par = np.array([parameters[m.cosmo_id] for m in [fiducial] + variations[v]])
			fisher = FisherAnalysis.from_features(ftr,par,parameter_index=["Om","w0","si8"])

			#############
			####Fit######
			#############

			fitted_parameters_born = fisher.fit(feature_born,features_covariance)
			fitted_parameters_ray = fisher.fit(feature_ray,features_covariance)

			if return_results:
				assert len(kappa_models)==1
				assert len(variation_idx)==1

				return fitted_parameters_born,fitted_parameters_ray

			##########
			#Plotting#
			##########

			for n,p in enumerate(fisher.parameter_names):
				fitted_parameters_born[p].plot.hist(bins=50,ax=ax[nv,n],edgecolor="none",label=r"${\rm Control}$")
				fitted_parameters_ray[p].plot.hist(bins=50,ax=ax[nv,n],edgecolor="none",label=r"${\rm Observation}$")
				
				ax[nv,n].set_xlabel(plab[p],fontsize=fontsize)
				ax[nv,n].set_title(title)
				ax[nv,n].legend(loc="upper right",mode="expand",ncol=2,prop={"size":20})

	#Labels
	for a in ax.flatten():
		plt.setp(a.get_xticklabels(),rotation=30)
	
	#Save
	fig.tight_layout()
	fig.savefig("{0}/bornBias_{1}.{0}".format(cmd_args.type,feature_name))