Example #1
    def __init__(self, path, data, command_line):

        likelihood.__init__(self, path, data, command_line)

        # define array for values of z and data points
        self.z = np.array([], 'float64')
        self.data = np.array([], 'float64')
        self.error = np.array([], 'float64')
        self.type = np.array([], 'int')

        # read redshifts and data points
        for line in open(self.data_directory + self.file, 'r'):
            if (line.find('#') == -1):
                self.z = np.append(self.z, float(line.split()[0]))
                self.data = np.append(self.data, float(line.split()[1]))
                self.error = np.append(self.error, float(line.split()[2]))
                self.type = np.append(self.type, int(line.split()[3]))

        # number of data points
        self.num_points = np.shape(self.z)[0]

        for i in range(self.num_points):
            if self.type[i] == 3:
                self.data[i] = self.data[i] * self.known_rs
                self.error[i] = self.data[i] * sqrt(
                    (self.error[i] / self.data[i]) ** 2 + (self.rs_error / self.known_rs) ** 2)
                self.type[i] = 4
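        # (Explanatory note, not part of the original likelihood: type-3 points
        # are rescaled by the fixed sound horizon self.known_rs, their
        # fractional error is combined in quadrature with the fractional error
        # on the sound horizon, self.rs_error/self.known_rs, and the points are
        # then relabelled as type 4, presumably so that loglkl() treats them
        # like native type-4 points.)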
Example #2
    def __init__(self, path, data, command_line):
        """
        The structure differs significantly from other likelihoods, in order to
        follow the data structure as simply as possible.
        
        Each redshift bin in WiggleZ contains a .dataset file with information
        on this redshift bin. The structure to read this has been encoded in
        the class likelihood_mpk, which will also be used with the next data
        release of SDSS.

        The full WiggleZ likelihood is then made out of the four likelihood_mpk
        classes WiggleZ_a, b, c and d, which are **defined dynamically** thanks
        to the :func:`type` function in Python, inheriting from
        :class:`likelihood_mpk`.

        Some additional keyword arguments are passed to the initialization of
        these classes, in order to use the method
        :meth:`add_common_knowledge`, which distributes a dictionary of shared
        attributes to all four redshift bins.

        """

        likelihood.__init__(self, path, data, command_line)

        # This obscure command dynamically creates 4 likelihood classes, called
        # WiggleZ_a, b, c and d respectively, all inheriting from
        # likelihood_mpk.
        for elem in ['a', 'b', 'c', 'd']:
            exec("WiggleZ_%s = type('WiggleZ_%s', (likelihood_mpk, ), {})" % \
                (elem, elem))
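        # (Explanatory sketch, not part of the original code: the exec above is
        # equivalent to writing explicitly, e.g. for the first bin,
        #     WiggleZ_a = type('WiggleZ_a', (likelihood_mpk, ), {})
        # where the built-in type(name, bases, dict) creates a new class named
        # 'WiggleZ_a' inheriting from likelihood_mpk, with an empty attribute
        # dictionary.)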

        # Initialize the four independent redshift bins one after the other
        # (note: the order in the array self.redshift_bins_files must be
        # respected!)
        self.wigglez_a = WiggleZ_a(
            self.data_directory + self.redshift_bins_files[0], data,
            command_line, common=True, common_dict=self.common)

        self.wigglez_b = WiggleZ_b(
            self.data_directory + self.redshift_bins_files[1], data,
            command_line, common=True, common_dict=self.common)

        self.wigglez_c = WiggleZ_c(
            self.data_directory + self.redshift_bins_files[2], data,
            command_line, common=True, common_dict=self.common)

        self.wigglez_d = WiggleZ_d(
            self.data_directory + self.redshift_bins_files[3], data,
            command_line, common=True, common_dict=self.common)
Example #3
    def __init__(self, path, data, command_line):

        likelihood.__init__(self, path, data, command_line)

        # define array for values of z and data points
        self.z = np.array([], 'float64')
        self.data = np.array([], 'float64')
        self.error = np.array([], 'float64')
        self.type = np.array([], 'int')

        # read redshifts and data points
        for line in open(self.data_directory + self.file, 'r'):
            if (line.find('#') == -1):
                self.z = np.append(self.z, float(line.split()[0]))
                self.data = np.append(self.data, float(line.split()[1]))
                self.error = np.append(self.error, float(line.split()[2]))
                self.type = np.append(self.type, int(line.split()[3]))

        # number of data points
        self.num_points = np.shape(self.z)[0]
Example #4
    def __init__(self, path, data, command_line):

        likelihood.__init__(self, path, data, command_line)

        # define array for values of z and data points
        self.z = np.array([], 'float64')
        self.moduli = np.array([], 'float64')

        # read redshifts and data points
        for line in open(self.data_directory + self.z_mu_dmu, 'r'):
            if (line.find('#') == -1):
                self.z = np.append(self.z, float(line.split()[1]))
                self.moduli = np.append(self.moduli, float(line.split()[2]))

        # number of data points
        self.num_points = np.shape(self.z)[0]

        # define correlation matrix
        covmat = np.zeros((self.num_points, self.num_points), 'float64')

        # file containing correlation matrix
        if self.has_syscovmat:
            covmat_filename = self.covmat_sys
        else:
            covmat_filename = self.covmat_nosys

        # read correlation matrix
        i = 0
        for line in open(self.data_directory + covmat_filename, 'r'):
            if (line.find('#') == -1):
                covmat[i] = line.split()
                i += 1

        # invert correlation matrix
        self.inv_covmat = np.linalg.inv(covmat)

        # find sum of all matrix elements (sounds odd that there is
        # not a trace here instead, but this is correct!)
        self.inv_covmat_sum = np.sum(self.inv_covmat)
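        # (Explanatory note, an interpretation not stated in the code: the sum
        # of all elements of the inverse covariance matrix is what appears when
        # a constant offset in the data -- here presumably the supernova
        # absolute magnitude -- is marginalised over analytically, turning
        # chi2 = r^T C^-1 r into chi2 - (sum_i (C^-1 r)_i)^2 / sum_ij (C^-1)_ij.)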
Example #5
    def __init__(self, path, data, command_line):

        likelihood.__init__(self, path, data, command_line)

        # define array for values of z and data points
        self.zd = np.array([], 'float64')
        self.zs = np.array([], 'float64')
        self.lambda_d = np.array([], 'float64')
        self.mu_d = np.array([], 'float64')
        self.sigma_d = np.array([], 'float64')

        # read redshifts and data points
        for line in open(self.data_directory + self.file, 'r'):
            if (line.find("#") == -1):
                self.zd = np.append(self.zd, float(line.split()[0]))
                self.zs = np.append(self.zs, float(line.split()[1]))
                self.lambda_d = np.append(
                    self.lambda_d, float(line.split()[2]))
                self.mu_d = np.append(self.mu_d, float(line.split()[3]))
                self.sigma_d = np.append(self.sigma_d, float(line.split()[4]))

        # number of data points
        self.num_points = np.shape(self.zd)[0]
Example #6
    def __init__(self, path, data, command_line):

        # Standard initialization, reads the .data
        likelihood.__init__(self, path, data, command_line)

        # Extra needed cosmological parameters
        self.need_cosmo_arguments(
            data, {'output': 'tCl pCl lCl', 'lensing': 'yes'})

        try:
            import pywlik
        except ImportError:
            print " /|\  You must first activate the binaries from the wmap wrapper code,"
            print "/_o_\ please run : source /path/to/montepython/wrapper_wmap/bin/clik_profile.sh"
            print "      and try again."
            exit()

        # initialize the WMAP likelihood through the pywlik wrapper
        self.wmaplike = pywlik.wlik(self.large_data_directory, self.ttmin,
                                    self.ttmax, self.temin, self.temax, self.use_gibbs, self.use_lowlpol)

        # self.cls = np.loadtxt(self.cl_test_file)

        # loglike = self.wmaplike(self.cls)
        # print "got %g expected %g"%(loglike,-845.483)

        self.l_max = max(self.ttmax, self.temax)
        self.need_cosmo_arguments(data, {'l_max_scalars': self.l_max})

        # deal with nuisance parameters
        try:
            self.use_nuisance
        except:
            self.use_nuisance = []
        self.read_contamination_spectra(data)

        pass
Example #7
  def __init__(self, path, data, command_line):

    likelihood.__init__(self, path, data, command_line)

    self.need_cosmo_arguments(data, {'output':'mPk'})
    
    #################
    # find number of galaxies for each mean redshift value
    #################

    # Compute the number of galaxies for each \bar z
    # For this, one needs dn/dz TODO
    # then n_g(\bar z) = int_{\bar z - dz/2}^{\bar z + dz/2} dn/dz dz
    # self.z_mean will contain the central values

    self.z_mean = np.zeros(self.nbin,'float64')

    # Deduce the dz step from the number of bins and the edge values of z
    self.dz = (self.zmax-self.zmin)/(self.nbin-1.)
    i=0
    for z in np.arange(self.zmin,self.zmax+self.dz,self.dz):
      self.z_mean[i] = z
      i+=1

    # Store the z edge values
    self.z_edges = np.zeros(self.nbin+1,'float64')
    i = 0
    for z in np.arange(self.zmin-self.dz/2.,self.zmax+self.dz,self.dz):
      self.z_edges[i] = z
      i+=1

    # Store the total vector z, with edges + mean
    self.z = np.zeros(2*self.nbin+1,'float64')
    i=0
    for z in np.arange(self.zmin-self.dz/2.,self.zmax+self.dz,self.dz/2.):
      self.z[i] = z
      i+=1

    self.need_cosmo_arguments(data,{'z_max_pk':self.z[-1]})

    # For each bin, compute the bias function
    self.b = np.zeros(self.nbin,'float64')
    for Bin in range(self.nbin):
      self.b[Bin] = sqrt(self.z_mean[Bin]+1.)

    # Force the cosmological module to store Pk for k up to an arbitrary number
    # (since self.r is not yet decided)... TODO
    self.need_cosmo_arguments(data,{'P_k_max_1/Mpc':1.5*self.kmax})

    # Define the k values for the integration (from kmin to kmax), at which the
    # spectrum will be computed (and stored for the fiducial model)
    # k_size is deeply arbitrary here, TODO
    
    self.k_fid = np.zeros(self.k_size,'float64')
    for i in range(self.k_size):
      self.k_fid[i] = exp( i*1.0 /(self.k_size-1) * log(self.kmax/self.kmin) + log(self.kmin))
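    # (Explanatory sketch, not part of the original code: the loop above builds
    # a logarithmically spaced grid from kmin to kmax; with numpy it could
    # equivalently be written as
    #     self.k_fid = np.logspace(log10(self.kmin), log10(self.kmax), self.k_size)
    # assuming log10 is imported from the math module.)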

    ################
    # Noise spectrum TODO properly
    ################

    self.n_g = np.zeros(self.nbin,'float64')
    #for index_z in range(self.nbin):
      #print self.z_mean[index_z],self.z[2*index_z],self.z[2*index_z+2]
      #self.n_g[index_z] = self.galaxy_distribution(self.z[2*index_z+2]) - self.galaxy_distribution(self.z[2*index_z])
    #print self.n_g

    self.n_g[0] = 6844.945
    self.n_g[1] = 7129.45
    self.n_g[2] = 7249.912
    self.n_g[3] = 7261.722
    self.n_g[4] = 7203.825
    self.n_g[5] = 7103.047
    self.n_g[6] = 6977.571
    self.n_g[7] = 6839.546
    self.n_g[8] = 6696.957
    self.n_g[9] = 5496.988
    self.n_g[10] = 4459.240
    self.n_g[11] = 3577.143
    self.n_g[12] = 2838.767
    self.n_g[13] = 2229.282
    self.n_g[14] = 1732.706
    self.n_g[15] = 1333.091

    self.n_g = self.n_g * self.efficiency * 41253.
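    # (Explanatory note, an interpretation not stated in the code: 41253 is
    # approximately the number of square degrees on the full sky,
    # 4*pi*(180/pi)^2 ~ 41252.96, so the line above presumably converts a
    # per-square-degree count into a full-sky count, scaled by the survey
    # efficiency.)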

    # If the file exists, initialize the fiducial values: the spectrum is read
    # first, with k_size values of k and 2*nbin+1 values of z. Then H_fid and
    # D_A_fid are read (each with 2*nbin+1 values), and finally sigma_r_fid
    # (with nbin values).
    #self.Cl_fid = np.zeros((self.nlmax,self.nbin,self.nbin),'float64')
    self.fid_values_exist = False
    self.pk_nl_fid = np.zeros((self.k_size,2*self.nbin+1),'float64')
    self.H_fid       = np.zeros(2*self.nbin+1,'float64')
    self.D_A_fid     = np.zeros(2*self.nbin+1,'float64') 
    self.sigma_r_fid = np.zeros(self.nbin,'float64')

    if os.path.exists(self.data_directory+'/'+self.fiducial_file):
      self.fid_values_exist = True
      fid_file = open(self.data_directory+'/'+self.fiducial_file,'r')
      line = fid_file.readline()
      while line.find('#')!=-1:
        line = fid_file.readline()
      while (line.find('\n')!=-1 and len(line)==1):
        line = fid_file.readline()
      for index_k in range(self.k_size):
        for index_z in range(2*self.nbin+1):
          self.pk_nl_fid[index_k,index_z] = float(line)
          line = fid_file.readline()
      for index_z in range(2*self.nbin+1):
        self.H_fid[index_z]   = float(line.split()[0])
        self.D_A_fid[index_z] = float(line.split()[1])
        line = fid_file.readline()
      for index_z in range(self.nbin):
        self.sigma_r_fid[index_z] = float(line)
        line = fid_file.readline()
      fid_file.seek(0)
      fid_file.close()
      
    # Else the file will be created in the loglkl() function. 
    return
Example #8
  def __init__(self, path, data, command_line):

    likelihood.__init__(self, path, data, command_line)

    ############ read data   ###########

    self.z = np.zeros(self.num_z,'float64')
    self.k = np.zeros(self.num_k,'float64')
    self.pk_obs = np.zeros((self.num_k,self.num_z),'float64')
    self.npk_obs = np.zeros((self.num_k,self.num_z),'float64')
    covmat = np.zeros((self.num_k*self.num_z,self.num_k*self.num_z),'float64')
    invcovmat = np.zeros((self.num_k*self.num_z,self.num_k*self.num_z),'float64')
    self.inv_covmat = np.zeros((self.num_k,self.num_z,self.num_k,self.num_z),'float64')
    self.dampcorr = np.zeros((self.num_k,self.num_z),'float64')

    # k, z, pk, noise

    inputfile = open(self.data_directory+self.table,'r')

    for zz in range(self.num_z):
      for kk in range(self.num_k):

        line = inputfile.readline()

        zzz = float(line.split()[0])
        if self.z[zz] == 0.:
          self.z[zz]=zzz
        else:
          if self.z[zz] != zzz:
            print 'input data table not arranged as expected',self.z[zz],zzz
            exit()

        kkk = float(line.split()[1])
        if self.k[kk] == 0.:
          self.k[kk]=kkk
        else:
          if self.k[kk] != kkk:
            print 'input data table not arranged as expected',self.k[kk],kkk
            exit()

        self.pk_obs[kk,zz] = line.split()[2]
        self.npk_obs[kk,zz] = line.split()[4]

    inputfile.close()    

    # covariance matrix

    inputfile = open(self.data_directory+self.covar,'r')

    for np1 in range(self.num_k*self.num_z):
      line = inputfile.readline()
      for np2 in range(self.num_k*self.num_z):
        covmat[np1,np2] = float(line.split()[np2])

    inputfile.close() 
    
    # invert covariance matrix and store it with proper indexing

    invcovmat = np.linalg.inv(covmat)

    for zz1 in range(self.num_z):
      for kk1 in range(self.num_k):
        for zz2 in range(self.num_z):
          for kk2 in range(self.num_k):
            self.inv_covmat[kk1,zz1,kk2,zz2]=invcovmat[zz1*self.num_k+kk1,zz2*self.num_k+kk2]

    # damping factors

    inputfile = open(self.data_directory+self.damp,'r')

    for zz in range(self.num_z):
      for kk in range(self.num_k):
        line = inputfile.readline()
        self.dampcorr[kk,zz] = float(line)

    inputfile.close() 

    ############ read theory ###########

    # index of parameters entering in calculation of theoretical flux spectrum (Taylor expansion parameters)

    index = 0
    self.index_s8 = index
    index+=1
    self.index_ns = index
    index+=1
    self.index_Om = index
    index+=1
    self.index_H0 = index
    index+=1
#    self.index_zr = index
#    index+=1
    self.index_g = index
    index+=1
    self.index_tf = index
    index+=1
    self.index_t0 = index
    index+=1
    self.index_num = index

    # read Taylor coefficients for these parameters

    # each table contains:
    #
    # [k1, z1, 2nd order]  ......  [k1, zmax, 2nd order]
    # [k2, z1, 2nd order]  ......  [k2, zmax, 2nd order]
    # ...............................................
    # [kmax, z1, 2nd order] ...... [kmax, zmax, 2nd order]
    # [k1, z1, 1st order]  ......  [k1, zmax, 1st order]
    # [k2, z1, 1st order]  ......  [k2, zmax, 1st order]
    # ...............................................
    # [kmax, z1, 1st order] ...... [kmax, zmax, 1st order]

    self.taylor = np.zeros((self.index_num,self.num_k,self.num_z,2),'float64')

    for param in range(self.index_num):

      if (param == self.index_s8):
        table_name = 's8_gamma1_new.dat'
      if (param == self.index_ns):
        table_name = 'n_gamma1_new.dat'
      if (param == self.index_Om):
        table_name = 'om_gamma1_new.dat'
      if (param == self.index_H0):
        table_name = 'h_gamma1_new.dat'
      if (param == self.index_g):
        table_name = 'coeff_gammaEQ1_all_pk_new.dat'
      if (param == self.index_tf):
        table_name = 'meanflux_gamma1_new.dat'
      if (param == self.index_t0):
        table_name = 't0_gamma1_new.dat'
#     if (param == self.index_zr):
#        table_name = 'highzreion.dat'

      inputfile = open(self.data_directory+table_name,'r')

      # reio table is special: only 1st order coefs, only 2nd half of table
#      if (param == self.index_zr):
#        for kk in range(self.num_k):
#          for zz in range(self.num_z):
#            self.taylor[param,kk,zz,1] = 0. # 2nd order fixed to zero
#        for kk in range(self.num_k):
#          line = inputfile.readline()
#          for zz in range(self.num_z):
#            self.taylor[param,kk,zz,0] = line.split()[zz]-1. # 1st order gets correction by one
#      else:
      # other tables

      for kk in range(self.num_k):
        line = inputfile.readline()
        for zz in range(self.num_z):
          self.taylor[param,kk,zz,1] = float(line.split()[zz])
      for kk in range(self.num_k):
        line = inputfile.readline()
        for zz in range(self.num_z):
          self.taylor[param,kk,zz,0] = float(line.split()[zz])

      inputfile.close()
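      # Alternative sketch, not part of the original code: each table stores
      # the 2nd-order block (num_k rows) followed by the 1st-order block
      # (num_k rows), each row holding num_z whitespace-separated columns, so
      # it could also be read in one go, assuming the file has no header lines:
      #     coeffs = np.loadtxt(self.data_directory + table_name)
      #     self.taylor[param, :, :, 1] = coeffs[:self.num_k, :]
      #     self.taylor[param, :, :, 0] = coeffs[self.num_k:, :]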

    # read best-fit values around which Taylor expansion is performed
    self.taylor_expansion_point = np.zeros((self.index_num,self.num_z),'float64')

    self.taylor_expansion_point[self.index_s8,0] = 0.85
    self.taylor_expansion_point[self.index_ns,0] = 0.95
    self.taylor_expansion_point[self.index_Om,0] = 0.26
    self.taylor_expansion_point[self.index_H0,0] = 72.
#    self.taylor_expansion_point[self.index_zr,0] = 6.

    self.taylor_expansion_point[self.index_tf,0] = 0.178000
    self.taylor_expansion_point[self.index_tf,1] = 0.2192
    self.taylor_expansion_point[self.index_tf,2] = 0.2714000
    self.taylor_expansion_point[self.index_tf,3] = 0.3285330
    self.taylor_expansion_point[self.index_tf,4] = 0.379867
    self.taylor_expansion_point[self.index_tf,5] = 0.42900
    self.taylor_expansion_point[self.index_tf,6] = 0.513000
    self.taylor_expansion_point[self.index_tf,7] = 0.600400
    self.taylor_expansion_point[self.index_tf,8] = 0.657800
    self.taylor_expansion_point[self.index_tf,9] = 0.756733
    self.taylor_expansion_point[self.index_tf,10] = 0.896000

    self.taylor_expansion_point[self.index_g,0] =  1.05
    self.taylor_expansion_point[self.index_g,1] =  1.05
    self.taylor_expansion_point[self.index_g,2] =  1.05
    self.taylor_expansion_point[self.index_g,3] =  1.05
    self.taylor_expansion_point[self.index_g,4] =  1.05
    self.taylor_expansion_point[self.index_g,5] =  1.05
    self.taylor_expansion_point[self.index_g,6] =  1.04
    self.taylor_expansion_point[self.index_g,7] =  1.04
    self.taylor_expansion_point[self.index_g,8] =  1.03
    self.taylor_expansion_point[self.index_g,9] =  1.03
    self.taylor_expansion_point[self.index_g,10] = 1.03

    self.taylor_expansion_point[self.index_t0,0] =  20600
    self.taylor_expansion_point[self.index_t0,1] =  21100
    self.taylor_expansion_point[self.index_t0,2] =  21600
    self.taylor_expansion_point[self.index_t0,3] =  22000
    self.taylor_expansion_point[self.index_t0,4] =  22300
    self.taylor_expansion_point[self.index_t0,5] =  22492
    self.taylor_expansion_point[self.index_t0,6] =  22600
    self.taylor_expansion_point[self.index_t0,7] =  22600
    self.taylor_expansion_point[self.index_t0,8] =  22505
    self.taylor_expansion_point[self.index_t0,9] = 22200
    self.taylor_expansion_point[self.index_t0,10] = 21529

    # flux spectrum of best-fit model

    self.pkth = np.zeros((self.num_k,self.num_z),'float64')

    inputfile = open(self.data_directory+'B2_gamma1_all.txt','r')

    for zz in range(self.num_z):
      for kk in range(self.num_k):
        self.pkth[kk,zz] = float(inputfile.readline())

    inputfile.close()    

    # resolution box size correction

    inputfile = open(self.data_directory+'res_box_table_all.txt','r')

    for kk in range(self.num_k):
      line = inputfile.readline()
      for zz in range(self.num_z):
        rb = float(line.split()[zz])
        self.pkth[kk,zz] *= rb

    inputfile.close() 

    ######## end read theory ##########

    # necessary class parameters: mPk and k_max = 2 h/Mpc are needed for sigma_8 to be computed accurately!

    self.need_cosmo_arguments(data,{'output':'mPk', 'P_k_max_h/Mpc':2})
 
    return
  def __init__(self, path, data, command_line):

    likelihood.__init__(self, path, data, command_line)

    self.need_cosmo_arguments(data,{'output':'mPk'})

    # Define array of l values, and initialize them
    self.l = np.zeros(self.nlmax,'float64')
    for nl in range(self.nlmax):
      self.l[nl] = 1.*math.exp(self.dlnl*nl)
    
    #print self.l[:]
    #exit()

    ########################################################
    # Find distribution of dn_dz (not normalized) in each bin
    ########################################################
    
    # Assuming each bin contains the same number of galaxies, we find the bin
    # limits in z space

    # Compute the total number of galaxies until zmax (no normalization yet)

    n_tot = 0.
    for z in np.arange(0,self.zmax+self.dz,self.dz):
      gd_1 = self.galaxy_distribution(z)
      gd_2 = self.galaxy_distribution(z+self.dz)
      n_tot += 0.5*(gd_1+gd_2)*self.dz

    # For each bin, compute the limit in z space
  
    # Create the array that will contain the z boundaries for each bin. The
    # first value is already correctly set to 0.
    self.z_bin_edge = np.zeros(self.nbin+1,'float64')

    for Bin in range(self.nbin-1):

      bin_count = 0.
      z = self.z_bin_edge[Bin]

      while (bin_count <= n_tot/self.nbin):
        gd_1 = self.galaxy_distribution(z)
        gd_2 = self.galaxy_distribution(z+self.dz)
        bin_count += 0.5*(gd_1+gd_2)*self.dz
        z += self.dz

      self.z_bin_edge[Bin+1] = z

    self.z_bin_edge[self.nbin] = self.zmax
    
    # Fill array of discrete z values
    self.z = np.zeros(self.nzmax,'float64')
    for nz in range(self.nzmax):
      self.z[nz] = (nz*1.0)/(self.nzmax-1.0)*self.zmax

    # Force the cosmological module to store Pk for redshifts up to max(self.z)
    self.need_cosmo_arguments(data,{'z_max_pk':self.z[-1]})
    # Force the cosmological module to store Pk for k up to an arbitrary number
    # (since self.r is not yet decided)... TODO
    self.need_cosmo_arguments(data,{'P_k_max_1/Mpc':self.k_max})

    # Fill distribution for each bin (convolving with photo_z distribution)
    self.eta_z = np.zeros((self.nzmax,self.nbin),'float64')
    for Bin in range(self.nbin):

      for nz in range(self.nzmax):
        z = self.z[nz]
        self.eta_z[nz,Bin] = 0.

        for nz2 in range(1,self.nzmax):

          if ((self.z[nz2] >= self.z_bin_edge[Bin]) and (self.z[nz2] <= self.z_bin_edge[Bin+1])):
            gd  = self.galaxy_distribution(self.z[nz2])
            pzd = self.photo_z_distribution(z,self.z[nz2])
            integrand_plus = gd*pzd
          else:
            integrand_plus = 0.

          if ((self.z[nz2-1] >= self.z_bin_edge[Bin]) and (self.z[nz2-1] <= self.z_bin_edge[Bin+1])):
            gd  = self.galaxy_distribution(self.z[nz2-1])
            pzd = self.photo_z_distribution(z,self.z[nz2-1])
            integrand_minus = gd*pzd
          else:
            integrand_minus = 0.

          self.eta_z[nz,Bin] += 0.5*(integrand_plus+integrand_minus)*(self.z[nz2]-self.z[nz2-1])

    #for nz in range(self.nzmax):
    #  print self.z[nz],self.eta_z[nz,0],self.eta_z[nz,1],self.eta_z[nz,2],self.eta_z[nz,3],self.eta_z[nz,4],self.galaxy_distribution(self.z[nz])
    #exit()  

    # integrate eta(z) over z (in view of normalizing it to one)
    self.eta_norm = np.zeros(self.nbin,'float64')
    for Bin in range(self.nbin):
      self.eta_norm[Bin] = np.sum(0.5*(self.eta_z[1:,Bin]+self.eta_z[:-1,Bin])*(self.z[1:]-self.z[:-1]))
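    # (Explanatory note, not part of the original code: the line above is a
    # vectorised trapezoidal rule and is equivalent to
    #     self.eta_norm[Bin] = np.trapz(self.eta_z[:, Bin], self.z)
    # which handles a possibly non-uniform grid self.z.)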

    ################
    # Noise spectrum 
    ################
    
    # Number of galaxies per steradian
    self.noise = 3600.*self.gal_per_sqarcmn*(180./math.pi)**2

    # Number of galaxies per steradian per bin
    self.noise = self.noise/self.nbin
    
    # Noise spectrum (diagonal in bin*bin space, independent of l and Bin)
    self.noise = self.rms_shear**2/self.noise
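    # (Explanatory note, an interpretation not stated in the code: the three
    # steps above build the usual shot-noise term of a cosmic-shear survey,
    # N = rms_shear^2 / n_bin, with n_bin the number of galaxies per steradian
    # in one redshift bin; the factor 3600*(180/pi)^2 converts a density per
    # square arcminute into a density per steradian.)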

    # TEST
    #self.noise = 0 
    
    
    ###########
    # Read data
    ###########

    # If the file exists, initialize the fiducial values
    self.Cl_fid = np.zeros((self.nlmax,self.nbin,self.nbin),'float64')
    self.fid_values_exist = False
    if os.path.exists(self.data_directory+'/'+self.fiducial_file):
      self.fid_values_exist = True
      fid_file = open(self.data_directory+'/'+self.fiducial_file,'r')
      line = fid_file.readline()
      while line.find('#')!=-1:
        line = fid_file.readline()
      while (line.find('\n')!=-1 and len(line)==1):
        line = fid_file.readline()
      for nl in range(self.nlmax):
        for Bin in range(self.nbin):
          for Bin2 in range(self.nbin):
            self.Cl_fid[nl,Bin,Bin2] = float(line)
            line = fid_file.readline()
      
    # Else the file will be created in the loglkl() function. 
    return