# ===============
# use a different-order finite-difference formula with the same gap
order = 5
# `step` holds the stencil offsets set earlier in the script; the
# commented lines below are alternative stencils with the same spacing.
# step = np.array([-4, -3, -2, -1, 1, 2, 3, 4])
# step = np.array([-8, -6, -4, -2, 2, 4, 6, 8])

# evaluate each parameter at its fiducial value plus/minus multiples of its gap
for key in values.keys():
    new_value[key] = par_gaps[key] * step + fid[key]
for key in values.keys():
    par_gaps[key] = np.abs(new_value[key][0] - new_value[key][1])
new_value = collections.OrderedDict(
    sorted(new_value.items(), key=lambda t: t[0]))
values = new_value
print values, par_gaps
# ===============
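# ---------------------------------------------------------------
# Illustration only (not from the original pipeline): a symmetric
# stencil such as step = [-4, ..., -1, 1, ..., 4] supports a
# high-order central difference. A minimal, self-contained sketch
# using the standard 8th-order first-derivative weights; `f` is any
# scalar function and `h` the gap (both generic, not script names).
def central_diff_8(f, x0, h):
    # 8th-order central-difference coefficients (the center weight is 0)
    coeffs = np.array([3., -32., 168., -672., 672., -168., 32., -3.]) / 840.
    offsets = np.array([-4., -3., -2., -1., 1., 2., 3., 4.])
    return np.sum(coeffs * np.array([f(x0 + o * h) for o in offsets])) / h
# e.g. central_diff_8(np.sin, 0.0, 0.1) matches cos(0) = 1 to ~1e-10
# ---------------------------------------------------------------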
par_gaps, values, fid = utils.exclude_parameters(exclude, par_gaps, values, fid)

print 'loading files'
# TODO: this load is the bottleneck; it could be sped up.
dats = utils.load_data(data_folder, values, lensed)

n_values = np.size(values.keys())
# The array indices need not equal the multipoles themselves,
# e.g. because CAMB starts from l = 2.
lmax_index = np.where(dats[:, 0, 0] == lmax)[0][0]
ltmax_index = np.where(dats[:, 0, 0] == l_t_max)[0][0]
lmin_index = np.where(dats[:, 0, 0] == lmin)[0][0]

# Cut Cl^T at ells larger than l_t_max: we cannot clean point sources there.
dats[ltmax_index:, 1, 1:] = 0.
# phi_T has oscillations in it.
# dats[900:, 6, 0:] = 0.

# create the n_values x n_values Fisher matrix
fisher = np.zeros((n_values, n_values))
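# ---------------------------------------------------------------
# A minimal sketch of how a Fisher matrix like this is typically
# filled, under the standard formula
#   F_ij = sum_l (2l + 1) / 2 * fsky
#          * Tr[ C_l^{-1} dC_l/dp_i  C_l^{-1} dC_l/dp_j ].
# `covariance`, `derivative` and `fsky` are hypothetical names, not
# part of this script, so the sketch is left commented out:
#
# for i in range(n_values):
#     for j in range(i, n_values):
#         element = 0.
#         for idx in range(lmin_index, lmax_index + 1):
#             ell = dats[idx, 0, 0]
#             C_inv = np.linalg.inv(covariance(idx))   # hypothetical helper
#             dC_i = derivative(idx, i)                # hypothetical helper
#             dC_j = derivative(idx, j)
#             element += (2. * ell + 1.) / 2. * fsky * \
#                 np.trace(C_inv.dot(dC_i).dot(C_inv).dot(dC_j))
#         fisher[i, j] = fisher[j, i] = element
# ---------------------------------------------------------------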