# --- Plot the distribution of delta^2(k) for a single k-bin ----------------
# NOTE(review): reconstructed script fragment (source was whitespace-mangled).
# `plt`, `np`, `data`, `ax_l` and `compute_distribution` are defined
# elsewhere in the file; several locals set here (e.g. `left`, `right`,
# `sigma_r`, `sigma_c`) are presumably consumed further down — do not remove.
plt.subplots_adjust(hspace=0.2, wspace=0.3)

# Unpack precomputed per-k statistics from the `data` dict.
k_val = data['k_val']
delta_mean = data['delta_mean']
delta_sigma = data['delta_sigma']
bin_centers = data['bin_centers']
distribution = data['distribution']
sigma_l = data['sigma_left']
sigma_r = data['sigma_right']
sigma_c = data['sigma_max']
mean_samples = data['mean_samples']
n_independent = data['n_independent']

# Histogram of the group means; sqrt(N) bins is a common bin-count heuristic.
n_groups = len(mean_samples)
n_bins_for_distribution = int(np.sqrt(n_groups))
mean_distribution, mean_bin_centers = compute_distribution(
    mean_samples, n_bins_for_distribution, log=True)
mean_samples_mean = mean_samples.mean()
mean_samples_sigma = mean_samples.std()
mean_samples_sigma_l = mean_samples_mean - mean_samples_sigma
mean_samples_sigma_r = mean_samples_mean + mean_samples_sigma

# Indices where the distribution is non-negligible (> 0.001), used to trim
# the plotted range.
ids = np.where(distribution > 0.001)[0]
left = ids.min()
right = ids.max()

ax = ax_l[0]
ax.plot(bin_centers, distribution)
ax.axvline(x=delta_mean, c='C1')               # sample mean
ax.axvline(x=sigma_l, linestyle='--', c='C1')  # lower edge of the HPI
# --- Per-k statistics of the dimensionless power delta^2 = P(k) * k / pi ---
# NOTE(review): reconstructed script fragment (source was whitespace-mangled).
# `PS_grid`, `k_vals`, `i`, `j`, `n_points`, `vel_max`,
# `compute_distribution` and `get_highest_probability_interval` are defined
# elsewhere in the file; the leading append is the tail of an out-of-view
# loop over (i, j).
delta_all.append(PS_grid[i, j, :] * k_vals / np.pi)
delta_all = np.array(delta_all)

n_bins_for_distribution = 100
fill_sum = 0.70  # probability mass enclosed by the highest-probability interval
ps_stats = {}

for index in range(25):
    k_val = k_vals[index]
    delta_vals = delta_all[:, index]
    delta_mean = delta_vals.mean()
    delta_sigma = delta_vals.std()
    distribution, bin_centers = compute_distribution(
        delta_vals, n_bins_for_distribution, log=True)
    # `interval_sum` is the probability mass actually enclosed by the
    # interval (renamed from `sum`, which shadowed the builtin).
    v_l, v_r, v_max, interval_sum = get_highest_probability_interval(
        bin_centers, distribution, fill_sum, log=True, n_interpolate=1000)

    # Count independent 1D samples: points separated by more than one
    # wavelength 2*pi/k are treated as independent; squared for the 2D grid.
    vel = 2 * np.pi / k_val
    stride = n_points * (vel / vel_max)
    n_steps = int(2048 / stride)
    stride = int(stride)
    # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin `int` instead.
    ids_1d = (np.arange(0, n_steps, 1) * stride).astype(int)
    n_1d = len(ids_1d)
    n_independent = n_1d**2
# --- Flux power spectrum statistics over all skewers -----------------------
# NOTE(review): reconstructed script fragment (source was whitespace-mangled).
# `F_los`, `F_min`, `F_mean_HI`, `vel_Hubble`, `d_log_k`, `flux_ps_all` and
# `get_skewer_flux_power_spectrum` are defined elsewhere in the file; the
# first statements are the tail of an out-of-view per-skewer loop.
F_los[F_los < F_min] = F_min  # floor the transmitted flux at F_min
delta_F = (F_los - F_mean_HI) / F_mean_HI  # fractional flux fluctuation
k_vals, flux_power_spectrum = get_skewer_flux_power_spectrum(
    vel_Hubble, delta_F, d_log_k=d_log_k)
flux_ps_all.append(flux_power_spectrum)

flux_ps_all = np.array(flux_ps_all)
ps_mean = flux_ps_all.mean(axis=0)

fill_sum = 0.67  # probability mass enclosed by the highest-probability interval
sigma_vals, lower_vals, higher_vals, max_vals, mean_vals = [], [], [], [], []
# One statistics pass per k-bin (columns of flux_ps_all).
for ps_slice in flux_ps_all.T:
    v_mean = ps_slice.mean()
    sigma = ps_slice.std()
    distribution, bin_centers = compute_distribution(ps_slice, 40, log=True)
    # NOTE(review): `sum` shadows the builtin here; kept byte-identical in
    # this comments-only pass.
    v_l, v_r, v_max, sum = get_highest_probability_interval(
        bin_centers, distribution, fill_sum, log=False, n_interpolate=500)
    sigma_vals.append(sigma)
    lower_vals.append(v_l)
    higher_vals.append(v_r)
    max_vals.append(v_max)
    mean_vals.append(v_mean)
sigma_vals = np.array(sigma_vals)
lower_vals = np.array(lower_vals)
higher_vals = np.array(higher_vals)
max_vals = np.array(max_vals)
mean_vals = np.array(mean_vals)
n_points = len(vel_Hubble)
def Sample_Power_Spectrum(n_samples, params, data_grid, SG,
                          sampling='gaussian', hpi_sum=0.7):
    """Draw random parameter vectors and propagate them through the P(k)
    interpolator, returning per-redshift sample statistics.

    Parameters
    ----------
    n_samples : int
        Number of random parameter vectors to draw.
    params : dict
        Per-parameter sampling description: 'mean'/'sigma' keys for
        Gaussian sampling, 'min'/'max' keys for uniform sampling.
    data_grid : dict
        Keyed by redshift index; each entry holds 'z' and the P(k) data
        consumed by Interpolate_3D / Interpolate_4D.
    SG : object
        Simulation-grid object forwarded to the interpolators.
    sampling : {'gaussian', 'uniform'}
        Distribution used to draw each parameter.
    hpi_sum : float
        Probability mass enclosed by the highest-probability interval.

    Returns
    -------
    dict
        Keyed by redshift index, with 'z', 'mean', 'sigma', 'k_vals',
        'lower' and 'higher' arrays per entry.
    """
    print('\nSampling Power Spectrum')
    ps_samples = {}
    n_param = len(params.keys())
    n_z_ids = len(data_grid.keys())
    for id_z in range(n_z_ids):
        ps_data = data_grid[id_z]
        ps_samples[id_z] = {}
        ps_samples[id_z]['z'] = ps_data['z']
        samples = []
        for i in range(n_samples):
            # Draw one random point in parameter space.
            p_rand = []
            for p_id in params.keys():
                if sampling == 'gaussian':
                    p_rand.append(np.random.normal(params[p_id]['mean'],
                                                   params[p_id]['sigma']))
                elif sampling == 'uniform':
                    p_min = params[p_id]['min']
                    p_max = params[p_id]['max']
                    p_rand.append(np.random.rand() * (p_max - p_min) + p_min)
            if n_param == 3:
                ps_interp = Interpolate_3D(p_rand[0], p_rand[1], p_rand[2],
                                           ps_data, 'P(k)', 'mean', SG,
                                           clip_params=True)
            elif n_param == 4:
                ps_interp = Interpolate_4D(p_rand[0], p_rand[1], p_rand[2],
                                           p_rand[3], ps_data, 'P(k)', 'mean',
                                           SG, clip_params=True)
            else:
                # Previously an unsupported n_param fell through to a
                # NameError on `ps_interp`; fail with a clear message.
                raise ValueError(f'Unsupported number of parameters: {n_param}')
            samples.append(ps_interp)
        samples = np.array(samples).T  # shape: (n_k_bins, n_samples)

        ps_mean = np.array([ps_vals.mean() for ps_vals in samples])
        ps_sigma = []
        ps_lower, ps_higher = [], []
        for i in range(len(samples)):
            # RMS deviation about the mean (population standard deviation).
            ps_sigma.append(np.sqrt(((samples[i] - ps_mean[i])**2).mean()))
            distribution, bin_centers = compute_distribution(samples[i], 100,
                                                             log=True)
            # `interval_sum` renamed from `sum` to avoid shadowing the builtin.
            v_l, v_r, v_max, interval_sum = get_highest_probability_interval(
                bin_centers, distribution, hpi_sum, log=True,
                n_interpolate=1000)
            ps_lower.append(v_l)
            ps_higher.append(v_r)
        ps_samples[id_z]['mean'] = ps_mean
        ps_samples[id_z]['sigma'] = np.array(ps_sigma)
        # NOTE(review): assumes entry 0 of ps_data carries the shared k-grid
        # — confirm against how data_grid is built.
        ps_samples[id_z]['k_vals'] = ps_data[0]['P(k)']['k_vals']
        ps_samples[id_z]['lower'] = np.array(ps_lower)
        ps_samples[id_z]['higher'] = np.array(ps_higher)
    return ps_samples
def Sample_T0_from_Trace(param_samples, data_grid, SG, hpi_sum=0.7,
                         n_samples=None):
    """Propagate an MCMC parameter trace through the T0 interpolator and
    return sample statistics per redshift.

    Parameters
    ----------
    param_samples : dict
        Keyed by parameter index; each entry holds a 'trace' array of
        posterior samples.
    data_grid : dict
        Grid data consumed by Interpolate_3D / Interpolate_4D; entry 0
        provides the redshift array 'z'.
    SG : object
        Simulation-grid object forwarded to the interpolators.
    hpi_sum : float
        Probability mass enclosed by the highest-probability interval.
    n_samples : int, optional
        Number of trace samples to use; defaults to the full trace length.

    Returns
    -------
    dict
        With 'mean', 'sigma', 'z', 'lower' and 'higher' arrays.
    """
    # Typo fixed in the progress message ('Temperateure').
    print('\nSampling Temperature')
    temp_samples = {}
    param_ids = param_samples.keys()
    n_param = len(param_ids)
    if not n_samples:
        n_samples = len(param_samples[0]['trace'])
    print(f' N Samples: {n_samples}')
    # BUG FIX: `samples = []` was commented out in the original, so the
    # first `samples.append(...)` below raised NameError. The sibling
    # functions (Sample_Power_Spectrum*) initialize it; restored here.
    samples = []
    for i in range(n_samples):
        # Parameter vector for trace sample i, in param_ids order.
        p_vals = [param_samples[p_id]['trace'][i] for p_id in param_ids]
        if n_param == 3:
            temp_interp = Interpolate_3D(p_vals[0], p_vals[1], p_vals[2],
                                         data_grid, 'T0', 'mean', SG,
                                         clip_params=True)
        elif n_param == 4:
            temp_interp = Interpolate_4D(p_vals[0], p_vals[1], p_vals[2],
                                         p_vals[3], data_grid, 'T0', 'mean',
                                         SG, clip_params=True)
        else:
            # Previously an unsupported n_param fell through to a NameError
            # on `temp_interp`; fail with a clear message.
            raise ValueError(f'Unsupported number of parameters: {n_param}')
        samples.append(temp_interp)
    samples = np.array(samples).T  # shape: (n_z, n_samples)

    temp_mean = np.array([temp_vals.mean() for temp_vals in samples])
    temp_sigma = []
    temp_lower, temp_higher = [], []
    for i in range(len(samples)):
        # RMS deviation about the mean (population standard deviation).
        temp_sigma.append(np.sqrt(((samples[i] - temp_mean[i])**2).mean()))
        distribution, bin_centers = compute_distribution(samples[i], 100,
                                                         log=True)
        # `interval_sum` renamed from `sum` to avoid shadowing the builtin.
        v_l, v_r, v_max, interval_sum = get_highest_probability_interval(
            bin_centers, distribution, hpi_sum, log=True, n_interpolate=1000)
        temp_lower.append(v_l)
        temp_higher.append(v_r)
    temp_samples['mean'] = temp_mean
    temp_samples['sigma'] = np.array(temp_sigma)
    temp_samples['z'] = data_grid[0]['z']
    temp_samples['lower'] = np.array(temp_lower)
    temp_samples['higher'] = np.array(temp_higher)
    return temp_samples
def Sample_Power_Spectrum_from_Trace(param_samples, data_grid, SG,
                                     hpi_sum=0.7, n_samples=None):
    """Propagate an MCMC parameter trace through the P(k) interpolator and
    return per-redshift sample statistics.

    Parameters
    ----------
    param_samples : dict
        Keyed by parameter index; each entry holds a 'trace' array of
        posterior samples.
    data_grid : dict
        Keyed by redshift index; each entry holds 'z' and the P(k) data
        consumed by Interpolate_3D / Interpolate_4D.
    SG : object
        Simulation-grid object forwarded to the interpolators.
    hpi_sum : float
        Probability mass enclosed by the highest-probability interval.
    n_samples : int, optional
        Number of trace samples to use; defaults to the full trace length.

    Returns
    -------
    dict
        Keyed by redshift index, with 'z', 'mean', 'sigma', 'k_vals',
        'lower' and 'higher' arrays per entry.
    """
    print('\nSampling Power Spectrum')
    ps_samples = {}
    param_ids = param_samples.keys()
    n_param = len(param_ids)
    if not n_samples:
        n_samples = len(param_samples[0]['trace'])
    print(f' N Samples: {n_samples}')
    n_z_ids = len(data_grid.keys())
    for id_z in range(n_z_ids):
        ps_data = data_grid[id_z]
        ps_samples[id_z] = {}
        ps_samples[id_z]['z'] = ps_data['z']
        samples = []
        for i in range(n_samples):
            # Parameter vector for trace sample i, in param_ids order.
            p_vals = [param_samples[p_id]['trace'][i] for p_id in param_ids]
            if n_param == 3:
                ps_interp = Interpolate_3D(p_vals[0], p_vals[1], p_vals[2],
                                           ps_data, 'P(k)', 'mean', SG,
                                           clip_params=True)
            elif n_param == 4:
                ps_interp = Interpolate_4D(p_vals[0], p_vals[1], p_vals[2],
                                           p_vals[3], ps_data, 'P(k)', 'mean',
                                           SG, clip_params=True)
            else:
                # Previously an unsupported n_param fell through to a
                # NameError on `ps_interp`; fail with a clear message.
                raise ValueError(f'Unsupported number of parameters: {n_param}')
            samples.append(ps_interp)
        samples = np.array(samples).T  # shape: (n_k_bins, n_samples)

        ps_mean = np.array([ps_vals.mean() for ps_vals in samples])
        ps_sigma = []
        ps_lower, ps_higher = [], []
        for i in range(len(samples)):
            # RMS deviation about the mean (population standard deviation).
            ps_sigma.append(np.sqrt(((samples[i] - ps_mean[i])**2).mean()))
            distribution, bin_centers = compute_distribution(samples[i], 100,
                                                             log=True)
            # `interval_sum` renamed from `sum` to avoid shadowing the builtin.
            v_l, v_r, v_max, interval_sum = get_highest_probability_interval(
                bin_centers, distribution, hpi_sum, log=True,
                n_interpolate=1000)
            ps_lower.append(v_l)
            ps_higher.append(v_r)
        ps_samples[id_z]['mean'] = ps_mean
        ps_samples[id_z]['sigma'] = np.array(ps_sigma)
        # NOTE(review): assumes entry 0 of ps_data carries the shared k-grid
        # — confirm against how data_grid is built.
        ps_samples[id_z]['k_vals'] = ps_data[0]['P(k)']['k_vals']
        ps_samples[id_z]['lower'] = np.array(ps_lower)
        ps_samples[id_z]['higher'] = np.array(ps_higher)
    return ps_samples