# Fraction of probability mass enclosed by the highest-probability interval
# when summarizing the per-k delta distributions below.
fill_sum = 0.70
ps_stats = {}
# index = 6
for index in range(25):
    k_val = k_vals[index]
    delta_vals = delta_all[:, index]
    delta_mean = delta_vals.mean()
    delta_sigma = delta_vals.std()
    distribution, bin_centers = compute_distribution(
        delta_vals, n_bins_for_distribution, log=True)
    # NOTE(review): `sum` shadows the builtin; kept because later (unseen)
    # script lines may read it — rename once the full file is visible.
    v_l, v_r, v_max, sum = get_highest_probability_interval(
        bin_centers, distribution, fill_sum, log=True, n_interpolate=1000)
    # Wavelength associated with this k mode, then the sampling stride that
    # keeps points roughly one wavelength apart.
    vel = 2 * np.pi / k_val
    stride = n_points * (vel / vel_max)
    n_steps = int(2048 / stride)
    stride = int(stride)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int gives the same platform-default integer dtype.
    ids_1d = (np.arange(0, n_steps, 1) * stride).astype(int)
    n_1d = len(ids_1d)
    n_independent = n_1d**2
    n_groups = stride**2
    print(
        f'\nk_index: {index}, stride: {stride} N_independent: {n_independent}, N_groups: {n_groups}'
    )
def Sample_T0_from_Trace(param_samples, data_grid, SG, hpi_sum=0.7, n_samples=None):
    """Sample the T0 values implied by an MCMC parameter trace.

    For each trace sample, interpolates T0 on the simulation grid, then
    summarizes the resulting distribution at every redshift: mean, rms sigma,
    and the highest-probability interval enclosing `hpi_sum` of the mass.

    Parameters
    ----------
    param_samples : dict
        Maps parameter id -> {'trace': sequence} of posterior samples.
        Entry 0 is used to determine the default number of samples.
    data_grid : dict
        Simulation grid; entry 0 must provide the redshift array under 'z'.
        Forwarded to Interpolate_3D / Interpolate_4D.
    SG : object
        Simulation-grid object forwarded to the interpolators.
    hpi_sum : float, optional
        Probability mass enclosed by the highest-probability interval.
    n_samples : int or None, optional
        Number of trace samples to use; defaults to the full trace length.

    Returns
    -------
    dict
        Keys 'mean', 'sigma', 'z', 'lower', 'higher' (arrays over redshift).
    """
    # BUG FIX: log message previously misspelled as 'Temperateure'.
    print('\nSampling Temperature')
    temp_samples = {}
    param_ids = param_samples.keys()
    n_param = len(param_ids)
    if not n_samples:
        n_samples = len(param_samples[0]['trace'])
    print(f' N Samples: {n_samples}')
    # BUG FIX: `samples = []` was commented out in the original, so the
    # .append below raised NameError on the first iteration.
    samples = []
    for i in range(n_samples):
        p_vals = [param_samples[p_id]['trace'][i] for p_id in param_ids]
        if n_param == 3:
            temp_interp = Interpolate_3D(p_vals[0], p_vals[1], p_vals[2],
                                         data_grid, 'T0', 'mean', SG,
                                         clip_params=True)
        if n_param == 4:
            temp_interp = Interpolate_4D(p_vals[0], p_vals[1], p_vals[2],
                                         p_vals[3], data_grid, 'T0', 'mean',
                                         SG, clip_params=True)
        samples.append(temp_interp)
    # Transpose so each row holds all samples for one redshift.
    samples = np.array(samples).T
    temp_mean = np.array([temp_vals.mean() for temp_vals in samples])
    temp_sigma = []
    temp_lower, temp_higher = [], []
    for i in range(len(samples)):
        # rms deviation about the sample mean at this redshift
        temp_sigma.append(np.sqrt(((samples[i] - temp_mean[i])**2).mean()))
        values = samples[i]
        n_bins = 100
        distribution, bin_centers = compute_distribution(values, n_bins,
                                                         log=True)
        # `_mass` renamed from `sum` to avoid shadowing the builtin.
        v_l, v_r, v_max, _mass = get_highest_probability_interval(
            bin_centers, distribution, hpi_sum, log=True, n_interpolate=1000)
        temp_lower.append(v_l)
        temp_higher.append(v_r)
    temp_samples['mean'] = temp_mean
    temp_samples['sigma'] = np.array(temp_sigma)
    temp_samples['z'] = data_grid[0]['z']
    temp_samples['lower'] = np.array(temp_lower)
    temp_samples['higher'] = np.array(temp_higher)
    return temp_samples
def Sample_Power_Spectrum(n_samples, params, data_grid, SG, sampling='gaussian', hpi_sum=0.7):
    """Monte-Carlo sample the power spectrum over parameter uncertainties.

    For every redshift entry in `data_grid`, draws `n_samples` random
    parameter vectors (per `sampling`: 'gaussian' uses mean/sigma, 'uniform'
    uses min/max), interpolates P(k) at each draw, and summarizes the per-k
    distribution: mean, rms sigma, and the highest-probability interval
    enclosing `hpi_sum` of the probability mass.

    Returns a dict keyed by redshift index with 'z', 'mean', 'sigma',
    'k_vals', 'lower', 'higher'.
    """
    print(f'\nSampling Power Spectrum')
    ps_samples = {}
    n_param = len(params.keys())
    for id_z in range(len(data_grid.keys())):
        ps_data = data_grid[id_z]
        out = {'z': ps_data['z']}
        ps_samples[id_z] = out
        drawn = []
        for _ in range(n_samples):
            p_rand = []
            for p_id in params.keys():
                if sampling == 'gaussian':
                    p_rand.append(np.random.normal(params[p_id]['mean'],
                                                   params[p_id]['sigma']))
                if sampling == 'uniform':
                    p_min = params[p_id]['min']
                    p_max = params[p_id]['max']
                    p_rand.append(np.random.rand() * (p_max - p_min) + p_min)
            if n_param == 3:
                ps_interp = Interpolate_3D(p_rand[0], p_rand[1], p_rand[2],
                                           ps_data, 'P(k)', 'mean', SG,
                                           clip_params=True)
            if n_param == 4:
                ps_interp = Interpolate_4D(p_rand[0], p_rand[1], p_rand[2],
                                           p_rand[3], ps_data, 'P(k)', 'mean',
                                           SG, clip_params=True)
            drawn.append(ps_interp)
        # Rows now index k-bins, columns index draws.
        per_k = np.array(drawn).T
        ps_mean = np.array([row.mean() for row in per_k])
        sigmas, lowers, highers = [], [], []
        for i, row in enumerate(per_k):
            # rms deviation about the mean for this k-bin
            sigmas.append(np.sqrt(((row - ps_mean[i])**2).mean()))
            dist, centers = compute_distribution(row, 100, log=True)
            v_l, v_r, v_max, mass = get_highest_probability_interval(
                centers, dist, hpi_sum, log=True, n_interpolate=1000)
            lowers.append(v_l)
            highers.append(v_r)
        out['mean'] = ps_mean
        out['sigma'] = np.array(sigmas)
        # NOTE(review): ps_data[0]['P(k)'] while ps_data['z'] is read above —
        # presumably a nested {sim_id: {'P(k)': ...}} layout; confirm upstream.
        out['k_vals'] = ps_data[0]['P(k)']['k_vals']
        out['lower'] = np.array(lowers)
        out['higher'] = np.array(highers)
    return ps_samples
def Sample_Power_Spectrum_from_Trace(param_samples, data_grid, SG, hpi_sum=0.7, n_samples=None):
    """Sample the power spectrum implied by an MCMC parameter trace.

    For every redshift entry in `data_grid`, evaluates P(k) at each trace
    sample and summarizes the per-k distribution: mean, rms sigma, and the
    highest-probability interval enclosing `hpi_sum` of the probability mass.

    `n_samples` defaults to the full length of the trace stored under
    parameter id 0. Returns a dict keyed by redshift index with 'z', 'mean',
    'sigma', 'k_vals', 'lower', 'higher'.
    """
    print(f'\nSampling Power Spectrum')
    ps_samples = {}
    param_ids = param_samples.keys()
    n_param = len(param_ids)
    if not n_samples:
        n_samples = len(param_samples[0]['trace'])
    print(f' N Samples: {n_samples}')
    for id_z in range(len(data_grid.keys())):
        ps_data = data_grid[id_z]
        out = {'z': ps_data['z']}
        ps_samples[id_z] = out
        drawn = []
        for i in range(n_samples):
            p_vals = [param_samples[p_id]['trace'][i] for p_id in param_ids]
            if n_param == 3:
                ps_interp = Interpolate_3D(p_vals[0], p_vals[1], p_vals[2],
                                           ps_data, 'P(k)', 'mean', SG,
                                           clip_params=True)
            if n_param == 4:
                ps_interp = Interpolate_4D(p_vals[0], p_vals[1], p_vals[2],
                                           p_vals[3], ps_data, 'P(k)', 'mean',
                                           SG, clip_params=True)
            drawn.append(ps_interp)
        # Rows now index k-bins, columns index trace samples.
        per_k = np.array(drawn).T
        ps_mean = np.array([row.mean() for row in per_k])
        sigmas, lowers, highers = [], [], []
        for i, row in enumerate(per_k):
            # rms deviation about the mean for this k-bin
            sigmas.append(np.sqrt(((row - ps_mean[i])**2).mean()))
            dist, centers = compute_distribution(row, 100, log=True)
            v_l, v_r, v_max, mass = get_highest_probability_interval(
                centers, dist, hpi_sum, log=True, n_interpolate=1000)
            lowers.append(v_l)
            highers.append(v_r)
        out['mean'] = ps_mean
        out['sigma'] = np.array(sigmas)
        # NOTE(review): ps_data[0]['P(k)'] while ps_data['z'] is read above —
        # presumably a nested {sim_id: {'P(k)': ...}} layout; confirm upstream.
        out['k_vals'] = ps_data[0]['P(k)']['k_vals']
        out['lower'] = np.array(lowers)
        out['higher'] = np.array(highers)
    return ps_samples