def get_frbpoppy_data():
    """Get frbpoppy data.

    Returns:
        dict: Nested dict with 'r' (repeater) and 'o' (one-off) entries,
            each holding 'dm' and 'snr' arrays.
    """
    surv_pop = unpickle('cosmic_chime')

    # Split population into seemingly one-off and repeater populations:
    # sources with more than one non-NaN burst time count as repeaters
    n_bursts = (~np.isnan(surv_pop.frbs.time)).sum(1)
    pop_ngt1, pop_nle1 = split_pop(surv_pop, n_bursts > 1)
    pop_ngt1.name += ' (> 1 burst)'
    pop_nle1.name += ' (1 burst)'

    # Limit both populations to bursts above their S/N limits
    pop_ngt1.frbs.apply(pop_ngt1.frbs.snr > SNR_LIMIT_REPS)
    pop_nle1.frbs.apply(pop_nle1.frbs.snr > SNR_LIMIT_ONE_OFFS)

    print(f'{surv_pop.n_repeaters()} repeaters')
    print(f'{surv_pop.n_one_offs()} one-offs')

    frbpop = {'r': {}, 'o': {}}
    for t, pop in zip(('r', 'o'), (pop_ngt1, pop_nle1)):
        frbpop[t]['dm'] = pop.frbs.dm
        # Take only the first snr
        frbpop[t]['snr'] = pop.frbs.snr[:, 0]

    return frbpop
def get_data():
    """Get survey populations.

    Returns:
        list: Survey populations, either unpickled from disk (when REMAKE
            is False) or freshly generated via LargePopulation.
    """
    # Reuse previously generated populations unless a remake is requested
    if REMAKE is False:
        surv_pops = []
        for telescope in TELESCOPES:
            # The askap population was surveyed in fly's-eye mode
            name = 'askap-fly' if telescope == 'askap' else telescope
            surv_pops.append(unpickle(paths.populations() + f'complex_{name}.p'))
        return surv_pops

    # Otherwise build a cosmic population and survey it with each telescope
    cosmic_pop = CosmicPopulation.complex(SIZE, generate=False)
    surveys = []
    for telescope in TELESCOPES:
        # Parkes has its own beam pattern; all others use an airy disk
        pattern = telescope if telescope == 'parkes' else 'airy'
        s = 'askap-fly' if telescope == 'askap' else telescope
        surveys.append(Survey(s, gain_pattern=pattern, n_sidelobes=1))
    return LargePopulation(cosmic_pop, *surveys).pops
def get_data(self): """Read in populations.""" # Read in files for f in self.files: # Check whether file exists if os.path.isfile(f): try: df = unpickle(f).frbs.to_df() except ValueError: pprint(f'Unpacking {f} seemed to have failed.') continue if '.' in f: name = '.'.join(f.split('/')[-1].split('.')[:-1]) if '_for_plotting' in name: name = name.split('_for_plotting')[0] if len(name) > 15: name = name.split('_')[-1] else: name = f # If things haven't worked if df is None: m = 'Skipping population {} - contains no sources'.format(f) pprint(m) continue # Downsample population size if it's too large if df.shape[0] > 10000: pprint(f'Downsampling population {f} (else too big to plot)') df = df.sample(n=10000) df['color'] = self.colours[self.n_df] df['lum_bol'] = df['lum_bol'] / 1e30 # Sidestepping Bokeh issue if df.empty: m = 'Skipping population {} - contains no sources'.format(f) pprint(m) continue else: self.dfs.append(df) self.labels.append(name) self.n_df += 1 # Add on tns if self.tns: df = TNS(frbpoppy=True).df # Filter by survey if wished if isinstance(self.tns, str): if not df[df.survey == self.tns].empty: df = df[df.survey == self.tns] elif not df[df.telescope == self.tns].empty: df = df[df.telescope == self.tns] else: m = 'Your chosen input for tns is not found.' raise ValueError(m) df['color'] = self.colours[len(self.dfs)] self.dfs.append(df) self.labels.append(f'tns {self.tns}')
def get_pops(alpha='*', li='*', si='*', survey='*', get_range=False):
    """Load complex populations whose filenames match the given globs.

    Args:
        alpha: Value (or '*' wildcard) for the alpha part of the filename.
        li: Value (or '*') for the luminosity index part of the filename.
        si: Value (or '*') for the spectral index part of the filename.
        survey: Survey name (or '*') part of the filename.
        get_range (bool): Unused; kept for backward compatibility.

    Returns:
        list: Unpickled populations, excluding '_for_plotting' files.
    """
    filename = f'complex_alpha_{alpha}_lum_{li}_si_{si}_{survey}.p'
    # Renamed from `filter` to avoid shadowing the builtin of that name
    pattern = os.path.join(paths.populations(), filename)
    pop_paths = glob(pattern)
    pops = []
    for path in pop_paths:
        if '_for_plotting' not in path:
            pops.append(unpickle(path))
    return pops
def complex_rates(remake=REMAKE, alphas=ALPHAS, size=SIZE, surveys=SURVEYS):
    """Calculate expected rates for a complex populations.

    Args:
        remake (bool): Whether to regenerate populations or unpickle them.
        alphas (iterable): Source count slopes to simulate.
        size (float): Base number of sources per cosmic population.
        surveys (list): Survey names to observe with.

    Returns:
        dict: Detection rates per survey, normalised so the first survey
            in the list is 1.

    NOTE(review): reconstructed from whitespace-collapsed source; the
    survey setup and LargePopulation call are assumed to sit inside the
    alpha loop (one observation per alpha) - confirm against the original.
    """
    rates = defaultdict(list)

    # Don't always regenerate a population
    if remake is False:
        for alpha in alphas:
            for s in surveys:
                surv_rates = unpickle(f'complex_alpha_{alpha}_{s}').source_rate
                pprint(f'Alpha:{alpha:.2}, Survey: {s}, Det: {surv_rates.det}')
                rate = (surv_rates.det / surv_rates.days)
                rates[s].append(rate)
    else:
        pops = []
        for alpha in alphas:
            # Steeper slopes need larger populations to get enough detections
            if alpha <= -1.0 and ADAPTATIVE_SCALING:
                size = 1e7
            if alpha <= -1.5 and ADAPTATIVE_SCALING:
                size = 1e8
            pop = CosmicPopulation.complex(size)
            # Planck-like cosmology with a comoving-volume distance model
            pop.set_dist(model='vol_co', z_max=2.5, alpha=alpha,
                         H_0=67.74, W_m=0.3089, W_v=0.6911)
            pop.set_lum(model='powerlaw', low=1e40, high=1e45, power=-1)
            pop.name = f'complex_alpha_{alpha}'
            pops.append(pop)

            # Set up surveys
            ss = []
            for s in surveys:
                survey = Survey(name=s)
                survey.set_beam(model='airy', n_sidelobes=1)
                ss.append(survey)

            surv_pops = LargePopulation(pop, *ss).pops
            for i, s in enumerate(surveys):
                surv_rates = surv_pops[i].source_rate
                pprint(f'Alpha:{alpha:.2}, Survey: {s}, Det: {surv_rates.det}')
                rate = (surv_rates.det / surv_rates.days)
                rates[s].append(rate)

    # Scale rates to first survey in list
    for s in surveys:
        if s != surveys[0]:
            norm = []
            for i, r in enumerate(rates[s]):
                norm.append(r / rates[surveys[0]][i])
            rates[s] = norm
    rates[surveys[0]] = [r / r for r in rates[surveys[0]]]

    return rates
def calc_gofs(self, run):
    """Calculate goodness-of-fit values for all simulations in a run.

    Loads the simulation overview, computes DM and S/N goodness-of-fit
    values plus a rate-based weight for every simulated survey population
    in the run, then saves the updated overview.

    Args:
        run (int): Which run to calculate goodness of fit values for.
    """
    # For each requested run
    self.so = SimulationOverview()
    par_set = self.so.df[self.so.df.run == run].par_set.iloc[0]
    pprint(f'Calculating goodness of fit for run {run}, par set {par_set}')
    pars = self.run_pars[par_set]
    values = []
    # Loop through all combination of parameters
    for values, group in self.so.df[self.so.df.run == run].groupby(pars):
        pprint(f' - {list(zip(pars, values))}')
        # Calculate goodness of fit values for each simulation
        for row_ix, row in group.iterrows():
            survey_name = row.survey
            uuid = row.uuid
            pop = unpickle(f'mc/run_{run}/{uuid}')
            # Apply a DM cutoff
            mask = (pop.frbs.dm <= 950)
            pop.frbs.apply(mask)
            # Re-derive the detection count after the cutoff
            pop.source_rate.det = pop.n_sources() * pop.source_rate.f_area
            dm_gof = self.dm(pop, survey_name)
            snr_gof = self.snr(pop, survey_name)
            self.so.df.at[row_ix, 'dm_gof'] = dm_gof
            self.so.df.at[row_ix, 'snr_gof'] = snr_gof
            # Empty populations get zero weight and are skipped
            if pop.n_sources() == 0:
                self.so.df.at[row_ix, 'weight'] = 0
                self.so.df.at[row_ix, 'n_det'] = pop.n_sources()
                pprint(f' - No sources in {survey_name}')
                continue
            # Find corresponding rate normalisation population uuid
            norm_mask = dict(zip(pars, values))
            norm_mask['survey'] = self.norm_surv
            norm_mask['run'] = run
            k = norm_mask.keys()
            v = norm_mask.values()
            norm_uuid = group.loc[group[k].isin(v).all(axis=1), :].uuid
            norm_uuid = norm_uuid.values[0]
            rate_diff, n_det = self.rate(pop, survey_name, norm_uuid, run)
            # Get rate weighting
            self.so.df.at[row_ix, 'weight'] = rate_diff
            self.so.df.at[row_ix, 'n_det'] = n_det
    pprint(f'Saving the results for run {run}')
    # Best matching in terms of rates
    max_w = np.nanmax(self.so.df.weight)
    # BUG FIX: the original chained indexing
    # `df.loc[mask]['weight'] = max_w` assigns to a temporary copy
    # (pandas SettingWithCopy) and never updates the frame; a single
    # .loc[mask, 'weight'] call writes through to the frame itself.
    # NOTE(review): rate() returns 1e-3 (not 1e3) when the rate
    # difference is zero - confirm which sentinel value is intended.
    self.so.df.loc[self.so.df.weight == 1e3, 'weight'] = max_w
    self.so.save()
def get_data(self):
    """Read in populations.

    Unpickles every file in self.files into a DataFrame, attaches a
    population label and plotting colour, and appends it to self.dfs.
    When self.frbcat is set, frbcat data (optionally filtered by survey
    or telescope) is appended as a final population.

    NOTE(review): reconstructed from whitespace-collapsed source; the
    nesting under the isfile check should be confirmed against the
    original file.
    """
    # Read in files
    for f in self.files:
        # Check whether file exists
        if os.path.isfile(f):
            try:
                df = unpickle(f).frbs.to_df()
            except ValueError:
                continue
            # Derive a label from the file name (basename without extension)
            if '.' in f:
                name = f.split('/')[-1].split('.')[0]
            else:
                name = f
            # IDIOM FIX: the original used "if df is not None: pass /
            # else: ... continue"; inverted to a direct None check.
            if df is None:
                m = 'Skipping population {} - contains no sources'.format(f)
                pprint(m)
                continue
            # Downsample population size if it's too large
            if df.shape[0] > 10000:
                df = df.iloc[::1000]
            df['population'] = name
            df['color'] = self.colours[self.n_df]
            df['lum_bol'] = df['lum_bol'] / 1e30  # Sidestepping Bokeh issue
            self.dfs.append(df)
            self.n_df += 1
    # Add on frbcat
    if self.frbcat:
        df = Frbcat().df
        # Filter by survey if wished
        if isinstance(self.frbcat, str):
            if df['survey'].str.match(self.frbcat).any():
                df = df[df.survey == self.frbcat]
            elif df['telescope'].str.match(self.frbcat).any():
                df = df[df.telescope == self.frbcat]
            else:
                m = 'Your chosen input for frbcat is not found.'
                raise ValueError(m)
        df['population'] = f'frbcat {self.frbcat}'
        df['color'] = self.colours[len(self.dfs)]
        self.dfs.append(df)
def simple_rates(remake=REMAKE, alphas=ALPHAS, size=SIZE, surveys=SURVEYS):
    """Calculate expected rates for a simple populations.

    Args:
        remake (bool): Whether to regenerate populations or unpickle them.
        alphas (iterable): Source count slopes to simulate.
        size (float): Number of sources per cosmic population.
        surveys (list): Survey names to observe with.

    Returns:
        dict: Detection rates per survey, normalised so 'htru' is 1.

    NOTE(review): reconstructed from whitespace-collapsed source; the
    survey setup and LargePopulation call are assumed to sit inside the
    alpha loop (one observation per alpha) - confirm against the original.
    """
    rates = defaultdict(list)

    # Don't always regenerate a population
    if remake is False:
        for alpha in alphas:
            for s in surveys:
                surv_rates = unpickle(f'simple_alpha_{alpha}_{s}').rates()
                pprint(f'Alpha:{alpha:.2}, Survey: {s}, Det: {surv_rates.det}')
                rate = (surv_rates.det / surv_rates.days)
                rates[s].append(rate)
    else:
        pops = []
        for alpha in alphas:
            pop = CosmicPopulation.simple(size)
            pop.alpha = alpha
            pop.name = f'simple_alpha_{alpha}'
            pops.append(pop)

            # Set up surveys
            ss = []
            for s in surveys:
                survey = Survey(name=s, gain_pattern='perfect',
                                n_sidelobes=0.5)
                ss.append(survey)

            surv_pops = LargePopulation(pop, *ss).pops
            for i, s in enumerate(surveys):
                surv_rates = surv_pops[i].rates()
                pprint(f'Alpha:{alpha:.2}, Survey: {s}, Det: {surv_rates.det}')
                rate = (surv_rates.det / surv_rates.days)
                rates[s].append(rate)

    # Scale rates to HTRU
    for s in surveys:
        if s != 'htru':
            norm = []
            for i, r in enumerate(rates[s]):
                norm.append(r / rates['htru'][i])
            rates[s] = norm
    rates['htru'] = [r / r for r in rates['htru']]

    return rates
def get_survey_pop(pop, survey, overwrite=False):
    """Quickly get survey populations.

    Args:
        pop (CosmicPopulation): Population to survey. May also be a
            string, in which case a previously saved survey population
            of that name is loaded instead of surveying.
        survey (Survey): Survey to use
        overwrite (bool): Check whether a population has already been
            run. If overwrite is true, it will always make a new
            instance.

    Returns:
        pop: Desired population.

    Raises:
        ValueError: If pop is a string but no saved population can be
            loaded (file missing, or overwrite requested).
    """
    # REFACTOR: the original kept an `observe` flag that was set to False
    # immediately before an unconditional return, making the later
    # `if observe:` always true; the dead flag has been removed.
    if isinstance(pop, str):
        # A name was given rather than a population - look for a saved file
        name = f'{pop}_{survey.name}'
        path = paths.populations() + name + '.p'
        if not overwrite and os.path.isfile(path):
            return unpickle(path)
        # A plain name cannot be surveyed, so there is nothing left to do
        m = f'No survey population at {path}, yet no surveying requested'
        raise ValueError(m)

    # Survey the cosmic population and save the result for reuse
    surv_pop = SurveyPopulation(pop, survey)
    surv_pop.name = f'{pop.name}_{survey.name}'
    surv_pop.save()
    return surv_pop
def rate(self, pop, survey_name, norm_uuid, run, errs=False):
    """Weight a simulation by how well its detection-rate ratio matches
    the observed one.

    Args:
        pop: Surveyed population to evaluate.
        survey_name (str): Survey the population was observed with.
        norm_uuid (str): UUID of the rate-normalisation population.
        run (int): Run number (used to locate the normalisation pickle).
        errs (bool): Whether to also compute Poisson error bounds.

    Returns:
        tuple: (rate weight, number of detected sources).
    """
    # Simulated detection rate for this survey
    src_rate = pop.source_rate
    sim_rate = src_rate.det / src_rate.days

    # Perhaps use at some stage
    if errs:
        interval = poisson_interval(src_rate.det, sigma=1)
        sim_rate_errs = [bound / src_rate.days for bound in interval]

    # Real-world detection rate for this survey (NaN if unknown)
    real_n_frbs, real_n_days = EXPECTED.get(survey_name, [np.nan, np.nan])
    real_rate = real_n_frbs / real_n_days

    # Normalisation survey: real and simulated rates
    norm_real_frbs, norm_real_days = EXPECTED[self.norm_surv]
    norm_pop = unpickle(f'mc/run_{run}/{norm_uuid}')
    norm_days = norm_pop.source_rate.days
    norm_sim_rate = norm_pop.source_rate.det / norm_days
    norm_real_rate = norm_real_frbs / norm_real_days
    # Guard against a zero simulated normalisation rate
    if norm_sim_rate == 0:
        norm_sim_rate = POP_SIZE / norm_days

    # Compare simulated vs real rate ratios; closer match -> larger weight
    sim_ratio = sim_rate / norm_sim_rate
    real_ratio = real_rate / norm_real_rate
    diff = np.abs(sim_ratio - real_ratio)
    # NOTE(review): calc_gofs later replaces weights equal to 1e3 with the
    # maximum weight - confirm whether this sentinel should be 1e3
    # rather than 1e-3.
    rate_diff = 1e-3 if diff == 0 else 1 / diff

    return rate_diff, pop.n_sources()
def get_cosmic_pop(sort, size, load=True, overwrite=False, alpha=None,
                   gamma=None):
    """Quickly get standard populations.

    Args:
        sort (str): Which type of population, standard, std_candle etc.
        size (str): Choice of 'small', 'medium' or 'large'
        load (bool): Whether to load in a population
        overwrite (bool): Check whether a population has already been run.
            If overwrite is true, it will always make a new instance.
        alpha: Passed through to StandardCosmicPops.
        gamma: Passed through to StandardCosmicPops.

    Returns:
        pop: Desired population, or just its name when load is False.
    """
    pop = StandardCosmicPops(sort, size, alpha=alpha, gamma=gamma)

    # Skip loading a population if you don't have to
    if not load:
        return pop.name

    # Go for an earlier version if available
    if not overwrite and os.path.isfile(pop.path):
        return unpickle(pop.path)

    # Else generate a population of the requested sort
    generators = {
        'standard': pop.standard_pop,  # also known as a complex population
        'standard_candle': pop.standard_candle_pop,
        'alpha': pop.alpha_pop,
        'gamma': pop.gamma_pop,
        'alpha_simple': pop.alpha_simple_pop,
    }
    generator = generators.get(pop.sort)
    # Unknown sorts fall through and return None, as in the original
    if generator is not None:
        return generator()
n_model='sfr', name='sfr') # Generate population following stellar mass density pop_smd = CosmicPopulation(n_per_day * days, days=days, z_max=3., n_model='smd', name='smd') pop_cst.save() pop_sfr.save() pop_smd.save() else: pop_cst = unpickle('vol_co') pop_sfr = unpickle('sfr') pop_smd = unpickle('smd') fig = plt.figure() ax = fig.add_subplot(111) # Get redshift of population zs = {} zs['sfr'] = pop_sfr.frbs.z zs['smd'] = pop_smd.frbs.z zs['vol_co'] = pop_cst.frbs.z n_sfr, bins = np.histogram(zs['sfr'], bins=100, density=False) n_smd, bins = np.histogram(zs['smd'], bins=100, density=False) n_constant, bins = np.histogram(zs['vol_co'], bins=100, density=False)
else: pop[n] = copy.deepcopy(pop[35]) n_gen = len(pop[35].frbs.lum_bol) pop[n].frbs.lum_bol = dis.powerlaw(10**n, 10**n, 0, n_gen) pop[n].name = f'sc-{n}' pop[n].save() pop_obs = {} if OBSERVE: for n in (35, 36, 37): if not CREATE: pop[n] = unpickle(f'sc-{n}') # Create Survey perfect = Survey('perfect-small', gain_pattern='gaussian', n_sidelobes=8) # Observe populations pop_obs[n] = SurveyPopulation(pop[n], perfect) pop_obs[n].name = f'sc-{n}-obs' pop_obs[n].rates() pop_obs[n].save() else: for n in (35, 36, 37): pop_obs[n] = unpickle(f'sc-{n}-obs')
pulse_model='uniform', pulse_range=[1., 1.], pulse_mu=1., pulse_sigma=0., repeat=0., si_mu=0., si_sigma=0., z_max=2.5) pop.save() pop_obs = {} if OBSERVE: if not CREATE: pop = unpickle(f'simple') for pattern in BEAMPATTERNS: # Create Survey survey = Survey('perfect-small', gain_pattern=pattern, n_sidelobes=0) # Observe populations pop_obs[pattern] = SurveyPopulation(pop, survey) pop_obs[pattern].name = f'obs-{pattern}' pop_obs[pattern].rates() pop_obs[pattern].save() else: for p in BEAMPATTERNS: pop_obs[p] = unpickle(f'obs-{p}')
"""Examples of saving populations.""" from frbpoppy import CosmicPopulation, unpickle # Set up an FRB population pop = CosmicPopulation.simple(1e4, generate=True) # Set filename file_name = 'saving_example' # Exporting a population as a pickled object pop.to_pickle(f'./{file_name}.p') # Unpickling a population copy_of_pop = unpickle(f'./{file_name}.p') # Exporting frb information to csv pop.frbs.to_csv(f'./{file_name}.csv') # Exporting frb information to a Pandas DataFrame df = pop.frbs.to_df() # Alternatively save in '/data/results' as a pickled file pop.name = file_name pop.save()
rate = lognormal(RATE, 1, int(N_SRCS)) r.set_time(model='poisson', rate=rate) # Only generate FRBs in CHIME's survey region r.set_direction(model='uniform', min_ra=chime.ra_min, max_ra=chime.ra_max, min_dec=chime.dec_min, max_dec=chime.dec_max) r.generate() surv_pop = SurveyPopulation(r, chime) surv_pop.save() else: surv_pop = unpickle('cosmic_chime') # Limit population to above S/N >= 10 mask = (surv_pop.frbs.snr >= 10) surv_pop.frbs.apply(mask) # Set up plot style plot_aa_style(cols=1) f, ax1 = plt.subplots(1, 1) # See how fraction changes over time n_pointings = chime.n_pointings days = np.linspace(0, N_DAYS, (N_DAYS * n_pointings) + 1) fracs = [] for day in tqdm(days, desc="frbpoppy"):
'ska1-mid') if MAKE: surv_pops = [] pop = CosmicPopulation.complex(1e5, generate=False) pop.generate() for name in SURVEYS: survey = Survey(name) surv_pop = SurveyPopulation(pop, survey) surv_pop.save() surv_pops.append(surv_pop) else: surv_pops = [] for name in SURVEYS: surv_pops.append(unpickle(f'complex_{name}')) # Start plot plot_aa_style() fig, ax1 = plt.subplots(1, 1) # Fluence plot ax1.set_xlabel('S/N') ax1.set_xscale('log') ax1.set_ylabel(r'\#(${>}\text{S/N}$)') ax1.set_yscale('log') # Update fluence plot for i, surv_pop in enumerate(surv_pops): name = surv_pop.name.split('_')[-1] snr = surv_pop.frbs.snr
pop.gen_w() pop.gen_lum() pop.gen_si() else: pop.generate() surv_pop = SurveyPopulation(pop, survey, scale_by_area=False) surv_pop.source_rate.f_area = surv_f_area[name] surv_pop.source_rate.scale_by_area() # surv_pop.save() surv_pops.append(surv_pop) else: surv_pops = [] for name in SURVEYS: surv_pops.append(unpickle(f'optimal_{name}')) # Start plot plot_aa_style(cols=2) plt.rcParams["figure.figsize"] = (3.556 * 3, 3.556) fig, axes = plt.subplots(1, 3) for ax in axes.flatten(): ax.set_aspect('auto') # Get norm pop y = 0 ys = [] names = [] rates = [] norm_sim_rate = surv_pops[0].source_rate.det
pop.set_dist(model='vol_co', z_max=0.01) pop.set_lum(model='constant', value=1e36) pop.set_dm(mw=False, igm=True, host=True) pop.set_w(model='lognormal', mean=0.1, std=0.7) pop.name = 'local' pop.generate() for name in SURVEYS: survey = Survey(name) surv_pop = SurveyPopulation(pop, survey) surv_pop.save() surv_pops.append(surv_pop) else: surv_pops = [] for name in SURVEYS: surv_pops.append(unpickle(f'local_{name}')) # Start plot plot_aa_style() fig, ax1 = plt.subplots(1, 1) # Fluence plot ax1.set_xlabel('S/N') ax1.set_xscale('log') ax1.set_ylabel(r'\#(${>}\text{S/N}$)') ax1.set_yscale('log') # ax1.set_xlim(9, 1e5) # ax1.set_ylim(1e-3, 1e3) # Update fluence plot for i, surv_pop in enumerate(surv_pops):
pop[si].save() else: pop[si] = copy.deepcopy(pop[min(SIS)]) pop[si].frbs.si = np.random.normal(si, 0, n_tot) pop[si].name = f'si-{si}' pop[si].save() pop_obs = {} if OBSERVE or CREATE: for si in SIS: if not CREATE: pop[si] = unpickle(f'si-{si}') # Create Survey perfect = Survey('perfect', gain_pattern='perfect') # Observe populations pop_obs[si] = SurveyPopulation(pop[si], perfect) pop_obs[si].name = f'si-{si}-obs' pop_obs[si].rates() pop_obs[si].save() else: for si in SIS: pop_obs[si] = unpickle(f'si-{si}-obs') # Plot log N and alpha versus log S
pop['shallow'].set_dist(model='vol_co', z_max=3., alpha=-0.5) pop['shallow'].name = 'shallow' pop['shallow'].generate() # Generate population following stellar mass density pop['steep'] = CosmicPopulation.simple(n_frbs) pop['steep'].set_dist(model='vol_co', z_max=3., alpha=-2.0) pop['steep'].name = 'steep' pop['steep'].generate() for k, v in pop.items(): v.save() else: for s in pop_types: pop[s] = unpickle(s) plot_aa_style() if NUM_FRBS: fig = plt.figure() ax = fig.add_subplot(111) # Get redshift of population zs = {} ns = {} i = 0 for s in pop_types: zs[s] = pop[s].frbs.z ns[s], bins = np.histogram(zs[s], bins=50)
n_model='sfr', pulse_model='uniform', pulse_range=[1., 1.], pulse_mu=1., pulse_sigma=0., si_mu=0., si_sigma=0., z_max=2.5) pop.save() pop_obs = {} if OBSERVE: if not CREATE: pop = unpickle(f'simple') for n in SIDELOBES: # Create Survey survey = Survey('perfect-small', gain_pattern='airy', n_sidelobes=n) # Observe populations pop_obs[n] = SurveyPopulation(pop, survey) pop_obs[n].name = f'obs-airy-{n}' pop_obs[n].rates() pop_obs[n].save() else: for n in SIDELOBES: pop_obs[n] = unpickle(f'obs-airy-{n}')