コード例 #1
0
def plot_dists(surv_pop, telescope):
    """Plot the fluence and DM distribution of a surveyed population.

    Args:
        surv_pop (Population): Population from which to plot
        telescope (str): Name of the telescope with which to compare the
            distribution. Necessary for Frbcat.

    """
    def annotate_p(axis, sample_a, sample_b):
        # Two-sample KS test; anchor the p-value in the plot corner
        _, p_value = ks_2samp(sample_a, sample_b)
        note = AnchoredText(fr'$p={round(p_value, 2)}$', loc=1,
                            borderpad=1., frameon=False)
        axis.add_artist(note)

    # Use a nice font for axes
    plt.rc('text', usetex=True)

    fig, (dm_ax, fl_ax) = plt.subplots(1, 2, sharey=True)

    # DM distribution: simulated (dashed) versus observed
    dm_frbpoppy = surv_pop.frbs.dm
    pprint(f'Number of detected FRBs: {len(dm_frbpoppy)}')
    dm_ax.step(*hist(dm_frbpoppy), where='mid', linestyle='dashed')

    cat = Frbcat().df
    dm_frbcat = cat[cat.telescope == telescope].dm
    dm_ax.step(*hist(dm_frbcat), where='mid')

    # Compare distributions
    annotate_p(dm_ax, dm_frbpoppy, dm_frbcat)

    dm_ax.set_xlabel(r'DM ($\textrm{pc}\ \textrm{cm}^{-3}$)')
    dm_ax.set_ylabel('Fraction')
    dm_ax.set_ylim([0, 1.1])
    dm_ax.set_xlim([0, 3500])

    # Fluence distribution on logarithmic bins
    fluence_frbpoppy = surv_pop.frbs.fluence
    fl_ax.step(*hist(fluence_frbpoppy, bins='log'), where='mid',
               label='frbpoppy', linestyle='dashed')

    fluence_frbcat = cat[cat.telescope == telescope].fluence
    fl_ax.step(*hist(fluence_frbcat, bins='log'), where='mid', label='frbcat')

    # Compare distributions
    annotate_p(fl_ax, fluence_frbpoppy, fluence_frbcat)

    fl_ax.set_xlabel(r'Fluence (Jy ms)')
    fl_ax.set_ylim([0, 1.1])
    fl_ax.set_xlim([5e-1, 1e4])
    plt.xscale('log')

    plt.figlegend(loc='upper center', ncol=2, framealpha=1)

    plt.tight_layout()
    plt.savefig(f'plots/frbpoppy_{telescope}.pdf')
    plt.clf()
コード例 #2
0
def get_data():
    """Get the population data."""
    # Construct population
    pop = CosmicPopulation(n_srcs=SIZE, n_days=1, name='standard_candle')
    pop.set_dist(model='sfr', z_max=2.5, H_0=67.74, W_m=0.3089, W_v=0.6911)
    pop.set_dm_host(model='constant', value=100)
    pop.set_dm_igm(model='ioka', slope=1000, std=None)
    pop.set_dm_mw(model='ne2001')
    pop.set_emission_range(low=10e6, high=10e9)
    pop.set_lum(model='constant', value=1e36)
    pop.set_w(model='constant', value=1.)
    pop.set_si(model='constant', value=0)
    pop.generate()

    # Survey population
    pops = {}
    for pattern in BEAMPATTERNS:
        pprint(f'Surveying with {pattern} beampattern')
        # 'airy-<n>' patterns encode the sidelobe count in the name
        if pattern.startswith('airy'):
            model, sidelobes = pattern.split('-')
            n_sidelobes = int(sidelobes)
        else:
            model, n_sidelobes = pattern, 0

        survey = Survey(name='perfect-small')
        # Prevent beam from getting larger than the sky
        survey.set_beam(model=model, n_sidelobes=n_sidelobes, size=10)
        surv_pop = SurveyPopulation(pop, survey)
        print(surv_pop.source_rate)
        pops[pattern] = surv_pop

    return pops
コード例 #3
0
ファイル: alpha_complex.py プロジェクト: telegraphic/frbpoppy
def complex_rates(remake=REMAKE, alphas=ALPHAS, size=SIZE, surveys=SURVEYS):
    """Calculate expected rates for a complex populations."""
    rates = defaultdict(list)

    if remake is False:
        # Reuse previously pickled survey populations
        for alpha in alphas:
            for s in surveys:
                surv_rates = unpickle(f'complex_alpha_{alpha}_{s}').source_rate
                pprint(f'Alpha:{alpha:.2}, Survey: {s}, Det: {surv_rates.det}')
                rates[s].append(surv_rates.det / surv_rates.days)
    else:
        pops = []
        for alpha in alphas:
            # Optionally grow the population for steeper alphas
            if ADAPTATIVE_SCALING and alpha <= -1.0:
                size = 1e7
            if ADAPTATIVE_SCALING and alpha <= -1.5:
                size = 1e8
            pop = CosmicPopulation.complex(size)
            pop.set_dist(model='vol_co',
                         z_max=2.5,
                         alpha=alpha,
                         H_0=67.74,
                         W_m=0.3089,
                         W_v=0.6911)
            pop.set_lum(model='powerlaw', low=1e40, high=1e45, power=-1)
            pop.name = f'complex_alpha_{alpha}'
            pops.append(pop)

            # Set up surveys
            ss = []
            for name in surveys:
                survey = Survey(name=name)
                survey.set_beam(model='airy', n_sidelobes=1)
                ss.append(survey)

            surv_pops = LargePopulation(pop, *ss).pops

            for s, surv_pop in zip(surveys, surv_pops):
                surv_rates = surv_pop.source_rate
                pprint(f'Alpha:{alpha:.2}, Survey: {s}, Det: {surv_rates.det}')
                rates[s].append(surv_rates.det / surv_rates.days)

    # Scale rates to first survey in list
    first = surveys[0]
    for s in surveys:
        if s == first:
            continue
        rates[s] = [r / base for r, base in zip(rates[s], rates[first])]
    rates[first] = [r / r for r in rates[first]]

    return rates
コード例 #4
0
def simple_rates(remake=REMAKE, alphas=ALPHAS, size=SIZE, surveys=SURVEYS):
    """Calculate expected rates for a simple populations."""
    rates = defaultdict(list)

    if remake is False:
        # Reuse previously pickled survey populations
        for alpha in alphas:
            for s in surveys:
                surv_rates = unpickle(f'simple_alpha_{alpha}_{s}').rates()
                pprint(f'Alpha:{alpha:.2}, Survey: {s}, Det: {surv_rates.det}')
                rates[s].append(surv_rates.det / surv_rates.days)
    else:
        pops = []
        for alpha in alphas:
            pop = CosmicPopulation.simple(size)
            pop.alpha = alpha
            pop.name = f'simple_alpha_{alpha}'
            pops.append(pop)

            # Set up surveys
            ss = []
            for name in surveys:
                ss.append(Survey(name=name,
                                 gain_pattern='perfect',
                                 n_sidelobes=0.5))

            surv_pops = LargePopulation(pop, *ss).pops

            for s, surv_pop in zip(surveys, surv_pops):
                surv_rates = surv_pop.rates()
                pprint(f'Alpha:{alpha:.2}, Survey: {s}, Det: {surv_rates.det}')
                rates[s].append(surv_rates.det / surv_rates.days)

    # Scale rates to HTRU
    # NOTE(review): assumes 'htru' is always in surveys — confirm callers
    for s in surveys:
        if s == 'htru':
            continue
        rates[s] = [r / base for r, base in zip(rates[s], rates['htru'])]
    rates['htru'] = [r / r for r in rates['htru']]

    return rates
コード例 #5
0
    def calc_gofs(self, run):
        """Calculate goodness-of-fit values for every simulation of a run.

        Args:
            run (int): Run number for which to calculate the fits.
        """
        # For each requested run
        self.so = SimulationOverview()
        par_set = self.so.df[self.so.df.run == run].par_set.iloc[0]
        pprint(f'Calculating goodness of fit for run {run}, par set {par_set}')
        pars = self.run_pars[par_set]

        # Loop through all combination of parameters
        for values, group in self.so.df[self.so.df.run == run].groupby(pars):
            pprint(f'    - {list(zip(pars, values))}')
            # Calculate goodness of fit values for each simulation
            for row_ix, row in group.iterrows():
                survey_name = row.survey
                uuid = row.uuid
                pop = unpickle(f'mc/run_{run}/{uuid}')

                # Apply a DM cutoff
                mask = (pop.frbs.dm <= 950)
                pop.frbs.apply(mask)
                pop.source_rate.det = pop.n_sources() * pop.source_rate.f_area

                dm_gof = self.dm(pop, survey_name)
                snr_gof = self.snr(pop, survey_name)
                self.so.df.at[row_ix, 'dm_gof'] = dm_gof
                self.so.df.at[row_ix, 'snr_gof'] = snr_gof

                if pop.n_sources() == 0:
                    # Nothing detected: zero weight, skip rate comparison
                    self.so.df.at[row_ix, 'weight'] = 0
                    self.so.df.at[row_ix, 'n_det'] = pop.n_sources()
                    pprint(f'        -  No sources in {survey_name}')
                    continue

                # Find corresponding rate normalisation population uuid
                norm_mask = dict(zip(pars, values))
                norm_mask['survey'] = self.norm_surv
                norm_mask['run'] = run
                k = norm_mask.keys()
                v = norm_mask.values()
                norm_uuid = group.loc[group[k].isin(v).all(axis=1), :].uuid
                norm_uuid = norm_uuid.values[0]
                rate_diff, n_det = self.rate(pop, survey_name, norm_uuid, run)

                # Get rate weighting
                self.so.df.at[row_ix, 'weight'] = rate_diff
                self.so.df.at[row_ix, 'n_det'] = n_det

        pprint(f'Saving the results for run {run}')
        # Best matching in terms of rates
        max_w = np.nanmax(self.so.df.weight)
        # BUG FIX: the original chained assignment
        # `df.loc[mask]['weight'] = max_w` writes to a temporary copy and
        # never updates the dataframe; use a single .loc[rows, col] instead
        self.so.df.loc[self.so.df.weight == 1e3, 'weight'] = max_w
        self.so.save()
コード例 #6
0
def limit_ra_dec(pop, pointings):
    """Doesn't work at boundary coordinates."""
    pprint('Limiting coordinates')

    def draw(n):
        # Uniform on the sphere: flat in RA, arccos of a flat deviate in dec
        rand = np.random.uniform
        ra = rand(0, 360, n)
        dec = np.rad2deg(np.arccos(rand(-1, 1, n))) - 90
        return ra, dec

    def within_pointings(ra, dec):
        keep = np.full(len(ra), False)
        radius = np.sqrt(40 / np.pi)
        for p_ra, p_dec in pointings:
            keep[go.separation(ra, dec, p_ra, p_dec) < radius] = True
        return keep

    # Rejection-sample until every source lies inside a pointing
    ra, dec = draw(pop.n_gen)
    reject, = np.where(~within_pointings(ra, dec))
    while reject.size > 0:
        fill_ra, fill_dec = draw(reject.size)
        ok = within_pointings(fill_ra, fill_dec)
        ra[reject[ok]] = fill_ra[ok]
        dec[reject[ok]] = fill_dec[ok]
        reject = reject[~ok]

    frbs = pop.frbs
    frbs.ra = ra
    frbs.dec = dec

    # Convert to galactic coordinates
    frbs.gl, frbs.gb = go.radec_to_lb(frbs.ra, frbs.dec, frac=True)
    pop.gen_gal_coords()

    return pop
def get_probabilities(normalise=False):
    """Get the number of bursts per maximum time.

    Args:
        normalise (bool): Whether to normalise at each maximum time, such
            that there's a number of bursts at each time stamp having a
            maximum probability of one.

    Returns:
        array: Number of bursts per maximum time.

    """
    days = np.arange(1, N_DAYS, 1)
    burst_range = np.arange(0, M_BURSTS, 1)
    prob = np.full((len(days), len(burst_range)), np.nan)

    # Mask any frbs over the maximum time
    time = td.clustered(r=R, k=K, n_srcs=N_SRCS, n_days=N_DAYS, z=0)

    pprint('Masking days')
    # Walk the day limits from last to first, masking as we go
    for i, day in reversed(list(enumerate(days))):
        time[time > day] = np.nan
        n_bursts = (~np.isnan(time)).sum(1)
        unique, counts = np.unique(n_bursts, return_counts=True)
        try:
            prob[i, unique] = counts
        except IndexError:
            pprint('Please ensure M is large enough.')
            exit()

    prob = prob.T / N_SRCS

    if normalise:
        # Normalise at each maximum time (highest chance becomes one)
        prob = prob / np.nanmax(prob, axis=0)

    return prob
コード例 #8
0
def bw_from_epn():
    """Scrape bandwidth from EPN website."""
    pprint('Getting data from EPN')
    names = []
    bws = []
    url = 'http://www.epta.eu.org/epndb/list.php'

    def to_freq(tag):
        return float(tag.split('>')[1].split(' ')[0])

    with urllib.request.urlopen(url) as resp:
        html = resp.read().decode().replace('&nbsp', ' ')
        for line in html.split('\n'):
            if not line.startswith('<li>'):
                continue
            # Pulsar name, then the span of its listed frequencies
            name = line.split()[0].split('>')[-1]
            freqs = [to_freq(chunk) for chunk in line.split('<a ')[1:]]
            names.append(name)
            bws.append(max(freqs) - min(freqs))

    pprint('Finished getting data from EPN')
    return pd.DataFrame.from_dict({'name': names, 'bw': bws})
コード例 #9
0
ファイル: rates.py プロジェクト: mvsantosdev/frbpoppy
    def __str__(self):
        """How to print the class."""
        # Row template shared by header and body
        row = '{:20.19} {:>10} {:>10}\n'
        header = row.format(self.name, 'Days', 'FRBs')
        divider = '-'*len(header.split('\n')[-2].strip()) + '\n'

        # Format rates
        rdays = round(self.days, 3)
        lines = [
            header,
            divider,
            row.format('In population', rdays, round(self.tot())),
            row.format('Detected', rdays, round(self.det, 3)),
            row.format('Too late', rdays, round(self.late, 3)),
            row.format('Too faint', rdays, round(self.faint, 3)),
            row.format('Outside survey', rdays, round(self.out, 3)),
            row.format('/Gpc^3', 365.25, round(self.vol, 3)),
            row.format('Expected', round(self.exp, 4), 1),
            divider,
        ]
        return pprint(''.join(lines), output=False)
コード例 #10
0
ファイル: sim.py プロジェクト: telegraphic/frbpoppy
def iter_run(i):
    """Generate, survey and save one CHIME repeater population.

    Args:
        i (int): Index used to label the saved population.
    """
    cosmic = CosmicPopulation(N_SRCS, n_days=N_DAYS, repeaters=True)
    cosmic.set_dist(model='vol_co', z_max=1.0)
    cosmic.set_dm_host(model='gauss', mean=100, std=200)
    cosmic.set_dm_igm(model='ioka', slope=1000, std=None)
    cosmic.set_dm(mw=True, igm=True, host=True)
    cosmic.set_emission_range(low=100e6, high=10e9)
    cosmic.set_lum(model='powerlaw', per_source='different',
                   low=1e40, high=1e45, power=0)
    cosmic.set_si(model='gauss', mean=-1.4, std=1)
    cosmic.set_w(model='lognormal', per_source='different', mean=0.1, std=1)
    # Keep both draws in this order so the random stream stays reproducible
    rate = lognormal(RATE, 2, N_SRCS)
    if LARGE_POP:
        rate = lognormal(RATE, 2, int(MAX_SIZE))  # Not completely kosher
    cosmic.set_time(model='poisson', rate=rate)

    # Set up survey
    chime = Survey('chime-frb', n_days=N_DAYS)
    chime.set_beam(model='chime-frb')
    chime.gen_pointings()  # To ensure each sub pop has the same pointings

    # Only generate FRBs in CHIME's survey region
    cosmic.set_direction(model='uniform',
                         min_ra=chime.ra_min,
                         max_ra=chime.ra_max,
                         min_dec=chime.dec_min,
                         max_dec=chime.dec_max)

    if LARGE_POP:
        surv_pop = LargePopulation(cosmic, chime, max_size=MAX_SIZE).pops[0]
    else:
        cosmic.generate()
        surv_pop = SurveyPopulation(cosmic, chime)
    surv_pop.name = f'cosmic_chime_longer_{i}'
    surv_pop.save()

    print(surv_pop.source_rate)
    print(surv_pop.burst_rate)
    pprint(f'i: {i}')
    pprint(f'# one-offs: {surv_pop.n_one_offs()}')
    pprint(f'# repeaters: {surv_pop.n_repeaters()}')
コード例 #11
0
    def gen_par_set_4(self,
                      parallel=True,
                      alpha=-1.5,
                      si=0,
                      li=-1,
                      lum_min=1e40,
                      lum_max=1e40,
                      w_mean=np.nan,
                      w_std=np.nan,
                      run=np.nan):
        """Run a Monte Carlo grid over DM-IGM slope and host DM.

        Generates one cosmic population with the given parameters, then
        re-derives its dispersion measures for each (dm_igm_slope, dm_host)
        combination and surveys the result, saving one population per
        grid point and survey.

        Args:
            parallel (bool): Whether to spread the grid over multiple cores.
            alpha (float): Passed to set_dist; skipped when NaN.
            si (float): Spectral index value; only set when alpha is not NaN.
            li (float): Luminosity power-law index; used when lum_min set.
            lum_min (float): Minimum luminosity; skipped when NaN.
            lum_max (float): Maximum luminosity.
            w_mean (float): Lognormal pulse-width mean; skipped when NaN.
            w_std (float): Lognormal pulse-width std.
            run (int): Run number under which results are filed.
        """
        dm_igm_slopes = np.linspace(800, 1200, 11)
        dm_hosts = np.linspace(0, 500, 11)

        # Put all options into a dataframe
        # Drop any earlier rows of this run before appending the new grid
        self.so.df = self.so.df[self.so.df.run != run]
        opt = np.meshgrid(dm_igm_slopes, dm_hosts, self.survey_ix)
        options = np.array(opt).T.reshape(-1, 3)
        cols = ('dm_igm_slope', 'dm_host', 'survey')
        df = pd.DataFrame(options, columns=cols)
        df['run'] = run
        df['par_set'] = 4
        # One uuid per simulation; doubles as the saved population filename
        df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
        df['date'] = datetime.today()
        self.so.append(df)
        self.so.map_surveys(self.survey_ix, self.survey_names)
        self.so.save()

        # Remove previous par_set of the same number
        if not self.set_up_dirs(run=run):
            fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
            for f in glob(fs):
                os.remove(f)

        # Base population: generated once, DMs re-derived per grid point
        pop = CosmicPopulation.complex(self.pop_size)

        if not np.isnan(alpha):
            pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
            pop.set_si(model='constant', value=si)
        if not np.isnan(lum_min):
            pop.set_lum(model='powerlaw', low=lum_min, high=lum_max, index=li)
        if not np.isnan(w_mean):
            pop.set_w(model='lognormal', mean=w_mean, std=w_std)
        pop.generate()

        def adapt_pop(e):
            # Copy the base population, re-derive its DM components for one
            # (dm_igm_slope, dm_host) grid point, then survey and save it
            dm_igm_slope, dm_host = e
            t_pop = deepcopy(pop)
            t_pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
            t_pop.gen_dm_igm()
            t_pop.set_dm_host(model='constant', value=dm_host)
            t_pop.gen_dm_host()
            t_pop.frbs.dm = t_pop.frbs.dm_mw + t_pop.frbs.dm_igm
            t_pop.frbs.dm += t_pop.frbs.dm_host

            for survey in self.surveys:
                surv_pop = SurveyPopulation(t_pop, survey)

                # Get unique identifier
                # (note: this local `uuid` shadows the uuid module here)
                mask = (self.so.df.par_set == 4)
                mask &= (self.so.df.run == run)
                mask &= (self.so.df.dm_igm_slope == dm_igm_slope)
                mask &= (self.so.df.dm_host == dm_host)
                mask &= (self.so.df.survey == survey.name)
                uuid = self.so.df[mask].uuid.iloc[0]
                surv_pop.name = f'mc/run_{run}/{uuid}'
                surv_pop.save()

        n_cpu = min([4, os.cpu_count() - 1])
        pprint(f'{os.cpu_count()} CPUs available')
        mg = np.meshgrid(dm_igm_slopes, dm_hosts)
        loop = np.array(mg).T.reshape(-1, 2)
        if parallel:
            Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
        else:
            [adapt_pop(e) for e in tqdm(loop)]
コード例 #12
0
ファイル: sim_runs.py プロジェクト: telegraphic/frbpoppy
        # NOTE(review): fragment of a larger function — `r`, `ra` and `n`
        # are defined before this view; presumably a repeater population,
        # a rate parameter and a source count — confirm upstream.
        r.set_emission_range(low=100e6, high=10e9)
        r.set_lum(model='powerlaw',
                  per_source='different',
                  low=1e40,
                  high=1e45,
                  power=0)
        r.set_si(model='gauss', mean=-1.4, std=1)
        r.set_w(model='lognormal', per_source='different', mean=0.1, std=1)
        # One lognormally drawn Poisson burst rate per source
        rate = lognormal(ra, 1, int(n))
        r.set_time(model='poisson', rate=rate)

        # Set up survey
        s = Survey('chime-frb', n_days=N_DAYS)
        s.set_beam(model='chime-frb')

        # Only generate FRBs in CHIME's survey region
        r.set_direction(model='uniform',
                        min_ra=s.ra_min,
                        max_ra=s.ra_max,
                        min_dec=s.dec_min,
                        max_dec=s.dec_max)

        r.generate()

        surv_pop = SurveyPopulation(r, s)
        surv_pop.name = 'cosmic_chime'
        print(surv_pop.source_rate)
        print(surv_pop.burst_rate)
        pprint(f'# one-offs: {surv_pop.n_one_offs()}')
        pprint(f'# repeaters: {surv_pop.n_repeaters()}')
コード例 #13
0
plot_aa_style()
fig, ax1 = plt.subplots(1, 1)

# Fluence plot
ax1.set_xlabel('S/N')
ax1.set_xscale('log')
ax1.set_ylabel(r'\#(${>}\text{S/N}$)')
ax1.set_yscale('log')

# Update fluence plot
for i, surv_pop in enumerate(surv_pops):
    name = surv_pop.name.split('_')[-1]
    snr = surv_pop.frbs.snr

    if snr.size == 0:
        pprint(f'No FRBs in {name} population')
        continue

    bins, values = hist(snr, bin_type='log', norm=None)

    # Cumulative sum
    values = np.cumsum(values[::-1])[::-1]

    # Normalise to area on sky
    # BUG FIX: the original `np.isnan(values.all())` tests whether the
    # single boolean values.all() is NaN, which is always False, so the
    # condition was always True. Intended check: skip only if all values
    # are NaN.
    if not np.isnan(values).all():
        values = values * surv_pop.source_rate.f_area

    plt.step(bins, values, where='mid', label=name)

plt.legend()
plt.tight_layout()
コード例 #14
0
# NOTE(review): fragment — `axes`, `surv_pops` and `EXPECTED` are defined
# outside this view, and the final hist() call is cut off mid-statement.
for ax in axes.flatten():
    ax.set_aspect('auto')

# Get norm pop
y = 0
ys = []
names = []
rates = []
# Scale the simulated detection rate to the observed parkes-htru rate
# (EXPECTED presumably maps survey -> (n_frbs, n_days) — confirm upstream)
norm_sim_rate = surv_pops[0].source_rate.det
norm_real_rate = EXPECTED['parkes-htru'][0] / EXPECTED['parkes-htru'][1]
norm_rate = norm_sim_rate / norm_real_rate

for i, surv_pop in enumerate(surv_pops):

    name = surv_pop.name.split('_')[-1]
    pprint(name)
    if surv_pop.n_sources() == 0:
        # Skip surveys without any detections
        print(surv_pop.source_rate)
        print(f'{name} | no FRBs in population')
        continue

    names.append(name)
    ys.append(y)

    # Dispersion measure plot
    ax = axes[0]
    ax.set_xlabel(r'DM ($\textrm{pc}\ \textrm{cm}^{-3}$)')
    ax.set_ylabel(r'\#')
    ax.set_yscale('log')

    bins, values = hist(surv_pop.frbs.dm,
コード例 #15
0
ファイル: rep_vs_one.py プロジェクト: telegraphic/frbpoppy
"""Generate a repeater population and split into repeaters and one-offs."""
import numpy as np

from frbpoppy import CosmicPopulation, Survey, SurveyPopulation
from frbpoppy import split_pop, pprint, plot

DAYS = 1

r = CosmicPopulation.simple(int(1e4), n_days=DAYS, repeaters=True)
r.set_time(model='regular', rate=2)
r.set_lum(model='powerlaw', low=1e40, high=1e45, per_source='different')
r.generate()

survey = Survey('chime-frb', n_days=DAYS)
survey.set_beam(model='perfect')
surv_pop = SurveyPopulation(r, survey)

# Split population into seamingly one-off and repeater populations
mask = ((~np.isnan(surv_pop.frbs.time)).sum(1) > 1)
pop_ngt1, pop_nle1 = split_pop(surv_pop, mask)
pop_ngt1.name += ' (> 1 burst)'
pop_nle1.name += ' (1 burst)'

pops = [pop_nle1, pop_ngt1]

pprint(f'{surv_pop.n_sources()} sources detected')
pprint(f'{surv_pop.n_bursts()} bursts detected')

plot(surv_pop)
コード例 #16
0
    def gen_par_set_1(self,
                      parallel=True,
                      lum_min=np.nan,
                      lum_max=np.nan,
                      w_mean=np.nan,
                      w_std=np.nan,
                      dm_igm_slope=np.nan,
                      dm_host=np.nan,
                      run=0):
        """Run a Monte Carlo grid over alpha, spectral and luminosity index.

        One population is generated per alpha; spectral index and
        luminosity are then re-derived in place for each (si, li) grid
        point before surveying and saving the result.

        Args:
            parallel (bool): Whether to spread alphas over multiple cores.
            lum_min (float): Minimum luminosity; NaN keeps the grid value.
            lum_max (float): Maximum luminosity.
            w_mean (float): Lognormal pulse-width mean; skipped when NaN.
            w_std (float): Lognormal pulse-width std.
            dm_igm_slope (float): IGM DM slope; skipped when NaN.
            dm_host (float): Host DM; only set with dm_igm_slope.
            run (int): Run number under which results are filed.
        """
        alphas = np.linspace(-2.5, -1, 11)
        sis = np.linspace(-2, 2, 11)
        lis = np.linspace(-2, 0, 11)

        # Put all options into a dataframe
        # Drop earlier rows of the same run before appending the new grid
        if 'run' in self.so.df:
            self.so.df = self.so.df[self.so.df.run != run]
        opt = np.meshgrid(alphas, sis, lis, self.survey_ix)
        options = np.array(opt).T.reshape(-1, 4)
        df = pd.DataFrame(options, columns=('alpha', 'si', 'li', 'survey'))
        df['run'] = run
        df['par_set'] = 1
        # One uuid per simulation; doubles as the saved population filename
        df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
        df['date'] = datetime.today()
        self.so.append(df)
        self.so.map_surveys(self.survey_ix, self.survey_names)
        self.so.save()

        # Remove previous par_set of the same number
        if not self.set_up_dirs(run=run):
            fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
            for f in glob(fs):
                os.remove(f)

        def iter_alpha(i):
            # Generate one base population for this alpha
            alpha = alphas[i]
            pop = CosmicPopulation.complex(self.pop_size)

            pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
            # Placeholder luminosity; overwritten per li in the loop below
            pop.set_lum(model='constant', value=1)

            if not np.isnan(w_mean):
                pop.set_w(model='lognormal', mean=w_mean, std=w_std)
            if not np.isnan(dm_igm_slope):
                pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
                pop.set_dm_host(model='constant', value=dm_host)

            pop.generate()

            for si in sis:
                pop.set_si(model='constant', value=si)
                pop.gen_si()

                for li in lis:
                    pop.set_lum(model='powerlaw',
                                low=1e40,
                                high=1e45,
                                power=li)

                    # NOTE(review): this override passes `index=li` where
                    # the call above uses `power=li` — confirm which
                    # keyword set_lum actually expects
                    if not np.isnan(lum_min):
                        pop.set_lum(model='powerlaw',
                                    low=lum_min,
                                    high=lum_max,
                                    index=li)

                    pop.gen_lum()

                    for survey in self.surveys:
                        surv_pop = SurveyPopulation(pop, survey)

                        # Get unique identifier
                        # (local `uuid` shadows the uuid module here)
                        mask = (self.so.df.par_set == 1)
                        mask &= (self.so.df.run == run)
                        mask &= (self.so.df.alpha == alpha)
                        mask &= (self.so.df.si == si)
                        mask &= (self.so.df.li == li)
                        mask &= (self.so.df.survey == survey.name)
                        uuid = self.so.df[mask].uuid.iloc[0]
                        surv_pop.name = f'mc/run_{run}/{uuid}'
                        surv_pop.save()

        if parallel:
            n_cpu = min([3, os.cpu_count() - 1])
            pprint(f'{os.cpu_count()} CPUs available')
            r = range(len(alphas))
            Parallel(n_jobs=n_cpu)(delayed(iter_alpha)(i) for i in tqdm(r))
        else:
            [iter_alpha(i) for i in tqdm(range(len(alphas)))]
コード例 #17
0
ファイル: burstiness.py プロジェクト: telegraphic/frbpoppy
    # NOTE(review): fragment — the opening if-branch and the names `pop`,
    # `survey`, `lum_func` and `SCATTER` are defined outside this view.
    elif lum_func == 'flat pl.':
        pop.set_lum(model='powerlaw', per_source='different', low=1e40,
                    high=1e45, power=0)
    elif lum_func == 'gauss':
        pop.set_lum(model='gauss', per_source='different', mean=1e42,
                    std=1e10)
    elif lum_func == 'std. candle':
        pop.set_lum(model='constant', value=1e42)

    pop.generate()
    surv_pop = SurveyPopulation(pop, survey)
    # Split population into seemingly one-off and repeater populations:
    # sources with more than one detected burst count as repeaters
    mask = ((~np.isnan(surv_pop.frbs.time)).sum(1) > 1)
    pop_ngt1, pop_nle1 = split_pop(surv_pop, mask)

    pprint(f'{pop_ngt1.n_bursts()} repeater bursts')
    if pop_ngt1.n_bursts() < 10:
        pprint('Insufficient FRB sources for plotting')

    # Plot
    if SCATTER:
        # Rejection-sample source indices with DM below 1000
        def sample(n):
            return np.random.randint(low=0, high=pop_ngt1.n_sources(), size=n)

        def accept(ii):
            return surv_pop.frbs.dm[ii] < 1000

        ii = sample(25)
        mask = accept(ii)
        reject, = np.where(~mask)
        while reject.size > 0:
コード例 #18
0
    def plot(self, run):
        """Plot goodness-of-fit values for each parameter of a run.

        For every main parameter in the run's parameter set, a grid of
        subplots is saved per survey and per comparison column
        ('dm_gof'/'snr_gof').

        Args:
            run (int): Run number to plot.
        """
        # Get data
        # For each requested run
        df = self.so.df
        par_set = df[df.run == run].par_set.iloc[0]

        # For each parameter
        for main_par in self.run_pars[par_set]:
            pprint(f'Plotting {main_par}')
            other_pars = [e for e in self.run_pars[par_set] if e != main_par]

            for compare_par in ['dm', 'snr']:
                compare_col = f'{compare_par}_gof'

                pprint(f' - {compare_col}')
                for survey, group_surv in df[df.run == run].groupby('survey'):

                    pprint(f'    - {survey}')

                    # Set up plot
                    plot_aa_style()
                    plt.rcParams["figure.figsize"] = (5.75373 * 3, 5.75373 * 3)
                    plt.rcParams['figure.max_open_warning'] = 125
                    n_x = group_surv[other_pars[0]].nunique()
                    if len(other_pars) > 1:
                        n_y = group_surv[other_pars[1]].nunique()
                    else:
                        n_y = 1
                    fig, ax = plt.subplots(n_x,
                                           n_y,
                                           sharex='col',
                                           sharey='row')

                    groups = group_surv.groupby(other_pars)
                    x = -1
                    for i, (other_pars_vals, group) in enumerate(groups):
                        bins = group[main_par].values
                        values = group[compare_col].values
                        bins, values = self.add_edges_to_hist(bins, values)

                        # Walk the subplot grid column by column
                        if n_y > 1:
                            y = i % n_y
                            if y == 0:
                                x += 1
                            a = ax[y, x]
                        else:
                            y = i
                            a = ax[y]

                        a.step(bins, values, where='mid')
                        # BUG FIX: set_title is a method; the original
                        # `a.set_title = str(...)` overwrote it with a
                        # string instead of titling the axes
                        a.set_title(str(other_pars_vals))

                        # Log-scale x when the bin spacing is non-uniform
                        diff = np.diff(bins)
                        if diff[1] != diff[0]:
                            a.set_xscale('log')

                        # Set axis label
                        if y == n_y - 1:
                            p = other_pars[0]
                            if isinstance(other_pars_vals, float):
                                val = other_pars_vals
                            else:
                                val = other_pars_vals[0]
                            p = p.replace('_', ' ')
                            a.set_xlabel(f'{p} = {val:.2}')

                        if x == 0:
                            p = other_pars[1]
                            val = other_pars_vals[1]
                            p = p.replace('_', ' ')
                            a.set_ylabel(f'{p} = {val:.2}')

                    # Set axis limits
                    subset = df[df.run == run][main_par]
                    y_subset = group_surv[compare_col].copy()
                    try:
                        low = np.nanmin(y_subset)
                        high = np.nanmax(y_subset)
                    except ValueError:
                        # All-NaN column: fall back to a default range
                        low = 0.0001
                        high = 1
                    log = False
                    if low > 0 and high > 0:
                        log = True

                    # NOTE(review): assumes subplots returned an axes
                    # array (n_x or n_y > 1); a lone Axes has no .flatten()
                    for a in ax.flatten():
                        a.set_xlim(subset.min(), subset.max())
                        if log:
                            a.set_yscale('log', nonposy='clip')
                            a.set_ylim(low, high)

                    p = main_par.replace('_', ' ')
                    fig.suptitle(f'{p} - {compare_par} - {survey}')
                    plt.tight_layout()
                    plt.subplots_adjust(top=0.95)

                    # Save to subdirectory
                    path_to_save = rel_path(f'./plots/mc/{main_par}_run{run}/')
                    if not os.path.isdir(path_to_save):
                        os.mkdir(path_to_save)
                    path_to_save += f'{compare_par}_{survey}.pdf'
                    plt.savefig(path_to_save)
                    plt.clf()
コード例 #19
0
          per_source='different')
# NOTE(review): fragment — `r`, `DAYS`, `PLOTTING_LIMIT_N_SRCS` and
# `INTERACTIVE_PLOT` are defined before this view.
r.set_time(model='poisson', rate=3)
r.set_dm_igm(model='ioka', slope=1000, std=0)
r.set_dm(mw=False, igm=True, host=False)
r.set_w('constant', value=1)

r.generate()

# Set up survey
survey = Survey('perfect', n_days=DAYS)
survey.set_beam(model='perfect')
# Very high S/N cutoff — presumably to reject nearly every burst; confirm
survey.snr_limit = 1e6

surv_pop = SurveyPopulation(r, survey)

# Generated vs detected burst counts
pprint(f'{r.n_bursts()}:{surv_pop.n_bursts()}')
pprint(f'{surv_pop.n_sources()} sources detected')

if r.n_bursts() < PLOTTING_LIMIT_N_SRCS:
    pprint('Not sufficient FRB sources for plotting')
    exit()

# Split population into seemingly one-off and repeater populations:
# sources with more than one detected burst count as repeaters
mask = ((~np.isnan(surv_pop.frbs.time)).sum(1) > 1)
pop_rep, pop_one = split_pop(surv_pop, mask)
pop_rep.name += ' (> 1 burst)'
pop_one.name += ' (1 burst)'

if INTERACTIVE_PLOT:
    plot(r, pop_rep, pop_one, tns=False, mute=False)
コード例 #20
0
ファイル: cube.py プロジェクト: telegraphic/frbpoppy
def generate(parallel=False):
    """Calculate detection rates over an (alpha, li, si) parameter cube.

    Args:
        parallel (bool): Whether to spread the alpha iterations over
            multiple cores; results are then shared through a
            memory-mapped temporary file.
    """
    from joblib import Parallel, delayed
    from frbpoppy import CosmicPopulation, Survey, pprint
    from frbpoppy import SurveyPopulation
    from tqdm import tqdm

    # Set up rate dataframe
    paras = [ALPHAS, LIS, SIS]
    vals = np.array(np.meshgrid(*paras)).T.reshape(-1, len(paras))
    cols = ['alpha', 'li', 'si']
    df = pd.DataFrame(vals, columns=cols)

    # Set up surveys
    surveys = []
    for name in SURVEY_NAMES:
        survey = Survey(name=name)
        survey.set_beam(model='airy', n_sidelobes=1)
        surveys.append(survey)
        df[name] = np.nan  # One rate column per survey

    def iter_alpha(i, surveys=surveys, parallel=None):
        # Generate one population per alpha, then re-derive luminosity
        # and spectral index in place for each (li, si) grid point.
        # Here `parallel` is the shared result array (not the outer
        # flag); when None, rates are written straight into `df`.
        alpha = ALPHAS[i]
        pop = CosmicPopulation.complex(SIZE)
        pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
        pop.set_lum(model='powerlaw', low=1e40, high=1e45, power=-1)
        pop.generate()

        for li in LIS:
            pop.set_lum(model='powerlaw', low=1e40, high=1e45, power=li)
            pop.gen_lum()

            for si in SIS:
                pop.set_si(model='constant', value=si)
                pop.gen_si()

                pop.name = f'complex_alpha_{alpha}_lum_{li}_si_{si}'

                for survey in surveys:
                    surv_pop = SurveyPopulation(pop, survey)
                    print(surv_pop.name)
                    surv_pop.save()

                    # Detections per day for this survey
                    sr = surv_pop.source_rate
                    rate = sr.det / sr.days
                    mask = (df.alpha == alpha) & (df.li == li) & (df.si == si)

                    if parallel is not None:
                        # note: rebinds the loop argument `i` locally
                        i = df[mask].index
                        j = SURVEY_NAMES.index(survey.name)
                        parallel[i, j] = rate
                    else:
                        df.loc[mask, survey.name] = rate

    if parallel:
        n_cpu = min([3, os.cpu_count() - 1])
        pprint(f'{os.cpu_count()} CPUs available')
        r = range(len(ALPHAS))

        temp_path = ('./temp.mmap')

        # Make a temp memmap to have a sharedable memory object
        temp = np.memmap(temp_path,
                         dtype=np.float64,
                         shape=(len(vals), len(SURVEY_NAMES)),
                         mode='w+')

        Parallel(n_jobs=n_cpu)(delayed(iter_alpha)(i, parallel=temp)
                               for i in tqdm(r))

        # Copy the shared results back into the dataframe
        for name in SURVEY_NAMES:
            col = SURVEY_NAMES.index(name)
            df[name] = temp[:, col]
    else:
        for i in tqdm(range(len(ALPHAS)), desc='Alphas'):
            iter_alpha(i)

    df.to_csv(CSV_PATH)