Example #1
def gr_corrections(r, m, phi=1.0, verbose=False):
    """Returns GR correction factors (xi, 1+z) given Newtonian R, M
        Ref: Eq. B5, Keek & Heger 2011

    parameters
    ----------
    m : flt
        Newtonian mass (Msol) (i.e. Kepler frame)
    r   : flt
        Newtonian radius (km)
    phi : flt
        Ratio of GR mass to Newtonian mass: M_GR / M_NW
        (NOTE: unrelated to grav potential phi)
    verbose : bool
    """
    zeta = get_zeta(r=r, m=m)

    b = (9 * zeta**2 * phi**4 +
         np.sqrt(3) * phi**3 * np.sqrt(16 + 27 * zeta**4 * phi**2))**(1 / 3)
    a = (2 / 9)**(1 / 3) * (b**2 / phi**2 - 2 * 6**(1 / 3)) / (b * zeta**2)
    xi = (zeta * phi / 2) * (1 + np.sqrt(1 - a) +
                             np.sqrt(2 + a + 2 / np.sqrt(1 - a)))

    redshift = xi**2 / phi  # NOTE: xi is unrelated to anisotropy factors xi_b, xi_p

    if verbose:
        print_title(f'Using R={r:.3f}, M={m:.3f}, M_GR={m*phi:.3f}:')
        print(f'    R_GR = {r*xi:.2f} km')
        print(f'(1+z)_GR = {redshift:.3f}')
    return xi, redshift
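
# ----- Usage sketch (added; not from the source) -----
# gr_corrections() relies on module-level helpers (np, get_zeta, print_title)
# that are not shown here. The stand-in below ASSUMES zeta is the Newtonian
# compactness GM / (R c^2); verify against Keek & Heger (2011), Appendix B.
import numpy as np

G = 6.674e-8      # gravitational constant (CGS)
C = 2.998e10      # speed of light (cm/s)
MSUN = 1.989e33   # solar mass (g)

def get_zeta(r, m):
    """Assumed compactness GM/(Rc^2), for r in km and m in Msol."""
    return G * (m * MSUN) / (r * 1e5 * C**2)

# For r=10 km, m=1.4 Msol, phi=1, this gives xi ~ 1.12 and (1+z) ~ 1.26,
# matching the redshift=1.259 default used by print_summary() below.
xi, redshift = gr_corrections(r=10.0, m=1.4)
print(f'xi = {xi:.3f}, (1+z) = {redshift:.3f}')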
Example #2
def extract_runs(runs,
                 batch,
                 source,
                 save_plots=True,
                 reload=False,
                 load_bursts=False,
                 load_summary=False,
                 basename='xrb'):
    """Do burst analysis on run(s) from a single batch and save results
    """
    runs = grid_tools.ensure_np_list(runs)
    for run in runs:
        print_title(f'Run {run}', character='-', n=50)
        model = burst_analyser.BurstRun(run,
                                        batch,
                                        source,
                                        analyse=True,
                                        reload=reload,
                                        load_bursts=load_bursts,
                                        basename=basename,
                                        load_summary=load_summary)
        model.save_burst_table()
        model.save_summary_table()

        if save_plots:
            model.plot(display=False, save=True, log=False)
            model.plot_convergence(display=False, save=True)
            model.plot_lightcurves(display=False, save=True)
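
# ----- Usage sketch (added; not from the source) -----
# Placeholder values; the batch data must exist on disk for BurstRun to load.
extract_runs([1, 2, 3], batch=5, source='gs1826', save_plots=False)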
Example #3
def collect_output(runs,
                   batches,
                   source,
                   basename='xrb',
                   mean_name='mean.data',
                   **kwargs):
    """=======================================================================
    Collects output files from kepler-analyser output and organises them into batches
    =======================================================================
    runs        = int,[int]  : list of model IDs ()
    batches     = [int]      : list of batch IDs/numbers (assumes same run-IDs for eachs)
    basename    = str        : basename of kepler models
    path        = str        : path to location of all collected batches
    ======================================================================="""
    print_title('Collecting mean lightcurve and summ.csv files')
    source = grid_strings.source_shorthand(source=source)
    batches = grid_tools.expand_batches(batches, source)
    runs = grid_tools.expand_runs(runs)

    path = kwargs.get('path', GRIDS_PATH)
    analyser_path = os.path.join(path, 'analyser', source)

    for batch in batches:
        batch_str = grid_strings.get_batch_string(batch, source)
        analyser_output_path = os.path.join(analyser_path,
                                            batch_str + OUTPUT_SUFFIX)
        source_path = grid_strings.get_source_path(source)
        save_path = os.path.join(source_path, 'mean_lightcurves', batch_str)
        grid_tools.try_mkdir(save_path, skip=True)

        print('Copying from: ', analyser_output_path)
        print('          to: ', source_path)

        print('Copying/reformatting summ files')
        reformat_summ(batch=batch, source=source, basename=basename)

        print('Copying mean lightcurves')
        final_run_str = grid_strings.get_run_string(runs[-1], basename)
        for run in runs:
            run_str = grid_strings.get_run_string(run, basename)
            sys.stdout.write(f'\r{run_str}/{final_run_str}')
            sys.stdout.flush()  # ensure the progress line renders immediately

            mean_filepath = os.path.join(analyser_output_path, run_str,
                                         mean_name)
            save_file = f'{batch_str}_{run_str}_{mean_name}'
            save_filepath = os.path.join(save_path, save_file)

            subprocess.run(['cp', mean_filepath, save_filepath], check=True)
        sys.stdout.write('\n')
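
# ----- Usage sketch (added; not from the source) -----
# Placeholder values; 'path' may be passed as a kwarg, otherwise GRIDS_PATH is used.
collect_output(runs=[1, 2, 3], batches=[1, 2], source='gs1826')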
Example #4
def print_summary(runs,
                  batches,
                  source,
                  basename='xrb',
                  skip=1,
                  redshift=1.259,
                  **kwargs):
    """
    prints summary analyser output of model
    """
    source = grid_strings.source_shorthand(source=source)
    batches = grid_tools.expand_batches(batches, source)
    path = kwargs.get('path', GRIDS_PATH)
    analyser_path = os.path.join(path, 'analyser', source)
    runs = grid_tools.expand_runs(runs)

    for batch in batches:
        batch_name = grid_strings.get_batch_string(batch, source)
        output_str = f'{batch_name}{OUTPUT_SUFFIX}'

        print_title(f'Batch {batch}')

        summ_filepath = os.path.join(analyser_path, output_str, 'summ.csv')
        summ = pd.read_csv(summ_filepath)
        summ_names = np.genfromtxt(summ_filepath,
                                   delimiter="'",
                                   usecols=[1],
                                   dtype='str')

        for run in runs:
            run_str = grid_strings.get_run_string(run, basename)
            # index of the row for this run
            idx = np.where(summ_names == run_str)[0][0]
            N = int(summ['num'][idx])  # Number of bursts

            dt = summ['tDel'][idx] * redshift / 3600
            u_dt = summ['uTDel'][idx] * redshift / 3600

            # ===== Print info =====
            print(f'{basename}{run}')
            print(f'Total bursts = {N}')
            print(f'Excluding first {skip} bursts')
            print(f'Using redshift (1+z) = {redshift:.3f}')
            print(f'Delta_t = {dt:.2f} hr')
            print(f'u(Delta_t) = {u_dt:.2f} hr')
            print_dashes()

    return {'N': N, 'dt': dt, 'u_dt': u_dt}  # values from the last run processed
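
# ----- Usage sketch (added; not from the source) -----
# Placeholder values. Note the returned dict holds N, dt, u_dt for the
# last run processed only.
result = print_summary(runs=[2], batches=[5], source='gs1826')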
Example #5
def extract_batches(source,
                    batches=None,
                    save_plots=True,
                    multithread=True,
                    reload=False,
                    load_bursts=False,
                    load_summary=False,
                    basename='xrb',
                    param_table=None):
    """Do burst analysis on arbitrary number of batches"""
    t0 = time.time()
    if param_table is not None:
        print('Using models from table provided')
        batches = np.unique(param_table['batch'])
    else:
        batches = grid_tools.ensure_np_list(batches)

    for batch in batches:
        print_title(f'Batch {batch}')

        analysis_path = grid_strings.batch_analysis_path(batch, source)
        for folder in ['input', 'output']:
            path = os.path.join(analysis_path, folder)
            grid_tools.try_mkdir(path, skip=True)

        if param_table is not None:
            subset = grid_tools.reduce_table(param_table,
                                             params={'batch': batch})
            runs = np.array(subset['run'])
        else:
            n_runs = grid_tools.get_nruns(batch, source)
            runs = np.arange(n_runs) + 1

        if multithread:
            args = []
            for run in runs:
                args.append((run, batch, source, save_plots, reload,
                             load_bursts, load_summary, basename))
            with mp.Pool(processes=8) as pool:
                pool.starmap(extract_runs, args)
        else:
            extract_runs(runs,
                         batch,
                         source,
                         reload=reload,
                         save_plots=save_plots,
                         load_bursts=load_bursts,
                         load_summary=load_summary,
                         basename=basename)

        print_title('Combining run tables')
        for table_name in ('summary', 'bursts'):
            burst_tools.combine_run_tables(batch,
                                           source,
                                           table_name=table_name)

    t1 = time.time()
    dt = t1 - t0
    print_title(f'Time taken: {dt:.1f} s ({dt/60:.2f} min)')
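
# ----- Usage sketch (added; not from the source) -----
# Placeholder values. With multithread=True the runs of each batch are
# farmed out to an 8-process pool via pool.starmap(extract_runs, args).
extract_batches('gs1826', batches=[1, 2], multithread=False)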
Example #6
def extract_lightcurves(runs, path_data, path_target, basename='xrb'):
    """========================================================
    Loads Kepler .lum binaries and saves txt file of [time, luminosity, radius] for input to kepler_analyser
    Returns No. of cycles for each run
    ========================================================
    runs = []         : list of run numbers, eg. [324,325,340]
    path_data = str   : path to directory where kepler output folders are located (include trailing slash)
    path_target = str : path to directory where output .txt files will be written (include trailing slash)
    in_name = str     : base filename of input, eg. 'run' for run324
    out_name = str    : base filename of output, eg. 'xrb' for xrb324

    NOTE: kepler_analyser overwrites summ.csv and db.csv, should put path_target as new directory
    ========================================================"""

    print_title()
    print(f'Loading binaries from {path_data}')
    print(f'Writing txt files to {path_target}')
    print_title()

    cycles = np.zeros(len(runs), dtype=int)

    for i, run in enumerate(runs):
        rname = grid_strings.get_run_string(run, basename)
        lcfile = f'{rname}.lc'
        savefile = f'{rname}.data'

        print(f'Loading kepler binary for {rname}')
        lcpath = os.path.join(path_data, rname, lcfile)
        data = lcdata.load(lcpath, graphical=False)

        print('Writing txt file')
        savepath = os.path.join(path_target, savefile)
        data.write_lc_txt(savepath)

        # --- Save number of cycles in model ---
        cycles[i] = len(data.time)
        print_dashes()

    return cycles
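
# ----- Usage sketch (added; not from the source) -----
# Paths and run numbers are placeholders; each run directory under path_data
# must contain a '<basename><run>.lc' binary.
n_cycles = extract_lightcurves([324, 325], path_data='/data/kepler',
                               path_target='/data/analyser_input')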
Example #7
def run_analysis(batches,
                 source,
                 copy_params=False,
                 reload=True,
                 multithread=True,
                 analyse=True,
                 save_plots=True,
                 collect=True,
                 load_bursts=False,
                 load_summary=False,
                 auto_last_batch=True,
                 basename='xrb',
                 new_models=False):
    """Run all analysis steps for burst models
    """
    if new_models:
        print('Adding new models. '
              'Overriding options: reload, copy_params, auto_last_batch')
        reload = False
        copy_params = True
        auto_last_batch = False

    # assumes batches[-1] is the final batch of the grid
    all_batches = np.arange(batches[-1]) + 1
    if copy_params:
        print_title('Copying parameter tables')
        grid_tools.copy_paramfiles(batches, source)
        grid_tools.combine_grid_tables(all_batches, 'params', source=source)

    if analyse:
        print_title('Extracting burst properties from models')
        extract_batches(batches=batches,
                        source=source,
                        save_plots=save_plots,
                        load_bursts=load_bursts,
                        multithread=multithread,
                        reload=reload,
                        basename=basename,
                        load_summary=load_summary)

    if collect:
        print_title('Collecting results')
        if auto_last_batch:
            grid_table = grid_tools.load_grid_table('params',
                                                    source=source,
                                                    lampe_analyser=False)
            last_batch = grid_table.batch.iloc[-1]
        else:
            # assumes the given last batch is the last of the whole grid
            last_batch = batches[-1]

        burst_tools.combine_batch_tables(np.arange(last_batch) + 1,
                                         source=source,
                                         table_name='summary')
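
# ----- Usage sketch (added; not from the source) -----
# Placeholder values. new_models=True forces reload=False, copy_params=True
# and auto_last_batch=False, as handled at the top of run_analysis().
run_analysis(batches=[1, 2, 3], source='gs1826', new_models=True)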
Example #8
def main(source,
         version,
         n_steps,
         dump_step=None,
         n_walkers=1000,
         n_threads=8,
         restart_step=None):
    """Performs an MCMC simulation using the given source grid
    """
    pyprint.print_title(f'{source}  V{version}')
    mcmc_path = mcmc_tools.get_mcmc_path(source)
    chain0 = None

    if dump_step is None:
        dump_step = n_steps
    dump_step = int(dump_step)
    n_threads = int(n_threads)
    n_walkers = int(n_walkers)

    if (n_steps % dump_step) != 0:
        raise ValueError(
            f'n_steps={n_steps} is not divisible by dump_step={dump_step}')

    if restart_step is None:
        restart = False
        start = 0
        pos = mcmc.setup_positions(source=source,
                                   version=version,
                                   n_walkers=n_walkers)
    else:
        restart = True
        start = int(restart_step)
        chain0 = mcmc_tools.load_chain(source=source,
                                       version=version,
                                       n_walkers=n_walkers,
                                       n_steps=start)
        pos = chain0[:, -1, :]

    sampler = mcmc.setup_sampler(source=source,
                                 version=version,
                                 pos=pos,
                                 n_threads=n_threads)
    iterations = n_steps // dump_step  # exact, since divisibility was checked above
    t0 = time.time()

    # ===== do 'dump_step' steps at a time =====
    for i in range(iterations):
        step0 = start + (i * dump_step)
        step1 = start + ((i + 1) * dump_step)

        print('-' * 30)
        print(f'Doing steps: {step0} - {step1}')
        pos, lnprob, rstate = mcmc.run_sampler(sampler,
                                               pos=pos,
                                               n_steps=dump_step)
        # pos, lnprob, rstate, blob = mcmc.run_sampler(sampler, pos=pos, n_steps=dump_step)

        # ===== concatenate loaded chain to current chain =====
        if restart:
            save_chain = np.concatenate([chain0, sampler.chain], 1)
        else:
            save_chain = sampler.chain

        # === save chain state ===
        filename = mcmc_tools.get_mcmc_string(source=source,
                                              version=version,
                                              prefix='chain',
                                              n_steps=step1,
                                              n_walkers=n_walkers,
                                              extension='.npy')
        filepath = os.path.join(mcmc_path, filename)
        print(f'Saving: {filepath}')
        np.save(filepath, save_chain)

        # ===== save sampler state =====
        #  TODO: delete previous checkpoint after saving
        mcmc_tools.save_sampler_state(sampler,
                                      source=source,
                                      version=version,
                                      n_steps=step1,
                                      n_walkers=n_walkers)

    print('=' * 30)
    print('Done!')

    t1 = time.time()
    dt = t1 - t0
    time_per_step = dt / n_steps
    time_per_sample = dt / (n_walkers * n_steps)

    print(f'Total compute time: {dt:.0f} s ({dt/3600:.2f} hr)')
    print(f'Average time per step: {time_per_step:.1f} s')
    print(f'Average time per sample: {time_per_sample:.4f} s')
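
# ----- Usage sketch (added; not from the source) -----
# Placeholder values. Fresh chain: 2000 steps, checkpointed every 500.
main('gs1826', version=1, n_steps=2000, dump_step=500, n_walkers=960)

# Restarting from the saved 2000-step chain and extending by 2000 more steps:
# main('gs1826', version=1, n_steps=2000, dump_step=500, n_walkers=960,
#      restart_step=2000)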
Example #9
def print_batch(batch, source):
    print_title()
    print_title()
    print(f'Batch: {batch}')
    print_title()
    print_title()
Example #10
def print_batch(batch):
    print_title()
    print_title()
    print(f'Batch: {batch}')
    print_title()
    print_title()