Example #1
def load_lum(run, batch, source, basename='xrb', reload=False, save=True,
             silent=True, check_monotonic=True):
    """Attempts to load pre-extracted luminosity data, or load raw binary.
    Returns [time (s), luminosity (erg/s)]
    """
    def load_save(load_filepath, save_filepath):
        lum_loaded = extract_lcdata(filepath=load_filepath, silent=silent)
        if save:
            try:
                save_ascii(lum=lum_loaded, filepath=save_filepath)
            except FileNotFoundError:
                print("Can't save preloaded luminosity file, path not found")
        return lum_loaded

    pyprint.print_dashes()
    batch_str = grid_strings.get_batch_string(batch, source)
    analysis_path = grid_strings.get_source_subdir(source, 'burst_analysis')
    input_path = os.path.join(analysis_path, batch_str, 'input')

    presaved_filepath = os.path.join(input_path, f'{batch_str}_{run}.txt')
    run_str = grid_strings.get_run_string(run, basename)
    model_path = grid_strings.get_model_path(run, batch, source, basename)
    binary_filepath = os.path.join(model_path, f'{run_str}.lc')
    print(binary_filepath)
    if reload:
        print('Deleting preloaded file, reloading binary file')
        subprocess.run(['rm', '-f', presaved_filepath])
        try:
            lum = load_save(binary_filepath, presaved_filepath)
        except FileNotFoundError:
            print('XXXXXXX lumfile not found. Skipping XXXXXXX')
            return
    else:
        try:
            lum = load_ascii(presaved_filepath)
        except (FileNotFoundError, OSError):
            print('No preloaded file found. Reloading binary')
            try:
                lum = load_save(binary_filepath, presaved_filepath)
            except FileNotFoundError:
                print('XXXXXXX lumfile not found. Skipping XXXXXXX')
                return

    if check_monotonic:
        dt = np.diff(lum[:, 0])
        if np.any(dt < 0):
            pyprint.print_warning('Lightcurve timesteps are not in order. '
                                  + 'Something has gone horribly wrong!', n=80)
            raise RuntimeError('Lightcurve timesteps are not in order')
    pyprint.print_dashes()
    return lum
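
A minimal usage sketch (the source name, batch, and run numbers below are hypothetical; assumes the grid_strings path helpers are configured):

lum = load_lum(run=2, batch=5, source='biggrid2', reload=True)
if lum is not None:  # load_lum returns None when no lumfile is found
    time, luminosity = lum[:, 0], lum[:, 1]
    print(f'Loaded {len(time)} timesteps')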
Example #2
def print_summary(runs,
                  batches,
                  source,
                  basename='xrb',
                  skip=1,
                  redshift=1.259,
                  **kwargs):
    """
    prints summary analyser output of model
    """
    source = grid_strings.source_shorthand(source=source)
    batches = grid_tools.expand_batches(batches, source)
    path = kwargs.get('path', GRIDS_PATH)
    analyser_path = os.path.join(path, 'analyser', source)
    runs = grid_tools.expand_runs(runs)

    for batch in batches:
        batch_name = grid_strings.get_batch_string(batch, source)
        output_str = f'{batch_name}{OUTPUT_SUFFIX}'

        print_title(f'Batch {batch}')

        summ_filepath = os.path.join(analyser_path, output_str, 'summ.csv')
        summ = pd.read_csv(summ_filepath)
        summ_names = np.genfromtxt(summ_filepath,
                                   delimiter="'",
                                   usecols=[1],
                                   dtype='str')

        for run in runs:
            run_str = grid_strings.get_run_string(run, basename)
            idx = np.where(summ_names == run_str)[0][0]  # index of row for this run
            N = int(summ['num'][idx])  # Number of bursts

            dt = summ['tDel'][idx] * redshift / 3600
            u_dt = summ['uTDel'][idx] * redshift / 3600

            # ===== Print info =====
            print(basename + str(run))
            print(f'Total bursts = {N}')
            print(f'Excluding first {skip} bursts')
            print(f'Using redshift (1+z)={redshift:.3f}')
            print(f'Delta_t = {dt:.2f} (hr)')
            print(f'u(Delta_t) = {u_dt:.2f} (hr)')
            print_dashes()

    # Note: only the values from the last run of the last batch are returned
    return {'N': N, 'dt': dt, 'u_dt': u_dt}
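
A hypothetical call (batch/run numbers and source are placeholders; assumes GRIDS_PATH points at an existing analyser directory tree). Note that the return value reflects only the last run processed:

out = print_summary(runs=[1, 2, 3], batches=10, source='biggrid2', skip=1)
print(out['N'], out['dt'], out['u_dt'])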
Example #3
def extract_lightcurves(runs, path_data, path_target, basename='xrb'):
    """========================================================
    Loads Kepler .lum binaries and saves txt file of [time, luminosity, radius] for input to kepler_analyser
    Returns No. of cycles for each run
    ========================================================
    runs = []         : list of run numbers, eg. [324,325,340]
    path_data = str   : path to directory where kepler output folders are located (include trailing slash)
    path_target = str : path to directory where output .txt files will be written (include trailing slash)
    in_name = str     : base filename of input, eg. 'run' for run324
    out_name = str    : base filename of output, eg. 'xrb' for xrb324

    NOTE: kepler_analyser overwrites summ.csv and db.csv, should put path_target as new directory
    ========================================================"""

    print_title()
    print(f'Loading binaries from {path_data}')
    print(f'Writing txt files to {path_target}')
    print_title()

    cycles = np.zeros(len(runs), dtype=int)

    for i, run in enumerate(runs):
        rname = grid_strings.get_run_string(run, basename)
        lcfile = f'{rname}.lc'
        savefile = f'{rname}.data'

        print(f'Loading kepler binary for {rname}')
        lcpath = os.path.join(path_data, rname, lcfile)
        data = lcdata.load(lcpath, graphical=False)

        print('Writing txt file')
        savepath = os.path.join(path_target, savefile)
        data.write_lc_txt(savepath)

        # --- Save number of cycles in model ---
        cycles[i] = len(data.time)
        print_dashes()

    return cycles
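
A minimal sketch of a call (paths are placeholders; assumes the lcdata and grid_strings modules are importable):

cycles = extract_lightcurves(runs=[324, 325, 340],
                             path_data='/path/to/kepler/output',
                             path_target='/path/to/analyser/input')
print(cycles)  # number of cycles per run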
Example #4
def gravity_summary(r, m):
    """Prints summary gravitational properties given R, M
    """
    redshift = get_redshift(r=r, m=m)
    phi_newton, phi_gr = get_potentials(r=r, m=m)
    g_newton, g_gr = get_accelerations(r=r, m=m)

    print_dashes()
    print('R (km),  M (Msun)')
    print(f'{r:.2f},   {m:.2f}')

    print_dashes()
    print('g (Newtonian)')
    print(f'{g_newton:.3e}')

    print_dashes()
    print('g (GR)')
    print(f'{g_gr:.3e}')

    print_dashes()
    print('(1+z) (GR)')
    print(f'{redshift:.3f}')

    print_dashes()
    print('potential (Newtonian, erg/g)')
    print(f'{phi_newton:.3e}')

    print_dashes()
    print('potential (GR, erg/g)')
    print(f'{phi_gr:.3e}')

    return g_newton, g_gr, phi_newton, phi_gr
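
For reference, a self-contained sketch of the standard Schwarzschild surface relations that get_redshift and get_accelerations presumably implement (cgs constants hard-coded; the conventions of the real helpers may differ):

import numpy as np

G = 6.674e-8      # gravitational constant (cgs)
C = 2.998e10      # speed of light (cm/s)
MSUN = 1.989e33   # solar mass (g)

def gravity_sketch(r, m):
    """r in km, m in Msun; returns (1+z), g_newton, g_gr (cgs)"""
    r_cm, m_g = r * 1e5, m * MSUN
    zfactor = 1 / np.sqrt(1 - 2 * G * m_g / (r_cm * C**2))  # (1+z)
    g_newton = G * m_g / r_cm**2
    g_gr = g_newton * zfactor  # proper acceleration at the surface
    return zfactor, g_newton, g_gr

print(gravity_sketch(r=10, m=1.4))  # (1+z) is roughly 1.3 for a canonical NS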
Example #5
def check_finished(batches,
                   source,
                   efficiency=True,
                   show='all',
                   basename='xrb',
                   extension='z1',
                   **kwargs):
    """Checks which running models are finished

    t_end      =  flt  : end-time of the simulations
    basename   =  str  : prefix for individual model names
    extension  =  str  : suffix of kepler dump
    efficiency = bool  : print time per 1000 steps
    all        = str   : which models to show, based on their progress,
                    one of (all, finished, not_finished, started, not_started)
    (path      =  str  : path to location of model directories)

    Notes
    -----
    timeused gets reset when a model is resumed,
        resulting in unreliable values in efficiency
    """
    def progress_string(batch, basename, run, progress, elapsed, remaining,
                        eff_str, eff2_str):
        string = [
            f'{batch}    {basename}{run:02}  {progress:.0f}%   ' +
            f'{elapsed:.0f}hrs     ~{remaining:.0f}hrs,    ' +
            f'{eff_str},    {eff2_str}'
        ]
        return string

    def shorthand(string):
        map_ = {
            'a': 'all',
            'ns': 'not_started',
            'nf': 'not_finished',
            'f': 'finished'
        }
        if string not in map_:
            if string not in map_.values():
                raise ValueError("invalid 'show' parameter")
            return string
        else:
            return map_[string]

    source = grid_strings.source_shorthand(source=source)
    show = shorthand(show)
    batches = expand_batches(batches=batches, source=source)

    print_strings = []
    print_idx = {
        'finished': [],
        'not_finished': [],
        'started': [],
        'not_started': []
    }
    for batch in batches:
        n_runs = get_nruns(batch=batch, source=source)
        print_strings += [f'===== Batch {batch} =====']

        for run in range(1, n_runs + 1):
            run_str = grid_strings.get_run_string(run, basename)
            run_path = grid_strings.get_model_path(run,
                                                   batch,
                                                   source,
                                                   basename=basename)
            string_idx = len(print_strings)

            filename = f'{run_str}{extension}'
            filepath = os.path.join(run_path, filename)

            # ===== get t_end from cmd file =====
            cmd_file = f'{run_str}.cmd'
            cmd_filepath = os.path.join(run_path, cmd_file)

            t_end = None
            try:
                with open(cmd_filepath) as f:
                    lines = f.readlines()

                marker = '@time>'
                for line in lines[-10:]:
                    if marker in line:
                        # split on the marker: str.strip('@time>') would strip
                        # any of those characters, mangling scientific notation
                        t_end = float(line.split(marker)[-1])
                        break

                kmodel = kepdump.load(filepath)
                progress = kmodel.time / t_end
                timeused = kmodel.timeused[0][-1]  # CPU time elapsed
                ncyc = kmodel.ncyc  # No. of time-steps
                remaining = (timeused / 3600) * (1 - progress) / progress

                if efficiency:
                    eff = (timeused / (ncyc / 1e4)) / 3600  # Time per 1e4 cyc
                    eff2 = timeused / kmodel.time
                    eff_str = f'{eff:.1f} hr/10Kcyc'
                    eff2_str = f'{eff2:.2f} walltime/modeltime'
                else:
                    eff_str = ''
                    eff2_str = ''

                # ===== Tracking model progress =====
                print_idx['started'] += [string_idx]

                if round(remaining) == 0:  # effectively no time remaining
                    print_idx['finished'] += [string_idx]
                else:
                    print_idx['not_finished'] += [string_idx]
            except Exception:  # dump or cmd file missing/unreadable: not started
                progress = 0
                timeused = 0
                remaining = 0
                eff_str = ''
                eff2_str = ''

                print_idx['not_started'] += [string_idx]

            progress *= 100
            elapsed = timeused / 3600
            print_strings += progress_string(batch=batch,
                                             basename=basename,
                                             run=run,
                                             progress=progress,
                                             elapsed=elapsed,
                                             remaining=remaining,
                                             eff_str=eff_str,
                                             eff2_str=eff2_str)

    print_idx['all'] = np.arange(len(print_strings))

    print_dashes()
    print('Batch  Model  progress  elapsed  remaining')
    for i, string in enumerate(print_strings):
        if i in print_idx[show]:
            print(string)
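
A hypothetical invocation (batch numbers and source are placeholders; assumes kepdump and the grid path helpers are available):

check_finished(batches=[5, 6, 7], source='biggrid2', show='nf')  # only unfinished models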
Example #6
def create_batch(batch, dv, source,
                 params={'x': [0.6, 0.8], 'z': [0.01, 0.02],
                         'tshift': [0.0], 'accrate': [0.05],
                         'qb': [0.125], 'acc_mult': [1.0], 'qnuc': [5.0],
                         'qb_delay': [0.0], 'mass': [1.4],
                         'accmass': [1e16], 'accdepth': [1e20]},
                 lburn=1, t_end=1.3e5, exclude={}, basename='xrb',
                 walltime=96, qos='normal', nstop=10000000, nsdump=500,
                 auto_t_end=True, notes='No notes given', debug=False,
                 nbursts=20, parallel=False, ntasks=8, kgrid=None,
                 nuc_heat=True, setup_test=False, predict_qnuc=False,
                 grid_version=None, qnuc_source='heat', minzone=51,
                 zonermax=10, zonermin=-1, thickfac=0.001,
                 substrate='fe54', substrate_off=True, adapnet_filename=None,
                 bdat_filename=None, ibdatov=1, params_full=None):
    """Generates a grid of Kepler models, containing n models over the range x

    Parameters
    ---------
    batch : int
    params : {}
        specify model parameters. If variable: give range
    dv : {}
        stepsize in variables (if ==-1: keep param as-is)
    exclude : {}
        specify any parameter values to exclude from grid
    mass (key of params) : [flt]
        mass of NS (in Msun). Only changes geemult (gravity multiplier)
    qos : str
        quality of service (slurm), one of ['general', 'medium', 'short']
    auto_t_end : bool
        auto-choose t_end based on predicted recurrence time
    parallel : bool
        utilise parallel independent kepler tasks
    ntasks : int
        no. of tasks in each parallel job (split up by this)
    kgrid : Kgrid
        pre-loaded Kgrid object, optional (avoids reloading)
    """
    # TODO: WRITE ALL PARAM DESCRIPTIONS
    # TODO: set default values for params
    # TODO: Overhaul/tidy up
    # TODO: use pd table instead of dicts of arrays
    source = grid_strings.source_shorthand(source=source)
    mass_ref = 1.4  # reference NS mass (Msun)
    radius_ref = 10  # default NS radius (km)

    print_batch(batch=batch, source=source)

    if params_full is None:
        params = dict(params)
        params_expanded, var = expand_params(dv, params)

        # ===== Cut out any excluded values =====
        cut_params(params=params_expanded, exclude=exclude)
        print_grid_params(params_expanded)

        params_full = grid_tools.enumerate_params(params_expanded)

    n_models = len(params_full['x'])

    if parallel and (n_models % ntasks != 0):
        raise ValueError(f'n_models ({n_models}) not divisible by ntasks ({ntasks})')

    if kgrid is None:
        print('No kgrid provided. Loading:')
        kgrid = grid_analyser.Kgrid(load_lc=False, source=source)

    params_full['y'] = 1 - params_full['x'] - params_full['z']  # helium-4 values
    params_full['geemult'] = params_full['mass'] / mass_ref  # Gravity multiplier

    gravities = gravity.get_acceleration_newtonian(r=radius_ref,
                                                   m=params_full['mass']).value
    params_full['radius'] = np.full(n_models, radius_ref)
    params_full['gravity'] = gravities

    # TODO: rewrite properly (use tables)
    if predict_qnuc:
        if len(params['qnuc']) > 1:
            raise ValueError('Cannot provide multiple "qnuc" in params if predict_qnuc=True')

        linr_qnuc = qnuc_tools.linregress_qnuc(qnuc_source, grid_version=grid_version)
        for i in range(n_models):
            params_qnuc = {}
            # NOTE: param_list is not defined in this function; it must exist at
            # module scope, naming the parameters the qnuc regression depends on
            for param in param_list:
                params_qnuc[param] = params_full[param][i]
            params_full['qnuc'][i] = qnuc_tools.predict_qnuc(params=params_qnuc,
                                                             source=qnuc_source,
                                                             linr_table=linr_qnuc)

    # ===== Create top grid folder =====
    batch_model_path = grid_strings.get_batch_models_path(batch, source)
    grid_tools.try_mkdir(batch_model_path)

    # Directory to keep MonARCH logs and sbatch files
    logpath = grid_strings.get_source_subdir(batch_model_path, 'logs')
    grid_tools.try_mkdir(logpath)

    # ===== Write parameter table MODELS.txt and NOTES.txt=====
    write_model_table(n=n_models, params=params_full, lburn=lburn, path=batch_model_path)
    filepath = os.path.join(batch_model_path, 'NOTES.txt')
    with open(filepath, 'w') as f:
        f.write(notes)

    job_runs = []
    if parallel:
        n_jobs = int(n_models / ntasks)
        for i in range(n_jobs):
            start = i * ntasks
            job_runs += [[start + 1, start + ntasks]]
    else:
        job_runs += [[1, n_models]]

    print_dashes()
    for runs in job_runs:
        for restart in [True, False]:
            kepler_jobscripts.write_submission_script(run0=runs[0], run1=runs[1],
                                                      restart=restart, batch=batch,
                                                      source=source, basename=basename,
                                                      path=logpath, qos=qos,
                                                      walltime=walltime,
                                                      parallel=parallel, debug=debug,
                                                      adapnet_filename=adapnet_filename,
                                                      bdat_filename=bdat_filename)

    # ===== Directories and templates for each model =====
    for i in range(n_models):
        # ==== Create directory tree ====
        print_dashes()
        model = i + 1
        run_path = grid_strings.get_model_path(model, batch, source, basename=basename)

        # ==== Create task directory ====
        grid_tools.try_mkdir(run_path)

        # ==== Write burn file, set initial composition ====
        x0 = params_full['x'][i]
        z0 = params_full['z'][i]
        kepler_files.write_rpabg(x0, z0, run_path, substrate=substrate)

        # ==== Create model generator file ====
        accrate0 = params_full['accrate'][i]

        if auto_t_end:
            mdot = params_full['accrate'][i] * params_full['acc_mult'][i]
            rate_params = {}
            for param in ('x', 'z', 'qb', 'mass'):
                rate_params[param] = params_full[param][i]
            fudge = 0.5  # extra time to ensure complete final burst
            tdel = kgrid.predict_recurrence(accrate=mdot, params=rate_params)
            t_end = (nbursts + fudge) * tdel
            print(f'Using predicted dt={tdel/3600:.1f} hr')
            if t_end < 0:
                print('WARN! negative dt predicted. Defaulting to nbursts * 1.5 hr')
                t_end = nbursts * 1.5 * 3600

        run = i + 1
        print(f'Writing genfile for xrb{run}')
        header = f'This generator belongs to model: {source}_{batch}/{basename}{run}'

        accdepth = params_full['accdepth'][i]
        if (params_full['x'][i] > 0.0) and (accdepth > 1e20):
            print(f"!!!WARNING!!!: accdepth of {accdepth:.0e} may be too deep for" +
                  " models accreting hydrogen")
        print(f'Using accdepth = {accdepth:.1e}')

        kepler_files.write_genfile(h1=params_full['x'][i], he4=params_full['y'][i],
                                   n14=params_full['z'][i], qb=params_full['qb'][i],
                                   acc_mult=params_full['acc_mult'][i], qnuc=params_full['qnuc'][i],
                                   lburn=lburn, geemult=params_full['geemult'][i],
                                   path=run_path, t_end=t_end, header=header,
                                   accrate0=accrate0, accdepth=accdepth,
                                   accmass=params_full['accmass'][i],
                                   nsdump=nsdump, nstop=nstop,
                                   nuc_heat=nuc_heat, setup_test=setup_test, cnv=0,
                                   minzone=minzone, zonermax=zonermax, zonermin=zonermin,
                                   thickfac=thickfac, substrate_off=substrate_off,
                                   ibdatov=ibdatov)
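
A hypothetical call (source name and parameter values are placeholders; assumes the grid_analyser, kepler_files, and kepler_jobscripts modules are configured):

create_batch(batch=12,
             dv={'x': 0.1, 'z': 0.01},
             source='biggrid2',
             params={'x': [0.6, 0.8], 'z': [0.01, 0.02], 'tshift': [0.0],
                     'accrate': [0.05], 'qb': [0.125], 'acc_mult': [1.0],
                     'qnuc': [5.0], 'qb_delay': [0.0], 'mass': [1.4],
                     'accmass': [1e16], 'accdepth': [1e20]},
             nbursts=30, walltime=48)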
Example #7
def create_batch(batch,
                 source,
                 params,
                 dv,
                 t_end=1.3e5,
                 exclude=None,
                 basename='xrb',
                 walltime=96,
                 auto_t_end=True,
                 notes='No notes given',
                 nbursts=20,
                 kgrid=None,
                 nuc_heat=True,
                 setup_test=False,
                 auto_qnuc=False,
                 grid_version=None,
                 qnuc_source='heat',
                 substrate='fe54',
                 substrate_off=True,
                 adapnet_filename=None,
                 bdat_filename=None,
                 params_full=None,
                 numerical_params=None,
                 scratch_file_sys=False):
    """Generates a grid of Kepler models, containing n models over the range x

    Parameters
    ---------
    batch : int
    source : str
    params : {}
        specify model parameters. If variable: give range
    dv : {}
        stepsize in variables (if ==-1: keep param as-is)
    exclude : {}
        specify any parameter values to exclude from grid
    mass (key of params) : [flt]
        mass of NS (in Msun). Only changes geemult (gravity multiplier)
    auto_t_end : bool
        auto-choose t_end based on predicted recurrence time
    kgrid : Kgrid
        pre-loaded Kgrid object, optional (avoids reloading)
    t_end : float (optional)
    basename : str (optional)
    walltime : int (optional)
    notes : str (optional)
    nbursts : int (optional)
    auto_qnuc : bool (optional)
    nuc_heat : bool (optional)
    setup_test : bool (optional)
    grid_version : int (optional)
    qnuc_source : str (optional)
    substrate : str (optional)
    substrate_off : bool (optional)
    adapnet_filename : str (optional)
    bdat_filename : str (optional)
    params_full : {} (optional)
    numerical_params : {} (optional)
        Overwrite default numerical kepler parameters (e.g. nsdump, zonermax, lburn).
        For all parameters: see 'numerical_params' in config/default.ini
    scratch_file_sys : bool (optional)
        whether to use the scratch file system on ICER cluster
    """
    # TODO:
    #   - WRITE ALL PARAM DESCRIPTIONS
    #   - Overhaul/tidy up
    #   - use pd table instead of dicts of arrays

    print_batch(batch=batch)
    source = grid_strings.source_shorthand(source=source)
    mass_ref = 1.4  # reference NS mass (Msun)
    radius_ref = 10  # default NS radius (km)

    specified = {
        'params': params,
        'dv': dv,
        'numerical_params': numerical_params
    }

    if specified['numerical_params'] is None:
        specified['numerical_params'] = {}

    config = grid_tools.setup_config(specified=specified, source=source)
    # TODO: print numerical_params being used

    if params_full is None:
        params_expanded, var = expand_params(params=config['params'],
                                             dv=config['dv'])
        params_full = exclude_params(params_expanded=params_expanded,
                                     exclude=exclude)

    n_models = len(params_full['x'])

    if kgrid is None:
        print('No kgrid provided. Loading default:')
        kgrid = grid_analyser.Kgrid(load_lc=False,
                                    linregress_burst_rate=True,
                                    source=source)

    params_full['y'] = 1 - params_full['x'] - params_full['z']  # helium-4 values
    params_full['geemult'] = params_full['mass'] / mass_ref  # gravity multiplier

    gravities = gravity.get_acceleration_newtonian(r=radius_ref,
                                                   m=params_full['mass']).value
    params_full['radius'] = np.full(n_models, radius_ref)
    params_full['gravity'] = gravities

    if auto_qnuc:
        predict_qnuc(params_full=params_full,
                     qnuc_source=qnuc_source,
                     grid_version=grid_version)

    # ===== Create top grid folder =====
    batch_model_path = grid_strings.get_batch_models_path(batch, source)
    grid_tools.try_mkdir(batch_model_path)

    # Directory to keep MonARCH logs and sbatch files
    logpath = grid_strings.get_source_subdir(batch_model_path, 'logs')
    grid_tools.try_mkdir(logpath)

    # ===== Write parameter table MODELS.txt and NOTES.txt=====
    write_model_table(n=n_models, params=params_full, path=batch_model_path)
    filepath = os.path.join(batch_model_path, 'NOTES.txt')
    with open(filepath, 'w') as f:
        f.write(notes)

    # ===== Write jobscripts for submission on clusters =====
    print_dashes()
    kepler_jobs.write_both_jobscripts(run0=1,
                                      run1=n_models,
                                      batch=batch,
                                      source=source,
                                      basename=basename,
                                      path=logpath,
                                      walltime=walltime,
                                      adapnet_filename=adapnet_filename,
                                      bdat_filename=bdat_filename,
                                      scratch_file_sys=scratch_file_sys)

    # ===== Directories and templates for each model =====
    for i in range(n_models):
        # ==== Create directory tree ====
        print_dashes()
        model = i + 1
        run_path = grid_strings.get_model_path(model,
                                               batch,
                                               source,
                                               basename=basename)

        # ==== Create task directory ====
        grid_tools.try_mkdir(run_path)

        # ==== Write burn file, set initial composition ====
        x0 = params_full['x'][i]
        z0 = params_full['z'][i]
        kepler_files.write_rpabg(x0, z0, run_path, substrate=substrate)

        # ==== Create model generator file ====
        if auto_t_end:
            mdot = params_full['accrate'][i] * params_full['acc_mult'][i]
            rate_params = {}
            for param in ('x', 'z', 'qb', 'mass'):
                rate_params[param] = params_full[param][i]
            fudge = 0.5  # extra time to ensure complete final burst
            tdel = kgrid.predict_recurrence(accrate=mdot, params=rate_params)
            t_end = (nbursts + fudge) * tdel
            print(f'Using predicted dt={tdel/3600:.1f} hr')
            if t_end < 0:
                print('WARN! negative dt predicted. Defaulting to nbursts * 1.5 hr')
                t_end = nbursts * 1.5 * 3600

        run = i + 1
        print(f'Writing genfile for xrb{run}')
        header = f'This generator belongs to model: {source}_{batch}/{basename}{run}'

        accdepth = params_full['accdepth'][i]
        if (params_full['x'][i] > 0.0) and (accdepth > 1e20):
            print(
                f"!!!WARNING!!!: accdepth of {accdepth:.0e} may be too deep for"
                + " models accreting hydrogen")
        print(f'Using accdepth = {accdepth:.1e}')

        kepler_files.write_genfile(h1=params_full['x'][i],
                                   he4=params_full['y'][i],
                                   n14=params_full['z'][i],
                                   qb=params_full['qb'][i],
                                   acc_mult=params_full['acc_mult'][i],
                                   qnuc=params_full['qnuc'][i],
                                   geemult=params_full['geemult'][i],
                                   accrate0=params_full['accrate'][i],
                                   accmass=params_full['accmass'][i],
                                   accdepth=params_full['accdepth'][i],
                                   path=run_path,
                                   t_end=t_end,
                                   header=header,
                                   nuc_heat=nuc_heat,
                                   setup_test=setup_test,
                                   substrate_off=substrate_off,
                                   numerical_params=config['numerical_params'])
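
A hypothetical call to this newer variant, showing a numerical_params override (values are placeholders; see 'numerical_params' in config/default.ini for the accepted keys):

create_batch(batch=12,
             source='biggrid2',
             params={'x': [0.7], 'z': [0.02], 'accrate': [0.1],
                     'qb': [0.1], 'mass': [1.4]},
             dv={'x': -1, 'z': -1},
             numerical_params={'nsdump': 1000, 'lburn': 1},
             scratch_file_sys=False)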