Example #1
def combine_tables(source,
                   burst_analyser=True,
                   add_radius=True,
                   radius=10,
                   add_gravity=True):
    """Combines summ and params tables
    """
    param_table = load_grid_table('params', source=source)
    summ_table = load_grid_table('summ',
                                 source=source,
                                 burst_analyser=burst_analyser)

    if len(param_table) != len(summ_table):
        raise RuntimeError('param and summ tables are different lengths')

    if add_radius:
        param_table['radius'] = radius
    if add_gravity:
        masses = np.array(param_table['mass'])
        radii = np.array(param_table['radius'])
        gravities = gravity.get_acceleration_newtonian(r=radii, m=masses)
        param_table['gravity'] = gravities.value

    print('Combining summ and params tables')
    summ_table.drop(['batch', 'run'], axis=1, inplace=True)
    combined_table = pd.concat([param_table, summ_table], axis=1)

    path = grid_strings.get_source_path(source)
    filename = f'grid_table_{source}.txt'
    filepath = os.path.join(path, filename)
    write_pandas_table(combined_table, filepath)
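A hypothetical call for illustration only (the source name 'biggrid2' is a placeholder for whatever grid source the loaders above recognise):

combine_tables('biggrid2', burst_analyser=True, add_radius=True, radius=10,
               add_gravity=True)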
Example #2
def plot_posteriors(chain=None, discard=10000):
    """Plots posterior distributions for an MCMC chain of burst parameters
    """
    if chain is None:
        chain = mcmc_tools.load_chain('sim_test',
                                      n_walkers=960,
                                      n_steps=20000,
                                      version=5)
    params = [
        r'Accretion rate ($\dot{M} / \dot{M}_\text{Edd}$)', 'Hydrogen',
        r'$Z_{\text{CNO}}$', r'$Q_\text{b}$ (MeV nucleon$^{-1}$)',
        'gravity ($10^{14}$ cm s$^{-2}$)', 'redshift (1+z)', 'distance (kpc)',
        'inclination (degrees)'
    ]

    # rescale the dimensionless gravity parameter into units of 1e14 cm/s^2
    g = gravity.get_acceleration_newtonian(10, 1.4).value / 1e14
    chain[:, :, 4] *= g

    cc = chainconsumer.ChainConsumer()
    cc.add_chain(chain[:, discard:, :].reshape((-1, 8)))
    cc.configure(kde=False, smooth=0)

    fig = cc.plotter.plot_distributions(display=True)

    for i, p in enumerate(params):
        fig.axes[i].set_title('')
        fig.axes[i].set_xlabel(p)  #, fontsize=10)

    plt.tight_layout()
    return fig
Example #3
def get_mass_radius(chain, discard, source, version, cap=None):
    """Returns GR mass and radius given a chain containing gravity and redshift

    Returns ndarray of equivalent form to input chain (after slicing discard/cap)
    """
    ref_mass = 1.4
    ref_radius = 10

    chain = mcmc_tools.slice_chain(chain, discard=discard, cap=cap)
    n_walkers, n_steps, n_dimensions = chain.shape
    chain_flat = chain.reshape((-1, n_dimensions))
    pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')

    if 'm_gr' in pkeys:
        # 'g' is gravity relative to the reference value; Newtonian g scales
        # linearly with mass at fixed radius, so it maps directly onto a mass
        mass_nw = ref_mass * chain_flat[:, pkeys.index('g')]
        mass_gr = chain_flat[:, pkeys.index('m_gr')]
        m_ratio = mass_gr / mass_nw
        xi = gravity.gr_corrections(r=ref_radius, m=mass_nw, phi=m_ratio)[0]
        radius_gr = ref_radius * xi
    else:
        redshift = chain_flat[:, pkeys.index('redshift')]
        g_reference = gravity.get_acceleration_newtonian(r=ref_radius,
                                                         m=ref_mass)
        g = chain_flat[:, pkeys.index('g')] * g_reference
        mass_gr, radius_gr = gravity.get_mass_radius(g=g, redshift=redshift)
        mass_gr = mass_gr.value
        radius_gr = radius_gr.value

    # reshape back into chain
    new_shape = (n_walkers, n_steps)
    mass_reshape = mass_gr.reshape(new_shape)
    radius_reshape = radius_gr.reshape(new_shape)

    return np.dstack((mass_reshape, radius_reshape))
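For reference, a minimal sketch of the inversion that gravity.get_mass_radius presumably performs, assuming the standard GR surface relations g = GM(1+z)/R^2 and 1+z = (1 - 2GM/(R c^2))^(-1/2); this is an illustrative stand-in, not the library's actual implementation:

import astropy.constants as const
import astropy.units as u

def mass_radius_from_g_redshift(g_cgs, redshift):
    """Illustrative inversion: g_cgs in cm/s^2, redshift given as (1+z).
    Returns (mass in Msun, radius in km)."""
    g = g_cgs * u.cm / u.s**2
    zfac = 1 - redshift**(-2)                    # equals 2GM / (R c^2)
    r = const.c**2 * zfac * redshift / (2 * g)   # from GM/R = c^2*zfac/2 and GM/R^2 = g/(1+z)
    m = g * r**2 / (const.G * redshift)
    return m.to(u.M_sun).value, r.to(u.km).value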
Example #4
def get_gravity_chain(chain, discard, source, version, cap=None, r_nw=10):
    """Returns flat chain of surface gravity (g) samples for a given MCMC chain
        Note: returns in units of 1e14 cm/s^2
    """
    mass_nw_chain = get_param_chain(chain, param='m_nw', discard=discard,
                                    source=source, version=version, cap=cap)
    g = gravity.get_acceleration_newtonian(r=r_nw, m=mass_nw_chain)
    return g.value/1e14
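The Newtonian relation assumed here is g = GM/r^2, with mass in Msun and radius in km. A sketch of that relation with astropy units (illustrative; not necessarily the exact implementation of gravity.get_acceleration_newtonian):

import astropy.constants as const
import astropy.units as u

def newtonian_gravity(r_km, m_msun):
    """Illustrative Newtonian surface gravity, returned in cm/s^2"""
    g = const.G * (m_msun * u.M_sun) / (r_km * u.km)**2
    return g.to(u.cm / u.s**2)

newtonian_gravity(10, 1.4)   # ~1.86e14 cm/s^2, consistent with the 1e14 scaling above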
Example #5
def setup_master_chainconsumer(source, master_version, epoch_versions, n_steps, discard,
                               n_walkers=1000, epoch_discard=None, epoch_n_steps=None,
                               epoch_n_walkers=None, cap=None, sigmas=None, cloud=None,
                               compressed=False, fontsize=16, alt_params=True,
                               unit_labels=True):
    """Setup multiple MCMC chains, including multi-epoch and single-epochs

    alt_params : bool
        Replace parameters with forms used in paper
    """
    if epoch_discard is None:
        epoch_discard = discard
    if epoch_n_steps is None:
        epoch_n_steps = n_steps
    if epoch_n_walkers is None:
        epoch_n_walkers = n_walkers

    cc = setup_epochs_chainconsumer(source, versions=epoch_versions, n_steps=epoch_n_steps,
                                    discard=epoch_discard, n_walkers=epoch_n_walkers,
                                    cap=cap, sigmas=sigmas, cloud=cloud, compressed=False,
                                    alt_params=alt_params, unit_labels=unit_labels)

    # ===== Setup master chain =====
    master_mc_v = mcmc_versions.McmcVersion(source, version=master_version)

    master_chain = load_chain(source, version=master_version, n_steps=n_steps,
                              n_walkers=n_walkers, compressed=compressed)
    master_chain_sliced = slice_chain(master_chain, discard=discard, cap=cap,
                                      flatten=True)

    params = list(master_mc_v.param_keys)

    #  TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    #       quick and dirty patch! To fix
    ref_m = 1.4
    ref_g = gravity.get_acceleration_newtonian(r=10, m=ref_m).value / 1e14
    g_idx = 8
    m_idx = 9

    if alt_params:
        master_chain_sliced[:, g_idx] *= ref_g / ref_m
        params[g_idx] = 'g'
        params[m_idx] = 'M'
    #  TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    formatted_params = plot_tools.convert_mcmc_labels(params, unit_labels=unit_labels)
    cc.add_chain(master_chain_sliced, parameters=formatted_params, color='black',
                 name='Multi-epoch')
    cc.configure(sigmas=sigmas, cloud=cloud, kde=False, smooth=False,
                 label_font_size=fontsize, tick_font_size=fontsize-2)

    return cc
Example #6
def get_mass_radius_point(params, source, version):
    """Returns the mass, radius for a single walker point
    """
    ref_mass = 1.4
    ref_radius = 10
    g_reference = gravity.get_acceleration_newtonian(r=ref_radius, m=ref_mass)

    pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')

    redshift = params[pkeys.index('redshift')]
    g = params[pkeys.index('g')] * g_reference
    mass, radius = gravity.get_mass_radius(g=g, redshift=redshift)
    return mass.value, radius.value
Example #7
def setup_epochs_chainconsumer(source, versions, n_steps, discard, n_walkers=1000,
                               cap=None, sigmas=None, cloud=None, compressed=False,
                               alt_params=True, unit_labels=True):
    """Setup multiple MCMC chains fit to individual epochs

    chains : [n_epochs]
        list of raw numpy chains
    param_keys : [n_epochs]
        list of parameters for each epoch chain
    discard : int
    cap : int (optional)
    sigmas : [] (optional)
    cloud : bool (optional)
    """
    param_keys = load_multi_param_keys(source, versions=versions)
    chains = load_multi_chains(source, versions=versions, n_steps=n_steps,
                               n_walkers=n_walkers, compressed=compressed)

    #  TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    #       quick and dirty patch! To fix
    ref_m = 1.4
    ref_g = gravity.get_acceleration_newtonian(r=10, m=ref_m).value / 1e14
    g_idx = 4
    m_idx = 5
    if alt_params:
        for params in param_keys:
            params[g_idx] = 'g'
            params[m_idx] = 'M'
    #  TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    chains_flat = []
    for chain in chains:
        sliced_flat = slice_chain(chain, discard=discard, cap=cap, flatten=True)

        # TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
        if alt_params:
            sliced_flat[:, g_idx] *= ref_g / ref_m
        # TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

        chains_flat += [sliced_flat]

    cc = chainconsumer.ChainConsumer()

    for i, chain_flat in enumerate(chains_flat):
        epoch = mcmc_versions.get_parameter(source, version=versions[i], parameter='epoch')
        param_labels = plot_tools.convert_mcmc_labels(param_keys[i],
                                                      unit_labels=unit_labels)
        cc.add_chain(chain_flat, parameters=param_labels, name=str(epoch))

    cc.configure(sigmas=sigmas, cloud=cloud, kde=False, smooth=0)
    return cc
Example #8
        def gr_factors():
            mass_nw = self.reference_mass * params[self.param_idxs['g']]

            if self.has_m_gr:
                mass_gr = params[self.param_idxs['m_gr']]
                m_ratio = mass_gr / mass_nw
                red = gravity.gr_corrections(r=self.reference_radius, m=mass_nw,
                                             phi=m_ratio)[1]
            else:
                red = params[self.param_idxs['redshift']]
                g_nw = gravity.get_acceleration_newtonian(r=self.reference_radius, m=mass_nw)
                mass_gr = gravity.mass(g=g_nw, redshift=red).value
                m_ratio = mass_gr / mass_nw

            return m_ratio, red
Example #9
def get_burstfit_params(params, r_nw=10):
    """Converts Duncan's params to those for input to BurstFit
    """
    out_params = dict()

    xi_b, xi_p = anisotropy_tools.anisotropy.anisotropy(params['inclination'] *
                                                        units.deg)
    g = gravity.get_acceleration_newtonian(r=r_nw, m=params['mass'])
    m_gr, r_gr = gravity.get_mass_radius(g, redshift=params['redshift'])

    out_params['d_b'] = params['d'] * np.sqrt(xi_b)
    out_params['m_nw'] = params['mass']
    out_params['m_gr'] = m_gr.value
    out_params['xi_ratio'] = xi_p / xi_b

    return out_params
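A hypothetical call with illustrative values only (the dictionary keys match the lookups above):

params = {'inclination': 60.0, 'mass': 1.4, 'redshift': 1.26, 'd': 5.0}
burstfit_params = get_burstfit_params(params, r_nw=10)
# returns a dict with keys 'd_b', 'm_nw', 'm_gr', 'xi_ratio'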
Example #10
def create_batch(batch, dv, source,
                 params={'x': [0.6, 0.8], 'z': [0.01, 0.02],
                         'tshift': [0.0], 'accrate': [0.05],
                         'qb': [0.125], 'acc_mult': [1.0], 'qnuc': [5.0],
                         'qb_delay': [0.0], 'mass': [1.4],
                         'accmass': [1e16], 'accdepth': [1e20]},
                 lburn=1, t_end=1.3e5, exclude={}, basename='xrb',
                 walltime=96, qos='normal', nstop=10000000, nsdump=500,
                 auto_t_end=True, notes='No notes given', debug=False,
                 nbursts=20, parallel=False, ntasks=8, kgrid=None,
                 nuc_heat=True, setup_test=False, predict_qnuc=False,
                 grid_version=None, qnuc_source='heat', minzone=51,
                 zonermax=10, zonermin=-1, thickfac=0.001,
                 substrate='fe54', substrate_off=True, adapnet_filename=None,
                 bdat_filename=None, ibdatov=1, params_full=None):
    """Generates a grid of Kepler models, containing n models over the range x

    Parameters
    ---------
    batch : int
    params : {}
        specifiy model parameters. If variable: give range
    dv : {}
        stepsize in variables (if ==-1: keep param as is)
    exclude : {}
        specify any parameter values to exclude from grid
    params : {}
        mass of NS (in Msun). Only changes geemult (gravity multiplier)
    qos : str
        quality of service (slurm), one of ['general', 'medium', 'short']
    auto_t_end : bool
        auto-choose t_end based on predicted recurrence time
    parallel : bool
        utilise parallel independent kepler tasks
    ntasks : int
        no. of tasks in each parallel job (split up by this)
    kgrid : Kgrid
        pre-loaded Kgrid object, optional (avoids reloading)
    """
    # TODO: WRITE ALL PARAM DESCRIPTIONS
    # TODO: set default values for params
    # TODO: Overhaul/tidy up
    # TODO: use pd table instead of dicts of arrays
    source = grid_strings.source_shorthand(source=source)
    mass_ref = 1.4  # reference NS mass (Msun)
    radius_ref = 10  # default NS radius (km)

    print_batch(batch=batch, source=source)

    if params_full is None:
        params = dict(params)
        params_expanded, var = expand_params(dv, params)

        # ===== Cut out any excluded values =====
        cut_params(params=params_expanded, exclude=exclude)
        print_grid_params(params_expanded)

        params_full = grid_tools.enumerate_params(params_expanded)

    n_models = len(params_full['x'])

    if parallel and (n_models % ntasks != 0):
        raise ValueError(f'n_models ({n_models}) not divisible by ntasks ({ntasks})')

    if kgrid is None:
        print('No kgrid provided. Loading:')
        kgrid = grid_analyser.Kgrid(load_lc=False, source=source)

    params_full['y'] = 1 - params_full['x'] - params_full['z']  # helium-4 values
    params_full['geemult'] = params_full['mass'] / mass_ref  # Gravity multiplier

    gravities = gravity.get_acceleration_newtonian(r=radius_ref,
                                                   m=params_full['mass']).value
    params_full['radius'] = np.full(n_models, radius_ref)
    params_full['gravity'] = gravities

    # TODO: rewrite properly (use tables)
    if predict_qnuc:
        if len(params['qnuc']) > 1:
            raise ValueError('Cannot provide multiple "qnuc" in params if predict_qnuc=True')

        linr_qnuc = qnuc_tools.linregress_qnuc(qnuc_source, grid_version=grid_version)
        for i in range(n_models):
            params_qnuc = {}
            # 'param_list' (the parameters used by the qnuc regression) is
            # assumed to be defined elsewhere in the module
            for param in param_list:
                params_qnuc[param] = params_full[param][i]
            params_full['qnuc'][i] = qnuc_tools.predict_qnuc(params=params_qnuc,
                                                             source=qnuc_source,
                                                             linr_table=linr_qnuc)

    # ===== Create top grid folder =====
    batch_model_path = grid_strings.get_batch_models_path(batch, source)
    grid_tools.try_mkdir(batch_model_path)

    # Directory to keep MonARCH logs and sbatch files
    logpath = grid_strings.get_source_subdir(batch_model_path, 'logs')
    grid_tools.try_mkdir(logpath)

    # ===== Write parameter table MODELS.txt and NOTES.txt=====
    write_model_table(n=n_models, params=params_full, lburn=lburn, path=batch_model_path)
    filepath = os.path.join(batch_model_path, 'NOTES.txt')
    with open(filepath, 'w') as f:
        f.write(notes)

    job_runs = []
    if parallel:
        n_jobs = int(n_models / ntasks)
        for i in range(n_jobs):
            start = i * ntasks
            job_runs += [[start + 1, start + ntasks]]
    else:
        job_runs += [[1, n_models]]

    print_dashes()
    for runs in job_runs:
        for restart in [True, False]:
            kepler_jobscripts.write_submission_script(run0=runs[0], run1=runs[1],
                                                      restart=restart, batch=batch,
                                                      source=source, basename=basename,
                                                      path=logpath, qos=qos,
                                                      walltime=walltime,
                                                      parallel=parallel, debug=debug,
                                                      adapnet_filename=adapnet_filename,
                                                      bdat_filename=bdat_filename)

    # ===== Directories and templates for each model =====
    for i in range(n_models):
        # ==== Create directory tree ====
        print_dashes()
        model = i + 1
        run_path = grid_strings.get_model_path(model, batch, source, basename=basename)

        # ==== Create task directory ====
        grid_tools.try_mkdir(run_path)

        # ==== Write burn file, set initial composition ====
        x0 = params_full['x'][i]
        z0 = params_full['z'][i]
        kepler_files.write_rpabg(x0, z0, run_path, substrate=substrate)

        # ==== Create model generator file ====
        accrate0 = params_full['accrate'][i]

        if auto_t_end:
            mdot = params_full['accrate'][i] * params_full['acc_mult'][i]
            rate_params = {}
            for param in ('x', 'z', 'qb', 'mass'):
                rate_params[param] = params_full[param][i]
            fudge = 0.5  # extra time to ensure complete final burst
            tdel = kgrid.predict_recurrence(accrate=mdot, params=rate_params)
            t_end = (nbursts + fudge) * tdel
            print(f'Using predicted dt={tdel/3600:.1f} hr')
            if t_end < 0:
                print('WARN! negative dt predicted. Defaulting to nbursts * 1.5 hr')
                t_end = nbursts * 1.5 * 3600

        run = i + 1
        print(f'Writing genfile for xrb{run}')
        header = f'This generator belongs to model: {source}_{batch}/{basename}{run}'

        accdepth = params_full['accdepth'][i]
        if (params_full['x'][i] > 0.0) and (accdepth > 1e20):
            print(f"!!!WARNING!!!: accdepth of {accdepth:.0e} may be too deep for" +
                  " models accreting hydrogen")
        print(f'Using accdepth = {accdepth:.1e}')

        kepler_files.write_genfile(h1=params_full['x'][i], he4=params_full['y'][i],
                                   n14=params_full['z'][i], qb=params_full['qb'][i],
                                   acc_mult=params_full['acc_mult'][i], qnuc=params_full['qnuc'][i],
                                   lburn=lburn, geemult=params_full['geemult'][i],
                                   path=run_path, t_end=t_end, header=header,
                                   accrate0=accrate0, accdepth=accdepth,
                                   accmass=params_full['accmass'][i],
                                   nsdump=nsdump, nstop=nstop,
                                   nuc_heat=nuc_heat, setup_test=setup_test, cnv=0,
                                   minzone=minzone, zonermax=zonermax, zonermin=zonermin,
                                   thickfac=thickfac, substrate_off=substrate_off,
                                   ibdatov=ibdatov)
Example #11
def create_batch(batch,
                 source,
                 params,
                 dv,
                 t_end=1.3e5,
                 exclude=None,
                 basename='xrb',
                 walltime=96,
                 auto_t_end=True,
                 notes='No notes given',
                 nbursts=20,
                 kgrid=None,
                 nuc_heat=True,
                 setup_test=False,
                 auto_qnuc=False,
                 grid_version=None,
                 qnuc_source='heat',
                 substrate='fe54',
                 substrate_off=True,
                 adapnet_filename=None,
                 bdat_filename=None,
                 params_full=None,
                 numerical_params=None,
                 scratch_file_sys=False):
    """Generates a grid of Kepler models, containing n models over the range x

    Parameters
    ---------
    batch : int
    source : str
    params : {}
        specifiy model parameters. If variable: give range
    dv : {}
        stepsize in variables (if ==-1: keep param as is)
    exclude : {}
        specify any parameter values to exclude from grid
    params : {}
        mass of NS (in Msun). Only changes geemult (gravity multiplier)
    auto_t_end : bool
        auto-choose t_end based on predicted recurrence time
    kgrid : Kgrid
        pre-loaded Kgrid object, optional (avoids reloading)
    t_end : float (optional)
    basename : str (optional)
    walltime : int (optional)
    notes : str (optional)
    nbursts : int (optional)
    auto_qnuc : bool (optional)
    nuc_heat : bool (optional)
    setup_test : bool (optional)
    grid_version : int (optional)
    qnuc_source : str (optional)
    substrate : str (optional)
    substrate_off : bool (optional)
    adapnet_filename : str (optional)
    bdat_filename : str (optional)
    params_full : {} (optional)
    numerical_params : {} (optional)
        Overwrite default numerical kepler parameters (e.g. nsdump, zonermax, lburn).
        For all parameters, see 'numerical_params' in config/default.ini
    scratch_file_sys : bool (optional)
        whether to use the scratch file system on ICER cluster
    """
    # TODO:
    #   - WRITE ALL PARAM DESCRIPTIONS
    #   - Overhaul/tidy up
    #   - use pd table instead of dicts of arrays

    print_batch(batch=batch)
    source = grid_strings.source_shorthand(source=source)
    mass_ref = 1.4  # reference NS mass (Msun)
    radius_ref = 10  # default NS radius (km)

    specified = {
        'params': params,
        'dv': dv,
        'numerical_params': numerical_params
    }

    if specified['numerical_params'] is None:
        specified['numerical_params'] = {}

    config = grid_tools.setup_config(specified=specified, source=source)
    # TODO: print numerical_params being used

    if params_full is None:
        params_expanded, var = expand_params(params=config['params'],
                                             dv=config['dv'])
        params_full = exclude_params(params_expanded=params_expanded,
                                     exclude=exclude)

    n_models = len(params_full['x'])

    if kgrid is None:
        print('No kgrid provided. Loading default:')
        kgrid = grid_analyser.Kgrid(load_lc=False,
                                    linregress_burst_rate=True,
                                    source=source)

    params_full['y'] = 1 - params_full['x'] - params_full['z']  # helium-4 values
    params_full['geemult'] = params_full['mass'] / mass_ref  # gravity multiplier

    gravities = gravity.get_acceleration_newtonian(r=radius_ref,
                                                   m=params_full['mass']).value
    params_full['radius'] = np.full(n_models, radius_ref)
    params_full['gravity'] = gravities

    if auto_qnuc:
        predict_qnuc(params_full=params_full,
                     qnuc_source=qnuc_source,
                     grid_version=grid_version)

    # ===== Create top grid folder =====
    batch_model_path = grid_strings.get_batch_models_path(batch, source)
    grid_tools.try_mkdir(batch_model_path)

    # Directory to keep MonARCH logs and sbatch files
    logpath = grid_strings.get_source_subdir(batch_model_path, 'logs')
    grid_tools.try_mkdir(logpath)

    # ===== Write parameter table MODELS.txt and NOTES.txt=====
    write_model_table(n=n_models, params=params_full, path=batch_model_path)
    filepath = os.path.join(batch_model_path, 'NOTES.txt')
    with open(filepath, 'w') as f:
        f.write(notes)

    # ===== Write jobscripts for submission on clusters =====
    print_dashes()
    kepler_jobs.write_both_jobscripts(run0=1,
                                      run1=n_models,
                                      batch=batch,
                                      source=source,
                                      basename=basename,
                                      path=logpath,
                                      walltime=walltime,
                                      adapnet_filename=adapnet_filename,
                                      bdat_filename=bdat_filename,
                                      scratch_file_sys=scratch_file_sys)

    # ===== Directories and templates for each model =====
    for i in range(n_models):
        # ==== Create directory tree ====
        print_dashes()
        model = i + 1
        run_path = grid_strings.get_model_path(model,
                                               batch,
                                               source,
                                               basename=basename)

        # ==== Create task directory ====
        grid_tools.try_mkdir(run_path)

        # ==== Write burn file, set initial composition ====
        x0 = params_full['x'][i]
        z0 = params_full['z'][i]
        kepler_files.write_rpabg(x0, z0, run_path, substrate=substrate)

        # ==== Create model generator file ====
        if auto_t_end:
            mdot = params_full['accrate'][i] * params_full['acc_mult'][i]
            rate_params = {}
            for param in ('x', 'z', 'qb', 'mass'):
                rate_params[param] = params_full[param][i]
            fudge = 0.5  # extra time to ensure complete final burst
            tdel = kgrid.predict_recurrence(accrate=mdot, params=rate_params)
            t_end = (nbursts + fudge) * tdel
            print(f'Using predicted dt={tdel/3600:.1f} hr')
            if t_end < 0:
                print('WARN! negative dt predicted. Defaulting n * 1.5hr')
                t_end = nbursts * 1.5 * 3600

        run = i + 1
        print(f'Writing genfile for xrb{run}')
        header = f'This generator belongs to model: {source}_{batch}/{basename}{run}'

        accdepth = params_full['accdepth'][i]
        if (params_full['x'][i] > 0.0) and (accdepth > 1e20):
            print(
                f"!!!WARNING!!!: accdepth of {accdepth:.0e} may be too deep for"
                + " models accreting hydrogen")
        print(f'Using accdepth = {accdepth:.1e}')

        kepler_files.write_genfile(h1=params_full['x'][i],
                                   he4=params_full['y'][i],
                                   n14=params_full['z'][i],
                                   qb=params_full['qb'][i],
                                   acc_mult=params_full['acc_mult'][i],
                                   qnuc=params_full['qnuc'][i],
                                   geemult=params_full['geemult'][i],
                                   accrate0=params_full['accrate'][i],
                                   accmass=params_full['accmass'][i],
                                   accdepth=params_full['accdepth'][i],
                                   path=run_path,
                                   t_end=t_end,
                                   header=header,
                                   nuc_heat=nuc_heat,
                                   setup_test=setup_test,
                                   substrate_off=substrate_off,
                                   numerical_params=config['numerical_params'])
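A hypothetical invocation of this newer signature, with illustrative batch/source names and parameter values (not taken from any real grid):

create_batch(batch=1,
             source='grid5',
             params={'x': [0.6, 0.7], 'z': [0.01, 0.02], 'accrate': [0.1],
                     'qb': [0.1], 'mass': [1.4], 'acc_mult': [1.0], 'qnuc': [5.0],
                     'accmass': [1e16], 'accdepth': [1e20]},
             dv={'x': 0.05, 'z': 0.005},
             nbursts=20,
             walltime=96)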