def _test_can_evaluate_thcompc():
    """Does this redistribute some emission?

    It does not test that the result is actually meaningful, but
    it does check that something has been done.
    """

    ui.clean()

    ui.dataspace1d(0.1, 10, 0.01, id='unconv')
    ui.dataspace1d(0.1, 10, 0.01, id='conv')

    mconv = ui.create_model_component('xsthcompc', 'conv')
    ui.set_source('conv', mconv(ui.xsgaussian.m1))

    m1 = ui.get_model_component('m1')
    ui.set_source('unconv', m1)
    m1.lineE = 5.0
    m1.Sigma = 1.0

    yunconv = ui.get_model_plot('unconv').y.copy()
    yconv = ui.get_model_plot('conv').y.copy()

    assert (yunconv > 0).any()
    assert (yconv > 0).any()

    # It is not guaranteed that the peak will be reduced (that depends
    # on what the convolution is doing), but we would hope that flux is
    # at best conserved (i.e. not created), and that numerical
    # artifacts are not a concern here.
    #
    assert yunconv.max() > yconv.max()
    assert yunconv.sum() >= yconv.sum()
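
# A toy numpy sketch (not the XSPEC thcompc model; the unit-sum kernel is
# an assumption) of why the assertions above are plausible: convolving
# with a normalised kernel spreads the peak out, so the maximum drops
# while the total flux is conserved away from the grid edges.
import numpy as np

y = np.zeros(101)
y[50] = 1.0                     # a delta-like emission line
kernel = np.ones(5) / 5.0       # unit-sum smoothing kernel
yconv = np.convolve(y, kernel, mode='same')
assert yconv.max() < y.max()                  # peak reduced
assert abs(yconv.sum() - y.sum()) < 1e-12     # flux conserved here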
def test_get_model_plot_energy(idval):
    """Basic testing of get_model_plot: energy
    """

    setup_example(idval)
    if idval is None:
        ui.set_analysis('energy')
        mp = ui.get_model_plot()
    else:
        ui.set_analysis(idval, 'energy')
        mp = ui.get_model_plot(idval)

    assert mp.xlo == pytest.approx(_energies[:-1])
    assert mp.xhi == pytest.approx(_energies[1:])

    # This should be normalized by the bin width, but it is cancelled
    # out by the fact that the model normalization has to be multiplied
    # by the bin width (both in energy).
    #
    yexp = _arf * 1.02e2
    assert mp.y == pytest.approx(yexp)

    assert mp.title == 'Model'
    assert mp.xlabel == 'Energy (keV)'
    assert mp.ylabel == 'Counts/sec/keV'
def test_get_model_plot(idval):
    """Basic testing of get_model_plot
    """

    setup_example(idval)
    if idval is None:
        mp = ui.get_model_plot()
    else:
        mp = ui.get_model_plot(idval)

    assert isinstance(mp, ModelHistogram)

    assert mp.xlo == pytest.approx(_data_chan)
    assert mp.xhi == pytest.approx(_data_chan + 1)

    # The model is a constant, but integrated across the energy bin,
    # so the energy width is important here to get the normalization
    # right. It should also be divided by the channel width, but in
    # this case each bin has a channel width of 1.
    #
    yexp = _arf * 1.02e2 * (_energies[1:] - _energies[:-1])
    assert mp.y == pytest.approx(yexp)

    assert mp.title == 'Model'
    assert mp.xlabel == 'Channel'
    assert mp.ylabel == 'Counts/sec/channel'
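
# A short numpy sketch of the normalisation arithmetic described in the
# two tests above (the grid and values here are made up, not the _arf
# and _energies fixtures): in energy units the division by the bin width
# cancels the bin-width factor from integrating the model, so y is just
# arf * norm; in channel units the energy width survives because each
# channel is one unit wide.
import numpy as np

energies = np.linspace(0.1, 1.1, 11)
de = energies[1:] - energies[:-1]
arf = np.full(10, 0.7)
norm = 1.02e2

y_energy = (arf * norm * de) / de       # Counts/sec/keV
y_channel = (arf * norm * de) / 1.0     # Counts/sec/channel
assert np.allclose(y_energy, arf * norm)
assert np.allclose(y_channel, arf * norm * de)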
    def test_cache_copy(self):
        # fake up a PHA data set
        chans = numpy.arange(1, 11, dtype=numpy.int8)
        counts = numpy.ones(chans.size)

        # bin width is not 0.1 but something slightly different
        ebins = numpy.linspace(0.1, 1.2, num=chans.size + 1)
        elo = ebins[:-1]
        ehi = ebins[1:]

        dset = ui.DataPHA('test', chans, counts)

        # use an ARF that is not unity so we can tell it is being applied
        arf = ui.create_arf(elo, ehi, specresp=0.7 * numpy.ones(chans.size))

        rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

        ui.set_data(1, dset)
        ui.set_arf(1, arf)
        ui.set_rmf(1, rmf)

        ui.set_source(ui.const1d.mdl)

        # again not 1
        mdl.c0 = 8

        # Copy the values from the plot structures, since get_xxx_plot
        # returns the same object so m1.y == m2.y will not note a difference.
        #

        d1y = ui.get_data_plot().y.copy()
        m1y = ui.get_model_plot().y.copy()
        s1y = ui.get_source_plot().y.copy()

        d2y = ui.get_data_plot().y.copy()
        m2y = ui.get_model_plot().y.copy()
        s2y = ui.get_source_plot().y.copy()
        rtol = 1.0e-4
        atol = 1.0e-4
        numpy.testing.assert_allclose(d1y, d2y, rtol, atol)
        numpy.testing.assert_allclose(m1y, m2y, rtol, atol)
        numpy.testing.assert_allclose(s1y, s2y, rtol, atol)
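
# As the comment in test_cache_copy notes, repeated get_*_plot calls hand
# back the same object, which is why the .copy() calls are needed. A
# minimal sketch (assuming a data set and source are already set up):
p1 = ui.get_model_plot()
p2 = ui.get_model_plot()
assert p1 is p2        # the same object is returned each time...
assert p1.y is p2.y    # ...so p1.y == p2.y can never spot a change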
def test_get_order_plot_multi(make_data_path, clean_astro_ui):
    """Rather than fake data, use a known dataset.

    Here we pretend we have three orders but with the same
    response (except that the ARF is 0.5, 0.4, 0.25 of the
    normal ARF).
    """

    setup_order_plot(make_data_path)

    fplot = ui.get_fit_plot()
    oplot = ui.get_order_plot()

    # The idea is to compare the X range of plot_fit to plot_order
    # (but accessed just using the plot objects rather than creating
    # an actual plot).
    #
    # First some safety checks
    assert fplot.dataplot.xlo == pytest.approx(fplot.modelplot.xlo)
    assert fplot.dataplot.xhi == pytest.approx(fplot.modelplot.xhi)

    assert len(oplot.xlo) == 3
    assert len(oplot.xhi) == 3
    assert len(oplot.y) == 3

    assert oplot.xlo[1] == pytest.approx(oplot.xlo[0])
    assert oplot.xlo[2] == pytest.approx(oplot.xlo[0])

    assert oplot.xhi[1] == pytest.approx(oplot.xhi[0])
    assert oplot.xhi[2] == pytest.approx(oplot.xhi[0])

    # We know the y values are 0.5, 0.4, 0.25 times the original arf
    # so we can compare them.
    #
    assert oplot.y[1] == pytest.approx(oplot.y[0] * 0.4 / 0.5)
    assert oplot.y[2] == pytest.approx(oplot.y[0] * 0.25 / 0.5)

    xlo = oplot.xlo[0]
    xhi = oplot.xhi[0]
    assert len(xlo) == 564
    assert xlo[0] == pytest.approx(0.46720001101493835)
    assert xhi[-1] == pytest.approx(9.869600296020508)

    # The model plot is technically drawn the same way as the order plot
    # (ungrouped) but it uses different code (sherpa.astro.plot.ModelHistogram)
    # so let's compare.
    #
    mplot = ui.get_model_plot()
    assert mplot.xlo[0] == pytest.approx(0.46720001101493835)
    assert mplot.xhi[-1] == pytest.approx(9.869600296020508)

    # Also compare to the fit plot (which is grouped)
    #
    assert fplot.modelplot.xlo[0] == pytest.approx(0.46720001101493835)
    assert fplot.modelplot.xhi[-1] == pytest.approx(9.869600296020508)
def check_eval_multi_rmf():
    """Test that the data is handled correctly

    For use by test_eval_multi_rmf.
    """

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 4
    ui.set_source(mdl)

    # The analysis setting appears to depend on how the
    # data is set up. This is just to check it is energy.
    #
    d = ui.get_data()
    assert d.units == 'energy'

    # Easiest way to evaluate the model is to grab the
    # data from plot_source / plot_model
    #
    # The source doesn't care about how the instrument is set
    # up.
    #
    splot = ui.get_source_plot()
    assert (splot.y == 4).all()

    # The model plot does care about the instrument. There should
    # be two equal responses, offset from each other. Since this is
    # done for each energy, the offsets should "cancel out" - simply
    # doubling the signal - apart from the start/end bins.
    #
    # I haven't been able to convince myself I understand the handling
    # at the start/end of the RMF, so I am using this just as a
    # regression test.
    #
    mplot = ui.get_model_plot()

    assert mplot.y[11:968] == pytest.approx(8)

    # handle "overflow" bins
    assert mplot.y[0] == pytest.approx(84.6)
    assert mplot.y[-1] == pytest.approx(43.0)

    # handle bins below and above the offsets
    range_is_low = np.arange(1, 11)
    is_low = [4.2, 4., 4., 4., 4., 4., 4., 4., 5.2, 7.6]
    assert mplot.y[range_is_low] == pytest.approx(is_low)

    is_high = [
        6.8, 4.4, 4., 4., 4., 4., 4., 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.,
        4., 4., 4.2
    ]
    assert mplot.y[968:988] == pytest.approx(is_high)
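
# A toy numpy illustration (not the actual RMF machinery) of the
# "two offset responses" comment above: adding an identical but shifted
# copy of a flat signal doubles it everywhere except the bins that the
# shifted copy cannot reach.
import numpy as np

signal = np.full(20, 4.0)
shifted = np.zeros(20)
shifted[3:] = signal[:-3]        # the offset copy misses the first bins
combined = signal + shifted
assert (combined[3:] == 8).all()     # doubled away from the edge
assert (combined[:3] == 4).all()     # edge bins differ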
def check_chi2():
    """Execute this function after fitting to see if the
    best-fit chi2 reported matches the formula coded here"""
    import numpy as np
    import sherpa.astro.ui as sau
    chi2 = sau.get_fit_results().statval
    print('chi2 from fit: {0}'.format(chi2))
    data = sau.get_dep()
    model = sau.get_model_plot().y
    error = np.where(model > data, sau.get_syserror(), sau.get_staterror())

    chi = ((data - model) / error)  # Chi per bin
    chi2 = chi**2  # Chi^2 per bin
    print('chi2 re-computed: {0}'.format(chi2.sum()))
def check_eval_multi_arfrmf():
    """Test that the data is handled correctly

    For use by test_eval_multi_arfrmf.
    """

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 4
    ui.set_source(mdl)

    # The analysis setting appears to depend on how the
    # data is set up. This is just to check it is energy.
    #
    d = ui.get_data()
    assert d.units == 'energy'

    # Easiest way to evaluate the model is to grab the
    # data from plot_source / plot_model
    #
    # The source doesn't care about how the instrument is set
    # up.
    #
    splot = ui.get_source_plot()
    assert (splot.y == 4).all()

    # Comparison to the "truth" is harder than the previous checks
    # so just hard-code it.
    #
    y = ui.get_model_plot().y

    assert y[0] == pytest.approx(93.06)
    assert y[1] == pytest.approx(4.62)
    assert y[2:479] == pytest.approx(4.4)
    assert y[479] == pytest.approx(3.2)
    assert y[480] == pytest.approx(0.8)
    assert y[481:498] == pytest.approx(0.4)
    assert y[498] == pytest.approx(1.24)
    assert y[499] == pytest.approx(2.92)
    assert y[500:570] == pytest.approx(3.2)
    assert y[570] == pytest.approx(3.08)
    assert y[571] == pytest.approx(2.84)
    assert y[572:589] == pytest.approx(2.8)
    assert y[589] == pytest.approx(2.92)
    assert y[590] == pytest.approx(3.16)
    assert y[591:968] == pytest.approx(3.2)
    assert y[968] == pytest.approx(3.08)
    assert y[969] == pytest.approx(2.84)
    assert y[970:987] == pytest.approx(2.8)
    assert y[987] == pytest.approx(2.94)
    assert y[988] == pytest.approx(30.1)
def _get_plot_data(ids, emin, emax):
    all_model = []
    all_emodel = []
    all_data = []
    all_dataxerr = []
    all_datayerr = []
    all_edata = []
    all_ratio = []
    all_ratioerr = []

    # Get data and model for each spectrum
    for sid in ids:
        d = shp.get_data_plot(sid)
        m = shp.get_model_plot(sid)
        e = (m.xhi + m.xlo) / 2
        bins = np.concatenate((d.x - d.xerr / 2, [d.x[-1] + d.xerr[-1]]))

        model = m.y
        # integrate the model density over its own bins, sum those
        # integrals into the data bins, then divide by the data bin
        # widths to recover a density on the data grid
        model_de = model * (m.xhi - m.xlo)
        model_binned, _, _ = binned_statistic(
            e, model_de, bins=bins, statistic="sum"
        )
        model_binned = model_binned / d.xerr

        ratio = d.y / model_binned

        mask_data = np.logical_and(d.x + d.xerr / 2 >= emin, d.x - d.xerr / 2 <= emax)
        mask_model = np.logical_and(e >= emin, e <= emax)

        all_model.append(model[mask_model])
        all_emodel.append(e[mask_model])
        all_data.append(d.y[mask_data])
        all_dataxerr.append(d.xerr[mask_data])
        all_datayerr.append(d.yerr[mask_data])
        all_edata.append(d.x[mask_data])
        all_ratio.append(ratio[mask_data])
        all_ratioerr.append(d.yerr[mask_data] / model_binned[mask_data])

    return (
        all_model,
        all_emodel,
        all_data,
        all_dataxerr,
        all_datayerr,
        all_edata,
        all_ratio,
        all_ratioerr,
    )
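
# A self-contained sketch of the rebinning step in _get_plot_data
# (made-up grids; only numpy and scipy are assumed): integrate a model
# density over its own fine bins, sum those integrals into coarser
# "data" bins, then divide by the coarse bin widths to get a density
# back on the data grid.
import numpy as np
from scipy.stats import binned_statistic

fine_edges = np.linspace(0.0, 10.0, 101)
fine_mid = 0.5 * (fine_edges[1:] + fine_edges[:-1])
density = np.exp(-fine_mid)                  # model per unit x
per_bin = density * np.diff(fine_edges)      # integral per fine bin

coarse_edges = np.linspace(0.0, 10.0, 11)
summed, _, _ = binned_statistic(fine_mid, per_bin,
                                bins=coarse_edges, statistic="sum")
rebinned = summed / np.diff(coarse_edges)    # density on the coarse grid
assert rebinned.shape == (10,)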
def check_eval_multi_arf():
    """Test that the data is handled correctly

    For use by test_eval_multi_arf.
    """

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 4
    ui.set_source(mdl)

    # The analysis setting appears to depend on how the
    # data is set up. This is just to check it is energy.
    #
    d = ui.get_data()
    assert d.units == 'energy'

    # Easiest way to evaluate the model is to grab the
    # data from plot_source / plot_model
    #
    # The source doesn't care about how the instrument is set
    # up.
    #
    splot = ui.get_source_plot()
    assert (splot.y == 4).all()

    # The model plot does care about the instrument
    #
    yarf1, yarf2 = expected_arf2()
    expected = (yarf1 + yarf2) * 4

    mplot = ui.get_model_plot()
    assert mplot.y == pytest.approx(expected)

    # spot checks (just to validate the expected arf code)
    #
    # ymid represents the region where ARF1 is 0
    #
    yfirst = 4 * 1.1
    ylast = 4 * (0.1 + 0.7)
    ymid = 4 * 0.7

    assert mplot.y[0] == pytest.approx(yfirst)
    assert mplot.y[-1] == pytest.approx(ylast)
    assert mplot.y[600] == pytest.approx(ymid)
File: qq.py (project: amasini90/BXA)
def qq_export(id=None, bkg=False, outfile='qq.txt', elow=None, ehigh=None):
    """
	Export Q-Q plot into a file for plotting.

	:param id: spectrum id to use (see get_bkg_plot/get_data_plot)
	:param bkg: whether to use get_bkg_plot or get_data_plot
	:param outfile: filename to write results into
	:param elow: low energy limit
	:param ehigh: high energy limit

	Example::

		qq.qq_export('bg', outfile='my_bg_qq', elow=0.2, ehigh=10)

	"""
    # data
    d = ui.get_bkg_plot(id=id) if bkg else ui.get_data_plot(id=id)
    e = d.x
    mask = logical_and(e >= elow, e <= ehigh)
    data = d.y[mask].cumsum()
    d = ui.get_bkg_model_plot(id=id) if bkg else ui.get_model_plot(id=id)
    e = d.xlo
    mask = logical_and(e >= elow, e <= ehigh)
    e = e[mask]
    model = d.y[mask].cumsum()
    last_stat = ui.get_stat()
    ui.set_stat(ksstat)
    ks = ui.calc_stat()
    ui.set_stat(cvmstat)
    cvm = ui.calc_stat()
    ui.set_stat(adstat)
    ad = ui.calc_stat()
    ui.set_stat(last_stat)

    ui.set_stat('chi2gehrels')
    chi2 = ui.calc_stat()
    ui.set_stat('cstat')
    cstat = ui.calc_stat()
    ui.set_stat(last_stat)
    stats = dict(ks=ks, cvm=cvm, ad=ad, cstat=cstat, chi2=chi2)

    numpy.savetxt(outfile, numpy.transpose([e, data, model]))
    with open(outfile + '.json', 'w') as fh:
        json.dump(stats, fh, indent=4)
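
# A hedged sketch of consuming the exported file (matplotlib is assumed
# here; it is not used by qq.py itself). qq_export writes three columns
# (energy, cumulative data, cumulative model), so a Q-Q plot is simply
# cumulative model against cumulative data with a 1:1 reference line.
import numpy
from matplotlib import pyplot as plt

e, data, model = numpy.loadtxt('qq.txt').T
plt.plot(model, data)
plt.plot([0, model.max()], [0, model.max()], ':')
plt.xlabel('cumulative model counts')
plt.ylabel('cumulative data counts')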
def test_can_evaluate_additive_models(mname):
    """Does this create some emission?

    It does not test that the result is actually meaningful,
    and relies on the slightly-more-involved tests in
    test_xspeclmodels.py for the model evaluation.
    """

    ui.clean()
    m1 = ui.create_model_component(mname, 'm1')

    # test out a combined model; not really needed but this is
    # closer to how people will be using it.
    #
    ui.dataspace1d(0.1, 10, 0.01)
    ui.set_source(ui.xsphabs.m2 * m1)

    # rely on test_xspeclmodels.py for a more-complete test of
    # the model calling
    y = ui.get_model_plot().y.copy()

    # Assume there is some emission
    assert (y > 0).any()
def test_plot_order_multi(make_data_path, clean_astro_ui):
    """Rather than fake data, use a known dataset.

    Here we pretend we have three orders but with the same
    response (except that the ARF is 0.5, 0.4, 0.25 of the
    normal ARF).
    """

    pha = make_data_path('3c273.pi')
    ui.load_pha(pha)

    # It has already loaded in one response
    arf = ui.get_arf(resp_id=1)
    arf.specresp *= 0.5

    for order, scale in enumerate([0.4, 0.25], 2):
        ui.load_arf(make_data_path('3c273.arf'), resp_id=order)
        ui.load_rmf(make_data_path('3c273.rmf'), resp_id=order)

        arf = ui.get_arf(resp_id=order)
        arf.specresp *= scale

    ui.set_source(ui.powlaw1d.pl)

    ui.notice(0.5, 7)
    ui.ignore(3, 4)

    fplot = ui.get_fit_plot()
    oplot = ui.get_order_plot()

    # The idea is to compare the X range of plot_fit to plot_order
    # (but accessed just using the plot objects rather than creating
    # an actual plot).
    #
    # First some safety checks
    assert fplot.dataplot.xlo == pytest.approx(fplot.modelplot.xlo)
    assert fplot.dataplot.xhi == pytest.approx(fplot.modelplot.xhi)

    assert len(oplot.xlo) == 3
    assert len(oplot.xhi) == 3
    assert len(oplot.y) == 3

    assert oplot.xlo[1] == pytest.approx(oplot.xlo[0])
    assert oplot.xlo[2] == pytest.approx(oplot.xlo[0])

    assert oplot.xhi[1] == pytest.approx(oplot.xhi[0])
    assert oplot.xhi[2] == pytest.approx(oplot.xhi[0])

    # We know the y values are 0.5, 0.4, 0.25 times the original arf
    # so we can compare them.
    #
    assert oplot.y[1] == pytest.approx(oplot.y[0] * 0.4 / 0.5)
    assert oplot.y[2] == pytest.approx(oplot.y[0] * 0.25 / 0.5)

    xlo = oplot.xlo[0]
    xhi = oplot.xhi[0]
    assert len(xlo) == 564
    assert xlo[0] == pytest.approx(0.46720001101493835)
    assert xhi[-1] == pytest.approx(9.869600296020508)

    # The model plot is technically drawn the same way as the order plot
    # (ungrouped) but it uses different code (sherpa.astro.plot.ModelHistogram)
    # so let's compare.
    #
    mplot = ui.get_model_plot()
    assert mplot.xlo[0] == pytest.approx(0.46720001101493835)
    assert mplot.xhi[-1] == pytest.approx(9.869600296020508)

    # Also compare to the fit plot (which is grouped)
    #
    assert fplot.modelplot.xlo[0] == pytest.approx(0.46720001101493835)
    assert fplot.modelplot.xhi[-1] == pytest.approx(9.869600296020508)
def test_pha1_plot_model_options(clean_astro_ui, basic_pha1):
    """Test that the options have changed things, where easy to do so

    In matplotlib 3.1 the plot_model call causes a MatplotlibDeprecationWarning
    to be created:

    Passing the drawstyle with the linestyle as a single string is deprecated since Matplotlib 3.1 and support will be removed in 3.3; please pass the drawstyle separately using the drawstyle keyword argument to Line2D or set_drawstyle() method (or ds/set_ds()).

    This warning is hidden by the test suite (sherpa/conftest.py) so that
    it doesn't cause the tests to fail. Note that a number of other tests
    in this module also cause this warning to be displayed.

    """

    from matplotlib import pyplot as plt

    # Note that for PHA data sets, the model is drawn as a histogram,
    # so get_model_plot_prefs doesn't actually work. We need to change
    # the histogram prefs instead. See issue
    # https://github.com/sherpa/sherpa/issues/672
    #
    # prefs = ui.get_model_plot_prefs()
    prefs = ui.get_model_plot().histo_prefs

    # check the preferences are as expected for the boolean cases
    assert not prefs['xlog']
    assert not prefs['ylog']

    # Only change the X axis here
    prefs['xlog'] = True

    prefs['color'] = 'green'

    prefs['linecolor'] = 'red'
    prefs['linestyle'] = 'dashed'

    prefs['marker'] = '*'
    prefs['markerfacecolor'] = 'yellow'
    prefs['markersize'] = 8

    ui.plot_model()

    ax = plt.gca()
    assert ax.get_xscale() == 'log'
    assert ax.get_yscale() == 'linear'

    assert ax.get_xlabel() == 'Energy (keV)'
    assert ax.get_ylabel() == 'Counts/sec/keV'

    # It is not clear whether an 'exact' check on the value, as
    # provided by pytest.approx, makes sense, or whether a "softer"
    # check - e.g. just checking whether it is less than or greater
    # than a value - should be used. It depends on how often matplotlib
    # tweaks the axis settings and how sensitive it is to
    # platform/backend differences. Let's see how pytest.approx works.
    #
    xmin, xmax = ax.get_xlim()
    assert xmin == pytest.approx(0.40770789163447285)
    assert xmax == pytest.approx(11.477975806572461)

    ymin, ymax = ax.get_ylim()
    assert ymin == pytest.approx(-0.00045772936258082011, rel=0.01)
    assert ymax == pytest.approx(0.009940286575890335, rel=0.01)

    assert len(ax.lines) == 1
    line = ax.lines[0]

    # Apparently color wins out over linecolor
    assert line.get_color() == 'green'
    assert line.get_linestyle() == '--'  # note: input was dashed
    assert line.get_marker() == '*'
    assert line.get_markerfacecolor() == 'yellow'
    assert line.get_markersize() == pytest.approx(8.0)

    assert len(ax.collections) == 0
def fit_draws(draws, parname, nbins=50, params=None, plot=True, verbose=True):
    """Fit a gaussian to the histogram of the given parameter.

    Before using this routine you should use get_parameter_info()
    to extract the parameter info for use by get_draws(). This is
    because using this routine will invalidate the internal
    data structures that get_draws() uses when its params argument
    is None.

    If params is not None then it should be the return value of
    get_parameter_info().

    If plot is True then a plot of the histogram and fit will be
    made.

    If verbose is True then a quick comparison of the fit
    results will be displayed.
    """

    if parname not in draws["parnames"]:
        raise RuntimeError("Unknown parameter '%s'" % parname)

    # Exclude any point with an iteration number of 0
    #
    idx = draws["iteration"] > 0
    parvals = draws[parname][idx]

    (hy, hx) = np.histogram(parvals, bins=nbins)
    xlo = hx[:-1]
    xhi = hx[1:]

    id = parname
    ui.load_arrays(id, 0.5 * (xlo + xhi), hy)

    # We can guess the amplitude and position fairly reliably;
    # for the FWHM we just use the inter-quartile range of the
    # X axis.
    #
    ui.set_source(id, ui.gauss1d.gparam)
    gparam.pos = xlo[xlo.size // 2]
    gparam.ampl = hy[xlo.size // 2]
    gparam.fwhm = xlo[xlo.size * 3 // 4] - xlo[xlo.size // 4]

    # Get the best-fit value if available
    if params is not None:
        p0 = dict(zip(params["parnames"], params["parvals"]))[parname]

    logger = logging.getLogger("sherpa")
    olvl = logger.level
    logger.setLevel(40)

    ostat = ui.get_stat_name()
    ui.set_stat("leastsq")
    ui.fit(id)
    ui.set_stat(ostat)

    logger.setLevel(olvl)

    if plot:
        # We manually create the plot since we want to use a histogram for the
        # data and the Sherpa plots use curves.
        #
        ##dplot = ui.get_data_plot(id)
        mplot = ui.get_model_plot(id)

        chips.lock()
        try:
            chips.open_undo_buffer()
            chips.erase()
            chips.add_histogram(xlo, xhi, hy)
            ##chips.add_histogram(xlo, xhi, mplot.y, ["line.color", "red", "line.style", "dot"])
            chips.add_curve(mplot.x, mplot.y,
                            ["line.color", "red", "symbol.style", "none"])

            if params is not None:
                chips.add_vline(
                    p0, ["line.color", "green", "line.style", "longdash"])

            chips.set_plot_xlabel(parname)
        except:
            chips.discard_undo_buffer()
            chips.unlock()
            raise
        chips.close_undo_buffer()
        chips.unlock()

    sigma = gparam.fwhm.val / (2.0 * np.sqrt(2 * np.log(2)))

    if verbose:
        print("")
        print("Fit to histogram of draws for parameter %s gives" % parname)
        print("     mean     = %g" % gparam.pos.val)
        print("     sigma    = %g" % sigma)
        print("")

        if params is not None:
            idx = params["parnames"] == parname
            print("     best fit = %g" % p0)
            print("  covar sigma = %g" % params["parmaxes"][idx][0])
            print("")

    return (gparam.pos.val, sigma, gparam.ampl.val)
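
# A hypothetical usage sketch: the layout of the draws dict is inferred
# from the function body above, and the parameter name and values are
# invented for illustration.
draws = {
    "parnames": np.array(["gamma"]),
    "iteration": np.arange(1, 5001),
    "gamma": np.random.normal(2.0, 0.1, 5000),
}
pos, sigma, ampl = fit_draws(draws, "gamma", nbins=40, plot=False)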