Beispiel #1
0
def test_insegment():
    """Check R.InSegment bin indices against numpy.digitize for several point layouts."""
    # 6 unit-width segment edges: 0.0, 1.0, ..., 5.0
    segments = N.arange(0.0, 5.1, dtype='d')
    segments_t = Points(segments)

    # fixed seed so the shuffle below is reproducible
    N.random.seed(1)

    print('Edges', segments)
    # (start, stop[, step]) arguments for N.arange: point sets lying inside,
    # below, above and spanning the edge range, incl. edge-coincident values
    for case in [(0.5, 3.6, 0.4), (-1.5, 3.6, 0.4), (3.5, 6.6, 0.4),
                 (-1.5, 6.6, 0.4), (0.0, 5.1), (-1.e-17, 5.1),
                 (-0.5, 6.5, 2.)]:
        points = N.arange(*case, dtype='d')
        print('  points', points)
        # shuffle to verify the transformation does not depend on ordering
        N.random.shuffle(points)
        points_t = Points(points)

        sw = R.InSegment()
        # wire edges and points into the insegment transformation
        segments_t.points.points >> sw.transformations.insegment.edges
        points_t.points.points >> sw.transformations.insegment.points

        res = sw.transformations.insegment.insegment.data()
        print('  insegment', res)
        # shift edges by a tiny epsilon so edge-coincident points land in the
        # bin matching InSegment's convention before digitizing
        dgt = N.digitize(points, segments - 1.e-16)
        print('  digitize', dgt - 1)

        # NOTE(review): this asserts only that res never EQUALS dgt; given the
        # `dgt - 1` printed above, the intent is presumably res == dgt - 1
        # elementwise, which would be a stronger check. TODO confirm against
        # InSegment's index convention before tightening.
        comp = dgt == res
        assert ((comp == 0).all())
        print('\033[32mOK!\033[0m')
Beispiel #2
0
def test_ratio_bc(mode):
    """Check the broadcasting ratio transformation RatioBC in three shape modes."""
    base = N.arange(1.0, 13.0).reshape(3, 4)
    if mode == 0:
        # expected result is a matrix; bottom is a vector broadcast over rows
        res = base
        bottom = N.arange(1, 4)
        top = res * bottom[:, None]
    elif mode == 1:
        # top is a vector, bottom a full matrix derived from the expectation
        res = base
        top = N.arange(1, 4)
        bottom = top[:, None] / res
    else:
        # both operands are full matrices
        top = base * 2
        bottom = N.arange(1.0, 13.0).reshape(3, 4)
        res = top / bottom

    # Wrap the operands and build the transformation
    top_o = Points(top)
    bottom_o = Points(bottom)
    ratio_o = R.RatioBC(top_o, bottom_o)
    out = ratio_o.ratio.ratio
    result = out.data()

    print('Top', top)
    print('Bottom', bottom)
    print('Result (python)', res)
    print('Result (GNA)', result)

    # exact comparison up to double-precision rounding
    assert N.allclose(result, res, rtol=0, atol=1.e-15), "Not matching results"
Beispiel #3
0
def weightedsump_make(nsname):
    """Create parameters, variable arrays and point sources for WeightedSumP tests.

    Fixes: docstring typo ('inpnuts'), bare string literals misused as
    section markers (no-op statements) replaced by comments, and the unused
    ``labels`` local removed.

    Parameters
    ----------
    nsname : str
        Name of the namespace the weight parameters are created in.

    Returns
    -------
    tuple
        (arr1, p1, pp1, points1, arr2, p2, pp2, points2, zeros)
    """
    # Initialize inputs: arr2 is the negation of arr1
    arr1 = N.arange(0, 5, dtype='d')
    arr2 = -arr1
    zeros = N.zeros((5,), dtype='d')
    print('Data1:', arr1)
    print('Data2:', arr2)

    weights = ['w1', 'w2']

    # Initialize environment: two weight parameters w1, w2
    ns = env.globalns(nsname)
    p1 = ns.defparameter(weights[0], central=1.0, sigma=0.1)
    p2 = ns.defparameter(weights[1], central=1.0, sigma=0.1)

    ns.printparameters()

    # Variable arrays tracking each weight parameter
    with ns:
        pp1 = VarArray([weights[0]])
        pp2 = VarArray([weights[1]])

    # Initialize transformations (point sources for the data arrays)
    points1 = Points(arr1)
    points2 = Points(arr2)

    return arr1, p1, pp1, points1, arr2, p2, pp2, points2, zeros
Beispiel #4
0
def test_points(opts):
    """Pass a 3x4 matrix through the Identity transformation and verify the output."""
    mat = N.arange(12, dtype='d').reshape(3, 4)

    print('Input matrix (numpy)')
    print(mat)
    print()

    # Build the chain: Points -> Identity
    points = Points(mat)
    identity = R.Identity()
    if opts.gpuargs:
        # optional GPU-arguments variant of the identity function
        identity.identity.switchFunction('identity_gpuargs_h')

    identity.identity.source(points.points.points)
    target = identity.identity.target
    res = target.data()
    dt = target.datatype()

    assert N.allclose(mat, res), "C++ and Python results doesn't match"

    # Dump the internal Eigen representation
    print('Eigen dump (C++)')
    identity.dump()
    print()

    print('Result (C++ Data to numpy)')
    print(res)
    print()

    print('Datatype:', str(dt))
def test_chol():
    """Compare R.Cholesky with numpy.linalg.cholesky on a small SPD matrix.

    Fix: the deprecated ``numpy.matrix`` class is replaced by plain ndarrays
    with explicit ``N.outer`` / ``@`` products (same numerical results).
    """
    size = 4
    # vector 0,1,2,1; outer product plus a scaled diagonal makes mat SPD
    v = N.arange(size, dtype='d')
    v[size // 2:] = N.arange(size // 2, 0, -1)

    mat = N.outer(v, v) + N.eye(size, size) * size * 2

    chol = N.linalg.cholesky(mat)

    print('Matrix (numpy)')
    print(mat)
    print()

    print('L (numpy)')
    print(chol)
    print()

    #
    # Create the transformations: Points -> Cholesky
    #
    points = Points(mat)
    cholesky = R.Cholesky()

    cholesky.cholesky.mat(points.points.points)

    #
    # Retrieve data; only the lower triangle of L is defined
    #
    res = N.tril(cholesky.cholesky.L.data())

    #
    # Print data
    #
    print('L')
    print(res)

    assert N.allclose(chol, res), "C++ Cholesky and Python one doesn't match"

    # Reconstruct the original matrix from the factor
    mat_back = res @ res.T

    print('Matrix (rec)')
    print(mat_back)
    assert N.allclose(mat,
                      mat_back), "C++ result and Python origin doesn't match"

    diff = chol - res
    print('Diff L')
    print(diff)

    diff1 = mat_back - mat
    print('Diff mat')
    print(diff1)

    print((((N.fabs(diff) + N.fabs(diff1)) > 1.e-12).sum() and '\033[31mFail!'
           or '\033[32mOK!'), '\033[0m')
Beispiel #6
0
def test_ratio():
    """Check the Ratio output and taint-flag propagation for constant-ratio inputs."""
    scale = 2.0
    num = 9
    step = 2
    # bottom is exactly top*scale, so the elementwise ratio is 1/scale
    top = N.arange(-num, num, step)
    bottom = top * scale

    top_o = Points(top)
    bottom_o = Points(bottom)

    ratio_o = R.Ratio(top_o, bottom_o)
    out = ratio_o.ratio.ratio
    result = out.data()

    print('Scale', 1.0 / scale)
    print('Top', top)
    print('Bottom', bottom)
    print('Result (python)', top / bottom)
    print('Result (GNA)', result)

    assert N.allclose(result, 1.0 / scale, rtol=0, atol=1.e-15), \
        "Not matching results"

    # Taint propagation: after a read everything must be clean
    tf_top = top_o.points.getTaintflag()
    tf_bottom = bottom_o.points.getTaintflag()
    tf_r = ratio_o.ratio.getTaintflag()

    out.data()
    assert not (tf_top.tainted() or tf_bottom.tainted() or tf_r.tainted())

    # Tainting one input must taint the result but not the other input
    tf_top.taint()
    assert tf_top.tainted() and not tf_bottom.tainted() and tf_r.tainted()
    out.data()

    tf_bottom.taint()
    assert not tf_top.tainted() and tf_bottom.tainted() and tf_r.tainted()
    out.data()
Beispiel #7
0
def example(s1, s2, step=1, start=None, length=None):
    """Normalize an arange(s1, s2, step) array, optionally over an index window.

    When both start and length are given, Normalize(start, length) normalizes
    over [start, start+length); otherwise the whole array is normalized.
    Plots raw vs normalized bars unless running under pytest.
    """
    # Initialize data and transformations
    points = Points(N.arange(s1, s2, step, dtype='d'))
    if start is None or length is None:
        norm = R.Normalize()
    else:
        norm = R.Normalize(start, length)
    norm.normalize.inp(points.points.points)

    raw = points.points.points.data()
    data = norm.normalize.out.data()

    # Plot results (skipped under pytest)
    if "pytest" not in sys.modules:
        fig = P.figure()
        ax = P.subplot(111)
        ax.minorticks_on()
        ax.grid()
        ax.set_xlabel('$i$')
        ax.set_ylabel('height')
        suffix = ' [%i, %i]=%i' % (start, start + length - 1, length) \
            if start is not None else ''
        ax.set_title('Test normalize' + suffix)

        ax.bar(range(len(data)), raw, width=0.5, label='raw')
        ax.bar(N.arange(len(data)) + 0.5, data, width=0.5, label='normalized')
        ax.legend(loc='upper left')

    # The (windowed) sum of the normalized data must be exactly 1
    if start is not None:
        print('Sum raw (%i:%i):' % (start, start + length),
              raw[start:start + length].sum())
        res = data[start:start + length].sum()
        print('Sum data (%i:%i):' % (start, start + length), res, end='')
    else:
        print('Sum raw:', raw.sum())
        res = data.sum()
        print('Sum data:', res, end='')
    verdict = '\033[32mOK!' if res == 1.0 else '\033[31mFAIL!'
    print('    ', verdict, '\033[0m')
    print()
Beispiel #8
0
    def load_pulls(self, dataset):
        """Create pull-term (nuisance) inputs and register them on *dataset*.

        Builds a VarArray with the current pull-parameter values, a Points
        with their central values, and either the covariance matrix or the
        squared uncertainties; assigns the triple to the dataset and exposes
        the value array as an observable in the 'pull' namespace.
        """
        #
        # Load nuisance parameters
        #

        # Get list of UncertainParameter objects, drop free and fixed
        pull_pars = get_parameters(self.opts.pull,
                                   drop_fixed=True,
                                   drop_free=True)
        variables = [par.getVariable() for par in pull_pars]
        sigmas, centrals, covariance = get_uncertainties(pull_pars)
        npars = len(pull_pars)

        print('   nuisance: {} parameters'.format(npars))

        from gna.constructors import VarArray, Points
        # Create an array, representing pull parameter values
        self.pull_vararray = VarArray(variables, labels='Nuisance: values')
        # Create an array, representing pull parameter central values
        self.pull_centrals = Points(centrals, labels='Nuisance: central')

        # `covariance` may be falsy (no correlations) or a multi-element
        # ndarray; truth-testing a multi-element array raises ValueError,
        # which is used here to dispatch to the ndarray handling below.
        try:
            if covariance:
                cov = self.pull_covariance = Points(
                    covariance, labels='Nuisance: covariance matrix')
            else:
                # If there are no correlations, store only the uncertainties
                cov = self.pull_sigmas2 = Points(sigmas**2,
                                                 labels='Nuisance: sigma')
        except ValueError:
            # handle case with covariance matrix
            if covariance.any():
                cov = self.pull_covariance = Points(
                    covariance, labels='Nuisance: covariance matrix')
            else:
                # If there are no correlations, store only the uncertainties
                cov = self.pull_sigmas2 = Points(sigmas**2,
                                                 labels='Nuisance: sigma')

        # Register value/central/uncertainty outputs with the dataset
        dataset.assign(self.pull_vararray.single(),
                       self.pull_centrals.single(), cov.single())

        ns = self.env.globalns('pull')
        ns.addobservable(self.opts.name, self.pull_vararray.single())
        self.env.future['pull', self.opts.name] = self.pull_vararray.single()
Beispiel #9
0
from matplotlib import pyplot as P
from mpl_tools.helpers import savefig
from gna.bindings import common

from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-s',
                    '--show',
                    action='store_true',
                    help='show the figure')
parser.add_argument('-o', '--output')
opts = parser.parse_args()

# Sample grid x and function values y for the fixed-point scaler
x = N.array([0.0, 2.0, 4.0, 6.0], dtype='d')
y = N.array([10.0, 8.0, 6.0, 4.0], dtype='d')
xp = Points(x)
yp = Points(y)

# The single fixed point the scaling is anchored at
point = N.array([3.0], dtype='d')
pointp = Points(point)

# Bind x, y and the fixed point; the insegment output reports which
# segment of x each point falls into
scaler = R.FixedPointScale()
scaler.scale(xp, yp, pointp)
seg_idx = scaler.insegment.insegment.data()
print('Segments', seg_idx)

# res = ie.interp.interp.data()
# print( 'Result', res )
if "pytest" not in sys.modules:
    fig = P.figure()
    ax = P.subplot(111)
Beispiel #10
0
class cmd(basecmd):
    """UI command: build a Dataset with optional pull terms and Asimov data.

    Fix: ``--random-seed 0`` was silently ignored because the seed was
    truth-tested; the check now compares against ``None``.
    """

    # transformations created by load_pulls(); None until it runs
    pull_vararray, pull_centrals, pull_sigmas2, pull_covariance = None, None, None, None

    @classmethod
    def initparser(cls, parser, env):
        """Register command-line options."""
        parser.add_argument('--name', required=True, help='Dataset name', metavar='dataset')

        pull = parser.add_mutually_exclusive_group()
        pull.add_argument('--pull', action='append', help='Parameters to be added as pull terms')

        parser.add_argument('--asimov-data', nargs=2, action='append',
                            metavar=('THEORY', 'DATA'),
                            default=[])
        # parser.add_argument('--asimov-poisson', nargs=2, action='append',
                            # metavar=('THEORY', 'DATA'),
                            # default=[])
        parser.add_argument('--error-type', choices=['pearson', 'neyman'],
                            default='pearson', help='The type of statistical errors to be used')
        parser.add_argument('--random-seed', type=int, help='Set random seed of numpy random generator to given value')
        parser.add_argument('-v', '--verbose', action='store_true', help='verbose mode')

    def run(self):
        """Assemble the dataset from pull terms and Asimov theory/data pairs."""
        # Compare with None so an explicit seed of 0 is honoured
        if self.opts.random_seed is not None:
            np.random.seed(self.opts.random_seed)

        dataset = Dataset(desc=None)
        verbose = self.opts.verbose
        if verbose:
            print('Adding pull parameters to dataset', self.opts.name)

        if self.opts.pull:
            self.load_pulls(dataset)

        # keep snapshots alive for the lifetime of the command
        self.snapshots = dict()
        if self.opts.asimov_data:
            for theory_path, data_path in self.opts.asimov_data:
                # observables may live either in the legacy env or env.future
                try:
                    theory, data = env.get(theory_path), env.get(data_path)
                except KeyError:
                    theory, data = env.future['spectra', theory_path], env.future['spectra', data_path]

                # statistical errors: Neyman uses data, Pearson uses theory
                if self.opts.error_type == 'neyman':
                    error=data.single()
                elif self.opts.error_type == 'pearson':
                    error=theory.single()

                # freeze the error estimate via a snapshot unless already frozen
                if not error.getTaintflag().frozen():
                    snapshot = self.snapshots[error] = C.Snapshot(error, labels='Snapshot: stat errors')
                    snapshot.single().touch()
                    error = snapshot

                dataset.assign(obs=theory, value=data, error=error.single())

        # if self.opts.asimov_poisson:
            # for theory_path, data_path in self.opts.asimov_poisson:
                # data_poisson = np.random.poisson(env.get(data_path).data())
                # if self.opts.error_type == 'neyman':
                    # dataset.assign(env.get(theory_path),
                                   # data_poisson,
                                   # env.get(data_path))
                # elif self.opts.error_type == 'pearson':
                    # dataset.assign(env.get(theory_path),
                                   # data_poisson,
                                   # env.get(theory_path))

        self.env.parts.dataset[self.opts.name] = dataset

    def load_pulls(self, dataset):
        """Create pull-term (nuisance) inputs and register them on *dataset*."""
        #
        # Load nuisance parameters
        #

        # Get list of UncertainParameter objects, drop free and fixed
        pull_pars = get_parameters(self.opts.pull, drop_fixed=True, drop_free=True)
        variables = [par.getVariable() for par in pull_pars]
        sigmas, centrals, covariance = get_uncertainties(pull_pars)
        npars = len(pull_pars)

        from gna.constructors import VarArray, Points
        # Create an array, representing pull parameter values
        self.pull_vararray = VarArray(variables, labels='Nuisance: values')
        # Create an array, representing pull parameter central values
        self.pull_centrals = Points(centrals, labels='Nuisance: central')

        # `covariance` may be falsy or a multi-element ndarray; truth-testing
        # the latter raises ValueError, which dispatches to the branch below.
        try:
            if covariance:
                cov = self.pull_covariance = Points(covariance, labels='Nuisance: covariance matrix')
            else:
                # If there are no correlations, store only the uncertainties
                cov = self.pull_sigmas2  = Points(sigmas**2, labels='Nuisance: sigma')
        except ValueError:
            # handle case with covariance matrix
            if covariance.any():
                cov = self.pull_covariance = Points(covariance, labels='Nuisance: covariance matrix')
            else:
                # If there are no correlations, store only the uncertainties
                cov = self.pull_sigmas2  = Points(sigmas**2, labels='Nuisance: sigma')

        dataset.assign(self.pull_vararray.single(), self.pull_centrals.single(), cov.single())

        ns = self.env.globalns('pull')
        ns.addobservable(self.opts.name, self.pull_vararray.single())
        self.env.future['pull', self.opts.name] = self.pull_vararray.single()
Beispiel #11
0
#!/usr/bin/env python

from load import ROOT as R
import numpy as N
from gna.constructors import Points
from gna.bindings import DataType

# Segment edges 0.0, 1.0, ..., 5.0
segments   = N.arange(0.0, 5.1, dtype='d')
segments_t = Points(segments)

print( 'Edges', segments )
# (start, stop[, step]) for N.arange: point sets inside, below, above and
# spanning the edge range, including values coinciding with the edges
for case in [
        ( 0.5, 3.6, 0.4),
        (-1.5, 3.6, 0.4),
        ( 3.5, 6.6, 0.4),
        (-1.5, 6.6, 0.4),
        (0.0, 5.1),
        (-1.e-17, 5.1),
        (-0.5, 6.5, 2.)
        ]:
    points   = N.arange(*case, dtype='d')
    print( '  points', points )
    points_t = Points( points )

    # Bind edges and points to the SegmentWise transformation
    sw = R.SegmentWise()
    sw.segments.edges(segments_t.points.points)
    sw.segments.points(points_t.points.points)

    res = sw.segments.segments.data()
    print('  segments', res)
    for i, (i1, i2) in enumerate(zip(res[:-1], res[1:])):
Beispiel #12
0
#!/usr/bin/env python
"""Check the Exp transformation"""

import numpy as N
from load import ROOT as R
from gna.constructors import Points

arr = N.linspace(0.0, 4.0, 81)
print('Data:', arr)

# Initialize transformations: Points -> Exp
points = Points(arr)
exp = R.Exp()
exp.exp.points(points.points)

data = exp.exp.result.data()
print('Result:', data)

# Cross-check against numpy within an absolute tolerance of 1e-12
cdata = N.exp(arr)
diff = data - cdata
ok = bool((N.fabs(diff) < 1.e-12).all())
if not ok:
    print('Cmp result:', cdata)
    print('Diff:', diff)
print('\033[32mOK!' if ok else '\033[31mFAIL!', '\033[0m')
Beispiel #13
0
class cmd(basecmd):
    pull_vararray, pull_centrals, pull_sigmas2, pull_covariance = None, None, None, None

    def __init__(self, *args, **kwargs):
        """Initialize the base command and resolve the dataset name.

        The name comes either from the positional argument or from the
        -n/--name option (stored as ``name1``); the option wins when set.
        """
        basecmd.__init__(self, *args, **kwargs)
        if self.opts.name1:
            self.opts.name = self.opts.name1

    @classmethod
    def initparser(cls, parser, env):
        """Register command-line options for the dataset command."""
        # dataset name: positional or -n/--name (mutually exclusive)
        name_group = parser.add_mutually_exclusive_group(required=True)
        name_group.add_argument('name', nargs='?', help='Dataset name',
                                metavar='dataset')
        name_group.add_argument('-n', '--name', dest='name1',
                                help='dataset name', metavar='dataset')

        pull_group = parser.add_mutually_exclusive_group()
        pull_group.add_argument('--pull', action='append',
                                help='Parameters to be added as pull terms')

        # theory/data pairs and theory/data/variance triples (repeatable)
        parser.add_argument('--theory-data', '--td', nargs=2, action='append',
                            metavar=('THEORY', 'DATA'), default=[])
        parser.add_argument('--theory-data-variance', '--tdv', nargs=3,
                            action='append',
                            metavar=('THEORY', 'DATA', 'VARIANCE'),
                            default=[])

        parser.add_argument('--error-type', choices=['pearson', 'neyman'],
                            default='pearson',
                            help='The type of statistical errors to be used with --td')
        parser.add_argument('-v', '--verbose', action='store_true',
                            help='verbose mode')

    def run(self):
        """Build the dataset from pulls, theory/data pairs and explicit variances.

        Consistency fix: the local ``verbose`` was assigned but the method
        kept reading ``self.opts.verbose``; the local is now used throughout.
        """
        dataset = Dataset(desc=self.opts.name)
        verbose = self.opts.verbose
        if verbose:
            print("Dataset '{}' with:".format(self.opts.name))

        if self.opts.pull:
            self.load_pulls(dataset)

        # keep snapshots alive so frozen error estimates are not rebuilt
        self.snapshots = dict()
        for theory_path, data_path in self.opts.theory_data:
            theory, data = env.future['spectra',
                                      theory_path], env.future['spectra',
                                                               data_path]
            data.data()

            if verbose:
                print('   theory: ', str(theory))
                print('   data:   ', str(data))

            # statistical errors: Neyman uses data, Pearson uses theory
            if self.opts.error_type == 'neyman':
                error = data.single()
            elif self.opts.error_type == 'pearson':
                error = theory.single()

            # freeze the error estimate via a snapshot unless already frozen
            if not error.getTaintflag().frozen():
                snapshot = self.snapshots[error] = C.Snapshot(
                    error, labels='Snapshot: stat errors')
                snapshot.single().touch()
                error = snapshot

            dataset.assign(obs=theory, value=data, error=error.single())

        for theory_path, data_path, variance_path in self.opts.theory_data_variance:
            theory = env.future['spectra', theory_path]
            data = env.future['spectra', data_path]
            variance = env.future['spectra', variance_path]
            data.data()
            variance.data()

            if verbose:
                print('   theory:  ', str(theory))
                print('   data:    ', str(data))
                print('   variance:', str(variance))

            dataset.assign(obs=theory, value=data, error=variance.single())

        self.env.parts.dataset[self.opts.name] = dataset

    def load_pulls(self, dataset):
        """Create pull-term (nuisance) inputs and register them on *dataset*.

        Builds a VarArray with the current pull-parameter values, a Points
        with their central values, and either the covariance matrix or the
        squared uncertainties; assigns the triple to the dataset and exposes
        the value array as an observable in the 'pull' namespace.
        """
        #
        # Load nuisance parameters
        #

        # Get list of UncertainParameter objects, drop free and fixed
        pull_pars = get_parameters(self.opts.pull,
                                   drop_fixed=True,
                                   drop_free=True)
        variables = [par.getVariable() for par in pull_pars]
        sigmas, centrals, covariance = get_uncertainties(pull_pars)
        npars = len(pull_pars)

        print('   nuisance: {} parameters'.format(npars))

        from gna.constructors import VarArray, Points
        # Create an array, representing pull parameter values
        self.pull_vararray = VarArray(variables, labels='Nuisance: values')
        # Create an array, representing pull parameter central values
        self.pull_centrals = Points(centrals, labels='Nuisance: central')

        # `covariance` may be falsy (no correlations) or a multi-element
        # ndarray; truth-testing a multi-element array raises ValueError,
        # which is used here to dispatch to the ndarray handling below.
        try:
            if covariance:
                cov = self.pull_covariance = Points(
                    covariance, labels='Nuisance: covariance matrix')
            else:
                # If there are no correlations, store only the uncertainties
                cov = self.pull_sigmas2 = Points(sigmas**2,
                                                 labels='Nuisance: sigma')
        except ValueError:
            # handle case with covariance matrix
            if covariance.any():
                cov = self.pull_covariance = Points(
                    covariance, labels='Nuisance: covariance matrix')
            else:
                # If there are no correlations, store only the uncertainties
                cov = self.pull_sigmas2 = Points(sigmas**2,
                                                 labels='Nuisance: sigma')

        # Register value/central/uncertainty outputs with the dataset
        dataset.assign(self.pull_vararray.single(),
                       self.pull_centrals.single(), cov.single())

        ns = self.env.globalns('pull')
        ns.addobservable(self.opts.name, self.pull_vararray.single())
        self.env.future['pull', self.opts.name] = self.pull_vararray.single()

    __tldr__ = """\
Beispiel #14
0
# Input arrays: arr2 is the negation of arr1
arr1 = N.arange(0, 5)
arr2 = -arr1
print('Data1:', arr1)
print('Data2:', arr2)

labels = ['arr1', 'arr2']
weights = ['w1', 'w2']

# Initialize environment: two weight parameters w1 and w2
p1 = env.globalns.defparameter(weights[0], central=1.0, sigma=0.1)
p2 = env.globalns.defparameter(weights[1], central=1.0, sigma=0.1)

env.globalns.printparameters()

# Initialize transformations (point sources for the data arrays)
points1 = Points(arr1)
points2 = Points(arr2)

# Mode1: a1*w1+a2*w2 -- bind both arrays to the weighted sum
ws = R.WeightedSum(stdvector(weights), stdvector(labels))
ws.sum.arr1(points1.points)
ws.sum.arr2(points2.points)

# Vary the weights and observe the recomputed sum
print('Mode1: a1*w1+a2*w2')
print('  ', p1.value(), p2.value(), ws.sum.sum.data())
p1.set(2)
print('  ', p1.value(), p2.value(), ws.sum.sum.data())
p2.set(2)
print('  ', p1.value(), p2.value(), ws.sum.sum.data())
p1.set(1)
p2.set(1)
Beispiel #15
0
from gna.bindings import DataType

#
# Create the matrix
#
mat = N.arange(5, dtype='d')

print('Input array (numpy)')
print(mat)
print()

#
# Create transformations
#
print('Configuration')
points = Points(mat)
multifunc = R.TrialMultiFunc()
trans = multifunc.multifunc
trans.inp(points)
out = trans.out

print()
print('Run in main mode')
print('    result', out.data())

print()
print('Switch to secondary mode')
trans.switchFunction('secondary')
trans.taint()
print('    result', out.data())
Beispiel #16
0
def test_selfpower():
    """Check SelfPower ((x/a)^(+-x/a)) against numpy for a=1 and a=2.

    Idiom fixes: ``not x in y`` replaced by ``x not in y``; bare string
    literals misused as section markers (no-op statements) converted to
    comments.
    """
    # Initialize parameters and data
    ns = env.globalns
    varname = 'scale'
    par = ns.defparameter(varname, central=1.0, sigma=0.1)

    arr = N.linspace(0.0, 4.0, 81)
    print('Data:', arr)

    # Initialize transformations: positive and inverse power share the input
    points = Points(arr)
    selfpower = R.SelfPower(varname)

    selfpower.selfpower.points(points.points)
    selfpower.selfpower_inv.points(points.points)

    # accumulated differences vs the numpy reference
    checks = ()

    # Plot results (positive power); plotting is skipped under pytest
    if "pytest" not in sys.modules:
        fig = P.figure()
        ax = P.subplot(111)
        ax.minorticks_on()
        ax.grid()
        ax.set_xlabel('$x$')
        ax.set_ylabel('f(x)')
        ax.set_title('SelfPower: $(x/a)^{x/a}$')

    # a=1
    par.set(1)
    data = selfpower.selfpower.result.data().copy()

    if "pytest" not in sys.modules:
        ax.plot(arr, data, label='$a=1$')

    checks += data - (arr / par.value())**(arr / par.value()),

    # a=2
    par.set(2)
    data = selfpower.selfpower.result.data().copy()

    if "pytest" not in sys.modules:
        ax.plot(arr, data, label='$a=2$')
        ax.legend(loc='upper left')

    checks += data - (arr / par.value())**(arr / par.value()),

    # Plot for negative power
    if "pytest" not in sys.modules:
        fig = P.figure()
        ax = P.subplot(111)
        ax.minorticks_on()
        ax.grid()
        ax.set_xlabel('$x$')
        ax.set_ylabel('f(x)')
        ax.set_title('SelfPower: $(x/a)^{-x/a}$')

    # a=1
    par.set(1)
    data = selfpower.selfpower_inv.result.data().copy()

    if "pytest" not in sys.modules:
        ax.plot(arr, data, label='$a=1$')

    checks += data - (arr / par.value())**(-arr / par.value()),

    # a=2
    par.set(2)
    data = selfpower.selfpower_inv.result.data().copy()

    if "pytest" not in sys.modules:
        ax.plot(arr, data, label='$a=2$')
        ax.legend(loc='upper right')
        P.show()

    checks += data - (arr / par.value())**(-arr / par.value()),

    # Cross check results with numpy calculation
    checks = N.array(checks)
    print((checks == 0.0).all() and '\033[32mCross checks passed OK!'
          or '\033[31mCross checks failed!', '\033[0m')
    assert N.allclose(checks, 0)
Beispiel #17
0
# def fcn_int(x1, x2, y1, y2):
    # return a*(x2-x1)*(y2-y1)

def integr(x, y):
    """Integrate fcn_int analytically over every cell of the (x, y) grid.

    Builds the lower/upper corner meshes of each grid cell and delegates the
    per-cell integral to fcn_int.
    """
    xm, ym = N.meshgrid(x, y, indexing='ij')
    lower_x, upper_x = xm[:-1, :-1], xm[1:, 1:]
    lower_y, upper_y = ym[:-1, :-1], ym[1:, 1:]
    return fcn_int(lower_x, upper_x, lower_y, upper_y)

# Evaluate the function on the integration mesh and the analytic
# per-cell integrals on the bin edges (fcn/xmesh/integrator defined above)
fcn_values = fcn(xmesh, ymesh)
integrals  = integr(xedges, yedges)

# Feed the sampled values into the integrator's histogram output
fcn_o = Points(fcn_values)
fcn_output=fcn_o.single()
integrator.hist.f(fcn_output)
hist_output = integrator.hist.hist
hist_output.setLabel('output histogram')
hist = hist_output.data()

"""Self test of integration"""
from scipy.integrate import dblquad
# pick one cell: ix fixed, iy clipped to stay within the y edge range
ix, iy = 4, min(yedges.size-2, 2)
x1, x2 = xedges[ix:ix+2]
y1, y2 = yedges[iy:iy+2]

# numeric (scipy), analytic (integr) and tabulated results for that cell
# NOTE(review): passing floats y1, y2 as dblquad bounds requires a scipy
# version that accepts scalar gfun/hfun -- older releases need callables
int_s  = dblquad(lambda y, x: fcn(x, y), x1, x2, y1, y2)[0]
int_a1 = integr( [x1, x2], [y1, y2] )[0,0]
int_a2 = integrals[ix, iy]
Beispiel #18
0
        log = R.InterpLog,
        logx = R.InterpLogx,
        expo = R.InterpExpo
        )

from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument( '-s', '--show', action='store_true', help='show the figure' )
parser.add_argument( '-m', '--mode', default='expo', choices=interpolators.keys(), help='Interpolation mode' )
# parser.add_argument( '-U', '--underflow', default="", choices=['constant', 'extrapolate'] )
# parser.add_argument( '-O', '--overflow', default="", choices=['constant', 'extrapolate'] )
parser.add_argument( '-o', '--output' )
opts = parser.parse_args()

# Coarse segment edges the interpolator is built on
segments   = N.arange(1.0, 10.1, 1.5, dtype='d')
segments_t = Points(segments)

# Five shifted fine grids (one per column) to interpolate onto
points   = N.stack([N.linspace(0.0+i, 12.+i, 61, dtype='d') for i in [0, -0.1, 0.1, 0.3, 0.5]]).T
points_t = Points(points)

# Function values on the segment edges. A dead first assignment
# (fcn = N.exp(-(segments-segments[0])*0.5), immediately overwritten)
# has been removed.
fcn = N.exp(segments**(-0.5))
fcn_t = Points(fcn)

print( 'Edges', segments )
print( 'Points', points )
print( 'Fcn', fcn )

# Build the interpolator selected by --mode and bind edges/values/points
ie=interpolators[opts.mode]()
ie.interpolate(segments_t, fcn_t, points_t)
seg_idx = ie.insegment.insegment.data()