def __init__(self, X=None, Y=None, order=(2,1), fname=None, initDict=None, strategy=1, scale_min=-1, scale_max=1, pnames=None, set_structures=True):
        """
        Multivariate rational approximation f(x)_mn = g(x)_m/h(x)_n.

        kwargs:
            fname    --- JSON file with a previously calculated Pade approximation
            initDict --- dict with a previously calculated Pade approximation
            X        --- anchor points
            Y        --- function values
            order    --- tuple (m, n), m being the order of the numerator polynomial
        """
        self._vmin = None
        self._vmax = None
        self._xmin = None
        self._xmax = None
        # Three mutually exclusive ways to construct: dict, JSON file, or raw data.
        if initDict is not None:
            self.mkFromDict(initDict, set_structures=set_structures)
            return
        if fname is not None:
            self.mkFromJSON(fname, set_structures=set_structures)
            return
        if X is None or Y is None:
            raise Exception("Constructor not called correctly, use either fname, initDict or X and Y")
        self._m = order[0]
        self._n = order[1]
        anchors = np.atleast_2d(np.array(X, dtype=np.float64))
        self._scaler = apprentice.Scaler(anchors, a=scale_min, b=scale_max, pnames=pnames)
        self._X = self._scaler.scaledPoints
        self._dim = self._X[0].shape[0]
        self._Y = np.array(Y, dtype=np.float64)
        self._trainingsize = len(X)
        # The 1D case gets the specialised recurrence implementation.
        self.recurrence = (apprentice.monomial.recurrence1D if self._dim == 1
                           else apprentice.monomial.recurrence)
        self.fit(strategy=strategy)
# Beispiel #2
# 0
    def mkFromDict(self, pdict):
        """Restore the fitted rational approximation's state from a dictionary."""
        self._scaler = apprentice.Scaler(pdict["scaler"])
        self._pcoeff = np.array(pdict["pcoeff"])
        self._qcoeff = np.array(pdict["qcoeff"])
        # Plain pass-through entries: stored under the same key, kept as "_<key>".
        for key in ("iterationinfo", "dim", "m", "n", "M", "N", "strategy",
                    "roboptstrategy", "localoptsolver", "fitstrategy",
                    "trainingscale", "trainingsize"):
            setattr(self, "_" + key, pdict[key])
        self._fittime = pdict["log"]["fittime"]
        self._penaltyparam = 0.0

        # Strategies 1 and 2 carry penalty selections; strategy 2 additionally
        # stores the penalty parameter under 'lambda'.
        if self.strategy in (1, 2):
            self._ppenaltybin = pdict['chosenppenalty']
            self._qpenaltybin = pdict['chosenqpenalty']
        if self.strategy == 2:
            self._penaltyparam = pdict['lambda']

        self._struct_p = apprentice.monomialStructure(self.dim, self.m)
        self._struct_q = apprentice.monomialStructure(self.dim, self.n)
# Beispiel #3
# 0
 def mkFromDict(self, RDict):
     """Restore coefficients, orders, scaler and ONB basis from a dict."""
     self._m, self._n = int(RDict["m"]), int(RDict["n"])
     self._pcoeff, self._qcoeff = (np.array(RDict[k]) for k in ("pcoeff", "qcoeff"))
     self._scaler = apprentice.Scaler(RDict["scaler"])
     self._ONB = apprentice.ONB(RDict["ONB"])
    def __init__(self,
                 X=None,
                 Y=None,
                 order=2,
                 fname=None,
                 initDict=None,
                 strategy=2,
                 scale_min=-1,
                 scale_max=1,
                 pnames=None,
                 set_structures=True,
                 computecov=False):
        """
        Multivariate polynomial approximation.

        kwargs:
            X        --- anchor points
            Y        --- function values
            fname    --- JSON file to read pre-calculated info from
            initDict --- dict to read pre-calculated info from
            order    --- int being the order of the polynomial
        """
        self._vmin = self._vmax = None
        self._xmin = self._xmax = None
        # Three mutually exclusive ways to construct: dict, JSON file, or raw data.
        if initDict is not None:
            self.mkFromDict(initDict, set_structures=set_structures)
            return
        if fname is not None:
            self.mkFromJSON(fname, set_structures=set_structures)
            return
        if X is None or Y is None:
            raise Exception(
                "Constructor not called correctly, use either fname, initDict or X and Y"
            )
        self._m = order
        anchors = np.atleast_2d(np.array(X, dtype=np.float64))
        self._scaler = apprentice.Scaler(anchors,
                                         a=scale_min,
                                         b=scale_max,
                                         pnames=pnames)
        self._X = self._scaler.scaledPoints
        self._dim = self._X[0].shape[0]
        self._Y = np.array(Y, dtype=np.float64)
        self._trainingsize = len(X)
        # The 1D case gets the specialised recurrence implementation.
        self.recurrence = (apprentice.monomial.recurrence1D
                           if self._dim == 1 else apprentice.monomial.recurrence)
        self.fit(strategy=strategy, computecov=computecov)
# Beispiel #5
# 0
    def __init__(self,
                 X=None,
                 Y=None,
                 order=(2, 1),
                 fname=None,
                 initDict=None,
                 strategy=2,
                 scale_min=-1,
                 scale_max=1,
                 pnames=None,
                 tol=1e-14,
                 debug=False,
                 validateSVD=True):
        """
        Multivariate rational approximation f(x)_mn = g(x)_m/h(x)_n.

        kwargs:
            fname    --- previously calculated Pade approximation to read in
            X        --- anchor points
            Y        --- function values
            tol      --- singular value tolerance
            order    --- tuple (m, n), m being the order of the numerator polynomial
            strategy --- 1 is denominator first, 2 is enumerator first reduction
        """
        self._debug = debug
        self._strategy = strategy
        self.tol = tol
        self.validateSVD = validateSVD
        # Three mutually exclusive ways to construct: dict, JSON file, or raw data.
        if initDict is not None:
            self.mkFromDict(initDict)
            return
        if fname is not None:
            self.mkFromJSON(fname)
            return
        if X is None or Y is None:
            raise Exception(
                "Constructor not called correctly, use either fname, initDict or X and Y"
            )
        self._m = order[0]
        self._n = order[1]
        anchors = np.atleast_2d(np.array(X, dtype=np.float64))
        self._scaler = apprentice.Scaler(anchors,
                                         a=scale_min,
                                         b=scale_max,
                                         pnames=pnames)
        self._X = self._scaler.scaledPoints
        self._dim = self._X[0].shape[0]
        # Function values enter as a diagonal matrix for the ONB formulation.
        self._F = np.diag(Y)
        self._trainingsize = len(X)
        self._ONB = apprentice.ONB(self._X)
        self.fit()
    def __init__(self, *args, **kwargs):
        """
        Multivariate rational approximation p(x)_m/q(x)_n

        args:
            X       --- anchor points
            Y       --- function values

        kwargs:
            order (tuple m,n)
                            --- order of the numerator polynomial --- if omitted: auto 1 used
                            --- order of the denominator polynomial --- if omitted: auto 1 used
            pnames          --- list of parameter names to pass to the scaler to scale
        """
        self._vmin = None
        self._vmax = None
        # Optional tuning knobs with fallbacks; an explicit None is treated the
        # same as "not given".
        self._debug = kwargs["debug"] if kwargs.get(
            "debug") is not None else False
        self._ftol = float(
            kwargs["ftol"]) if kwargs.get("ftol") is not None else 1e-9
        self._slsqp_iter = int(
            kwargs["itslsqp"]) if kwargs.get("itslsqp") is not None else 200

        # FIX: the docstring promises "auto 1 used" when "order" is omitted,
        # but the old code raised KeyError; honour the documented default.
        # (Also dropped an unused local "import os".)
        _order = kwargs["order"] if kwargs.get("order") is not None else (1, 1)
        self._m = _order[0]
        self._n = _order[1]
        if len(args) == 0:
            # No anchor data given yet; caller is expected to populate state later.
            pass
        else:
            # Scaler related kwargs
            _pnames = kwargs["pnames"] if kwargs.get(
                "pnames") is not None else None
            _scale_min = kwargs["scalemin"] if kwargs.get(
                "scalemin") is not None else -1
            _scale_max = kwargs["scalemax"] if kwargs.get(
                "scalemax") is not None else 1
            self._scaler = apprentice.Scaler(np.array(args[0],
                                                      dtype=np.float64),
                                             a=_scale_min,
                                             b=_scale_max,
                                             pnames=_pnames)
            self._X = self._scaler.scaledPoints
            self._trainingsize = len(self._X)
            self._dim = self._X[0].shape[0]
            self._Y = np.array(args[1], dtype=np.float64)
            # The 1D case gets the specialised recurrence implementation.
            if self._dim == 1:
                self.recurrence = apprentice.monomial.recurrence1D
            else:
                self.recurrence = apprentice.monomial.recurrence
            self.setStructures()
            self.setIPO()
            self.fit()
 def mkFromDict(self, pdict, set_structures=True):
     """
     Restore a rational approximation from a dictionary.

     pdict          --- dict with keys "pcoeff", "qcoeff", "m", "n", "dim",
                        "scaler" and optionally "vmin"/"vmax"/"trainingsize"
     set_structures --- if True, rebuild the monomial structures afterwards
     """
     self._pcoeff = np.array(pdict["pcoeff"])
     self._qcoeff = np.array(pdict["qcoeff"])
     self._m      = int(pdict["m"])
     self._n      = int(pdict["n"])
     self._dim    = int(pdict["dim"])
     self._scaler = apprentice.Scaler(pdict["scaler"])
     # vmin/vmax are optional extrema cached by some producers of these dicts.
     if "vmin" in pdict: self._vmin = pdict["vmin"]
     if "vmax" in pdict: self._vmax = pdict["vmax"]
     # The 1D case gets the specialised recurrence implementation.
     if self._dim==1: self.recurrence=apprentice.monomial.recurrence1D
     else           : self.recurrence=apprentice.monomial.recurrence
     # "trainingsize" is optional in older files. FIX: catch only the
     # exceptions a missing or malformed entry can raise instead of a bare
     # "except:" that also swallowed KeyboardInterrupt/SystemExit.
     try:
         self._trainingsize = int(pdict["trainingsize"])
     except (KeyError, TypeError, ValueError):
         pass
     if set_structures: self.setStructures()
def getresults(farr, noisearr, tarr, ts, allsamples):
    """
    Aggregate approximation-quality statistics for every combination of test
    function, sampling scheme and noise level.

    For each function in farr it loads pre-generated test points (on the
    faces/corners and in the interior of the function's box), evaluates four
    fitted approximations (papp, rapp, rapprd, rappsip) from JSON result files
    over five experiment runs, and records L2 errors plus per-threshold counts
    of points whose normalised prediction magnitude exceeds each threshold.

    farr       --- list of test-function names
    noisearr   --- list of noise identifiers (decoded via getnoiseinfo)
    tarr       --- list of threshold values (strings or numbers)
    ts         --- training-scale tag used in the result file names
    allsamples --- list of sampling-scheme names (e.g. including "sg")

    Returns a nested dict: results[fname][sample][noise][method][...] with
    arithmetic/geometric means, medians, ranges and standard deviations.
    """
    import apprentice
    # Fixed numerator/denominator orders used in the result file names (p5_q5).
    m = 5
    n = 5
    # Thresholds sorted ascending; per-threshold dicts are keyed by str(tval).
    thresholdvalarr = np.array([float(t) for t in tarr])
    thresholdvalarr = np.sort(thresholdvalarr)

    results = {}
    for fnum, fname in enumerate(farr):
        results[fname] = {}
        dim = getdim(fname)
        # Pre-generated test points: [0] on the faces/corners, [1] inside the box.
        infile = [
            "results/plots/poledata_corner" + str(dim) + "D.csv",
            "results/plots/poledata_inside" + str(dim) + "D.csv"
        ]

        X_testfc = np.loadtxt(infile[0], delimiter=',')
        X_testin = np.loadtxt(infile[1], delimiter=',')

        # X_testall = np.vstack(X_test,X_testin)

        print(len(X_testfc), len(X_testin))
        minarr, maxarr = getbox(fname)

        # Map the raw test points into the function's box [minarr, maxarr].
        s = apprentice.Scaler(np.array(X_testfc, dtype=np.float64),
                              a=minarr,
                              b=maxarr)
        X_testfc = s.scaledPoints

        s = apprentice.Scaler(np.array(X_testin, dtype=np.float64),
                              a=minarr,
                              b=maxarr)
        X_testin = s.scaledPoints

        # Noise-free ground truth; max(1, |max Y|) is used as a normalisation
        # constant for the threshold tests below.
        Y_testfc = np.array(getData(X_testfc, fname, 0))
        maxY_testfc = max(1, abs(np.max(Y_testfc)))

        Y_testin = np.array(getData(X_testin, fname, 0))
        maxY_testin = max(1, abs(np.max(Y_testin)))
        print(fname, maxY_testfc, maxY_testin)

        results[fname]['npoints_face'] = len(Y_testfc)
        results[fname]['npoints_inside'] = len(Y_testin)

        for snum, sample in enumerate(allsamples):
            results[fname][sample] = {}
            for noise in noisearr:
                results[fname][sample][noise] = {}
                noisestr, noisepct = getnoiseinfo(noise)

                # Per-run accumulators for the four approximation methods.
                resdata = {}
                resdata['papp'] = {}
                resdata['rapp'] = {}
                resdata['rapprd'] = {}
                resdata['rappsip'] = {}

                resdata['papp']['l2all'] = []
                resdata['rapp']['l2all'] = []
                resdata['rapprd']['l2all'] = []
                resdata['rappsip']['l2all'] = []

                for tval in thresholdvalarr:
                    for method in ['papp', 'rapp', 'rapprd', 'rappsip']:
                        resdata[method][str(tval)] = {}
                        resdata[method][str(tval)]['no'] = []
                        resdata[method][str(tval)]['no_face'] = []
                        resdata[method][str(tval)]['no_inside'] = []
                        resdata[method][str(tval)]['l2count'] = []
                        resdata[method][str(tval)]['l2notcount'] = []

                for rnum, run in enumerate(
                    ["exp1", "exp2", "exp3", "exp4", "exp5"]):
                    fndesc = "%s%s_%s_%s" % (fname, noisestr, sample, ts)
                    folder = "results/%s/%s" % (run, fndesc)
                    # print(folder)
                    pq = "p%d_q%d" % (m, n)
                    # print(run, fname,noisestr,sample,m,n)

                    rappsipfile = "%s/outrasip/%s_%s_ts%s.json" % (
                        folder, fndesc, pq, ts)
                    rappfile = "%s/outra/%s_%s_ts%s.json" % (folder, fndesc,
                                                             pq, ts)
                    rapprdfile = "%s/outrard/%s_%s_ts%s.json" % (
                        folder, fndesc, pq, ts)
                    pappfile = "%s/outpa/%s_%s_ts%s.json" % (folder, fndesc,
                                                             pq, ts)

                    # Missing result files: skip the run if it is a known gap
                    # (knowmissing), otherwise abort. "sg" has a single
                    # deterministic run, so the run loop stops early for it.
                    if not os.path.exists(rappsipfile):
                        print("rappsipfile %s not found" % (rappsipfile))
                        if (knowmissing(rappsipfile) == 1):
                            if (sample == "sg"):
                                break
                            continue
                        exit(1)

                    if not os.path.exists(rappfile):
                        print("rappfile %s not found" % (rappfile))
                        if (knowmissing(rappfile) == 1):
                            if (sample == "sg"):
                                break
                            continue
                        exit(1)

                    # NOTE(review): the next two messages say "rappfile" but
                    # actually refer to the rapprd and papp files.
                    if not os.path.exists(rapprdfile):
                        print("rappfile %s not found" % (rapprdfile))
                        if (knowmissing(rapprdfile) == 1):
                            if (sample == "sg"):
                                break
                            continue
                        exit(1)

                    if not os.path.exists(pappfile):
                        print("rappfile %s not found" % (pappfile))
                        if (knowmissing(pappfile) == 1):
                            if (sample == "sg"):
                                break
                            continue
                        exit(1)
                    print(fndesc + " " + run + " Start")

                    # Evaluate each approximation on both test sets.
                    # NOTE(review): the bare "except:" fallbacks below swallow
                    # all errors and retry point-by-point via findpredval;
                    # presumably this guards against vectorised-call failures.
                    papp = PolynomialApproximation(fname=pappfile)
                    try:
                        Y_pred_pappfc = np.array([papp(x) for x in X_testfc])
                        Y_pred_pappin = np.array([papp(x) for x in X_testin])
                    except:
                        Y_pred_pappfc = findpredval(X_testfc, papp)
                        Y_pred_pappin = findpredval(X_testin, papp)

                    rappsip = RationalApproximationSIP(rappsipfile)
                    try:
                        Y_pred_rappsipfc = rappsip.predictOverArray(X_testfc)
                        Y_pred_rappsipin = rappsip.predictOverArray(X_testin)
                    except:
                        Y_pred_rappsipfc = findpredval(X_testfc, rappsip)
                        Y_pred_rappsipin = findpredval(X_testin, rappsip)

                    rapp = RationalApproximationONB(fname=rappfile)
                    try:
                        Y_pred_rappfc = np.array([rapp(x) for x in X_testfc])
                        Y_pred_rappin = np.array([rapp(x) for x in X_testin])
                    except:
                        Y_pred_rappfc = findpredval(X_testfc, rapp)
                        Y_pred_rappin = findpredval(X_testin, rapp)

                    rapprd = RationalApproximationONB(fname=rapprdfile)
                    try:
                        Y_pred_rapprdfc = np.array(
                            [rapprd(x) for x in X_testfc])
                        Y_pred_rapprdin = np.array(
                            [rapprd(x) for x in X_testin])
                    except:
                        Y_pred_rapprdfc = findpredval(X_testfc, rapprd)
                        Y_pred_rapprdin = findpredval(X_testin, rapprd)

                    print(fndesc + " " + run + " Done ")
                    sys.stdout.flush()

                    # Combine face and interior sets for the overall L2 error.
                    Y_testall = np.concatenate((Y_testfc, Y_testin), axis=None)

                    Y_pred_pappall = np.concatenate(
                        (Y_pred_pappfc, Y_pred_pappin), axis=None)
                    Y_pred_rappsipall = np.concatenate(
                        (Y_pred_rappsipfc, Y_pred_rappsipin), axis=None)
                    Y_pred_rappall = np.concatenate(
                        (Y_pred_rappfc, Y_pred_rappin), axis=None)
                    Y_pred_rapprdall = np.concatenate(
                        (Y_pred_rapprdfc, Y_pred_rapprdin), axis=None)

                    # Squared L2 errors (square-rooted when stored below).
                    l2allrapp = np.sum((Y_pred_rappall - Y_testall)**2)
                    l2allrapprd = np.sum((Y_pred_rapprdall - Y_testall)**2)
                    l2allrappsip = np.sum((Y_pred_rappsipall - Y_testall)**2)
                    l2allpapp = np.sum((Y_pred_pappall - Y_testall)**2)
                    # print(l2allrapp,l2allrapprd,l2allrappsip)

                    resdata['rapp']['l2all'].append(np.sqrt(l2allrapp))
                    resdata['rapprd']['l2all'].append(np.sqrt(l2allrapprd))
                    resdata['rappsip']['l2all'].append(np.sqrt(l2allrappsip))
                    resdata['papp']['l2all'].append(np.sqrt(l2allpapp))

                    # Initialise this run's slot of every per-threshold counter.
                    for tnum, tval in enumerate(thresholdvalarr):
                        for method in ['papp', 'rapp', 'rapprd', 'rappsip']:
                            resdata[method][str(tval)]['no_face'].append(0)
                            resdata[method][str(tval)]['no_inside'].append(0)
                            resdata[method][str(tval)]['no'].append(0)
                            resdata[method][str(tval)]['l2count'].append(0.)
                            resdata[method][str(tval)]['l2notcount'].append(0.)

                    # Count face points whose normalised |prediction| exceeds
                    # each threshold, and accumulate their squared error.
                    for num, yt in enumerate(Y_testfc):
                        for method, pred in zip(
                            ['papp', 'rapp', 'rapprd', 'rappsip'], [
                                Y_pred_pappfc, Y_pred_rappfc, Y_pred_rapprdfc,
                                Y_pred_rappsipfc
                            ]):
                            yp = pred[num]
                            for tnum, tval in enumerate(thresholdvalarr):
                                if (abs(yp) / maxY_testfc > tval):
                                    resdata[method][str(tval)]['no'][rnum] += 1
                                    resdata[method][str(
                                        tval)]['no_face'][rnum] += 1
                                    resdata[method][str(
                                        tval)]['l2count'][rnum] += (yp - yt)**2

                    # Same accounting for interior points.
                    for num, yt in enumerate(Y_testin):
                        for method, pred in zip(
                            ['papp', 'rapp', 'rapprd', 'rappsip'], [
                                Y_pred_pappin, Y_pred_rappin, Y_pred_rapprdin,
                                Y_pred_rappsipin
                            ]):
                            yp = pred[num]
                            for tnum, tval in enumerate(thresholdvalarr):
                                if (abs(yp) / maxY_testin > tval):
                                    resdata[method][str(tval)]['no'][rnum] += 1
                                    resdata[method][str(
                                        tval)]['no_inside'][rnum] += 1
                                    resdata[method][str(
                                        tval)]['l2count'][rnum] += (yp - yt)**2

                    # Split the total error into above-threshold (l2count) and
                    # below-threshold (l2notcount) parts.
                    # NOTE(review): l2all - l2count can go slightly negative
                    # from float rounding, making np.sqrt return nan — confirm.
                    for tnum, tval in enumerate(thresholdvalarr):
                        for method, l2all in zip(
                            ['papp', 'rapp', 'rapprd', 'rappsip'],
                            [l2allpapp, l2allrapp, l2allrapprd, l2allrappsip]):
                            l2count = resdata[method][str(
                                tval)]['l2count'][rnum]
                            resdata[method][str(
                                tval)]['l2notcount'][rnum] = np.sqrt(l2all -
                                                                     l2count)
                            resdata[method][str(
                                tval)]['l2count'][rnum] = np.sqrt(l2count)

                    if (sample == "sg"):
                        break
                # Sentinel stored when a statistic could not be computed.
                missingmean = -1
                # Reduce the per-run l2all arrays to summary statistics.
                for method in ['papp', 'rapp', 'rapprd', 'rappsip']:
                    l2allarr = resdata[method]['l2all']
                    results[fname][sample][noise][method] = {}
                    if (len(l2allarr) != 0):
                        results[fname][sample][noise][method]['l2all'] = float(
                            getstats(l2allarr, 'amean'))
                        results[fname][sample][noise][method][
                            'l2allgm'] = float(getstats(l2allarr, 'gmean'))
                        results[fname][sample][noise][method][
                            'l2allmed'] = float(getstats(l2allarr, 'median'))
                        results[fname][sample][noise][method][
                            'l2allra'] = float(getstats(l2allarr, 'range'))
                        results[fname][sample][noise][method][
                            'l2allsd'] = float(np.std(l2allarr))
                    else:
                        results[fname][sample][noise][method][
                            'l2all'] = missingmean
                        results[fname][sample][noise][method][
                            'l2allgm'] = missingmean
                        results[fname][sample][noise][method][
                            'l2allmed'] = missingmean
                        results[fname][sample][noise][method][
                            'l2allra'] = missingmean
                        results[fname][sample][noise][method]['l2allsd'] = 0

                # Reduce the per-threshold counters the same way, suffixing the
                # key with gm/med/ra/sd for the respective statistic.
                for tval in thresholdvalarr:
                    for method in ['papp', 'rapp', 'rapprd', 'rappsip']:
                        results[fname][sample][noise][method][str(tval)] = {}
                        for key in [
                                'l2notcount', 'l2count', 'no', 'no_face',
                                'no_inside'
                        ]:

                            arr = resdata[method][str(tval)][key]
                            if (len(arr) != 0):
                                results[fname][sample][noise][method][str(
                                    tval)][key] = float(getstats(arr, 'amean'))
                                results[fname][sample][noise][method][str(
                                    tval)][key + 'gm'] = float(
                                        getstats(arr, 'gmean'))
                                results[fname][sample][noise][method][str(
                                    tval)][key + 'med'] = float(
                                        getstats(arr, 'median'))
                                results[fname][sample][noise][method][str(
                                    tval)][key + 'ra'] = float(
                                        getstats(arr, 'range'))
                                results[fname][sample][noise][method][str(
                                    tval)][key + 'sd'] = float(np.std(arr))
                            else:
                                results[fname][sample][noise][method][str(
                                    tval)][key] = missingmean
                                results[fname][sample][noise][method][str(
                                    tval)][key + 'gm'] = missingmean
                                results[fname][sample][noise][method][str(
                                    tval)][key + 'med'] = missingmean
                                results[fname][sample][noise][method][str(
                                    tval)][key + 'ra'] = missingmean
                                results[fname][sample][noise][method][str(
                                    tval)][key + 'sd'] = 0

        print("done with fn: %s" % (fname))

    return results
# Beispiel #9
# 0
def plotminimizeranderror(usejson=0):
    """
    Plot, per sampling scheme, the SIP minimizer value and the approximation
    error across iterations for test function f17.

    usejson --- 0: compute the data and dump it to
                   results/plots/Jminimizeranderror.json
                1: read that JSON back and render the two-panel figure
                   ../../log/minimizererror.pdf
    """
    def getData(X_train, fn, noisepct):
        """
        TODO use eval or something to make this less noisy
        """
        from apprentice import testData
        if fn == "f17":
            Y_train = [testData.f17(x) for x in X_train]
        else:
            raise Exception("function {} not implemented, exiting".format(fn))

        return Y_train

    import json
    import apprentice
    from apprentice import RationalApproximationSIP
    folder = "results"
    samplearr = ['lhs', 'splitlhs', 'sg']
    noise = "0"
    fname = "f17"
    noisestr, noisepct = getnoiseinfo(noise)
    dim = 3
    # Pre-generated test points: [0] on the faces/corners, [1] inside the box.
    infile = [
        "results/plots/poledata_corner" + str(dim) + "D.csv",
        "results/plots/poledata_inside" + str(dim) + "D.csv"
    ]

    if (usejson == 0):
        # f17's box — assumed fixed here; TODO confirm against getbox("f17").
        minarr = [80, 5, 90]
        maxarr = [100, 10, 93]
        X_testfc = np.loadtxt(infile[0], delimiter=',')
        X_testin = np.loadtxt(infile[1], delimiter=',')
        s = apprentice.Scaler(np.array(X_testfc, dtype=np.float64),
                              a=minarr,
                              b=maxarr)
        X_testfc = s.scaledPoints

        s = apprentice.Scaler(np.array(X_testin, dtype=np.float64),
                              a=minarr,
                              b=maxarr)
        X_testin = s.scaledPoints

        Y_testfc = np.array(getData(X_testfc, fname, 0))
        Y_testin = np.array(getData(X_testin, fname, 0))

        Y_testall = np.concatenate((Y_testfc, Y_testin), axis=None)

        # For each sampling scheme, find the experiment with the most SIP
        # iterations; that run is the one plotted below.
        maxiterexp = [0, 0, 0]
        for snum, sample in enumerate(samplearr):
            maxiter = 0
            for exp in ['exp1', 'exp2', 'exp3', 'exp4', 'exp5']:
                file = "%s/%s/%s%s_%s_2x/outrasip/%s%s_%s_2x_p5_q5_ts2x.json" % (
                    folder, exp, fname, noisestr, sample, fname, noisestr,
                    sample)
                if not os.path.exists(file):
                    print("rappsipfile %s not found" % (file))
                    exit(1)
                if file:
                    with open(file, 'r') as fn:
                        datastore = json.load(fn)
                if len(datastore['iterationinfo']) > maxiter:
                    maxiter = len(datastore['iterationinfo'])
                    maxiterexp[snum] = exp
                # "sg" has a single deterministic run, so stop after exp1.
                if (sample == 'sg'):
                    break

        data = {}
        for snum, sample in enumerate(samplearr):
            data[sample] = {}
            exp = maxiterexp[snum]

            file = "%s/%s/%s%s_%s_2x/outrasip/%s%s_%s_2x_p5_q5_ts2x.json" % (
                folder, exp, fname, noisestr, sample, fname, noisestr, sample)
            if not os.path.exists(file):
                print("rappsipfile %s not found" % (file))
                exit(1)
            if file:
                with open(file, 'r') as fn:
                    datastore = json.load(fn)

            data[sample]['x'] = [
                i for i in range(len(datastore['iterationinfo']))
            ]
            data[sample]['minimizer'] = []
            data[sample]['error'] = []
            print('starting error calc for %s' % (sample))
            # FIX: loop variable renamed from "iter" (shadowed the builtin);
            # also fixed the "staring"->"starting" typo in the log message.
            for inum, iterinfo in enumerate(datastore['iterationinfo']):
                print('starting iter %d' % (inum))
                data[sample]['minimizer'].append(
                    iterinfo['robOptInfo']['info'][0]['robustObj'])

                pcoeff = iterinfo['pcoeff']
                qcoeff = iterinfo['qcoeff']

                # Rebuild the approximation with this iteration's coefficients
                # spliced into a fresh copy of the stored metadata.
                if file:
                    with open(file, 'r') as fn:
                        tempds = json.load(fn)
                tempds['pcoeff'] = pcoeff
                tempds['qcoeff'] = qcoeff

                rappsip = RationalApproximationSIP(tempds)

                Y_pred_rappsipfc = rappsip.predictOverArray(X_testfc)
                Y_pred_rappsipin = rappsip.predictOverArray(X_testin)

                Y_pred_rappsipall = np.concatenate(
                    (Y_pred_rappsipfc, Y_pred_rappsipin), axis=None)
                l2allrappsip = np.sum((Y_pred_rappsipall - Y_testall)**2)
                data[sample]['error'].append(np.sqrt(l2allrappsip))
                print('ending iter %d' % (inum))
        print(data)
        # FIX: removed a redundant second "import json" (already imported above).
        with open('results/plots/Jminimizeranderror.json', "w") as f:
            json.dump(data, f, indent=4, sort_keys=True)

    elif (usejson == 1):
        outfilejson = "results/plots/Jminimizeranderror.json"

        import matplotlib as mpl
        import matplotlib.pyplot as plt
        import matplotlib.text as text
        mpl.rc('text', usetex=True)
        mpl.rc('font', family='serif', size=12)
        mpl.rc('font', weight='bold')
        # FIX: modern matplotlib requires a string (not a list) here; a plain
        # string is accepted by old versions as well.
        mpl.rcParams['text.latex.preamble'] = r'\usepackage{sfmath} \boldmath'
        # mpl.style.use("ggplot")

        f, axarr = plt.subplots(2,
                                1,
                                sharex=True,
                                sharey=False,
                                figsize=(15, 8))
        f.subplots_adjust(hspace=0)
        f.subplots_adjust(wspace=0)

        style = ['b--', 'r-.', 'g-']
        linewidth = [1, 1, 2]
        labelarr = ['$LHS$', '$\\mathrm{d-LHD}$', '$SG$']
        marker = ['x', '*', 'o']

        # Top panel: minimizer value per iteration.
        index = 0
        if outfilejson:
            with open(outfilejson, 'r') as fn:
                data = json.load(fn)
        for snum, sample in enumerate(samplearr):
            x = data[sample]['x']
            y = data[sample]['minimizer']
            # Prepend a dummy point so the first real iteration sits at x=1.
            x.insert(0, -1)
            y.insert(0, -2.5)
            x = np.array(x) + 1
            # FIX: "lineWidth" is not a valid matplotlib kwarg (raises
            # AttributeError); the correct spelling is "linewidth".
            axarr[index].plot(x,
                              y,
                              style[snum],
                              label=labelarr[snum],
                              linewidth=linewidth[snum],
                              markevery=(1, 1),
                              marker=marker[snum])
        axarr[index].axhline(0, linestyle=":", linewidth='1', color='k')
        axarr[index].legend(fontsize=18, frameon=False)
        axarr[index].set_ylabel('$min\\quad q(x)$', fontsize=24)
        axarr[index].tick_params(labelsize=20)

        # Bottom panel: approximation error per iteration (log scale).
        index = 1
        if outfilejson:
            with open(outfilejson, 'r') as fn:
                data = json.load(fn)
        for snum, sample in enumerate(samplearr):
            x = data[sample]['x']
            y = data[sample]['error']
            x.insert(0, -1)
            y.insert(0, 10**2)
            x = np.array(x) + 1
            axarr[index].plot(x,
                              y,
                              style[snum],
                              label=labelarr[snum],
                              linewidth=linewidth[snum],
                              markevery=(1, 1),
                              marker=marker[snum])
            # FIX: renamed "min" -> "ymin" (shadowed the builtin).
            if (sample == 'splitlhs'):
                ymin = np.min(y)
        axarr[index].axhline(ymin, linestyle=":", linewidth='1', color='k')
        axarr[index].set_yscale('log')
        axarr[index].legend(fontsize=18, frameon=False)
        axarr[index].set_xlabel('$\\mathrm{Iteration\\ number}$', fontsize=24)
        axarr[index].set_ylabel('$\\Delta_r$', fontsize=24)
        axarr[index].tick_params(labelsize=20)

        # plt.yscale("log")

        plt.savefig("../../log/minimizererror.pdf", bbox_inches='tight')
# Beispiel #10
# 0
    def __init__(self, *args, **kwargs):
        """
        Multivariate rational approximation p(x)_m/q(x)_n

        args:
            fname   --- to read in previously calculated Pade approximation stored as the JSON file

            dict    --- to read in previously calculated Pade approximation stored as the dictionary object obtained after parsing the JSON file

            X       --- anchor points
            Y       --- function values

        kwargs:
            m               --- order of the numerator polynomial --- if omitted: auto 1 used
            n               --- order of the denominator polynomial --- if omitted: auto 1 used
            trainingscale   --- size of training data to use --- if omitted: auto 1x used
                                .5x is half the number of coeffs in numerator and denominator,
                                1x is the number of coeffs in numerator and denominator,
                                2x is twice the number of coefficients,
                                Cp is 100% of the data
            box             --- box (2D array of dim X [min,max]) within which to perform the approximation --- if omitted: auto dim X [-1, 1] used
            pnames          --- list of parameter names to pass to the scaler to scale
            scalemin        --- scalar or list of shape = dimension of minimum scale value for X --- if omitted: auto -1 used on all dimensions
            scalemax        --- scalar or list of shape = dimension of maximum scale value for X --- if omitted: auto 1 used on all dimensions
            strategy        --- strategy to use --- if omitted: auto 0 used
                                0: min ||f*q(x)_n - p(x)_m||^2_2 sub. to q(x)_n >=1
                                1: min ||f*q(x)_n - p(x)_m||^2_2 sub. to q(x)_n >=1 and some p and/or q coefficients set to 0
                                2: min ||f*q(x)_n - p(x)_m||^2_2 + lambda*||c_pq||_1 sub. to q(x)_n >=1
            fitstrategy     --- strategy to perform the fitting (least squares and sparse) --- if omitted: auto 'scipy' used
                                scipy: SLSQP optimization solver in scipy (scipy.SLSQP)
                                filter: filterSQP solver through pyomo (REQUIRED: pyomo and filter executable in PATH)
            roboptstrategy  --- strategy to optimize robust objective --- if omitted: auto 'ms' used
                                ss: single start algorithm using scipy.L-BFGS-B local optimizer
                                ms: multistart algorithm (with 10 restarts at random points from the box) using scipy.L-BFGS-B local optimizer
                                msbarontime: multistart algorithm using scipy.L-BFGS-B local optimizer that restarts for the amount of time baron would run for the no. of nonlinearities
                                baron: baron through pyomo (REQUIRED: Pyomo and baron executable in PATH)
                                solve: solve q(x) at random points in the box of X
                                ss_ms_so_ba: runs single start, multistart, baron and solve, and logs the different objective function values obtained
                                mlsl: multi-level single-linkage multistart algorithm from nlopt using nlopt.LD_LBFGS local optimizer
            localoptsolver  --- strategy to perform local optimization in robust optimization with single start and multistart approaches --- if omitted: auto 'scipy' used
                                scipy: L-BFGS-B optimization solver in scipy (scipy.L-BFGS-B)
                                filter: filterSQP solver through pyomo (REQUIRED: pyomo and filter executable in PATH)
            penaltyparam    --- lambda to use for strategy 2 --- if omitted: auto 0.1 used
            penaltybin      --- penalty binary array for numerator and denominator of the bits to keep active in strategy 1 and put in penalty term for activity 2
                                represented in a 2D array of shape(2,(m/n)+1) where for each numerator and denominator, the bits represent penalized coefficient degrees and constant (1: not penalized, 0 penalized)
                                required for strategy 1 and 2

        """
        if len(args) == 0:
            # Empty construction: caller is expected to populate state later.
            pass
        else:
            if isinstance(args[0], dict):
                # Restore from a previously parsed JSON dictionary.
                self.mkFromDict(args[0])
            elif isinstance(args[0], str):
                # Restore from a JSON file on disk.
                self.mkFromJSON(args[0])
            else:
                # Fresh fit from anchor points X (args[0]) and values Y (args[1]).
                # Scaler related kwargs: an explicitly passed None falls back to
                # the documented default, matching the original behavior.
                _pnames = kwargs.get("pnames")
                _scale_min = kwargs.get("scalemin")
                if _scale_min is None:
                    _scale_min = -1
                _scale_max = kwargs.get("scalemax")
                if _scale_max is None:
                    _scale_max = 1
                self._scaler = apprentice.Scaler(np.array(args[0],
                                                          dtype=np.float64),
                                                 a=_scale_min,
                                                 b=_scale_max,
                                                 pnames=_pnames)
                self._X = self._scaler.scaledPoints
                # ONB in scaled world
                self._ONB = apprentice.ONB(self._X)
                self._dim = self._X[0].shape[0]
                self._Y = np.array(args[1], dtype=np.float64)
                self.mkFromData(kwargs=kwargs)
Beispiel #11
0
def ploterrorbars(fff, baseline=13.5, usejson=0):
    import matplotlib as mpl
    import json
    import apprentice
    if not os.path.exists('results/plots/'):
        os.makedirs('results/plots/', exist_ok=True)

    # mpl.use('pgf')
    # pgf_with_custom_preamble = {
    #     "text.usetex": True,    # use inline math for ticks
    #     "pgf.rcfonts": False,   # don't setup fonts from rc parameters
    #     "pgf.preamble": [
    #         "\\usepackage{amsmath}",         # load additional packages
    #     ]
    # }
    # mpl.rcParams.update(pgf_with_custom_preamble)

    # fff = getfarr()
    # pqqq = ['p4_q3','p2_q3','p3_q3','p3_q7','p2_q7','p3_q6','p2_q3','p3_q3']
    width = 0.15
    # import matplotlib.pyplot as plt
    # fig, ax = plt.subplots(1,2,figsize=(15,10),sharey=True)
    data = {}
    noiselevels = ['0', '10-6', '10-2']
    # noiselevels = ['0']
    # allsamples = ['mc','lhs','so','sg']
    allsamples = ['lhs', 'splitlhs', 'sg']
    # allsamples = ['mc','lhs']
    # allsamples = ['sg']
    if (usejson == 0):
        for snum, sample in enumerate(allsamples):
            data[sample] = {}
            # first = sample
            for nnum, noise in enumerate(noiselevels):
                data[sample][noise] = {}

                second = noise

                noisestr, noisepct = getnoiseinfo(noise)

                for fnum, fname in enumerate(fff):
                    data[sample][noise][fname] = {}

                    # IF USING TESTFILE

                    # testfile = "../benchmarkdata/"+fname+"_test.txt"
                    # # testfile = "../benchmarkdata/"+fname+".txt"
                    # print(testfile)
                    # bottom_or_all = all
                    # try:
                    #     X, Y = readData(testfile)
                    # except:
                    #     DATA = tools.readH5(testfile, [0])
                    #     X, Y= DATA[0]
                    #
                    # if(bottom_or_all == "bottom"):
                    #     testset = [i for i in range(trainingsize,len(X_test))]
                    #     X_test = X[testset]
                    #     Y_test = Y[testset]
                    # else:
                    #     X_test = X
                    #     Y_test = Y

                    # IF USING POLEDATA FILES
                    dim = getdim(fname)
                    infile = "results/plots/poledata_corner" + str(
                        dim) + "D.csv"
                    X_test_1 = np.loadtxt(infile, delimiter=',')
                    infile = "results/plots/poledata_inside" + str(
                        dim) + "D.csv"
                    X_test_2 = np.loadtxt(infile, delimiter=',')
                    X_test = np.vstack([X_test_1, X_test_2])
                    minarr, maxarr = getbox(fname)
                    s = apprentice.Scaler(np.array(X_test, dtype=np.float64),
                                          a=minarr,
                                          b=maxarr)
                    X_test = s.scaledPoints
                    # print(np.shape(X_test_1),np.shape(X_test_2),np.shape(X_test))
                    Y_test = np.array(getData(X_test, fname, 0))

                    # print(np.shape(np.array(Y_test)))
                    # exit(1)

                    ts = "2x"

                    datapa = []
                    datara = []
                    datarard = []
                    datarasip = []
                    for run in ["exp1", "exp2", "exp3", "exp4", "exp5"]:
                        # for run in ["./"]:
                        fndesc = "%s%s_%s_%s" % (fname, noisestr, sample, ts)
                        folder = "results/%s/%s" % (run, fndesc)
                        m = 5
                        n = 5
                        pq = "p%d_q%d" % (m, n)
                        print(run, fname, noisestr, sample, m, n)

                        rappsipfile = "%s/outrasip/%s_%s_ts%s.json" % (
                            folder, fndesc, pq, ts)
                        rappfile = "%s/outra/%s_%s_ts%s.json" % (
                            folder, fndesc, pq, ts)
                        rapprdfile = "%s/outrard/%s_%s_ts%s.json" % (
                            folder, fndesc, pq, ts)
                        pappfile = "%s/outpa/%s_%s_ts%s.json" % (
                            folder, fndesc, pq, ts)
                        if not os.path.exists(rappsipfile):
                            print("rappsipfile %s not found" % (rappsipfile))
                            if (knowmissing(rappsipfile)):
                                if (sample == "sg"):
                                    break
                                continue
                            exit(1)

                        if not os.path.exists(rappfile):
                            print("rappfile %s not found" % (rappfile))
                            if (knowmissing(rappfile)):
                                if (sample == "sg"):
                                    break
                                continue
                            exit(1)

                        if not os.path.exists(rapprdfile):
                            print("rappfile %s not found" % (rapprdfile))
                            if (knowmissing(rapprdfile)):
                                if (sample == "sg"):
                                    break
                                continue
                            exit(1)

                        if not os.path.exists(pappfile):
                            print("pappfile %s not found" % (pappfile))
                            if (knowmissing(pappfile)):
                                if (sample == "sg"):
                                    break
                                continue
                            exit(1)

                        rappsip = RationalApproximationSIP(rappsipfile)
                        Y_pred_rappsip = rappsip.predictOverArray(X_test)

                        rapp = RationalApproximationONB(fname=rappfile)
                        Y_pred_rapp = np.array([rapp(x) for x in X_test])

                        rapprd = RationalApproximationONB(fname=rapprdfile)
                        Y_pred_rapprd = np.array([rapprd(x) for x in X_test])

                        papp = PolynomialApproximation(fname=pappfile)
                        Y_pred_papp = np.array([papp(x) for x in X_test])

                        datapa.append(
                            np.log10(np.sqrt(np.sum(
                                (Y_pred_papp - Y_test)**2))))
                        datara.append(
                            np.log10(np.sqrt(np.sum(
                                (Y_pred_rapp - Y_test)**2))))
                        datarard.append(
                            np.log10(
                                np.sqrt(np.sum((Y_pred_rapprd - Y_test)**2))))
                        datarasip.append(
                            np.log10(
                                np.sqrt(np.sum((Y_pred_rappsip - Y_test)**2))))

                        if (sample == "sg"):
                            break

                    missingmean = -15
                    if (len(datapa) == 0):
                        data[sample][noise][fname]['pamean'] = missingmean
                        data[sample][noise][fname]['pasd'] = 0
                    else:
                        data[sample][noise][fname]['pamean'] = np.average(
                            datapa)
                        data[sample][noise][fname]['pasd'] = np.std(datapa)

                    if (len(datara) == 0):
                        data[sample][noise][fname]['ramean'] = missingmean
                        data[sample][noise][fname]['rasd'] = 0
                    else:
                        data[sample][noise][fname]['ramean'] = np.average(
                            datara)
                        data[sample][noise][fname]['rasd'] = np.std(datara)
                    if (len(datarard) == 0):
                        data[sample][noise][fname]['rardmean'] = missingmean
                        data[sample][noise][fname]['rardsd'] = 0
                    else:
                        data[sample][noise][fname]['rardmean'] = np.average(
                            datarard)
                        data[sample][noise][fname]['rardsd'] = np.std(datarard)
                    if (len(datarasip) == 0):
                        data[sample][noise][fname]['rasipmean'] = missingmean
                        data[sample][noise][fname]['rasipsd'] = 0
                    else:
                        data[sample][noise][fname]['rasipmean'] = np.average(
                            datarasip)
                        data[sample][noise][fname]['rasipsd'] = np.std(
                            datarasip)

                    if (sample == "sg"):
                        data[sample][noise][fname]['pasd'] = 0
                        data[sample][noise][fname]['rasd'] = 0
                        data[sample][noise][fname]['rardsd'] = 0
                        data[sample][noise][fname]['rasipsd'] = 0

        outfile111 = "results/plots/Jerrors_" + fff[0] + ".json"
        import json
        with open(outfile111, "w") as f:
            json.dump(data, f, indent=4, sort_keys=True)
        exit(0)
    # else:
    # import json
    # outfile111 = "results/plots/Jerrors.json"
    # if outfile111:
    #     with open(outfile111, 'r') as fn:
    #         data = json.load(fn)

    ecolor = 'black'
    # if(plottype == 'persample' or plottype == 'pernoiselevel'):
    # if(plottype == 'persample'):
    # minval = np.Infinity
    methodarr = ['ra', 'rard', 'rasip', 'pa']
    import matplotlib.pyplot as plt
    ffffff = plt.figure(0, figsize=(25, 20))
    # totalrow = 5
    # totalcol = 4
    totalrow = 2
    totalcol = 2
    baseline = baseline
    # color = ['#900C3F','#C70039','#FF5733','#FFC300']
    color = ['#FFC300', '#FF5733', '#900C3F']
    width = 0.2
    ecolor = 'black'
    plt.rc('ytick', labelsize=14)
    plt.rc('xtick', labelsize=14)
    props = dict(boxstyle='square', facecolor='wheat', alpha=0.5)
    X111 = np.arange(len(noiselevels) * len(methodarr))
    # color100 = ['#FFC300','#FF5733','#900C3F']
    # color1k = ['yellow','wheat','r']
    axarray = []

    for fnum, fname in enumerate(fff):
        import json
        outfile111 = "results/plots/Jerrors_" + fname + ".json"
        if outfile111:
            with open(outfile111, 'r') as fn:
                data = json.load(fn)
        plotd = {}
        for snum, sample in enumerate(allsamples):
            plotd[sample] = {}
            plotd[sample]['mean'] = []
            plotd[sample]['sd'] = []
            for nnum, noise in enumerate(noiselevels):
                for method in methodarr:
                    meankey = method + 'mean'
                    sdkey = method + 'sd'

                    plotd[sample]['mean'].append(
                        data[sample][noise][fname][meankey])
                    plotd[sample]['sd'].append(
                        data[sample][noise][fname][sdkey])
        if (len(axarray) > 0):
            ax = plt.subplot2grid((totalrow, totalcol),
                                  (int(fnum / totalcol), int(fnum % totalcol)),
                                  sharex=axarray[0],
                                  sharey=axarray[0])
            axarray.append(ax)
        else:
            ax = plt.subplot2grid((totalrow, totalcol),
                                  (int(fnum / totalcol), int(fnum % totalcol)))
            axarray.append(ax)
        ax.set_xlim(-.3, 11.7)
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        plt.axvspan(-.3, 3.7, alpha=0.5, color='pink')
        plt.axvspan(3.7, 7.7, alpha=0.5, color='lightgrey')
        plt.axvspan(7.7, 11.7, alpha=0.5, color='cyan')
        plt.title(fname)
        # plt.text(1,3.35, "$\\epsilon = 0$", fontsize=10)
        # plt.text(4,3.35, "$\\epsilon = 10^{-6}$", fontsize=10)
        # plt.text(7,3.35, "$\\epsilon = 10^{-2}$", fontsize=10)
        labels = [
            'Latin hypercube sampling', 'Split latin hypercube sampling',
            'Sparse grids'
        ]
        legendarr = [
            '$\\epsilon=0$', '$\\epsilon=10^{-6}$', '$\\epsilon=10^{-2}$'
        ]
        # plt.tight_layout()
        for snum, sample in enumerate(allsamples):
            if (sample == 'sg'):
                ax.bar(X111 + snum * width,
                       np.array(plotd[sample]['mean']) + baseline,
                       width,
                       color=color[snum],
                       label=labels[snum])
            else:
                ax.bar(X111 + snum * width,
                       np.array(plotd[sample]['mean']) + baseline,
                       width,
                       color=color[snum],
                       yerr=np.array(plotd[sample]['sd']),
                       align='center',
                       ecolor=ecolor,
                       capsize=3,
                       label=labels[snum])

        if (fnum == 0):
            l1 = ffffff.legend(loc='upper center', ncol=3, fontsize=20)
        l2 = ffffff.legend(legendarr,
                           loc='upper center',
                           ncol=4,
                           bbox_to_anchor=(0.435, 0.83),
                           fontsize=20,
                           borderaxespad=0.,
                           shadow=False)
        # ax.label_outer()
        # if(fnum==0):
        #     l222 = ffffff.legend(loc='upper center', ncol=4,bbox_to_anchor=(0.5, 0.92), fontsize = 20,borderaxespad=0.,shadow=False)

        ax.set_xticks(X111 + (len(allsamples) - 1) * width / 2)
        xlab = [
            'Algorithm \\ref{ALG:MVVandQR} w/o DR',
            'Algorithm \\ref{ALG:MVVandQR}', 'Algorithm \\ref{A:Polyak}',
            'Poly. Approx.', 'Algorithm \\ref{ALG:MVVandQR} w/o DR',
            'Algorithm \\ref{ALG:MVVandQR}', 'Algorithm \\ref{A:Polyak}',
            'Poly. Approx.', 'Algorithm \\ref{ALG:MVVandQR} w/o DR',
            'Algorithm \\ref{ALG:MVVandQR}', 'Algorithm \\ref{A:Polyak}',
            'Poly. Approx.'
        ]
        methodlabel = ['$r_1$', '$r_2$', '$r_3$', '$r_4$']
        xlab1 = np.concatenate((methodlabel, methodlabel, methodlabel),
                               axis=None)
        ax.set_xticklabels(xlab1, fontsize=22)
        # ax.set_xlabel("Approach",fontsize=22)
        ax.set_ylabel("$\\log_{10}\\left[\\Delta_r\\right]$", fontsize=22)
        # ax.label_outer()
    # ffffff.text(0.08, 0.5, "$\\log_{10}\\left[\\Delta_r\\right]$", fontsize=22,va='center', rotation='vertical')

    # plt.show()
    plt.gca().yaxis.set_major_formatter(
        mtick.FuncFormatter(lambda x, _: x - baseline))
    # plt.tight_layout()
    # plt.savefig("../../log/errors.png", bbox_extra_artists=(l1,l111,), bbox_inches='tight')
    ffffff.savefig('../../log/errors.png',
                   bbox_extra_artists=(
                       l1,
                       l2,
                   ),
                   bbox_inches='tight')
    # plt.savefig("../../log/errors.png")
    plt.clf()
    plt.close('all')

    exit(0)
    for snum, sample in enumerate(allsamples):
        import matplotlib.pyplot as plt
        plt.rc('ytick', labelsize=14)
        fig, axarr = plt.subplots(3, 1, sharey=True, figsize=(21, 20))
        for nnum, noise in enumerate(noiselevels):
            pa = []
            ra = []
            rard = []
            rasip = []
            paerror = []
            raerror = []
            rarderror = []
            rasiperror = []
            for fnum, fname in enumerate(fff):
                pa.append(data[sample][noise][fname]['pamean'])
                paerror.append(data[sample][noise][fname]['pasd'])

                ra.append(data[sample][noise][fname]['ramean'])
                raerror.append(data[sample][noise][fname]['rasd'])

                rard.append(data[sample][noise][fname]['rardmean'])
                rarderror.append(data[sample][noise][fname]['rardsd'])

                rasip.append(data[sample][noise][fname]['rasipmean'])
                rasiperror.append(data[sample][noise][fname]['rasipsd'])

            p1 = axarr[nnum].bar(X111,
                                 np.array(pa) + baseline,
                                 width,
                                 color=color[0],
                                 yerr=np.array(paerror),
                                 align='center',
                                 ecolor=ecolor,
                                 capsize=3)
            p2 = axarr[nnum].bar(X111 + width,
                                 np.array(ra) + baseline,
                                 width,
                                 color=color[1],
                                 yerr=np.array(raerror),
                                 align='center',
                                 ecolor=ecolor,
                                 capsize=3)
            p3 = axarr[nnum].bar(X111 + 2 * width,
                                 np.array(rard) + baseline,
                                 width,
                                 color=color[2],
                                 yerr=np.array(rarderror),
                                 align='center',
                                 ecolor=ecolor,
                                 capsize=3)
            p4 = axarr[nnum].bar(X111 + 3 * width,
                                 np.array(rasip) + baseline,
                                 width,
                                 color=color[3],
                                 yerr=np.array(rasiperror),
                                 align='center',
                                 alpha=0.5,
                                 ecolor=ecolor,
                                 capsize=3)
            axarr[nnum].legend(
                (p1[0], p2[0], p3[0], p4[0]),
                ('Polynomial Approx. ', 'Algorithm \\ref{ALG:MVVandQR}',
                 'Algorithm \\ref{ALG:MVVandQR} w/ DR',
                 'Algorithm \\ref{A:Polyak}'),
                loc='upper right',
                fontsize=15)

        for ax in axarr.flat:
            ax.set_xticks(X111 + 3 * width / 2)
            xlab = []
            for f in fff:
                print(f)
                # xlab.append("\\ref{fn:%s}"%(f))
                xlab.append("%s" % (f))
            ax.set_xticklabels(xlab, fontsize=14)
            ax.set_ylabel('$log_{10}(\\Delta_r)$', fontsize=17)
            ax.label_outer()

        plt.gca().yaxis.set_major_formatter(
            mtick.FuncFormatter(lambda x, _: x - baseline))
        plt.tight_layout()
        print(xlab)
        # plt.show()
        # plt.savefig("plots/Perrorbars.pgf", bbox_inches="tight")
        # outfile111 = "results/plots/Perrorbars_for_%s.pdf"%(sample)
        outfile111 = "results/plots/Perrorbars_for_%s.pgf" % (sample)
        plt.savefig(outfile111, bbox_inches="tight")
        plt.clf()
        plt.close('all')

        # elif(plottype == 'pernoiselevel'):

        # FOR FUTURE
        # approxqqq = ["Polynomial Approximation", 'RA (linear algebra) without degree reduction', 'RA (linear algebra) with degree reduction', 'Pole-free RA']
        # for nnum,noise in enumerate(noiselevels):
        #     import matplotlib.pyplot as plt
        #     plt.rc('ytick',labelsize=14)
        #     fig, axarr = plt.subplots(4, 1, sharey=True,figsize=(21,20))
        #     for anum,approx in enumerate(["pa","ra","rard","rasip"]):
        #         barobj = {}
        #         for snum,sample in enumerate(allsamples):
        #             mean = []
        #             sd = []
        #             for fnum,fname in enumerate(fff):
        #                 mean.append(data[sample][noise][fname][approx+"mean"])
        #                 sd.append(data[sample][noise][fname][approx+"sd"])
        #             barobj[snum] = axarr[anum].bar(X111+snum*width, np.array(mean)+baseline, width,color=color[snum], yerr=np.array(sd),align='center',  ecolor=ecolor, capsize=3)
        #
        #         axarr[anum].legend((barobj[0][0],barobj[1][0],barobj[2][0],barobj[3][0]),('Uniform Random','Latin Hypercube','Sobol Sequence', 'Sparse Grids'),loc = 'upper right',fontsize = 15)
        #         axarr[anum].set_title(" approx = "+approxqqq[anum],fontsize = 15)

        for nnum, noise in enumerate(noiselevels):
            import matplotlib.pyplot as plt
            plt.rc('ytick', labelsize=14)
            fig, axarr = plt.subplots(4, 1, sharey=True, figsize=(21, 20))
            for anum, approx in enumerate(["pa", "ra", "rard", "rasip"]):
                for snum, sample in enumerate(allsamples):
                    barobj = []
                    mean = []
                    sd = []
                    for fnum, fname in enumerate(fff):
                        mean.append(data[sample][noise][fname][approx +
                                                               "mean"])
                        sd.append(data[sample][noise][fname][approx + "sd"])
                    barobj.append(axarr[anum].bar(X111 + snum * width,
                                                  np.array(mean) + baseline,
                                                  width,
                                                  color=color[snum],
                                                  yerr=np.array(sd),
                                                  align='center',
                                                  ecolor=ecolor,
                                                  capsize=3))
                # axarr[anum].legend(barobj,('Polynomial Approx. ', 'Algorithm \\ref{ALG:MVVandQR}','Algorithm \\ref{A:Polyak}'),loc = 'upper right',fontsize = 15)
                axarr[anum].set_title(str(allsamples) + " approx = " + approx)
            for ax in axarr.flat:
                ax.set_xticks(X111 + (len(allsamples) - 1) * width / 2)
                xlab = []
                for f in fff:
                    print(f)
                    # xlab.append("\\ref{fn:%s}"%(f))
                    xlab.append("%s" % (f))
                ax.set_xticklabels(xlab, fontsize=14)
                ax.set_ylabel('$log_{10}(\\Delta_r)$', fontsize=17)
                ax.label_outer()

            plt.gca().yaxis.set_major_formatter(
                mtick.FuncFormatter(lambda x, _: x - baseline))
            plt.tight_layout()
            print(xlab)
            # plt.show()
            # plt.savefig("plots/Perrorbars.pgf", bbox_inches="tight")
            # outfile111 = "results/plots/Perrorbars_for_%s.pdf"%(noise)
            outfile111 = "results/plots/Perrorbars_for_%s.pgf" % (noise)
            plt.savefig(outfile111, bbox_inches="tight")
            plt.clf()
            plt.close('all')
Beispiel #12
0
def runfacevsinner():
    """
    Experiment driver: for f18/f20, build split-LHS training designs with a
    varying fraction of points on the box faces vs. the interior, write the
    sampled data to disk, and launch background `runrappsip.py` fits via
    os.system for every (experiment, face-fraction, function) combination.
    A summary of launched jobs is dumped to <folder>/data.json.
    """
    def getData(X_train, fn, noisepct, seed):
        """
        TODO use eval or something to make this less noisy
        """
        from apprentice import testData
        if fn == "f18":
            Y_train = [testData.f18(x) for x in X_train]
        elif fn == "f20":
            Y_train = [testData.f20(x) for x in X_train]
        else:
            raise Exception("function {} not implemented, exiting".format(fn))
        np.random.seed(seed)

        # Multiplicative Gaussian noise scaled by noisepct.
        stdnormalnoise = np.zeros(shape=(len(Y_train)), dtype=np.float64)
        for i in range(len(Y_train)):
            stdnormalnoise[i] = np.random.normal(0, 1)
        # return Y_train
        return np.atleast_2d(
            np.array(Y_train) * (1 + noisepct * stdnormalnoise))

    def getbox(f):
        """Return the per-dimension (min, max) bounds of the box for function f."""
        minbox = []
        maxbox = []
        if (f == "f18"):
            minbox = [-0.95, -0.95, -0.95, -0.95]
            maxbox = [0.95, 0.95, 0.95, 0.95]
        elif (f == "f20"):
            minbox = [10**-6, 10**-6, 10**-6, 10**-6]
            maxbox = [4 * np.pi, 4 * np.pi, 4 * np.pi, 4 * np.pi]
        else:
            minbox = [-1, -1]
            maxbox = [1, 1]
        return minbox, maxbox

    from apprentice import tools
    data = {'info': []}
    import apprentice
    dim = 4
    # One fixed RNG seed per repeated experiment for reproducibility.
    seedarr = [54321, 456789, 9876512, 7919820, 10397531]

    m = 5
    n = 5
    tstimes = 2
    ts = "2x"
    # fname = "f20-"+str(dim)+"D_ts"+ts
    fname = "f20-" + str(dim) + "D"
    from apprentice import tools
    from pyDOE import lhs
    # Training-set size: 2x the number of rational-approximation coefficients.
    npoints = tstimes * tools.numCoeffsRapp(dim, [m, n])
    print(npoints)
    # Face-slab thickness per dimension (fixed at 1e-6 here).
    epsarr = []
    for d in range(dim):
        #epsarr.append((maxarr[d] - minarr[d])/10)
        epsarr.append(10**-6)

    facespctarr = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    folder = "f18_f20_facevsinner"
    for numex, ex in enumerate(["exp1", "exp2", "exp3", "exp4", "exp5"]):
        seed = seedarr[numex]
        for facenum, facepct in enumerate(facespctarr):
            # Split the budget between face points and interior points.
            facepoints = int(np.ceil(npoints * facepct))
            insidepoints = int(npoints - facepoints)
            # print(insidepoints)
            # Generate inside points
            for fname in ['f18', 'f20']:
                Xmain = np.empty([0, dim])
                minarrinside = []
                maxarrinside = []
                minarr, maxarr = getbox(fname)
                if (insidepoints > 1):
                    # Interior LHS, shrunk by epsarr so no point lands on a face.
                    for d in range(dim):
                        minarrinside.append(minarr[d] + epsarr[d])
                        maxarrinside.append(maxarr[d] - epsarr[d])
                    X = lhs(dim, samples=insidepoints, criterion='maximin')
                    s = apprentice.Scaler(np.array(X, dtype=np.float64),
                                          a=minarrinside,
                                          b=maxarrinside)
                    X = s.scaledPoints
                    Xmain = np.vstack((Xmain, X))

                #Generate face points
                # One thin LHS slab (width epsarr[d]) per face of the box.
                perfacepoints = int(np.ceil(facepoints / (2 * dim)))
                if (perfacepoints > 1):
                    index = 0
                    for d in range(dim):
                        for e in [minarr[d], maxarr[d]]:
                            index += 1
                            np.random.seed(seed + index * 100)
                            X = lhs(dim,
                                    samples=perfacepoints,
                                    criterion='maximin')
                            minarrface = np.empty(shape=dim, dtype=np.float64)
                            maxarrface = np.empty(shape=dim, dtype=np.float64)
                            for p in range(dim):
                                if (p == d):
                                    if e == maxarr[d]:
                                        minarrface[p] = e - epsarr[d]
                                        maxarrface[p] = e
                                    else:
                                        minarrface[p] = e
                                        maxarrface[p] = e + epsarr[d]
                                else:
                                    minarrface[p] = minarr[p]
                                    maxarrface[p] = maxarr[p]
                            s = apprentice.Scaler(np.array(X,
                                                           dtype=np.float64),
                                                  a=minarrface,
                                                  b=maxarrface)
                            X = s.scaledPoints
                            Xmain = np.vstack((Xmain, X))
                # Drop duplicate points before sampling function values.
                Xmain = np.unique(Xmain, axis=0)
                X = Xmain
                # formatStr = "{0:0%db}"%(dim)
                # for d in range(2**dim):
                #     binArr = [int(x) for x in formatStr.format(d)[0:]]
                #     val = []
                #     for i in range(dim):
                #         if(binArr[i] == 0):
                #             val.append(minarr[i])
                #         else:
                #             val.append(maxarr[i])
                #     X[d] = val
                if not os.path.exists(folder + "/" + ex + '/benchmarkdata'):
                    os.makedirs(folder + "/" + ex + '/benchmarkdata',
                                exist_ok=True)
                noise = "0"
                noisestr, noisepct = getnoiseinfo(noise)

                Y = getData(X, fn=fname, noisepct=noisepct, seed=seed)
                infile = "%s/%s/benchmarkdata/%s%s_splitlhs_f%d_i%d.txt" % (
                    folder, ex, fname, noisestr, facepoints, insidepoints)
                print(infile)
                np.savetxt(infile, np.hstack((X, Y.T)), delimiter=",")

                folderplus = "%s/%s/%s%s_splitlhs" % (folder, ex, fname,
                                                      noisestr)
                fndesc = "%s%s_splitlhs_f%d_i%d" % (fname, noisestr,
                                                    facepoints, insidepoints)
                if not os.path.exists(folderplus + "/outrasip"):
                    os.makedirs(folderplus + "/outrasip", exist_ok=True)
                if not os.path.exists(folderplus + "/log/consolelograsip"):
                    os.makedirs(folderplus + "/log/consolelograsip",
                                exist_ok=True)
                # NOTE(review): rebinds the enclosing ints m, n to strings
                # inside the loop; later iterations str() them again (no-op),
                # but npoints above was computed from the original ints.
                m = str(m)
                n = str(n)
                consolelog = folderplus + "/log/consolelograsip/" + fndesc + "_p" + m + "_q" + n + "_ts2x.log"
                outfile = folderplus + "/outrasip/" + fndesc + "_p" + m + "_q" + n + "_ts2x.json"
                data['info'].append({
                    'exp': ex,
                    'fname': fname,
                    'outfile': outfile,
                    'facepoints': facepoints,
                    'insidepoints': insidepoints
                })
                penaltyparam = 0
                # NOTE(review): shell command assembled by string interpolation
                # and run via os.system; inputs are internal here, but prefer
                # subprocess.run([...]) if any argument could become external.
                cmd = 'nohup python runrappsip.py %s %s %s %s Cp %f %s %s >%s 2>&1 &' % (
                    infile, fndesc, m, n, penaltyparam, folderplus, outfile,
                    consolelog)
                # print(cmd)
                # exit(1)
                os.system(cmd)

    import json
    with open(folder + "/data.json", "w") as f:
        json.dump(data, f, indent=4, sort_keys=True)
Beispiel #13
0
def cubeplot():
    """Visualize split-LHS sampling on the faces of the [-1, 1]^3 cube.

    Draws the cube wireframe, then for each of the six faces generates a
    Latin-hypercube point set squeezed into a thin slab of width ``eps``
    against that face, scatters the points, and shows the plot.
    """
    from mpl_toolkits.mplot3d import Axes3D  # registers the 3d projection on old matplotlib
    import matplotlib.pyplot as plt
    import numpy as np
    from itertools import product, combinations

    fig = plt.figure()
    # fig.gca(projection=...) was deprecated in matplotlib 3.4 and removed
    # in 3.6; add_subplot is the supported way to obtain a 3D axes.
    ax = fig.add_subplot(projection='3d')
    # NOTE(review): 'equal' aspect on 3D axes is only supported in
    # matplotlib >= 3.6; older 3.x versions raise NotImplementedError here.
    ax.set_aspect("equal")

    vertices = np.empty(shape=(0, 3))
    # Draw the cube wireframe: two corners share an edge iff they differ in
    # exactly one coordinate, i.e. their L1 distance equals the side length.
    r = [-1, 1]
    coord = combinations(np.array(list(product(r, r, r))), 2)
    for s, e in coord:
        if np.sum(np.abs(s - e)) == r[1] - r[0]:
            ax.plot3D(*zip(s, e), color="b")
        vertices = np.append(vertices, [s], axis=0)
        vertices = np.append(vertices, [e], axis=0)
    vertices = np.unique(vertices, axis=0)
    print(np.shape(vertices))

    print(vertices)

    npoints = 50
    eps = 0.2
    dim = 3
    from pyDOE import lhs
    import apprentice

    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_zlabel("z")

    Xmain = np.empty([0, 3])
    index = -1
    for d in range(dim):
        for x in [-1, 1]:
            index += 1
            # One fixed seed per face for reproducibility (6 faces total).
            seedarr = [221, 323, 545, 435, 944, 664]
            np.random.seed(seedarr[index])
            X = lhs(dim, samples=npoints, criterion='maximin')
            minarr = [0, 0, 0]
            maxarr = [0, 0, 0]
            for p in range(dim):
                if (p == d):
                    # Thin slab of width eps hugging the face at coordinate x.
                    # (For x in {-1, 1}, x - sign(x)*eps pulls eps inward.)
                    if x == 1:
                        minarr[p] = x - eps
                        maxarr[p] = x
                    else:
                        minarr[p] = x
                        maxarr[p] = x + eps
                else:
                    # Remaining dimensions span the full cube extent.
                    minarr[p] = -1
                    maxarr[p] = 1

            print(minarr, maxarr)
            # Rescale the unit-cube LHS points into the slab box.
            s = apprentice.Scaler(np.array(X, dtype=np.float64),
                                  a=minarr,
                                  b=maxarr)
            X = s.scaledPoints
            Xmain = np.vstack((Xmain, X))
            ax.scatter3D(X[:, 0], X[:, 1], X[:, 2])
    print(np.shape(Xmain))
    Xmain = np.unique(Xmain, axis=0)
    print(np.shape(Xmain))

    plt.show()
Beispiel #14
0
def generatebenchmarkdata(m, n):
    """Generate training data files for every test function, sampling scheme,
    experiment repetition, and noise level.

    For each function in getfarr() and each sampling strategy in samplearr
    ("mc", "lhs", "so", "sg", "splitlhs"), draws ts * numCoeffsRapp(dim, (m, n))
    points inside the function's box, evaluates the function at four noise
    levels, and writes CSV files under results/<exp>/benchmarkdata/.

    m, n --- numerator/denominator polynomial orders (used only to size the
             point set; accepted as str or int since they pass through int()).
    """
    # One seed per experiment repetition exp1..exp5.
    seedarr = [54321, 456789, 9876512, 7919820, 10397531]
    folder = "results"
    samplearr = ["mc", "lhs", "so", "sg", "splitlhs"]
    # samplearr = ["lhs","sg","splitlhs"]
    # samplearr = ["lhs","splitlhs"]
    # samplearr = ["splitlhs"]
    from apprentice import tools
    from pyDOE import lhs
    import apprentice
    ts = 2  # oversampling factor: 2x the number of rational-approx coefficients
    farr = getfarr()
    for fname in farr:
        dim = getdim(fname)
        minarr, maxarr = getbox(fname)
        npoints = ts * tools.numCoeffsRapp(dim, [int(m), int(n)])
        print(npoints)
        for sample in samplearr:
            for numex, ex in enumerate(
                ["exp1", "exp2", "exp3", "exp4", "exp5"]):
                seed = seedarr[numex]
                np.random.seed(seed)
                if (sample == "mc"):
                    # Uniform Monte Carlo: sample each dimension independently,
                    # then stack columns into an (npoints, dim) array.
                    Xperdim = ()
                    for d in range(dim):
                        Xperdim = Xperdim + (np.random.rand(npoints, ) *
                                             (maxarr[d] - minarr[d]) +
                                             minarr[d], )
                    X = np.column_stack(Xperdim)
                    # Overwrite the first 2^dim points with the box corners,
                    # enumerated via the binary representation of d.
                    formatStr = "{0:0%db}" % (dim)
                    for d in range(2**dim):
                        binArr = [int(x) for x in formatStr.format(d)[0:]]
                        val = []
                        for i in range(dim):
                            if (binArr[i] == 0):
                                val.append(minarr[i])
                            else:
                                val.append(maxarr[i])
                        X[d] = val
                elif (sample == "sg"):
                    # Sparse (Smolyak) grid: grow the level l until the grid
                    # has at least npoints nodes.
                    from dolo.numeric.interpolation.smolyak import SmolyakGrid
                    s = 0
                    l = 1
                    while (s < npoints):
                        sg = SmolyakGrid(a=minarr, b=maxarr, l=l)
                        s = sg.grid.shape[0]
                        l += 1
                    X = sg.grid
                elif (sample == "so"):
                    # Sobol sequence, then rescaled into the function's box.
                    X = my_i4_sobol_generate(dim, npoints, seed)
                    s = apprentice.Scaler(np.array(X, dtype=np.float64),
                                          a=minarr,
                                          b=maxarr)
                    X = s.scaledPoints
                elif (sample == "lhs"):
                    # Latin hypercube on the unit cube, rescaled into the box.
                    X = lhs(dim, samples=npoints, criterion='maximin')
                    s = apprentice.Scaler(np.array(X, dtype=np.float64),
                                          a=minarr,
                                          b=maxarr)
                    X = s.scaledPoints
                elif (sample == "splitlhs"):
                    # Split LHS: an interior LHS set plus thin LHS slabs on
                    # every face of the box, eps away from the boundary.
                    epsarr = []
                    for d in range(dim):
                        #epsarr.append((maxarr[d] - minarr[d])/10)
                        epsarr.append(10**-6)

                    # Budget: 2x the coefficient count of a (dim-1)-dimensional
                    # approximation goes onto the faces, the rest inside.
                    facepoints = int(
                        2 *
                        tools.numCoeffsRapp(dim - 1, [int(m), int(n)]))
                    insidepoints = int(npoints - facepoints)
                    Xmain = np.empty([0, dim])
                    # Generate inside points
                    minarrinside = []
                    maxarrinside = []
                    for d in range(dim):
                        minarrinside.append(minarr[d] + epsarr[d])
                        maxarrinside.append(maxarr[d] - epsarr[d])
                    X = lhs(dim, samples=insidepoints, criterion='maximin')
                    s = apprentice.Scaler(np.array(X, dtype=np.float64),
                                          a=minarrinside,
                                          b=maxarrinside)
                    X = s.scaledPoints
                    Xmain = np.vstack((Xmain, X))

                    #Generate face points
                    # 2*dim faces, facepoints spread evenly across them.
                    perfacepoints = int(np.ceil(facepoints / (2 * dim)))
                    index = 0
                    for d in range(dim):
                        for e in [minarr[d], maxarr[d]]:
                            index += 1
                            # Distinct seed per face so faces differ.
                            np.random.seed(seed + index * 100)
                            X = lhs(dim,
                                    samples=perfacepoints,
                                    criterion='maximin')
                            minarrface = np.empty(shape=dim, dtype=np.float64)
                            maxarrface = np.empty(shape=dim, dtype=np.float64)
                            for p in range(dim):
                                if (p == d):
                                    # Slab of width eps hugging face e.
                                    if e == maxarr[d]:
                                        minarrface[p] = e - epsarr[d]
                                        maxarrface[p] = e
                                    else:
                                        minarrface[p] = e
                                        maxarrface[p] = e + epsarr[d]
                                else:
                                    minarrface[p] = minarr[p]
                                    maxarrface[p] = maxarr[p]
                            s = apprentice.Scaler(np.array(X,
                                                           dtype=np.float64),
                                                  a=minarrface,
                                                  b=maxarrface)
                            X = s.scaledPoints
                            Xmain = np.vstack((Xmain, X))
                    # De-duplicate, then force the first 2^dim points to be
                    # the exact box corners (same scheme as the "mc" branch).
                    Xmain = np.unique(Xmain, axis=0)
                    X = Xmain
                    formatStr = "{0:0%db}" % (dim)
                    for d in range(2**dim):
                        binArr = [int(x) for x in formatStr.format(d)[0:]]
                        val = []
                        for i in range(dim):
                            if (binArr[i] == 0):
                                val.append(minarr[i])
                            else:
                                val.append(maxarr[i])
                        X[d] = val

                if not os.path.exists(folder + "/" + ex + '/benchmarkdata'):
                    os.makedirs(folder + "/" + ex + '/benchmarkdata',
                                exist_ok=True)
                # Evaluate the function at each noise level and dump
                # "x1,...,xd,y" rows to a CSV per (function, noise, sample).
                for noise in ["0", "10-2", "10-4", "10-6"]:
                    noisestr, noisepct = getnoiseinfo(noise)

                    Y = getData(X, fn=fname, noisepct=noisepct, seed=seed)

                    outfile = "%s/%s/benchmarkdata/%s%s_%s.txt" % (
                        folder, ex, fname, noisestr, sample)
                    print(outfile)
                    np.savetxt(outfile, np.hstack((X, Y.T)), delimiter=",")
                # The sparse grid is deterministic, so one repetition suffices.
                if (sample == "sg"):
                    break
Beispiel #15
0
def generatespecialdata():
    """Write Sobol-sampled training files for a few functions at many noise levels.

    Draws one Sobol point set on [-1, 1]^2, evaluates each function in farr
    on it, multiplies the clean values by (1 + noisepct * N(0, 1)) with a
    shared standard-normal vector, and saves "x1,x2,y" CSV files.
    """
    from apprentice import tools
    from pyDOE import lhs
    m = 5
    n = 5
    dim = 2
    farr = ["f8", "f9", "f12"]
    noisearr = ["0", "10-12", "10-10", "10-8", "10-6", "10-4"]
    ts = 2  # oversampling factor relative to the coefficient count
    npoints = ts * tools.numCoeffsRapp(dim, [int(m), int(n)])

    import apprentice
    seed = 54321
    sample = "so"
    # Sobol sequence rescaled onto the [-1, 1]^2 box.
    X = my_i4_sobol_generate(dim, npoints, seed)
    s = apprentice.Scaler(np.array(X, dtype=np.float64), a=[-1, -1], b=[1, 1])
    X = s.scaledPoints
    lennn = npoints

    # One shared noise vector so all noise levels perturb identically
    # (only the amplitude changes).
    stdnormalnoise = np.array(
        [np.random.normal(0, 1) for _ in range(lennn)], dtype=np.float64)

    # Relative noise amplitude per label; unlisted labels (i.e. "0") mean none.
    noiselevels = {
        "10-4": 10**-4,
        "10-6": 10**-6,
        "10-8": 10**-8,
        "10-10": 10**-10,
        "10-12": 10**-12,
    }

    for fname in farr:
        # Box is queried but not used below; X stays on [-1, 1]^2.
        minarr, maxarr = getbox(fname)

        for noise in noisearr:
            noisestr = "" if noise == "0" else "_noisepct" + noise
            noisepct = noiselevels.get(noise, 0)
            print(noisepct)
            # Clean function values; noise is applied manually below.
            Y = getData(X, fn=fname, noisepct=0, seed=seed)
            Y_train = np.atleast_2d(
                np.array(Y) * (1 + noisepct * stdnormalnoise))
            outfolder = "/Users/mkrishnamoorthy/Desktop/Data"
            outfile = "%s/%s%s_%s.txt" % (outfolder, fname, noisestr, sample)
            print(outfile)
            np.savetxt(outfile, np.hstack((X, Y_train.T)), delimiter=",")
Beispiel #16
0
def getresults(farr, noisearr, tarr, ts, allsamples, usecornerpoints):
    """Collect L2-error statistics for three rational-approximation methods.

    For every (function, sample scheme, noise level), loads the fitted
    approximations of five experiment repetitions, evaluates them on a shared
    pole-data test set, and aggregates total L2 error plus per-threshold
    counts/errors of points whose |prediction|/maxY exceeds each threshold.

    farr            --- list of test-function names
    noisearr        --- noise-level labels understood by getnoiseinfo()
    tarr            --- threshold values (strings or floats)
    ts              --- training-size tag used in file names
    allsamples      --- sampling-scheme names
    usecornerpoints --- 1 to use the corner test set, anything else for inside

    Returns a nested dict results[fname][sample][noise][method][...] with
    mean and standard deviation over the available repetitions.
    """
    import apprentice
    m = 5
    n = 5
    # Thresholds are evaluated in ascending order.
    thresholdvalarr = np.array([float(t) for t in tarr])
    thresholdvalarr = np.sort(thresholdvalarr)

    results = {}
    for fnum, fname in enumerate(farr):
        results[fname] = {}
        dim = getdim(fname)
        if (usecornerpoints == 1):
            infile = "results/plots/poledata_corner" + str(dim) + "D.csv"
        else:
            infile = "results/plots/poledata_inside" + str(dim) + "D.csv"

        X_test = np.loadtxt(infile, delimiter=',')

        print(len(X_test))
        # Rescale the generic test points into this function's box.
        minarr, maxarr = getbox(fname)
        s = apprentice.Scaler(np.array(X_test, dtype=np.float64),
                              a=minarr,
                              b=maxarr)
        X_test = s.scaledPoints

        Y_test = np.array(getData(X_test, fname, 0))
        # Normalization for the threshold test below (at least 1).
        # NOTE(review): abs(np.max(...)) differs from np.max(np.abs(...)) when
        # all values are negative — kept as-is; confirm intent.
        maxY_test = max(1, abs(np.max(Y_test)))
        print(fname, maxY_test)

        results[fname]['npoints'] = len(Y_test)

        for snum, sample in enumerate(allsamples):
            results[fname][sample] = {}
            for noise in noisearr:
                results[fname][sample][noise] = {}
                noisestr, noisepct = getnoiseinfo(noise)

                # Per-repetition accumulators for each method.
                resdata = {}
                resdata['rapp'] = {}
                resdata['rapprd'] = {}
                resdata['rappsip'] = {}

                resdata['rapp']['l2all'] = []
                resdata['rapprd']['l2all'] = []
                resdata['rappsip']['l2all'] = []

                for tval in thresholdvalarr:
                    for method in ['rapp', 'rapprd', 'rappsip']:
                        resdata[method][str(tval)] = {}
                        resdata[method][str(tval)]['no'] = []
                        resdata[method][str(tval)]['l2count'] = []
                        resdata[method][str(tval)]['l2notcount'] = []

                for rnum, run in enumerate(
                    ["exp1", "exp2", "exp3", "exp4", "exp5"]):
                    fndesc = "%s%s_%s_%s" % (fname, noisestr, sample, ts)
                    folder = "results/%s/%s" % (run, fndesc)
                    # print(folder)
                    pq = "p%d_q%d" % (m, n)
                    # print(run, fname,noisestr,sample,m,n)

                    rappsipfile = "%s/outrasip/%s_%s_ts%s.json" % (
                        folder, fndesc, pq, ts)
                    rappfile = "%s/outra/%s_%s_ts%s.json" % (folder, fndesc,
                                                             pq, ts)
                    rapprdfile = "%s/outrard/%s_%s_ts%s.json" % (
                        folder, fndesc, pq, ts)

                    # Skip repetitions that are known to be missing; abort on
                    # any unexpected gap.
                    if not os.path.exists(rappsipfile):
                        print("rappsipfile %s not found" % (rappsipfile))
                        if (knowmissing(rappsipfile) == 1):
                            if (sample == "sg"):
                                break
                            continue
                        sys.exit(1)

                    if not os.path.exists(rappfile):
                        print("rappfile %s not found" % (rappfile))
                        if (knowmissing(rappfile) == 1):
                            if (sample == "sg"):
                                break
                            continue
                        sys.exit(1)

                    if not os.path.exists(rapprdfile):
                        print("rapprdfile %s not found" % (rapprdfile))
                        if (knowmissing(rapprdfile) == 1):
                            if (sample == "sg"):
                                break
                            continue
                        sys.exit(1)
                    print(fndesc + " Start")

                    # Fall back to the point-by-point evaluator when the
                    # vectorized/direct call fails.  (Bare "except:" would also
                    # swallow KeyboardInterrupt/SystemExit — keep it narrow.)
                    rappsip = RationalApproximationSIP(rappsipfile)
                    try:
                        Y_pred_rappsip = rappsip.predictOverArray(X_test)
                    except Exception:
                        Y_pred_rappsip = findpredval(X_test, rappsip)

                    rapp = RationalApproximationONB(fname=rappfile)
                    try:
                        Y_pred_rapp = np.array([rapp(x) for x in X_test])
                    except Exception:
                        Y_pred_rapp = findpredval(X_test, rapp)

                    rapprd = RationalApproximationONB(fname=rapprdfile)
                    try:
                        Y_pred_rapprd = np.array([rapprd(x) for x in X_test])
                    except Exception:
                        Y_pred_rapprd = findpredval(X_test, rapprd)

                    print(fndesc + " Done")
                    sys.stdout.flush()

                    # Total squared error over the whole test set.
                    l2allrapp = np.sum((Y_pred_rapp - Y_test)**2)
                    l2allrapprd = np.sum((Y_pred_rapprd - Y_test)**2)
                    l2allrappsip = np.sum((Y_pred_rappsip - Y_test)**2)
                    # print(l2allrapp,l2allrapprd,l2allrappsip)

                    resdata['rapp']['l2all'].append(np.sqrt(l2allrapp))
                    resdata['rapprd']['l2all'].append(np.sqrt(l2allrapprd))
                    resdata['rappsip']['l2all'].append(np.sqrt(l2allrappsip))

                    # Start this repetition's per-threshold tallies at zero.
                    for tnum, tval in enumerate(thresholdvalarr):
                        for method in ['rapp', 'rapprd', 'rappsip']:
                            resdata[method][str(tval)]['no'].append(0)
                            resdata[method][str(tval)]['l2count'].append(0.)
                            resdata[method][str(tval)]['l2notcount'].append(0.)

                    # Count points whose normalized |prediction| exceeds each
                    # threshold and accumulate their squared error.
                    for num, yt in enumerate(Y_test):
                        for method, pred in zip(
                            ['rapp', 'rapprd', 'rappsip'],
                            [Y_pred_rapp, Y_pred_rapprd, Y_pred_rappsip]):
                            yp = pred[num]
                            for tnum, tval in enumerate(thresholdvalarr):
                                if (abs(yp) / maxY_test > tval):
                                    resdata[method][str(tval)]['no'][rnum] += 1
                                    resdata[method][str(
                                        tval)]['l2count'][rnum] += (yp - yt)**2

                    # Split the total error into the above-threshold part and
                    # the remainder, both reported as root values.
                    for tnum, tval in enumerate(thresholdvalarr):
                        for method, l2all in zip(
                            ['rapp', 'rapprd', 'rappsip'],
                            [l2allrapp, l2allrapprd, l2allrappsip]):
                            l2count = resdata[method][str(
                                tval)]['l2count'][rnum]
                            resdata[method][str(
                                tval)]['l2notcount'][rnum] = np.sqrt(l2all -
                                                                     l2count)
                            resdata[method][str(
                                tval)]['l2count'][rnum] = np.sqrt(l2count)

                    # The sparse grid is deterministic; one repetition suffices.
                    if (sample == "sg"):
                        break
                # Sentinel mean when no repetition produced data.
                missingmean = -1
                for method in ['rapp', 'rapprd', 'rappsip']:
                    l2allarr = resdata[method]['l2all']
                    results[fname][sample][noise][method] = {}
                    if (len(l2allarr) != 0):
                        results[fname][sample][noise][method][
                            'l2all'] = np.average(l2allarr)
                        results[fname][sample][noise][method][
                            'l2allsd'] = np.std(l2allarr)
                    else:
                        results[fname][sample][noise][method][
                            'l2all'] = missingmean
                        results[fname][sample][noise][method]['l2allsd'] = 0

                for tval in thresholdvalarr:
                    for method in ['rapp', 'rapprd', 'rappsip']:
                        results[fname][sample][noise][method][str(tval)] = {}
                        for key in ['l2notcount', 'l2count', 'no']:

                            arr = resdata[method][str(tval)][key]
                            if (len(arr) != 0):
                                results[fname][sample][noise][method][str(
                                    tval)][key] = np.average(arr)
                                results[fname][sample][noise][method][str(
                                    tval)][key + 'sd'] = np.std(arr)
                            else:
                                results[fname][sample][noise][method][str(
                                    tval)][key] = missingmean
                                results[fname][sample][noise][method][str(
                                    tval)][key + 'sd'] = 0

        print("done with fn: %s for usecornerpoints = %d" %
              (fname, usecornerpoints))

    return results
Beispiel #17
0
    import sys

    assert (sys.argv[1] != sys.argv[2])
    binids, P = app.io.readApprox(sys.argv[1])

    SC = [p._scaler for p in P]

    DIM = SC[0].dim

    xmin, xmax = [], []
    for d in range(DIM):
        xmin.append(max([s.box[d][0] for s in SC]))
        xmax.append(min([s.box[d][1] for s in SC]))

    # New scaler
    sc = app.Scaler([xmin, xmax])
    sc._pnames = SC[0].pnames

    NC = app.tools.numCoeffsPoly(P[0].dim, P[0].m)
    X = P[0]._scaler.drawSamples(NC)
    A = np.prod(np.power(sc.scale(X), P[0]._struct_p[:, np.newaxis]), axis=2).T

    Z = []
    import time
    t0 = time.time()
    for num, p in enumerate(P):
        # cnew = app.tools.refitPoly(p, sc)
        cnew = app.tools.refitPolyAX(p, A, X)
        Z.append(cnew)
        if (num + 1) % 50 == 0:
            print("{}/{} after {} seconds".format(num + 1, len(P),
Beispiel #18
0
#!/usr/bin/env python
# Propose a new parameter box from optimisation results: shrink/grow the
# current Scaler box around the observed min/max of the sampled points,
# widening further on any side where the results pressed against the wall.

import json
import apprentice

import numpy as np

if __name__ == "__main__":

    import sys
    # argv[1]: JSON file with "x" (sampled points) and "fun" (function values)
    # argv[2]: a stored apprentice.Scaler describing the current box
    M=json.load(open(sys.argv[1]))
    X = np.array(M["x"])
    Y = np.array(M["fun"])

    S=apprentice.Scaler(sys.argv[2])

    # Trivial new box based on min/max of results
    M_min = np.min(X, axis=0)
    M_max = np.max(X, axis=0)

    # Extra width
    allowance=0.25
    #
    new_box = []
    # Figure out if we hit the wall
    # Per dimension: r flags the observed minimum sitting on the box's lower
    # wall (sbox[:,0]); l flags the maximum on the upper wall (sbox[:,1]).
    for num, (r, l) in enumerate(zip(np.isclose(S.sbox[:,0], S(M_min, unscale=True), rtol=1e-2), np.isclose(S.sbox[:,1], S(M_max, unscale=True), rtol=1e-2) )):
        # print(num, r,l)
        dist = M_max[num] - M_min[num]
        # No wall hit: pad both sides by `allowance` of the observed spread.
        if not r and not l: new_box.append( [ M_min[num] - allowance*dist, M_max[num] + allowance*dist  ])
        # Lower wall hit: widen the lower side by a full spread.
        elif r and not l:  new_box.append( [ M_min[num] - dist, M_max[num] + allowance*dist  ])
        # Upper wall hit: widen the upper side by a full spread.
        # NOTE(review): when both walls are hit (r and l) nothing is appended,
        # so new_box would lose a dimension; the source appears truncated here
        # — confirm against the complete original script.
        elif l and not r:  new_box.append( [ M_min[num] - allowance*dist, M_max[num] + dist  ])
Beispiel #19
0
def mkCov(yerrs):
    """Build a diagonal covariance matrix from per-bin uncertainties.

    The outer product of the error vector with itself is masked to the
    diagonal, yielding diag(yerrs**2) as a float matrix.
    """
    errs = np.asarray(yerrs)
    identity = np.eye(errs.shape[0])
    return np.outer(errs, errs) * identity


if __name__ == "__main__":
    import sys

    # At the moment, the object selection for the chi2 minimisation is
    # simply everything that is in the approximation file
    binids, RA = readApprox(sys.argv[1])
    Y, E = readExpData(sys.argv[2], [str(b) for b in binids])
    E2 = [e**2 for e in E]

    S = apprentice.Scaler("{}.scaler".format(sys.argv[1]))

    # NOTE: all evaluations are happening in the scaled world


    def chi2(x):
        return sum([(Y[i] - RA[i](x))**2 / E2[i] for i in range(len(binids))])

    from scipy import optimize
    res = optimize.minimize(chi2, S.center, bounds=S.sbox)
    print("Minimum found at {}".format(S(res["x"], unscale=True)))

    # Now do some more universes
    NSAMPLES = 1000
    COV = mkCov(E)
    import scipy.stats as st