示例#1
0
    def cost(x, axis=None, samples=Ns):
        """lower bound on expected model output

    Inputs:
        x: list of NoisyModel hyperparameters
        axis: int, the index of y on which to find bound (all, by default)
        samples: int, number of samples, for a non-deterministic OUQ model

    Returns:
        lower bound on expected value of model output
        """
        # build a model, F(x|a), and tune "a" for optimal F.
        kwds = dict(mu=x[0], sigma=0.0, zmu=x[1], zsigma=0.0,
        #           uid=False, cached=True) # active updates enabled
                    uid=True, cached=False) # active updates disabled
        #print('building model F(x|a)...')
        model = NoisyModel('model', model=toy, nx=nx, ny=ny, **kwds)

        # honor the samples argument (was hard-wired to Ns, silently
        # ignoring the caller-supplied value); sample only if model is rnd
        rnd = samples if model.rnd else None
        #print('building UQ objective of expected model output...')
        b = ExpectedValue(model, bnd, constraint=scons, cvalid=is_cons, samples=rnd)
        i = counter.count()
        #print('solving for lower bound on expected model output...')
        solver = b.lower_bound(axis=axis, id=i, **param)
        if type(solver) is not tuple:
            solver = (solver,) #FIXME: save solver to DB (or pkl)
        if axis is None:
            # negate bestEnergy to recover the bound (-1 for GLB)
            results = tuple(-s.bestEnergy for s in solver)
            #print('[id: %s] %s' % (i, tuple(s.bestSolution for s in solver)))
        else:
            results = -solver[axis].bestEnergy #NOTE: -1 for GLB
            #print('[id: %s] %s' % (i, solver[axis].bestSolution))
        return results
示例#2
0
    def cost(x, axis=None, samples=Ns):
        """upper bound on expected model error, for truth and Interp(data)

    Inputs:
        x: list of InterpModel hyperparameters
        axis: int, the index of y on which to find bound (all, by default)
        samples: int, number of samples, for a non-deterministic OUQ model

    Returns:
        upper bound on expected value of model error
        """
        # CASE 1: F(x|a) = F'(x|a'). Tune A for optimal G.
        # x[0] is the interpolator's smoothing hyperparameter
        kwds = dict(smooth=x[0], noise=0.0, method='thin_plate', extrap=False)

        #print('building estimator G(x) from truth data...')
        surrogate = InterpModel('surrogate', nx=nx, ny=ny, data=truth, **kwds)
        #print('building UQ model of model error...')
        error = ErrorModel('error', model=truth, surrogate=surrogate)

        # honor the samples argument (was hard-wired to Ns, silently
        # ignoring the caller-supplied value); sample only if error is rnd
        rnd = samples if error.rnd else None
        #print('building UQ objective of expected model error...')
        b = ExpectedValue(error, bnd, constraint=scons, cvalid=is_cons, samples=rnd)
        i = counter.count()
        #print('solving for upper bound on expected model error...')
        solver = b.upper_bound(axis=axis, id=i, **param)
        if type(solver) is not tuple:
            solver = (solver,) #FIXME: save solver to DB (or pkl)
        if axis is None:
            # negate bestEnergy to recover the bound (-1 for LUB)
            results = tuple(-s.bestEnergy for s in solver)
            #print('[id: %s] %s' % (i, tuple(s.bestSolution for s in solver)))
        else:
            results = -solver[axis].bestEnergy #NOTE: -1 for LUB
            #print('[id: %s] %s' % (i, solver[axis].bestSolution))
        return results
示例#3
0
    def cost(x, axis=None, samples=Ns):
        """upper bound on expected model output

    Inputs:
        x: list of model hyperparameters
        axis: int, the index of y on which to find bound (all, by default)
        samples: int, number of samples, for a non-deterministic OUQ model

    Returns:
        upper bound on expected value of model output
        """
        # build a model, F(x|a), and tune "a" for optimal F.
        toy_ = wrap(d=x[0], e=x[1])(toy)  #NOTE: reduces nx by 2
        #print('building model F(x|a)...')
        model = WrapModel('model', model=toy_, nx=nx, ny=ny, rnd=False)

        # honor the samples argument (was hard-wired to Ns, silently
        # ignoring the caller-supplied value); sample only if model is rnd
        rnd = samples if model.rnd else None
        #print('building UQ objective of expected model output...')
        b = ExpectedValue(model,
                          bnd,
                          constraint=scons,
                          cvalid=is_cons,
                          samples=rnd)
        i = counter.count()
        #print('solving for upper bound on expected model output...')
        solver = b.upper_bound(axis=axis, id=i, **param)
        if type(solver) is not tuple:
            solver = (solver, )  #FIXME: save solver to DB (or pkl)
        if axis is None:
            # negate bestEnergy to recover the bound (-1 for LUB)
            results = tuple(-s.bestEnergy for s in solver)
            #print('[id: %s] %s' % (i, tuple(s.bestSolution for s in solver)))
        else:
            results = -solver[axis].bestEnergy  #NOTE: -1 for LUB
            #print('[id: %s] %s' % (i, solver[axis].bestSolution))
        return results
示例#4
0
    def cost(x, axis=None, samples=Ns):
        """upper bound on expected model error, for truth and Learn(data)

    Inputs:
        x: list of LearnedModel hyperparameters
        axis: int, the index of y on which to find bound (all, by default)
        samples: int, number of samples, for a non-deterministic OUQ model

    Returns:
        upper bound on expected value of model error
        """
        # CASE 1: F(x|a) = F'(x|a'). Tune A for optimal G.
        # x selects the MLP architecture via hidden_layers(*x)
        args = dict(hidden_layer_sizes=hidden_layers(*x),
                    max_iter=100,
                    n_iter_no_change=5,
                    solver='lbfgs',
                    learning_rate_init=0.001)  #FIXME: max_iter=1000
        from sklearn.neural_network import MLPRegressor
        from sklearn.preprocessing import StandardScaler
        from ml import Estimator, MLData, improve_score
        kwds = dict(estimator=MLPRegressor(**args), transform=StandardScaler())
        # iteratively improve estimator
        mlp = Estimator(**kwds)  #FIXME: traintest so train != test ?
        # train == test here; retrain up to `tries` times, keep the best score
        best = improve_score(mlp,
                             MLData(data.coords, data.coords, data.values,
                                    data.values),
                             tries=1,
                             verbose=True)  #FIXME: tries = 2
        mlkw = dict(estimator=best.estimator, transform=best.transform)

        #print('building estimator G(x) from truth data...')
        surrogate = LearnedModel('surrogate', nx=nx, ny=ny, data=truth, **mlkw)
        #print('building UQ model of model error...')
        error = ErrorModel('error', model=truth, surrogate=surrogate)

        # honor the samples argument (was hard-wired to Ns, silently
        # ignoring the caller-supplied value); sample only if error is rnd
        rnd = samples if error.rnd else None
        #print('building UQ objective of expected model error...')
        b = ExpectedValue(error,
                          bnd,
                          constraint=scons,
                          cvalid=is_cons,
                          samples=rnd)
        i = counter.count()
        #print('solving for upper bound on expected model error...')
        solved = b.upper_bound(axis=axis, id=i, **param)
        if type(solved) is not tuple:
            solved = (solved, )
        # here upper_bound yields values (not solvers); negate for the bound
        if axis is None:
            results = tuple(-s for s in solved)  #NOTE: -1 for LUB
        else:
            results = -solved[axis]  #NOTE: -1 for LUB
        return results
示例#5
0
    def cost(x, axis=None, samples=Ns):
        """upper bound on expected model output

    Inputs:
        x: list of NoisyModel hyperparameters
        axis: int, the index of y on which to find bound (all, by default)
        samples: int, number of samples, for a non-deterministic OUQ model

    Returns:
        upper bound on expected value of model output
        """
        # build a model, F(x|a), and tune "a" for optimal F.
        kwds = dict(
            mu=x[0],
            sigma=0.0,
            zmu=x[1],
            zsigma=0.0,
            #           uid=False, cached=True) # active updates enabled
            uid=True,
            cached=False)  # active updates disabled
        #print('building model F(x|a)...')
        model = NoisyModel('model', model=toy, nx=nx, ny=ny, **kwds)

        # honor the samples argument (was hard-wired to Ns, silently
        # ignoring the caller-supplied value); sample only if model is rnd
        rnd = samples if model.rnd else None
        #print('building UQ objective of expected model output...')
        b = ExpectedValue(model,
                          bnd,
                          constraint=scons,
                          cvalid=is_cons,
                          samples=rnd)
        i = counter.count()
        #print('solving for upper bound on expected model output...')
        solved = b.upper_bound(axis=axis, id=i, **param)
        if type(solved) is not tuple:
            solved = (solved, )
        # NOTE(review): sibling examples negate the result (-1 for LUB), and
        # the original comment here said '-1 for GLB' despite applying no
        # sign flip -- result returned unmodified; confirm intended.
        if axis is None:
            results = solved
        else:
            results = solved[axis]
        return results
    def cost(x, axis=None, samples=Ns):
        """upper bound on expected model error, for surrogate and 'truth'

    Inputs:
        x: list of model hyperparameters
        axis: int, the index of y on which to find bound (all, by default)
        samples: int, number of samples, for a non-deterministic OUQ model

    Returns:
        upper bound on expected value of model error
        """
        # CASE 0: |F(x|a) - F'(x|a')|, no G. Tune "a" for optimal F, a = x[-2:]
        toy_ = wrap(d=x[0], e=x[1])(toy)
        #print('building model F(x|a) of truth...')
        model = WrapModel('model', model=toy_, nx=nx, ny=ny, rnd=False)

        #print('building UQ model of model error...')
        error = ErrorModel('error', model=truth, surrogate=model)

        # honor the samples argument (was hard-wired to Ns, silently
        # ignoring the caller-supplied value); sample only if error is rnd
        rnd = samples if error.rnd else None
        #print('building UQ objective of expected model error...')
        b = ExpectedValue(error,
                          bnd,
                          constraint=scons,
                          cvalid=is_cons,
                          samples=rnd)
        i = counter.count()
        #print('solving for upper bound on expected model error...')
        solver = b.upper_bound(axis=axis, id=i, **param)
        if type(solver) is not tuple:
            solver = (solver, )  #FIXME: save solver to DB (or pkl)
        if axis is None:
            # negate bestEnergy to recover the bound (-1 for LUB)
            results = tuple(-s.bestEnergy for s in solver)
            #print('[id: %s] %s' % (i, tuple(s.bestSolution for s in solver)))
        else:
            results = -solver[axis].bestEnergy  #NOTE: -1 for LUB
            #print('[id: %s] %s' % (i, solver[axis].bestSolution))
        return results
    def cost(x, axis=None, samples=Ns):
        """upper bound on expected model error, for surrogate and 'truth'

    Inputs:
        x: list of NoisyModel hyperparameters
        axis: int, the index of y on which to find bound (all, by default)
        samples: int, number of samples, for a non-deterministic OUQ model

    Returns:
        upper bound on expected value of model error
        """
        # CASE 3: |F(x|a) - F'(x|a')|, no G. Tune "a" for optimal F.
        # fixed-offset hyperparameters; zero-variance noise terms
        approx = dict(mu=x[0], sigma=0.0, zmu=x[1], zsigma=0.0)
        #print('building model F(x|a) of truth...')
        model = NoisyModel('model', model=toy, nx=nx, ny=ny, **approx)

        #print('building UQ model of model error...')
        error = ErrorModel('error', model=truth, surrogate=model)

        # honor the samples argument (was hard-wired to Ns, silently
        # ignoring the caller-supplied value); sample only if error is rnd
        rnd = samples if error.rnd else None
        #print('building UQ objective of expected model error...')
        b = ExpectedValue(error,
                          bnd,
                          constraint=scons,
                          cvalid=is_cons,
                          samples=rnd)
        i = counter.count()
        #print('solving for upper bound on expected model error...')
        solved = b.upper_bound(axis=axis, id=i, **param)
        if type(solved) is not tuple:
            solved = (solved, )
        # here upper_bound yields values (not solvers); negate for the bound
        if axis is None:
            results = tuple(-s for s in solved)  #NOTE: -1 for LUB
        else:
            results = -solved[axis]  #NOTE: -1 for LUB
        return results
    # build a model that approximates 'truth'
    #print('building model F(x|a) of truth...')
    # fixed offsets (mu, zmu) with zero-variance noise terms
    approx = dict(mu=-.05, sigma=0., zmu=.05, zsigma=0.)
    model = NoisyModel('model', model=toy, nx=nx, ny=ny, **approx)
    #print('building estimator G(x) from truth data...')
    # thin-plate interpolator over the truth data; no smoothing, no noise
    kwds = dict(smooth=0.0, noise=0.0, method='thin_plate', extrap=False)
    surrogate = InterpModel('surrogate', nx=nx, ny=ny, data=truth, **kwds)
    #print('building UQ model of model error...')
    # error model: misfit between the approximate model and the surrogate
    error = ErrorModel('error', model=model, surrogate=surrogate)

    # sample only if the error model is non-deterministic
    rnd = Ns if error.rnd else None
    #print('building UQ objective of expected model error...')
    b = ExpectedValue(error,
                      bnd,
                      constraint=scons,
                      cvalid=is_cons,
                      samples=rnd)
    #print('solving for lower bound on expected model error...')
    b.lower_bound(axis=None, id=0, **param)
    print("lower bound per axis:")
    # b._lower maps axis -> the solver instance holding the optimum found
    for axis, solver in b._lower.items():
        print("%s: %s @ %s" % (axis, solver.bestEnergy, solver.bestSolution))

    #print('solving for upper bound on expected model error...')
    # retune solver settings before the upper-bound solve
    param['opts']['termination'] = COG(1e-10, 200)  #NOTE: short stop?
    param['npop'] = 160  #NOTE: increase if poor convergence
    param['stepmon'] = VerboseLoggingMonitor(1,
                                             20,
                                             filename='log.txt',
                                             label='upper')
示例#9
0
  npts = [2, 1, 1] (i.e. two Dirac masses on x[0], one elsewhere)
  sum(wx[i]_j) for j in [0,npts], for each i
  E|model(x)| = 6.5 +/- 1.0

Solves for two scenarios of x that produce upper bound on E|model(x)|,
given the bounds, normalization, and moment constraints.
"""

if __name__ == '__main__':

    from spec3D import *
    from ouq import ExpectedValue
    from mystic.bounds import MeasureBounds
    from ouq_models import WrapModel
    from surrogate import marc_surr as toy
    nx = 3  # number of model inputs
    ny = None  # presumably no per-axis indexing of the output -- confirm in ouq_models
    Ns = 25  # number of samples, for a non-deterministic model

    # build a model representing 'truth'
    nargs = dict(nx=nx, ny=ny, rnd=False)  # rnd=False: deterministic wrapper
    model = WrapModel('model', toy, **nargs)

    # calculate upper bound on expected value, where F(x) has uncertainty
    # NOTE: xlb, xub, npts, wlb, wub, scons, is_cons, param come from spec3D
    bnd = MeasureBounds(xlb, xub, n=npts, wlb=wlb, wub=wub)
    b = ExpectedValue(model, bnd, constraint=scons, cvalid=is_cons, samples=Ns)
    b.upper_bound(axis=None, **param)
    print("upper bound per axis:")
    # b._upper maps axis -> solver; negate bestEnergy to recover the bound
    for axis, solver in b._upper.items():
        print("%s: %s @ %s" % (axis, -solver.bestEnergy, solver.bestSolution))