Ejemplo n.º 1
0
    def cost(x, axis=None, samples=Ns):
        """Upper bound on expected model error, for truth and Learn(data).

        Inputs:
            x: list of LearnedModel hyperparameters
            axis: int, the index of y on which to find bound (all, by default)
            samples: int, number of samples, for a non-deterministic OUQ model

        Returns:
            upper bound on expected value of model error (negated tuple over
            all axes when axis is None, else a single negated value)
        """
        # CASE 1: F(x|a) = F'(x|a'). Tune A for optimal G.
        args = dict(hidden_layer_sizes=hidden_layers(*x),
                    max_iter=100,
                    n_iter_no_change=5,
                    solver='lbfgs',
                    learning_rate_init=0.001)  #FIXME: max_iter=1000
        from sklearn.neural_network import MLPRegressor
        from sklearn.preprocessing import StandardScaler
        from ml import Estimator, MLData, improve_score
        kwds = dict(estimator=MLPRegressor(**args), transform=StandardScaler())
        # iteratively improve estimator
        mlp = Estimator(**kwds)  #FIXME: traintest so train != test ?
        # NOTE: train and test sets are identical here (data vs data)
        best = improve_score(mlp,
                             MLData(data.coords, data.coords, data.values,
                                    data.values),
                             tries=1,
                             verbose=True)  #FIXME: tries = 2
        mlkw = dict(estimator=best.estimator, transform=best.transform)

        # build estimator G(x) from truth data
        surrogate = LearnedModel('surrogate', nx=nx, ny=ny, data=truth, **mlkw)
        # build UQ model of model error
        error = ErrorModel('error', model=truth, surrogate=surrogate)

        # BUG FIX: use the `samples` parameter (default Ns) rather than the
        # closure variable `Ns`, so a caller-supplied `samples` takes effect
        rnd = samples if error.rnd else None
        # build UQ objective of expected model error
        b = ExpectedValue(error,
                          bnd,
                          constraint=scons,
                          cvalid=is_cons,
                          samples=rnd)
        i = counter.count()
        # solve for upper bound on expected model error
        solved = b.upper_bound(axis=axis, id=i, **param)
        if not isinstance(solved, tuple):
            solved = (solved, )
        if axis is None:
            results = tuple(-s for s in solved)  #NOTE: -1 for LUB
        else:
            results = -solved[axis]  #NOTE: -1 for LUB
        return results
Ejemplo n.º 2
0
    model = NoisyModel('model', model=toy, nx=nx, ny=ny, **approx)
    # build a surrogate model by training on the data
    args = dict(hidden_layer_sizes=(100, 75, 50, 25),
                max_iter=1000,
                n_iter_no_change=5,
                solver='lbfgs',
                learning_rate_init=0.001)
    from sklearn.neural_network import MLPRegressor
    from sklearn.preprocessing import StandardScaler
    from ml import Estimator, MLData, improve_score
    kwds = dict(estimator=MLPRegressor(**args), transform=StandardScaler())
    # iteratively improve estimator
    mlp = Estimator(**kwds)  #FIXME: traintest so train != test ?
    best = improve_score(mlp,
                         MLData(data.coords, data.coords, data.values,
                                data.values),
                         tries=10,
                         verbose=True)
    mlkw = dict(estimator=best.estimator, transform=best.transform)

    #print('building estimator G(x) from truth data...')
    surrogate = LearnedModel('surrogate', nx=nx, ny=ny, data=truth, **mlkw)
    #print('building UQ model of model error...')
    error = ErrorModel('error', model=model, surrogate=surrogate)

    rnd = Ns if error.rnd else None
    #print('building UQ objective of expected model error...')
    b = ExpectedValue(error,
                      bnd,
                      constraint=scons,
                      cvalid=is_cons,