Example #1
# assumed imports for these examples: numpy and VyPy's optimize
# package (the exact import alias for opt is an assumption)
import numpy as np
import VyPy.optimize as opt


def setup_problem():

    # initialize the problem
    problem = opt.Problem()

    # setup variables, list style
    problem.variables = [
        #   [ 'tag', x0, (lb, ub), scl ],
        ['x1', 0., (-2., 2.), 1.0],
        ['x2', 0., (-2., 2.), 1.0],
    ]

    # setup variables, arrays
    var = opt.Variable()
    var.tag = 'x3'
    var.initial = np.array([10., 10., 10.])
    ub = np.array([30.] * 3)
    lb = np.array([-1.] * 3)
    var.bounds = (lb, ub)
    var.scale = 1.0
    problem.variables.append(var)

    # initialize evaluator (The_Evaluator is assumed to be defined
    # elsewhere in this file)
    test_eval = The_Evaluator()

    # setup objective
    problem.objectives = [
        #   [ func, 'tag', scl ],
        [test_eval, 'f', 1.0],
    ]

    # setup constraint, list style
    problem.constraints = [
        #   [ func, ('tag', '><=', val), scl ],
        [test_eval, ('c', '=', 1.), 1.0],
    ]

    # setup constraint, array style
    con = opt.Constraint()
    con.evaluator = test_eval
    con.tag = 'c2'
    con.sense = '>'
    con.edge = np.array([3., 3., 3.])
    problem.constraints.append(con)

    # print
    print(problem)

    # done!
    return problem
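
A minimal usage sketch (not part of the original snippet): assuming the CMA_ES driver that Example #3 below pulls from opt.drivers, the returned problem can be handed straight to a driver.

# hypothetical usage of setup_problem(); the CMA_ES constructor
# argument is copied verbatim from Example #3 and may differ by version
problem = setup_problem()
driver = opt.drivers.CMA_ES(0)
result = driver.run(problem)
print(result)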
Example #2
# assumed imports, as in Example #1, plus obunch (the ordered-bunch
# container assumed to live in VyPy.data)
import numpy as np
import VyPy.optimize as opt
from VyPy.data import obunch


def setup_problem():

    # initialize the problem
    problem = opt.Problem()

    # variables, list style
    problem.variables = [
        #   [ 'tag', x0, (lb, ub), scl ],
        ['x1', 0., (-2., 2.), 1.0],
        ['x2', 0., (-2., 2.), 1.0],
    ]

    # variables, array style
    var = opt.Variable()
    var.tag = 'x3'
    var.initial = np.array([10., 10., 10.])
    ub = np.array([30.] * 3)
    lb = np.array([-1.] * 3)
    var.bounds = (lb, ub)
    var.scale = opt.scaling.Linear(scale=4.0, center=10.0)
    problem.variables.append(var)

    # objectives (test_func is assumed to be defined elsewhere in this file)
    problem.objectives = [
        #   [ func, 'tag', scl ],
        [test_func, 'f', 1.0],
    ]

    # constraints, list style
    problem.constraints = [
        #   [ func, ('tag', '><=', val), scl ],
        [test_func, ('c', '=', 1.), 1.0],
    ]

    # constraints, array style
    con = opt.Equality()
    con.evaluator = test_func
    con.tag = 'c2'
    con.sense = '='
    con.edge = np.array([3., 3., 3.])
    problem.constraints.append(con)

    # print
    print(problem)

    # expected answer
    truth = obunch()
    truth.variables = obunch()
    truth.objectives = obunch()
    truth.equalities = obunch()

    truth.variables.x1 = -1.5
    truth.variables.x2 = -1.5
    truth.variables.x3 = np.array([4., 4., 4.])

    truth.objectives.f = 148.5

    truth.equalities.c = 1.0
    truth.equalities.c2 = np.array([3., 3., 3.])

    problem.truth = truth

    # done!
    return problem
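
Because this snippet attaches an expected answer, a driver result can be compared against problem.truth. A hedged sketch, with the result layout assumed from Example #3 and obunch item access assumed to work dict-style:

# hypothetical verification against the stored truth values
problem = setup_problem()
driver = opt.drivers.CMA_ES(0)
result = driver.run(problem)
vars_min = result[1]  # assumed: second element holds the variables
for tag in ['x1', 'x2', 'x3']:
    print(tag, vars_min[tag], problem.truth.variables[tag])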
Example #3
# assumed imports; the module paths for active_subspace and the
# plotting/sampling helpers are hedged and may differ per install
import numpy as np
import matplotlib.pyplot as plt
import VyPy
import VyPy.optimize as opt
from VyPy.regression import active_subspace


def main():

    # ---------------------------------------------------------
    #  Setup
    # ---------------------------------------------------------
    # Choose the function and bounds

    # the target function, defined at the end of this file
    The_Func = composite_function
    The_Con = composite_constraint

    # hypercube bounds
    ND = 4  # dimensions
    XB = np.array([[-2., 2.]] * ND)

    # ---------------------------------------------------------
    #  Training Data
    # ---------------------------------------------------------
    # Select training data randomly with Latin Hypercube Sampling

    # number of samples
    ns = 250
    ## ns = 10  # try for lower model accuracy

    # perform sampling with latin hypercube
    XS = VyPy.sampling.lhc_uniform(XB, ns)

    # evaluate function and gradients
    FS, DFS = The_Func(XS)
    CS, DCS = The_Con(XS)

    # ---------------------------------------------------------
    #  Machine Learning
    # ---------------------------------------------------------

    # number of active domain dimensions
    N_AS = 1

    #Model = gpr.library.Gaussian(XB,XS,FS,DFS)
    Model = active_subspace.build_surrogate(XS,
                                            FS,
                                            DFS,
                                            XB,
                                            N_AS,
                                            probNze=-2.0)
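    # background on the technique: active subspace methods
    # eigendecompose the averaged gradient outer product
    # C = E[ grad(f) grad(f)^T ]; the leading eigenvectors span the
    # directions along which f varies most, and the surrogate is then
    # fit in that reduced N_AS-dimensional space (a single direction
    # here), which is why the gradients DFS are required inputs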

    # pull function handles for plotting and evaluating
    g_x = Model.g_x  # the surrogate
    #g_x = Model.predict_YI
    f_x = lambda Z: The_Func(Z)[0]  # the truth function

    # ---------------------------------------------------------
    #  Evaluate a Testing Set
    # ---------------------------------------------------------
    # Run a test sample on the functions

    nt = 200  # number of test samples
    XT = VyPy.sampling.lhc_uniform(XB, nt)

    # functions at training data locations
    FSI = g_x(XS)
    FST = f_x(XS)

    # functions at grid testing locations
    FTI = g_x(XT)
    FTT = f_x(XT)

    # ---------------------------------------------------------
    #  Model Errors
    # ---------------------------------------------------------
    # estimate the rms training and testing errors

    print('Estimate Modeling Errors ...')

    # the scaling object
    Scaling = Model.M_Y.Scaling  # careful, this is in the active subspace
    #Scaling = Model.Scaling

    # scale data - training samples
    FSI_scl = Scaling.Y.set_scaling(FSI)
    FST_scl = Scaling.Y.set_scaling(FST)

    # scale data - grid testing samples
    FTI_scl = FTI / Scaling.Y  # alternate syntax
    FTT_scl = FTT / Scaling.Y

    # rms errors
    ES_rms = np.sqrt(np.mean((FSI_scl - FST_scl)**2))
    EI_rms = np.sqrt(np.mean((FTI_scl - FTT_scl)**2))
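    # note: both errors are computed on scaled responses, so the RMS
    # values printed below are (assumed to be) fractions of the scaled
    # response range, hence the percentage formatting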

    print('  Training Error = %.3f%%' % (ES_rms * 100.))
    print('  Testing Error  = %.3f%%' % (EI_rms * 100.))

    # ---------------------------------------------------------
    #  Optimization
    # ---------------------------------------------------------

    problem = opt.Problem()

    var = opt.Variable()
    var.tag = 'x'
    var.initial = np.array([[0.0] * ND])
    var.bounds = XB.T
    problem.variables.append(var)

    obj = opt.Objective()
    obj.evaluator = lambda X: {
        'f': f_x(X['x']) + np.linalg.norm(X['x'], axis=1) / 10.
    }
    #obj.evaluator = lambda X: {'f' : g_x(X['x'])+np.linalg.norm(X['x'],axis=1)/10.}
    obj.tag = 'f'
    problem.objectives.append(obj)

    driver = opt.drivers.CMA_ES(0)

    result = driver.run(problem)

    Xmin = result[1]['x']
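    # the driver result is assumed to be an (objectives, variables)-style
    # pair, so result[1]['x'] pulls out the minimizing design vector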

    # ---------------------------------------------------------
    #  Plotting
    # ---------------------------------------------------------
    # plot the estimated and truth surface, evaluate rms error

    plt.figure(0)
    plt.plot(Model.d, 'bo-')
    plt.title('Eigenvalue Powers')
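    # the decay of these eigenvalues indicates how many input directions
    # matter; a sharp drop after the first one supports the N_AS = 1
    # choice made above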

    print('Plot Response Surface ...')

    # center point
    #X0 = [1.0] * ND
    X0 = Xmin

    ## rosenbrock local minimum for 4 <= dim <= 7
    #X0[0] = -1.

    # plot spider legs
    fig = plt.figure(1)
    ax = VyPy.plotting.spider_axis(fig, X0, XB)
    VyPy.plotting.spider_trace(ax, g_x, X0, XB, 100, 'b-', lw=2, label='Fit')
    VyPy.plotting.spider_trace(ax, f_x, X0, XB, 100, 'r-', lw=2, label='Truth')
    ax.legend()
    ax.set_zlabel('F')
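    # each spider leg is (presumably) a 1-D slice of the domain through
    # X0 along one coordinate axis, so fit and truth can be compared
    # visually on every axis at once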

    # in active domain
    U = Model.U
    Y0 = active_subspace.project.simple(X0, U)
    g_y = Model.g_y
    YB = Model.YB
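    # the projection into the active domain is presumably the standard
    # linear map y = U^T x, with the columns of U the dominant
    # eigenvectors found above; Y0 is the optimum expressed in reduced
    # coordinates and YB the matching bounds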

    # plot spider legs
    fig = plt.figure(2)
    ax = VyPy.plotting.spider_axis(fig, Y0, YB)
    VyPy.plotting.spider_trace(ax, g_y, Y0, YB, 100, 'b-', lw=2, label='Fit')
    ax.legend()
    ax.set_zlabel('F')

    plt.draw()
    plt.show(block=True)

    # Done!
    return