Example #1
        def rosen_obj(params, shift):
            val = rosen(params["x_half_a"] -
                        shift) + rosen(params["x_half_b"] - shift)

            dval = OrderedDict([
                ("x_half_a", rosen_der(params["x_half_a"] - shift)),
                ("x_half_b", rosen_der(params["x_half_b"] - shift)),
            ])
            return val, dval
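A minimal driver for the snippet above (hypothetical call site; assumes rosen_obj and the imports it relies on are in scope):

from collections import OrderedDict
import numpy as np

params = OrderedDict([("x_half_a", np.array([1.3, 0.7])),
                      ("x_half_b", np.array([0.8, 1.9]))])
val, dval = rosen_obj(params, shift=0.0)
print(val)               # scalar: sum of the two Rosenbrock halves
print(dval["x_half_a"])  # gradient block for the first half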
Example #2
    def compute_dr_wrt(self, wrt):
        if wrt is self.x:
            if visualize:
                import matplotlib.pyplot as plt
                residuals = np.sum(self.r**2)
                print('------> RESIDUALS %.2e' % (residuals,))
                print('------> CURRENT GUESS %s' % (str(self.x.r),))
                plt.figure(123)
                
                if not hasattr(self, 'vs'):
                    self.vs = []
                    self.xs = []
                    self.ys = []
                self.vs.append(residuals)
                self.xs.append(self.x.r[0])
                self.ys.append(self.x.r[1])
                plt.clf()
                plt.subplot(1, 2, 1)
                plt.plot(self.vs)
                plt.subplot(1, 2, 2)
                plt.plot(self.xs, self.ys)
                plt.draw()

            return row(rosen_der(self.x.r))
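The method above depends on a visualize flag and a row helper from its surrounding library (this looks like chumpy's test code). A minimal stand-in for row, assuming it only reshapes a vector into a 1×n row:

import numpy as np

def row(v):
    # lay a gradient out as a 1 x n row, the shape expected for the
    # Jacobian of a scalar residual
    return np.asarray(v).reshape(1, -1)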
Example #4
    def testEstimation(self):
        values = np.random.uniform(-5.0, 5.0, size=[10, 2])
        for i in range(10):
            true_grad = rosen_der(values[i, :])
            pred_grad = self.estimator.simpleFiniteDifferences(values[i, :])
            # both components are checked for every sample
            self.assertAlmostEqual(true_grad[0], pred_grad[0], 4)
            self.assertAlmostEqual(true_grad[1], pred_grad[1], 4)
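scipy.optimize.check_grad performs the same analytic-versus-finite-difference comparison in a single call; a self-contained sketch (not part of the original suite):

import numpy as np
from scipy.optimize import rosen, rosen_der, check_grad

point = np.array([1.5, -2.0])
err = check_grad(rosen, rosen_der, point)  # 2-norm of the gradient error
assert err < 1e-2  # loose bound; the error is typically around 1e-5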
Example #5
    def testMaximization(self):
        self.optimizer.isMaximize = True
        rosen_inv = lambda x: -rosen(x)
        rosen_der_inv = lambda x: -rosen_der(x)
        max_params, max_value, _ = self.optimizer.optimize(rosen_inv, rosen_der_inv, x0=self.x0)
        self.assertAlmostEqual(self.reference_value, max_value, 10)
        self.assertAlmostEqual(self.reference_params[0], max_params[0], 4)
        self.assertAlmostEqual(self.reference_params[1], max_params[1], 4)
Example #6
    def nltest(x, grad):
        nonlocal counter
        nonlocal countergrad
        if len(grad) > 0:
            countergrad += 1
            grad[:] = rosen_der(x)
        else:
            counter += 1
        # the objective value must be returned in both branches; returning
        # the gradient array here would break a gradient-based caller
        return rosen(x)
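The (x, grad) signature with a len(grad) > 0 check is NLopt's objective convention, so a plausible driver looks like this (a sketch; assumes the nlopt package and that nltest with its enclosing counters is in scope):

import numpy as np
import nlopt

opt = nlopt.opt(nlopt.LD_LBFGS, 2)  # gradient-based L-BFGS in 2 dimensions
opt.set_min_objective(nltest)
opt.set_xtol_rel(1e-8)
xopt = opt.optimize(np.array([-1.2, 1.0]))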
Example #7
def test_optimization_trust():
    print("**********************************************")
    print("TEST Newton trust region ")
    x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
    res = optimize.minimize(
        optimize.rosen, x0, method='trust-ncg',
        jac=optimize.rosen_der,
        hess=optimize.rosen_hess,
        options={'gtol': 1e-8, 'disp': True})
    print(res.x)
    print(optimize.rosen(x0).shape)
    print(optimize.rosen_der(x0).shape)
    print(optimize.rosen_hess(x0).shape)
    return res.fun
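For comparison, the same problem solved with the trust-exact variant, which also consumes the exact Hessian but solves each trust-region subproblem nearly exactly (a self-contained sketch, not part of the original test):

from scipy import optimize

res = optimize.minimize(
    optimize.rosen, [1.3, 0.7, 0.8, 1.9, 1.2], method='trust-exact',
    jac=optimize.rosen_der, hess=optimize.rosen_hess,
    options={'gtol': 1e-8})
print(res.x)  # converges to [1, 1, 1, 1, 1]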
Example #8
def test_gradient():
    """Test the gradient using rosen and higher-order functions."""
    def F(x):
        return x[0]**2 + x[0]*x[1] + x[1]**2

    def F_grad(x):
        return array([2*x[0] + x[1], x[0] + 2*x[1]])

    opt1 = p.OptimizationProblem(rosen)
    opt2 = p.OptimizationProblem(F)
    for i in range(-3, 3):
        for j in range(-3, 3):
            k = opt1.gradient([float(i), float(j)]) - \
                rosen_der([float(i), float(j)])
            kk = opt2.gradient([float(i), float(j)]) - \
                 F_grad([float(i), float(j)])
            # both components of the combined error must be below tolerance
            assert sum(abs(k + kk) < 1e-5) == 2
Example #9
def rosen_obj_func_grad(w):
    return sparse.csc_matrix(rosen_der(w.T.toarray()[0])).T
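A hypothetical call site for this wrapper, assuming sparse is scipy.sparse and the surrounding optimizer passes parameters as an n×1 CSC column vector:

import numpy as np
from scipy import sparse

w = sparse.csc_matrix(np.array([[-1.2], [1.0]]))  # 2x1 column vector
g = rosen_obj_func_grad(w)                        # also a 2x1 CSC column
print(g.toarray().ravel())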
Example #10
def rosen_obj(params):
    val = rosen(params["x"])

    dval = OrderedDict()
    dval["x"] = rosen_der(params["x"])
    return val, dval
Example #11
    def rosen_der_wrapper(self, x, args=()):
        self.ngev += 1
        return rosen_der(x, *args)
Example #12
    def fprime(self, x):
        return rosen_der(x)
Example #13
eps = 1.0e-5
xtol = 1.0e-16
icall = 0
iflag = 0

# initial guess
x = np.zeros(n)
for i in range(0, n, 2):
    x[i] = -1.2
    x[i + 1] = 1.0
x0 = x.copy()
# initial evaluation
fval = rosen(x)
gval = rosen_der(x)
print("initial function value = {}".format(fval))
print("initial gradient norm = {}".format(np.sqrt(np.dot(gval, gval))))

for icall in range(2000):
    xk, oflag = lbfgs.lbfgs(n=n, m=m, x=x, f=fval, g=gval,
                            diagco=diagco, diag=diag,
                            iprint=iprint, eps=eps, xtol=xtol,
                            w=work, iflag=iflag)
    iflag = oflag
    x = xk[:]
    fval = rosen(x)
    gval = rosen_der(x)
    print("iflag = {}".format(iflag))
    #print("x - x0 = {}".format(x-x0))
    #print("diag = {}".format(diag))
    #print("current function value = {}".format(fval))
Example #14
    def gradient(x, data=None):
        return rosen_der(x)
Example #15
    def grad(self, x):
        g = rosen_der(x)
        return g
Example #16
    def gradient(x, data):
        return rosen_der(x)
Example #17
    def gradient(self):
        inp = self._position.to_global_data_rw()
        out = ift.Field.from_global_data(space, rosen_der(inp))
        return out
Example #19
    def _getGradient(self, x, *args):
        return rosen_der(x)
Example #20
def grad_rosen(xx):
    yy = xx.flatten()
    gra = spo.rosen_der(yy)
    return gra.reshape((gra.shape[0], 1))
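A quick shape check (assumes grad_rosen above and scipy.optimize as spo are in scope): the wrapper flattens any input and always returns an (n, 1) column vector.

import numpy as np
import scipy.optimize as spo

g = grad_rosen(np.array([[1.3, 0.7, 0.8]]))  # row-shaped input
print(g.shape)  # (3, 1)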
Example #21
def f(x, g):
    g[:] = rosen_der(x)
    print("one call")
    return rosen(x)
Example #22
def fp(x):
    return rosen_der(x).reshape(3, 1)
Example #23
def rosenbrock_function_jacobian(samples):
    assert samples.shape[1] == 1
    return rosen_der(samples).T
Example #24
def rosen_func_and_der(x):
    return rosen(x), rosen_der(x)
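rosen_func_and_der follows SciPy's jac=True convention, where a single callable returns (value, gradient), so it can be handed straight to minimize; a quick check assuming the definition above is in scope:

from scipy.optimize import minimize

res = minimize(rosen_func_and_der, [-1.2, 1.0], jac=True, method="BFGS")
print(res.x)  # converges to roughly [1, 1]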
Example #25
def SensEq(x, f, g, gc):
    dfdx = rosen_der(x)
    return dfdx, []
Example #26
def rosenbrock_grad_f(x):
    return rosen_der(x)
Example #27
def rosen_der_inv(x):
    return -rosen_der(x)
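rosen_der_inv is the gradient of -rosen, the standard trick for maximizing with a minimizer (compare testMaximization above). Since maximizing -rosen is the same as minimizing rosen, the maximum can be recovered with a self-contained sketch:

from scipy.optimize import minimize, rosen, rosen_der

res = minimize(rosen, [-1.2, 1.0], jac=rosen_der)
max_value = -res.fun  # maximum of -rosen, which is 0 at [1, 1]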
Example #29
def callback(xk):
    print(xk)
    dr = rosen_der(xk["x"])
    print(dr)
    log.append([xk["x"][0], xk["x"][1], -lr * dr[0], -lr * dr[1]])
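The dict-style xk here comes from a custom optimizer; SciPy's own minimize passes the raw parameter vector to the callback, so the equivalent standard-SciPy logger would be (sketch; lr and log play the same roles as above):

from scipy.optimize import minimize, rosen, rosen_der

log = []
lr = 1e-3

def scipy_callback(xk):
    dr = rosen_der(xk)
    log.append([xk[0], xk[1], -lr * dr[0], -lr * dr[1]])

minimize(rosen, [-1.2, 1.0], jac=rosen_der, callback=scipy_callback)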
Example #30
    def SensEq2(p, r, g, x):
        return rosen_der(p)
Example #31
def func_grad_hess(x, *args):
    f = optimize.rosen(x)
    g = optimize.rosen_der(x)
    h = optimize.rosen_hess(x)
    return (f, g, h)
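The (f, g, h) triple is exactly what a Newton iteration consumes; one undamped Newton step for illustration (assumes func_grad_hess above is in scope):

import numpy as np
from scipy import optimize

x = np.array([-1.2, 1.0])
f, g, h = func_grad_hess(x)
x_new = x - np.linalg.solve(h, g)  # one full Newton step
print(x_new)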
Example #32
    def cost_fun(params, **kwargs):
        return rosen_der([params['a'], params['b']])
Example #34
def f(x):
    time.sleep(0.01)
    return [opt.rosen(x), opt.rosen_der(x)]
Example #35
def f(x):
    time.sleep(0.1)
    return [opt.rosen(x), opt.rosen_der(x)]
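The [value, gradient] list returned by these slow objectives also unpacks the way SciPy's jac=True protocol expects, so either can be exercised directly (sketch; assumes f above is in scope):

import scipy.optimize as opt

res = opt.minimize(f, [-1.2, 1.0], jac=True, method="L-BFGS-B")
print(res.fun, res.nfev)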