def test_fmin_lbfgs():
    def f(x, g, *args):
        g[0] = 2 * x
        return x ** 2

    xmin = fmin_lbfgs(f, 100., line_search='armijo')
    assert_array_equal(xmin, [0])

    xmin = fmin_lbfgs(f, 100., line_search='strongwolfe')
    assert_array_equal(xmin, [0])

def test_fmin_lbfgs():
    def f(x, g, *args):
        res = [2 * x]            # gradient components
        for i, e in enumerate(res):
            g[i] = e             # fill the gradient buffer in place
        return x**2

    xmin = fmin_lbfgs(f, 100., line_search='armijo')
    assert_array_equal(xmin, [0])

    xmin = fmin_lbfgs(f, 100., line_search='strongwolfe')
    assert_array_equal(xmin, [0])

def test_owl_line_search_warning_explicit(self):
    def f(x, g, *args):
        g[0] = 2 * x
        return x ** 2

    # With orthantwise_c set, any explicit line_search other than 'wolfe'
    # should trigger a UserWarning mentioning OWL-QN.
    with pytest.warns(UserWarning, match="OWL-QN"):
        xmin = fmin_lbfgs(f, 100., orthantwise_c=1, line_search='default')

    with pytest.warns(UserWarning, match="OWL-QN"):
        xmin = fmin_lbfgs(f, 100., orthantwise_c=1, line_search='morethuente')

    with pytest.warns(UserWarning, match="OWL-QN"):
        xmin = fmin_lbfgs(f, 100., orthantwise_c=1, line_search='armijo')

    with pytest.warns(UserWarning, match="OWL-QN"):
        xmin = fmin_lbfgs(f, 100., orthantwise_c=1, line_search='strongwolfe')

def test_fmin_lbfgs():
    def f(x, g, *args):
        g[0] = 2 * x
        return x**2

    xmin = fmin_lbfgs(f, 100.)
    assert_array_equal(xmin, [0])

def _minimize(self, initial_val, loss_grad_func, equality_funcs,
              equality_grad_funcs, inequality_funcs, inequality_grad_funcs,
              packed_bounds, step_callback, optimizer_kwargs):
    """Wrapper for a particular optimization algorithm implementation.

    It would be appropriate for a subclass implementation of this method to
    raise `NotImplementedError` if unsupported arguments are passed: e.g. if
    an algorithm does not support constraints but `len(equality_funcs) > 0`.

    Args:
      initial_val: A NumPy vector of initial values.
      loss_grad_func: A function accepting a NumPy packed variable vector and
        returning two outputs, a loss value and the gradient of that loss
        with respect to the packed variable vector.
      equality_funcs: A list of functions each of which specifies a scalar
        quantity that an optimizer should hold exactly zero.
      equality_grad_funcs: A list of gradients of equality_funcs.
      inequality_funcs: A list of functions each of which specifies a scalar
        quantity that an optimizer should hold >= 0.
      inequality_grad_funcs: A list of gradients of inequality_funcs.
      packed_bounds: A list of bounds for each index, or `None`.
      step_callback: A callback function to execute at each optimization
        step, supplied with the current value of the packed variable vector.
      optimizer_kwargs: Other key-value arguments available to the optimizer.

    Returns:
      The optimal variable vector as a NumPy vector.
    """
    def loss_grad_func_pylbfgs(x, g):
        xval, gval = loss_grad_func(x)
        g[:] = gval
        return xval

    return fmin_lbfgs(loss_grad_func_pylbfgs, initial_val, progress=None,
                      **optimizer_kwargs)

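The adapter above is the essential trick for driving PyLBFGS from a (loss, gradient)-returning function: fmin_lbfgs expects a callback f(x, g) that writes the gradient into g in place and returns only the loss value. A minimal self-contained sketch of the same pattern, using a toy quadratic objective that is an assumption here, not part of the original wrapper:

import numpy as np
from lbfgs import fmin_lbfgs

def loss_grad(x):
    # stand-in for loss_grad_func: returns (loss, gradient) as a pair
    return float(np.sum(x ** 2)), 2 * x

def adapter(x, g):
    loss, grad = loss_grad(x)
    g[:] = grad    # PyLBFGS reads the gradient from g in place...
    return loss    # ...and the objective value from the return value

xmin = fmin_lbfgs(adapter, np.array([3.0, -4.0]))  # approaches [0, 0]
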
def lbfgs_modified_logistic_regression(X, y, b=None):
    """Same as modified LR, but solved using lbfgs."""
    X, theta, N, M = prepend_and_vars(X)

    if b is None:
        fix_b, b = False, DEFAULT_B
    else:
        fix_b, b = True, b

    def f(w, g, X, y):
        """Accepts w and g. Returns the value at w; stores the gradient in g."""
        b = w[0]
        theta = w[1:]
        # model: P(y=1|x) = 1 / (1 + b^2 + exp(-theta . x)); the exp term
        # matches ewx in the gradient below
        value = np.sum(np.abs(y - (1.0 / (1.0 + (b ** 2) + np.exp(-X.dot(theta))))))
        # now fill in the g
        ewx = np.exp(-X.dot(theta))
        b2ewx = (b * b) + ewx
        p = ((y - 1.0) / b2ewx) + (1.0 / (1.0 + b2ewx))
        dLdw = (p * ewx).reshape((X.shape[0], 1)) * X
        # the gradient must be written into g, not back into the parameters w
        if not fix_b:
            g[0] = np.sum(-2 * b * p)
        else:
            g[0] = 0.0  # b is held fixed, so no gradient flows to it
        g[1:] = np.sum(dLdw, axis=0)
        return value

    import lbfgs
    w = np.hstack([np.array([b]), theta])
    answer = lbfgs.fmin_lbfgs(f, w, args=(X, y))
    theta, b = answer[1:], answer[0]
    return theta, b

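prepend_and_vars and DEFAULT_B come from elsewhere in this module and are not shown. A hypothetical sketch, consistent with how f uses them (an intercept column prepended to X, a parameter vector of matching width), might look like:

import numpy as np

DEFAULT_B = 1.0  # hypothetical default for the label-noise parameter b

def prepend_and_vars(X):
    # hypothetical helper: prepend an intercept column of ones and
    # initialize a zero parameter vector of matching width
    N, M = X.shape
    X = np.hstack([np.ones((N, 1)), X])
    theta = np.zeros(M + 1)
    return X, theta, N, M
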
def test_2d():
    def f(x, g, f_calls):
        assert x.shape == (2, 2)
        assert g.shape == x.shape
        g[:] = 2 * x
        f_calls[0] += 1
        return (x**2).sum()

    def progress(x, g, fx, xnorm, gnorm, step, k, ls, *args):
        assert x.shape == (2, 2)
        assert g.shape == x.shape
        assert np.sqrt((x**2).sum()) == xnorm
        assert np.sqrt((g**2).sum()) == gnorm
        p_calls[0] += 1
        return 0

    f_calls = [0]
    p_calls = [0]

    xmin = fmin_lbfgs(f, [[10., 100.], [44., 55.]], progress, args=[f_calls])
    assert f_calls[0] > 0
    assert p_calls[0] > 0
    assert_array_almost_equal(xmin, [[0, 0], [0, 0]])

def test_owl_qn(self):
    def f(x, g, *args):
        g[0] = 2 * x
        return x**2

    xmin = fmin_lbfgs(f, 100., orthantwise_c=1, line_search='wolfe')
    assert_array_equal(xmin, [0])

def _interpolate_image_cs(image, sample_mask, c=15):
    ri_vector, = np.where(sample_mask.ravel())
    b_vector = image.ravel()[ri_vector].copy()
    image_dims = image.shape

    def _evaluate(x, g):
        # return squared norm of residuals and set the gradient
        x2 = x.reshape(image_dims)
        Ax2 = _idct2(x2)
        Ax = Ax2.flat[ri_vector]
        Axb = Ax - b_vector
        fx = np.sum(np.power(Axb, 2))
        Axb2 = np.zeros(x2.shape)
        Axb2.flat[ri_vector] = Axb
        AtAxb2 = 2 * _dct2(Axb2)
        AtAxb = AtAxb2.reshape(x.shape)
        np.copyto(g, AtAxb)
        return fx

    x0 = np.zeros_like(image).ravel()
    x = fmin_lbfgs(_evaluate, x0, orthantwise_c=c, line_search='wolfe')

    # transform the output back into the spatial domain
    x = _idct2(x.reshape(image_dims))
    return x

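_dct2 and _idct2 are not shown in this snippet. A plausible definition, assuming the usual orthonormal 2-D type-II DCT pair built on scipy.fftpack (so that _dct2 is the adjoint of _idct2, which the gradient computation above relies on):

from scipy.fftpack import dct, idct

def _dct2(x):
    # 2-D type-II DCT: apply the 1-D transform along both axes
    return dct(dct(x.T, norm='ortho').T, norm='ortho')

def _idct2(x):
    # inverse 2-D DCT (type III), the exact inverse under norm='ortho'
    return idct(idct(x.T, norm='ortho').T, norm='ortho')
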
def test_2d():
    def f(x, g, f_calls):
        assert_equal(x.shape, (2, 2))
        assert_equal(g.shape, x.shape)
        g[:] = 2 * x
        f_calls[0] += 1
        return (x ** 2).sum()

    def progress(x, g, fx, xnorm, gnorm, step, k, ls, *args):
        assert_equal(x.shape, (2, 2))
        assert_equal(g.shape, x.shape)
        assert_equal(np.sqrt((x ** 2).sum()), xnorm)
        assert_equal(np.sqrt((g ** 2).sum()), gnorm)
        p_calls[0] += 1
        return 0

    f_calls = [0]
    p_calls = [0]

    xmin = fmin_lbfgs(f, [[10., 100.], [44., 55.]], progress, args=[f_calls])
    assert_greater(f_calls[0], 0)
    assert_greater(p_calls[0], 0)
    assert_array_almost_equal(xmin, [[0, 0], [0, 0]])

def test_owl_line_search_default(self):
    def f(x, g, *args):
        g[0] = 2 * x
        return x ** 2

    with pytest.warns(UserWarning, match="OWL-QN"):
        xmin = fmin_lbfgs(f, 100., orthantwise_c=1)

def test_owl_wolfe_no_warning(self):
    """This test is an attempt to show that wolfe throws no warnings."""
    def f(x, g, *args):
        g[0] = 2 * x
        return x**2

    with pytest.warns(UserWarning, match="OWL-QN"):
        xmin = fmin_lbfgs(f, 100., orthantwise_c=1, line_search='wolfe')

def test_input_validation():
    with pytest.raises(TypeError):
        fmin_lbfgs([], 1e4)
    with pytest.raises(TypeError):
        fmin_lbfgs(lambda x: x, 1e4, "ham")
    with pytest.raises(TypeError):
        fmin_lbfgs(lambda x: x, "spam")

def go(self):
    # Xat2 = owlqn(self.nx*self.ny, self.evaluate, None, 5)
    print("Starting optimizations")
    starttime = time.time()
    Xat2 = lbfgs.fmin_lbfgs(self.evaluate, self.lastXat2, orthantwise_c=5)
    print("Optimization found after {0:0.1f} seconds.".format(time.time() - starttime))
    self.lastXat2 = Xat2

    # transform the output back into the spatial domain
    Xat = Xat2.reshape(self.nx, self.ny).T  # stack columns
    Xa = idct(Xat)
    Xa[Xa < 0] = 0
    Xa[Xa > 255] = 255
    return Xa

def Compress(input_FileName, input_Sample_x, input_Sample_y):
    try:
        global nx, ny, b, ri
        sample_sizes = (float(input_Sample_x), float(input_Sample_y))

        base_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', 'web/UploadFiles/'))
        base_path_save = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', 'web/static/images/SaveFiles/'))

        Xorig = spimg.imread(base_path + "/" + input_FileName + '.jpg')
        ny, nx, nchan = Xorig.shape

        Z = [np.zeros(Xorig.shape, dtype='uint8') for s in sample_sizes]
        masks = [np.zeros(Xorig.shape, dtype='uint8') for s in sample_sizes]

        for i, s in enumerate(sample_sizes):
            k = round(nx * ny * s)
            ri = np.random.choice(nx * ny, k, replace=False)
            for j in range(nchan):
                X = Xorig[:, :, j].squeeze()
                Xm = np.zeros(X.shape)
                Xm.T.flat[ri] = X.T.flat[ri]
                masks[i][:, :, j] = Xm
                b = X.T.flat[ri].astype(float)
                x0 = np.ones(X.shape)
                Xat2 = lb.fmin_lbfgs(evaluate, x0, args=(5,),
                                     orthantwise_c=5, line_search='wolfe')
                Xat = Xat2.reshape(nx, ny).T
                Xa = idct2(Xat)
                Z[i][:, :, j] = Xa.astype('uint8')

        rgbArray = np.zeros((ny, nx, 3), 'uint8')
        rgbArray[..., 0] = Z[1][:, :, 0]
        rgbArray[..., 1] = Z[1][:, :, 1]
        rgbArray[..., 2] = Z[1][:, :, 2]
        img = Image.fromarray(rgbArray)

        rgbMask = np.zeros((ny, nx, 3), 'uint8')
        rgbMask[..., 0] = masks[1][:, :, 0]
        rgbMask[..., 1] = masks[1][:, :, 1]
        rgbMask[..., 2] = masks[1][:, :, 2]

        scipy.misc.imsave(base_path_save + "/" + input_FileName + '_original.jpg', Xorig)
        scipy.misc.imsave(base_path_save + "/" + input_FileName + '_compressed.jpg', rgbMask)
        scipy.misc.imsave(base_path_save + "/" + input_FileName + '_recovered.jpg', img)
        return True
    except Exception:
        return False


# if __name__ == "__main__":
#     Compress("Test", 0.1, 0.1)

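The module-level evaluate callback (and the idct2/dct2 helpers) used by Compress are defined elsewhere in this project. A plausible sketch, modeled on the standard DCT-based compressed-sensing objective and the globals nx, ny, b, ri set above; the *args slot absorbs the extra value passed via args=(5,):

def evaluate(x, g, *args):
    """Sketch (assumed): return ||A x - b||^2 and write its gradient into g."""
    Ax = idct2(x)                  # A applies the inverse 2-D DCT
    Axb = Ax.T.flat[ri] - b        # residual at the sampled pixel positions
    fx = np.sum(np.power(Axb, 2))
    r = np.zeros(x.shape)
    r.T.flat[ri] = Axb             # scatter the residual back onto the grid
    np.copyto(g, 2 * dct2(r))      # gradient: 2 * A^T (A x - b)
    return fx
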
def owlqn(
    loss_grad_fn: Callable[[np.ndarray], Tuple[float, np.ndarray]],
    x0: np.ndarray,
    lambda1: float = 0,
    max_iterations: int = 200,
    **kwargs,
) -> np.ndarray:
    """
    Wrapper around owlqn that converts max_iter errors to warnings
    (see `fmin_lbfgs`).
    """

    def f(x: np.ndarray, gradient: np.ndarray) -> float:
        loss, grad = loss_grad_fn(x)
        gradient[:] = grad
        return loss

    try:
        # PyLBFGS throws an error if max_iterations is exceeded; this progress
        # callback is a workaround that keeps the last iterate so the error
        # can be converted into a warning below.
        def p(x, g, fx, xnorm, gnorm, step, k, num_eval, *args):
            if k >= max_iterations:
                x0[:] = x

        x0 = fmin_lbfgs(
            f=f,
            x0=x0,
            progress=p,
            orthantwise_c=lambda1,
            max_iterations=max_iterations,
            line_search="wolfe" if lambda1 > 0 else "default",
            **kwargs,
        )
    except LBFGSError as error:
        if (error.args[0]
                != "The algorithm routine reaches the maximum number of iterations."):
            raise error
        warn(
            "LBFGS optimisation reaches the maximum number of iterations.",
            SliseWarning,
        )
    return x0

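A usage sketch for the wrapper above, with an assumed toy least-squares objective; loss_grad_fn returns the (loss, gradient) pair the wrapper expects:

import numpy as np

A = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([1.0, -1.0])

def loss_grad(x):
    r = A @ x - b
    return float(r @ r), 2 * A.T @ r   # (loss, gradient)

x = owlqn(loss_grad, np.zeros(2), lambda1=0.1)  # L1 penalty via OWL-QN
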
"""Trivial example: minimize x**2 from any start value""" import lbfgs import sys from scipy.optimize import minimize, rosen, rosen_der import numpy as np x0 = np.array([1.3, 0.7]) def f(x, g): g[:] = rosen_der(x) print "one call" return rosen(x) def progress(x, g, f_x, xnorm, gnorm, step, k, ls): """Report optimization progress.""" #print("x = %8.2g f(x) = %8.2g f'(x) = %8.2g" % (x, f_x, g)) pass print("Minimum found", lbfgs.fmin_lbfgs(f, x0, progress))
"""Trivial example: minimize x**2 from any start value""" import lbfgs import numpy as np import sys def f(x, g): """Returns x**2 and stores its gradient in g[0]""" x = x[0] g[0] = 2*x return x**2 x0 = np.asarray([float(sys.argv[1])]) print lbfgs.fmin_lbfgs(f, x0)[0]
"""Trivial example: minimize x**2 from any start value""" import lbfgs import sys def f(x, g): """Returns x**2 and stores its gradient in g[0]""" x = x[0] g[0] = 2 * x return x**2 def progress(x, g, f_x, xnorm, gnorm, step, k, ls): """Report optimization progress.""" print("x = %8.2g f(x) = %8.2g f'(x) = %8.2g" % (x, f_x, g)) try: x0 = float(sys.argv[1]) except IndexError: print("usage: python %s start-value" % sys.argv[0]) sys.exit(1) print("Minimum found: %f" % lbfgs.fmin_lbfgs(f, x0, progress)[0])
"""Trivial example: minimize x**2 from any start value""" import lbfgs import sys def f(x, g): """Returns x**2 and stores its gradient in g[0]""" x = x[0] g[0] = 2*x return x**2 def progress(x, g, f_x, xnorm, gnorm, step, k, ls): """Report optimization progress.""" print("x = %8.2g f(x) = %8.2g f'(x) = %8.2g" % (x, f_x, g)) try: x0 = float(sys.argv[1]) except IndexError: print("usage: python %s start-value" % sys.argv[0]) sys.exit(1) print("Minimum found: %f" % lbfgs.fmin_lbfgs(f, x0, progress)[0])