def nonconvex(n=50, k=10, seed=0, **kwargs):
    torch.random.manual_seed(seed)
    # Random positive weights summing to one; the last gradient row is chosen
    # so that lam @ g = 0, making the origin a natural candidate critical point.
    lam = torch.rand(k, dtype=torch.double)
    lam /= sum(lam)
    g = torch.randn(k - 1, n, dtype=torch.double)
    gk = -(lam[0:(k - 1)] @ g) / lam[-1]
    g = torch.cat((g, gk[None, :]), 0)
    c = torch.randn(k, dtype=torch.double)
    tmp = torch.randn(k, n, n, dtype=torch.double)
    # Each H[i] = tmp[i]^T tmp[i] is positive semidefinite.
    H = stack([tmp[i, :, :].T @ tmp[i, :, :] for i in range(k)])

    def nc_function(x):
        if type(x) != Tensor:
            # If a non-tensor is passed in, no gradient will be used
            x = tensor(x, dtype=torch.double, requires_grad=False)
        assert len(x) == n
        # f(x) = sum_i |g_i^T x + 0.5 x^T H_i x + (c_i / 24) ||x||^4|
        term1 = g @ x
        term2 = 0.5 * stack([x.T @ H[i, :, :] @ x for i in range(k)])
        term3 = (1. / 24.) * (norm(x)**4) * c
        return sum(abs(term1 + term2 + term3)[0:k])

    return Objective(nc_function, **kwargs)
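# A hedged usage sketch for nonconvex above. It assumes the Objective wrapper
# exposes call_oracle, which (per this repo's oracle test script) accepts an
# array-like point and returns {'f': value, 'df': gradient}.
def _demo_nonconvex_double():
    obj = nonconvex(n=10, k=4, seed=0)
    out = obj.call_oracle([0.1] * 10)
    print(out['f'], out['df'])  # scalar value and length-10 gradient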
def partlysmooth(n=50, m=25, seed=0, **kwargs):
    torch.random.manual_seed(seed)
    tmp = torch.randn(n + 1, m, m, dtype=torch.double)
    # Symmetrize each slice: A[i] = tmp[i]^T + tmp[i].
    A = stack([tmp[i, :, :].T + tmp[i, :, :] for i in range(n + 1)])

    # Get true values by solving min_x lambda_max(A_0 + sum_i x_i A_i)
    # exactly with cvxpy (requires the MOSEK solver). Convert the torch
    # tensors to numpy up front so cvxpy only ever sees numpy arrays.
    A_np = A.data.numpy()
    l = cp.Variable(n)
    obj = A_np[0]
    for i in range(n):
        obj = obj + A_np[i + 1] * l[i]
    prob = cp.Problem(cp.Minimize(cp.lambda_max(obj)))
    prob.solve(solver='MOSEK')
    true_val = prob.value
    # Multiplicity of the maximum eigenvalue at the computed solution
    true_spec = np.linalg.eigvalsh(
        A_np[0] + np.einsum('i,ijk->jk', l.value, A_np[1:]))
    true_mult = np.sum(np.isclose(true_spec, np.max(true_spec)))

    def ps_function(x):
        if type(x) != Tensor:
            # If a non-tensor is passed in, no gradient will be used
            x = tensor(x, dtype=torch.double, requires_grad=False)
        assert len(x) == n
        mat = A[0, :, :] + einsum('i,ijk->jk', x, A[1:, :, :])
        # symeig returns eigenvalues in ascending order; take the largest
        return symeig(mat, eigenvectors=True)[0][-1]

    return Objective(ps_function, **kwargs), true_val, true_mult
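# A hedged usage sketch: this double-precision variant also returns the true
# optimal value and the multiplicity of the top eigenvalue at the solution
# (note prob.solve above requires MOSEK to be installed). call_oracle is
# assumed per this repo's oracle test script.
def _demo_partlysmooth_double():
    obj, true_val, true_mult = partlysmooth(n=10, m=5, seed=0)
    out = obj.call_oracle([0.0] * 10)
    print(float(out['f']), true_val, true_mult)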
def halfandhalf(n=50, seed=0, **kwargs):
    # A: diagonal 0/1 mask selecting the even-indexed coordinates
    A = torch.ones(n, dtype=torch.double)
    A[1::2] = 0
    A = torch.diag(A)
    # B: diagonal matrix with entries 1/(i+1)
    B = torch.diag((torch.arange(n, dtype=torch.double) + 1.0)**-1)

    def hh_function(x):
        if type(x) != Tensor:
            # If a non-tensor is passed in, no gradient will be used
            x = tensor(x, dtype=torch.double, requires_grad=False)
        assert len(x) == n
        # f(x) = sqrt(x^T A x) + x^T B x
        return sqrt(x.T @ A @ x) + x.T @ B @ x

    return Objective(hh_function, **kwargs)
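# A hedged usage sketch for halfandhalf: the sqrt term depends only on the
# even-indexed coordinates and is nonsmooth at the origin, which is the
# minimizer since B is positive definite. call_oracle is assumed per this
# repo's oracle test script.
def _demo_halfandhalf():
    obj = halfandhalf(n=6)
    out = obj.call_oracle([1.0, -1.0, 0.5, 0.25, -0.5, 2.0])
    print(out['f'], out['df'])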
def partlysmooth(n=50, m=25, seed=0, **kwargs):
    torch.random.manual_seed(seed)
    tmp = torch.randn(n + 1, m, m)
    # Symmetrize each slice: A[i] = tmp[i]^T + tmp[i].
    A = stack([tmp[i, :, :].T + tmp[i, :, :] for i in range(n + 1)])

    def ps_function(x):
        if type(x) != Tensor:
            # If a non-tensor is passed in, no gradient will be used
            x = tensor(x, dtype=torch.float, requires_grad=False)
        assert len(x) == n
        # Maximum eigenvalue of A_0 + sum_i x_i A_i
        mat = A[0, :, :] + einsum('i,ijk->jk', x, A[1:, :, :])
        # symeig returns eigenvalues in ascending order; take the largest
        return symeig(mat, eigenvectors=True)[0][-1]

    return Objective(ps_function, **kwargs)
def nonconvex(n=50, k=10, seed=0, **kwargs):
    torch.random.manual_seed(seed)
    c = torch.randn(k)
    g = torch.randn(k, n)
    tmp = torch.randn(k, n, n)
    # Each H[i] = tmp[i]^T tmp[i] is positive semidefinite.
    H = stack([tmp[i, :, :].T @ tmp[i, :, :] for i in range(k)])

    def nc_function(x):
        if type(x) != Tensor:
            # If a non-tensor is passed in, no gradient will be used
            x = tensor(x, dtype=torch.float, requires_grad=False)
        assert len(x) == n
        # f(x) = sum_i |g_i^T x + 0.5 x^T H_i x + (c_i / 24) ||x||^4|
        term1 = g @ x
        term2 = 0.5 * stack([x.T @ H[i, :, :] @ x for i in range(k)])
        term3 = (1. / 24.) * (norm(x)**4) * c
        return sum(abs(term1 + term2 + term3))

    return Objective(nc_function, **kwargs)
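# A hedged sketch: a central finite-difference check of the oracle gradient
# for the single-precision nonconvex objective above, assuming call_oracle
# returns {'f': value, 'df': gradient} as in this repo's oracle test script.
def _check_nonconvex_grad(eps=1e-4):
    obj = nonconvex(n=5, k=3, seed=1)
    x = [0.3, -0.7, 1.1, 0.0, 0.5]
    df = obj.call_oracle(x)['df']
    for i in range(5):
        xp, xm = list(x), list(x)
        xp[i] += eps
        xm[i] -= eps
        fd = (float(obj.call_oracle(xp)['f'])
              - float(obj.call_oracle(xm)['f'])) / (2 * eps)
        print(i, fd, df[i])  # the two columns should roughly agree in float32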
import torch
from obj.objective import Objective
from torch import abs, max, sum, norm, einsum, stack, symeig, tensor, Tensor


# f(x) = max(|x_1|, 0.5 x_2^2)
def simple2D(x):
    return max(abs(x[0]), (0.5 * x[1]**2))


Simple2D = Objective(simple2D)


# Below are example objective functions from Lewis-Wylie 2019
# (https://arxiv.org/abs/1907.11742)


# Creates a strongly convex objective function for particular n and k
def stronglyconvex(n=50, k=10, seed=0, **kwargs):
    torch.random.manual_seed(seed)
    c = torch.randn(k)
    g = torch.randn(k, n)
    tmp = torch.randn(k, n, n)
    # Each H[i] = tmp[i]^T tmp[i] is positive semidefinite.
    H = stack([tmp[i, :, :].T @ tmp[i, :, :] for i in range(k)])

    def sc_function(x):
        if type(x) != Tensor:
            # If a non-tensor is passed in, no gradient will be used
            x = tensor(x, dtype=torch.float, requires_grad=False)
        assert len(x) == n
        # The source truncates here; the completion below (a pointwise max of
        # convex quadratics, following the construction used elsewhere in this
        # file) is an assumed reconstruction.
        term1 = g @ x
        term2 = 0.5 * stack([x.T @ H[i, :, :] @ x for i in range(k)])
        return max(c + term1 + term2)

    return Objective(sc_function, **kwargs)
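# A hedged usage sketch for stronglyconvex above; call_oracle is assumed to
# return {'f': value, 'df': gradient}, as in this repo's oracle test script.
def _demo_stronglyconvex():
    obj = stronglyconvex(n=8, k=3, seed=0)
    out = obj.call_oracle([0.0] * 8)
    print(out['f'], out['df'])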
import torch
import numpy as np
import cvxpy as cp
from obj.objective import Objective
from torch import abs, max, sum, norm, sqrt, einsum, stack, symeig, tensor, Tensor


# f(x) = max(|x_1|, 0.5 x_2^2)
def simple2D(x):
    return max(abs(x[0]), (0.5 * x[1]**2))


Simple2D = Objective(simple2D)


# f(x) = max(3 x_1^2 + x_2^2 - x_2, x_1^2 + x_2^2 + x_2)
def partlysmooth2D(x):
    if type(x) != Tensor:
        # If a non-tensor is passed in, no gradient will be used
        x = tensor(x, dtype=torch.double, requires_grad=False)
    assert len(x) == 2
    return max(3 * x[0]**2 + x[1]**2 - x[1], x[0]**2 + x[1]**2 + x[1])


PartlySmooth2D = Objective(partlysmooth2D)


# f(x) = sqrt((x_1^2 - x_2)^2 + x_3^2) + 2 (x_1^2 + x_2^2 + x_3^2)
def partlysmooth3D(x):
    if type(x) != Tensor:
        # If a non-tensor is passed in, no gradient will be used
        x = tensor(x, dtype=torch.double, requires_grad=False)
    assert len(x) == 3
    return sqrt((x[0]**2 - x[1])**2 + x[2]**2) + 2 * (x[0]**2 + x[1]**2 + x[2]**2)


PartlySmooth3D = Objective(partlysmooth3D)
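# A hedged usage sketch for the fixed-dimension objectives above, assuming the
# call_oracle API from this repo's oracle test script ({'f': ..., 'df': ...}).
# partlysmooth2D is nonsmooth where its two quadratic branches cross, and
# partlysmooth3D is nonsmooth where x[0]**2 == x[1] and x[2] == 0.
def _demo_partlysmooth_small():
    print(PartlySmooth2D.call_oracle([1.0, -1.0]))
    print(PartlySmooth3D.call_oracle([1.0, 0.5, 0.2]))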
def obj_test():
    '''Running this function runs a complete test on the functions of the
    objective class.
    '''
    # This is so objective can be imported from the test subfolder
    import sys
    import os
    sys.path.extend(
        [f'./{name}' for name in os.listdir(".") if os.path.isdir(name)])
    from obj.objective import Objective

    # Testing object initialisation
    print("============")
    print("Object initialisation..")
    walk = Objective("Take a walk!")
    test = Objective("Run some tests", qty=1)
    bana = Objective("Buy bananas", qty=12)
    toma = Objective("Buy tomatoes", qty=5, desc="Just buy some tomatoes")
    medi = Objective("Meditate this evening", desc="It can do you some good")
    print("Done!")
    print("===========\n")

    # Testing reader functions
    test_read = False
    print("=== No objective should be completed.")
    print(walk)
    print(test)
    print(bana)
    print(toma)
    print(medi)
    if not walk.is_done() and not test.is_done() and not bana.is_done():
        if not toma.is_done() and not medi.is_done():
            if bana.read_quantity() == [0, 12]:
                test_read = True
    print("===========\n")

    # Testing access functions
    test_access = False
    walk.check()
    medi.check()
    toma.change_title('CHANGE_TITLE WORKS !')
    walk.change_desc('CHANGE_DESCRIPTION WORKS !')
    bana.change_quantity(7)
    toma.change_quantity(6)
    walk.change_quantity(1)
    medi.make_quantitative(1)
    test.unmake_quantitative()
    print("=== Only the walk and tomato objectives should be completed.")
    print(walk)
    print(test)
    print(bana)
    print(toma)
    print(medi)
    if walk.desc == 'CHANGE_DESCRIPTION WORKS !' and toma.title == 'CHANGE_TITLE WORKS !':
        if bana.qty == 7 and toma.qty == 6 and walk.qty == 0:
            if medi._qty_flag and not test._qty_flag:
                if walk.is_done() and toma.is_done() and not medi.is_done() and test_read:
                    test_access = True
    if not test_read:
        print("\nCOULD NOT VALIDATE ACCESS TEST BECAUSE OF BAD READ TEST.")

    print(f'\nREAD test is {test_read}')
    print(f'ACCESS test is {test_access}')
    if test_read and test_access:
        print('')
        print('OBJECTIVE TEST COMPLETED !!')
        return True
    return False
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 2 10:18:22 2019

@author: Xiaoyan
"""
# Defines an objective and calls the oracle.
# The oracle will output the evaluation and gradient, unless told otherwise.

import sys
sys.path.append('..')
from obj.objective import Objective


# f(x, y) = |x| + y^2
def simple2D(x):
    return abs(x[0]) + x[1]**2


Simple2D = Objective(simple2D)

out = Simple2D.call_oracle([1, 2])
print(out)
# {'f': array(5., dtype=float32), 'df': array([1., 4.], dtype=float32)}