def testSpecificProblemUnboundedSet(self):
  """
  Some specific problems have been chosen to be tested. The set is unbounded,
  so the auxiliary path-following part fails.
  """

  # prepare the set of testing cases
  data0 = {
    'c': matrix([[1], [1]]),
    'A': [identity(3),
          matrix([[1, 1, 0], [1, 1, 0], [0, 0, 0]]),
          matrix([[1, 0, 1], [0, 0, 1], [1, 1, 1]])],
    'startPoint': matrix([[0], [0]])
  }
  data1 = {
    'c': matrix([[1], [1], [1]]),
    'A': [identity(3),
          matrix([[1, 0, 0], [0, -1, 0], [0, 0, -1]]),
          matrix([[0, 1, 0], [1, 0, 1], [0, 1, 0]]),
          matrix([[1, 1, 0], [1, 0, 1], [0, 1, 1]])],
    'startPoint': matrix([[0], [0], [0]])
  }
  parameters = [data0, data1]

  # test all cases
  for i in range(0, len(parameters)):
    with self.subTest(i = i):
      problem = SDPSolver(parameters[i]['c'], [parameters[i]['A']])
      with self.assertRaises(LinAlgError):
        problem.solve(parameters[i]['startPoint'], problem.auxFollow)
def testRandomProblemBoundedSetDampedNewton(self):
  """
  Test some randomly generated problems, which should always be bounded. One
  problem is tested for each of the dimensions specified below.
  """

  # specify dimensions
  dims = [1, 2, 3, 4, 5, 6, 7, 10, 25]

  # test all of them
  for n in dims:
    with self.subTest(i = n):
      # starting point
      startPoint = zeros((n, 1))

      # objective function
      c = ones((n, 1))

      # get LMI matrices
      A = [identity(n)]
      for i in range(0, n):
        A.append(Utils.randomSymetric(n))

      # init SDP program
      problem = SDPSolver(c, [A])

      # bound the problem
      problem.bound(1)

      # solve
      timeBefore = process_time()
      problem.solve(startPoint, problem.dampedNewton)
      elapsedTime = process_time() - timeBefore
      #print(elapsedTime)

      # the matrix has to be positive semidefinite (eigenvalues >= 0)
      eigs = problem.eigenvalues()
      for eig in eigs:
        self.assertGreaterEqual(eig, 0)

      # the smallest eigenvalue has to be near zero
      self.assertLessEqual(eigs[0], 10**(-3))
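# Utils.randomSymetric is used above but not shown in this file. Below is a
# minimal sketch of such a helper, assuming it returns a dense n x n symmetric
# matrix with random entries; the actual implementation in Utils may differ
# (e.g. in the distribution or scaling of the entries).
from numpy.random import uniform

def randomSymmetricSketch(n):
  """Return an n x n symmetric matrix with entries drawn uniformly from [-1, 1)."""
  A = uniform(-1, 1, (n, n))
  # symmetrize by averaging the matrix with its transpose
  return (A + A.T) / 2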
# Problem statement
#   min  c0*x0 + c1*x1
#   s.t. I_3 + A0*x0 + A1*x1 >= 0

c = array([[1], [1]])
A0 = array([[1,  0,  0],
            [0, -1,  0],
            [0,  0, -1]])
A1 = array([[0, 1, 0],
            [1, 0, 1],
            [0, 1, 0]])

# starting point
startPoint = array([[0], [0]])

# create the solver object
problem = SDPSolver(c, [[eye(3), A0, A1]])

# enable graphs
problem.setDrawPlot(True)

# enable informative output
problem.setPrintOutput(True)

# enable bounding into a ball with radius 1
#problem.bound(1)

# solve!
x = problem.solve(startPoint, problem.dampedNewton)
print(x)

# print eigenvalues
print(problem.eigenvalues())
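# Sanity check mirroring the assertions in the unit tests: at the solution the
# LMI matrix has to be positive semidefinite, so all eigenvalues should be
# non-negative and the smallest one should be close to zero (the optimum lies
# on the boundary of the feasible set). The tolerance 10**(-3) is the one used
# in the tests; this check is illustrative and not part of the original demo.
eigs = problem.eigenvalues()
for eig in eigs:
  assert eig >= 0
assert eigs[0] <= 10**(-3)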
def testSpecificProblemBoundedSet(self):
  """
  Some specific problems have been chosen to be tested.
  """

  # prepare the set of testing cases
  data0 = {
    'c': matrix([[1], [1]]),
    'A': [[identity(3),
           matrix([[1, 0, 0], [0, -1, 0], [0, 0, -1]]),
           matrix([[0, 1, 0], [1, 0, 1], [0, 1, 0]])]],
    'startPoint': matrix([[0], [0]]),
    'result': matrix([[-0.777673169427983], [-0.592418253409468]])
  }
  data1 = {
    'c': matrix([[1], [1]]),
    'A': [[identity(3),
           matrix([[ 1, 0, -1], [ 0, -1, 0], [-1, 0, -1]]),
           matrix([[ 0, 1, 0], [ 1, 0, 1], [ 0, 1, 0]])]],
    'startPoint': matrix([[0], [0]]),
    'result': matrix([[-0.541113957864176], [-0.833869642997048]])
  }
  data2 = {
    'c': matrix([[1], [1], [1]]),
    'A': [[identity(3),
           matrix([[1, 0, 0], [0, -1, 0], [0, 0, -1]]),
           matrix([[0, 1, 0], [1, 0, 1], [0, 1, 0]]),
           matrix([[0, 0, 0], [0, 0, -1], [0, -1, 0]])]],
    'startPoint': matrix([[0], [0], [0]]),
    'result': matrix([[-0.987675582117481], [-0.0243458354034874], [-1.98752220767823]])
  }
  # unbounded example manually bounded
  data3 = {
    'c': matrix([[1], [1]]),
    'A': [[identity(3),
           matrix([[1, 1, 0], [1, 1, 0], [0, 0, 0]]),
           matrix([[1, 0, 1], [0, 0, 1], [1, 1, 1]])],
          [identity(3),
           matrix([[0, 1, 0], [1, 0, 0], [0, 0, 0]]),
           matrix([[0, 0, 1], [0, 0, 0], [1, 0, 0]])]],
    'startPoint': matrix([[0], [0]]),
    'result': matrix([[-0.367456763013021], [-0.228720901608749]])
  }
  parameters = [data0, data1, data2, data3]

  # test all cases
  for i in range(0, len(parameters)):
    with self.subTest(i = i):
      # init problem
      problem = SDPSolver(parameters[i]['c'], parameters[i]['A'])

      # solve and compare the results
      self.assertLessEqual(norm(problem.solve(parameters[i]['startPoint'], problem.auxFollow) - parameters[i]['result']), 10**(-5))

      # the matrix has to be positive semidefinite (eigenvalues >= 0)
      eigs = problem.eigenvalues()
      for eig in eigs:
        self.assertGreaterEqual(eig, 0)

      # the smallest eigenvalue has to be near zero
      self.assertLessEqual(eigs[0], 10**(-3))
class POPSolver:
  """
  Class providing a POP (Polynomial Optimization Problem) solver.

  Solves problems of the form:
    min  f(x)
    s.t. g(x) >= 0

  by Pavel Trutman, [email protected]
  """

  def __init__(self, f, g, d):
    """
    Initialization of the POP problem.

    Args:
      f (dictionary: tuple => int): representation of the objective function f(x)
      g (dictionary: tuple => int): representation of the constraining function g(x)
      d (int): degree of the relaxation
    """

    # get the number of variables
    key = list(f.keys())[0]
    self.n = len(key)
    self.d = d

    # disable output
    logging.basicConfig(stream = sys.stdout, format = '%(message)s')
    self.logStdout = logging.getLogger()

    # generate all variables up to degree 2*d
    allVar = self.generateVariablesUpDegree(2*self.d)

    # collect all variables used
    varUsed = allVar

    # generate the moment matrix and the localizing matrix
    self.MM = self.momentMatrix(self.d, varUsed)
    self.LM = self.localizingMatrix(self.d - 1, varUsed, g)

    # generate the objective function for the SDP
    self.c = zeros((len(varUsed) - 1, 1))
    for variable in range(1, len(varUsed)):
      self.c[variable - 1, 0] = f.get(varUsed[variable], 0)

    # initialize the SDP solver
    self.SDP = SDPSolver(self.c, [self.MM, self.LM])

  def solve(self, startPoint):
    """
    Solves the POP problem.

    Args:
      startPoint (Matrix): some feasible point of the SDP problem

    Returns:
      Matrix: solution of the problem
    """

    # solve the SDP problem
    y = self.SDP.solve(startPoint, self.SDP.dampedNewton)

    # extract the solution of the POP problem
    x = y[0:self.n, :]
    return x

  def setPrintOutput(self, printOutput):
    """
    Enables or disables printing of the computation state.

    Args:
      printOutput (bool): True - enables the output, False - disables the output

    Returns:
      None
    """

    self.SDP.setPrintOutput(printOutput)
    if printOutput:
      self.logStdout.setLevel(logging.INFO)
    else:
      self.logStdout.setLevel(logging.WARNING)

  def momentMatrix(self, d, varUsed):
    """
    Constructs the moment matrix.

    Args:
      d (int): degree of the relaxation
      varUsed (list of tuples): all variables that are used

    Returns:
      list: list of moment matrices
    """

    varUpD = self.generateVariablesUpDegree(d)
    varUsedNum = len(varUsed)
    dimM = len(varUpD)
    MM = [zeros((dimM, dimM)) for i in range(0, varUsedNum)]
    for i in range(0, dimM):
      for j in range(i, dimM):
        # sum up the degrees
        varCur = tuple(sum(t) for t in zip(varUpD[i], varUpD[j]))
        # find this variable among the used variables
        index = [k for k in range(0, varUsedNum) if varUsed[k] == varCur]
        if len(index) > 0:
          pos = index[0]
          MM[pos][i, j] = 1
          MM[pos][j, i] = 1
    return MM

  def localizingMatrix(self, d, varUsed, g):
    """
    Constructs the localizing matrix.

    Args:
      d (int): degree of the relaxation
      varUsed (list of tuples): all variables that are used
      g (dictionary: tuple => int): representation of the constraining function g(x)

    Returns:
      list: list of localizing matrices
    """

    varUpD = self.generateVariablesUpDegree(d)
    varUsedNum = len(varUsed)
    dimM = len(varUpD)
    LM = [zeros((dimM, dimM)) for i in range(0, varUsedNum)]
    for mon, coef in g.items():
      for i in range(0, dimM):
        for j in range(i, dimM):
          # sum up the degrees
          varCur = tuple(sum(t) for t in zip(varUpD[i], varUpD[j], mon))
          # find this variable among the used variables
          index = [k for k in range(0, varUsedNum) if varUsed[k] == varCur]
          if len(index) > 0:
            pos = index[0]
            LM[pos][i, j] += coef
            if i != j:
              LM[pos][j, i] += coef
    return LM

  def generateVariablesDegree(self, d, n):
    """
    Generates the whole set of variables of a given degree.

    Args:
      d (int): degree of the variables
      n (int): number of unknowns

    Returns:
      list: list of all variables
    """

    # generate zero degree variables
    if d == 0:
      return [(0,)*n]

    # generate first degree variables
    elif d == 1:
      variables = []
      for i in range(0, n):
        t = [0]*n
        t[i] = 1
        variables.append(tuple(t))
      return variables

    # there is only one unknown with the degree d
    elif n == 1:
      return [(d,)]

    # generate variables in the general case
    else:
      variables = []
      for i in range(0, d + 1):
        innerVariables = self.generateVariablesDegree(d - i, n - 1)
        variables.extend([v + (i,) for v in innerVariables])
      return variables

  def generateVariablesUpDegree(self, d):
    """
    Generates the whole set of variables up to a given degree.

    Args:
      d (int): maximal degree of the variables

    Returns:
      list: list of variables
    """

    variables = []
    for i in range(0, d + 1):
      variables.extend(self.generateVariablesDegree(i, self.n))
    return variables

  def getFeasiblePoint(self, R):
    """
    Finds a feasible point for the SDP problem arising from the POP problem.

    Args:
      R (int): radius of the ball from which the points x are chosen

    Returns:
      Matrix: feasible point for the SDP problem
    """

    N = comb(self.n + self.d, self.n)
    N = ceil(N*1.5 + 1)

    # generate all variables
    usedVars = self.generateVariablesUpDegree(2*self.d)[1:]
    y = zeros((len(usedVars), 1))
    i = 0

    # choose enough points x so that the moment matrix has full rank
    while i < N:
      # select x from the ball with the given radius
      x = uniform(-R, R, (self.n, 1))
      if norm(x) < R:
        # accumulate the moment values y from it
        for alpha in range(0, len(usedVars)):
          yTemp = 1
          for j in range(0, self.n):
            yTemp *= x[j, 0]**usedVars[alpha][j]
          y[alpha, 0] += yTemp
        i += 1
    y = y / N
    return y
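# A minimal usage sketch of POPSolver with assumed problem data (illustrative,
# not taken from the test suite). Monomials are encoded as exponent tuples
# mapped to their coefficients, so for two variables x0, x1 the problem
#
#   minimize   f(x) = x0^2 + x1^2 - x0
#   subject to g(x) = 1 - x0^2 - x1^2 >= 0
#
# is written as:
f = {(2, 0): 1, (0, 2): 1, (1, 0): -1}
g = {(0, 0): 1, (2, 0): -1, (0, 2): -1}

# relaxation of degree 2; the feasible point is sampled from the ball of radius 1,
# which matches the ball constraint g(x) >= 0 above
POP = POPSolver(f, g, 2)
startPoint = POP.getFeasiblePoint(1)

# solve the SDP relaxation and read off the (approximate) minimizer of the POP
x = POP.solve(startPoint)
print(x)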