def test_linear_op_data_term_wrong():
    # addData must reject a data-term linear operator whose shape is
    # incompatible with the observation matrix A.
    m = 40
    d = 10
    if getNewOptVals:
        A, y = getLSdata(m, d)
        cache['Awrongdata'] = A
        cache['ywrongdata'] = y
    else:
        A = cache['Awrongdata']
        y = cache['ywrongdata']

    projSplit = ps.ProjSplitFit()
    stepsize = 1e-1
    processor = lp.Forward2Fixed(stepsize)
    gamma = 1e0
    projSplit.setDualScaling(gamma)

    p = 15
    d2 = 11
    H = np.random.normal(0, 1, [d2, p])
    try:
        projSplit.addData(A, y, 2, processor, normalize=False, intercept=False,
                          linearOp=aslinearoperator(H))
        notExcept = True
    except Exception:
        notExcept = False

    assert notExcept == False
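
# For orientation: the linearOp arguments used throughout these tests are built
# with scipy's aslinearoperator, which wraps a dense ndarray in a LinearOperator
# exposing matvec/rmatvec. The small standalone check below is illustrative only
# and is not collected by pytest; the shapes are arbitrary and the helper name
# _linear_op_demo is ours, not part of projSplitFit.
def _linear_op_demo():
    from scipy.sparse.linalg import aslinearoperator
    H = np.random.normal(0, 1, [15, 10])
    Hop = aslinearoperator(H)
    x = np.ones(10)
    w = np.ones(15)
    # matvec applies H, rmatvec applies H^T.
    assert np.allclose(Hop.matvec(x), H @ x)
    assert np.allclose(Hop.rmatvec(w), H.T @ w)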
def test_linear_op_l1(norm, inter):
    # Single L1 regularizer composed with a random linear operator H;
    # the projective-splitting objective is compared against a CVXPY solve.
    m = 40
    d = 10
    p = 15
    if getNewOptVals:
        A = cache.get('AlinL1')
        y = cache.get('ylinL1')
        H = cache.get('HlinL1')
        if A is None:
            A, y = getLSdata(m, d)
            H = np.random.normal(0, 1, [p, d])
            cache['AlinL1'] = A
            cache['ylinL1'] = y
            cache['HlinL1'] = H
    else:
        A = cache['AlinL1']
        y = cache['ylinL1']
        H = cache['HlinL1']

    projSplit = ps.ProjSplitFit()
    stepsize = 1e-1
    processor = lp.Forward2Fixed(stepsize)
    gamma = 1e0
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=norm, intercept=inter)
    lam = 0.01
    step = 1.0
    regObj = L1(lam, step)
    projSplit.addRegularizer(regObj, linearOp=aslinearoperator(H))
    projSplit.run(maxIterations=5000, keepHistory=True, nblocks=1,
                  primalTol=1e-3, dualTol=1e-3)
    ps_val = projSplit.getObjective()

    if getNewOptVals:
        opt = cache.get((norm, inter, 'optlinL1'))
        if opt is None:
            (m, d) = A.shape
            if norm:
                # Replicate ProjSplitFit's column normalization of A.
                Anorm = A
                scaling = np.linalg.norm(Anorm, axis=0)
                scaling += 1.0 * (scaling < 1e-10)
                Anorm = np.sqrt(m) * Anorm / scaling
                A = Anorm
            if inter:
                # Prepend an all-ones column for the intercept; the first
                # column of H stays zero so the intercept is not penalized.
                AwithIntercept = np.zeros((m, d + 1))
                AwithIntercept[:, 0] = np.ones(m)
                AwithIntercept[:, 1:(d + 1)] = A
                A = AwithIntercept
                HwithIntercept = np.zeros((p, d + 1))
                HwithIntercept[:, 0] = np.zeros(p)
                HwithIntercept[:, 1:(d + 1)] = H
                H = HwithIntercept
                x_cvx = cvx.Variable(d + 1)
            else:
                x_cvx = cvx.Variable(d)

            f = (1 / (2 * m)) * cvx.sum_squares(A @ x_cvx - y)
            f += lam * cvx.norm(H @ x_cvx, 1)
            prob = cvx.Problem(cvx.Minimize(f))
            prob.solve(verbose=True)
            opt = prob.value
            cache[(norm, inter, 'optlinL1')] = opt
    else:
        opt = cache[(norm, inter, 'optlinL1')]

    primViol = projSplit.getPrimalViolation()
    dualViol = projSplit.getDualViolation()
    print("primal violation = {}".format(primViol))
    print("dual violation = {}".format(dualViol))
    print("ps val = {}".format(ps_val))
    print("cvx val = {}".format(opt))
    assert ps_val - opt < 1e-2
def test_multi_linear_op_l1(norm, inter, testNumber, numblocks):
    # Several L1 regularizers, each composed with its own random linear
    # operator H[i]; compare the final objective against a CVXPY solve.
    m = 40
    d = 10
    numregs = 5
    if getNewOptVals and (testNumber == 0):
        A, y = getLSdata(m, d)
        cache['AmutliLinL1'] = A
        cache['ymutliLinL1'] = y
        H = []
        for i in range(numregs):
            p = np.random.randint(1, 100)
            H.append(np.random.normal(0, 1, [p, d]))
        cache['HmultiLinL1'] = H
    else:
        H = cache['HmultiLinL1']
        A = cache['AmutliLinL1']
        y = cache['ymutliLinL1']

    projSplit = ps.ProjSplitFit()
    stepsize = 1e-1
    processor = lp.Forward2Fixed(stepsize)
    gamma = 1e0
    if norm and inter:
        gamma = 1e2
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=norm, intercept=inter)

    lam = []
    for i in range(numregs):
        lam.append(0.001 * (i + 1))
        step = 1.0
        regObj = L1(lam[-1], step)
        projSplit.addRegularizer(regObj, linearOp=aslinearoperator(H[i]))

    projSplit.run(maxIterations=5000, keepHistory=True, nblocks=numblocks,
                  primalTol=1e-6, dualTol=1e-6)
    ps_val = projSplit.getObjective()

    if getNewOptVals:
        if norm:
            Anorm = A
            m = Anorm.shape[0]
            scaling = np.linalg.norm(Anorm, axis=0)
            scaling += 1.0 * (scaling < 1e-10)
            Anorm = np.sqrt(m) * Anorm / scaling
            A = Anorm
        if inter:
            AwithIntercept = np.zeros((m, d + 1))
            AwithIntercept[:, 0] = np.ones(m)
            AwithIntercept[:, 1:(d + 1)] = A
            A = AwithIntercept
        (m, d) = A.shape

        x_cvx = cvx.Variable(d)
        f = (1 / (2 * m)) * cvx.sum_squares(A @ x_cvx - y)
        for i in range(numregs):
            if inter:
                # The intercept (first coordinate) is left unpenalized.
                f += lam[i] * cvx.norm(H[i] @ x_cvx[1:d], 1)
            else:
                f += lam[i] * cvx.norm(H[i] @ x_cvx, 1)
        prob = cvx.Problem(cvx.Minimize(f))
        prob.solve(verbose=True)
        opt = prob.value
        cache[(norm, inter, 'opt')] = opt
    else:
        opt = cache[(norm, inter, 'opt')]

    print("ps val = {}".format(ps_val))
    print("cvx val = {}".format(opt))
    assert ps_val - opt < 1e-2
f2fix = lp.Forward2Fixed()
back2exact = lp.BackwardExact()
backCG = lp.BackwardCG()
f1bt = lp.Forward1Backtrack()
backLB = lp.BackwardLBFGS()

# Parametrization grid: every combination of normalize, intercept, and the two
# optional L1 regularizers, for each of the five loss processors (80 cases).
TryAll = []
testNumber = 0
for i in [False, True]:
    for j in [False, True]:
        for k in [False, True]:
            for l in [False, True]:
                for p in [backLB, f2fix, back2exact, f1bt, backCG]:
                    TryAll.append((i, j, k, l, p, testNumber))
                    testNumber += 1

@pytest.mark.parametrize("norm,inter,addL1,add2L1,processor,testNumber", TryAll)
    if getNewOptVals and (gf == 1.0):
        AwithIntercept = np.zeros((m, d + 1))
        AwithIntercept[:, 0] = np.ones(m)
        AwithIntercept[:, 1:(d + 1)] = A
        result = np.linalg.lstsq(AwithIntercept, y, rcond=None)
        xhat = result[0]
        LSval = 0.5 * np.linalg.norm(AwithIntercept.dot(xhat) - y, 2)**2 / m
        cache['optf2bt'] = LSval
    else:
        LSval = cache['optf2bt']
    assert ps_val - LSval < 1e-2


stepsize = 1e-1
f2fixed = lp.Forward2Fixed(stepsize)
f2backtrack = lp.Forward2Backtrack()
f2affine = lp.Forward2Affine()
f1fixed = lp.Forward1Fixed(stepsize)
f1bt = lp.Forward1Backtrack()
back_exact = lp.BackwardExact()
backCG = lp.BackwardCG()
backLBFGS = lp.BackwardLBFGS()

ToDo = []
firsttest = True
for i in [False, True]:
    for j in [False, True]:
        for blk in range(1, 5):
            for process in [backLBFGS, f2fixed, f2backtrack, f2affine,
                            f1fixed, f1bt, back_exact, backCG
import numpy as np
import pickle
import pytest
from matplotlib import pyplot as plt

if getNewOptVals:
    from utils import runCVX_LR
    from utils import getLRdata
    cache = {}
else:
    np.random.seed(1)
    with open('results/cache_L1LR', 'rb') as file:
        cache = pickle.load(file)

stepsize = 1e-1
f2fixed = lp.Forward2Fixed(stepsize)
f2bt = lp.Forward2Backtrack()
f1fixed = lp.Forward1Fixed(stepsize)
f1bt = lp.Forward1Backtrack()
backLBFGS = lp.BackwardLBFGS()
processors = [f2fixed, f2bt, f1fixed, f1bt, backLBFGS]

# Parametrization grid: each processor with and without normalization and an
# intercept (5 x 2 x 2 = 20 cases).
toDo = []
testNumber = 0
for processor in processors:
    for inter in [False, True]:
        for norm in [False, True]:
            toDo.append((processor, norm, inter, testNumber))
            testNumber += 1
def test_embedded(processor, testNumber):
    # L1 regularizer embedded into the data-term update (via embed=): first on
    # its own, then combined with a second L1 regularizer, and finally with
    # normalization and an intercept; each case is checked against a CVXPY
    # lasso solve.
    m = 40
    d = 10
    if getNewOptVals and (testNumber == 0):
        A, y = getLSdata(m, d)
        cache['A_embed'] = A
        cache['y_embed'] = y
    else:
        A = cache['A_embed']
        y = cache['y_embed']

    projSplit = ps.ProjSplitFit()
    gamma = 1e0
    projSplit.setDualScaling(gamma)
    lam = 0.01
    step = 1.0
    regObj = L1(lam, step)
    projSplit.addData(A, y, 2, processor, normalize=False, intercept=False,
                      embed=regObj)

    if getNewOptVals and (testNumber == 0):
        opt, _ = runCVX_lasso(A, y, lam)
        cache['embed_opt1'] = opt
    else:
        opt = cache['embed_opt1']

    for nblocks in range(1, 11, 3):
        projSplit.run(maxIterations=1000, keepHistory=True, nblocks=nblocks)
        ps_val = projSplit.getObjective()
        print('cvx opt val = {}'.format(opt))
        print('ps opt val = {}'.format(ps_val))
        assert abs(ps_val - opt) < 1e-2

    # Add a second (non-embedded) L1 regularizer; the effective penalty doubles.
    projSplit.addRegularizer(regObj)
    projSplit.run(maxIterations=1000, keepHistory=True, nblocks=5)
    ps_val = projSplit.getObjective()
    if getNewOptVals and (testNumber == 0):
        opt2, _ = runCVX_lasso(A, y, 2 * lam)
        cache['embed_opt2'] = opt2
    else:
        opt2 = cache['embed_opt2']
    print('cvx opt val = {}'.format(opt2))
    print('ps opt val = {}'.format(ps_val))
    assert abs(ps_val - opt2) < 1e-2

    # Repeat with normalization and an intercept column.
    projSplit = ps.ProjSplitFit()
    stepsize = 1e-1
    processor = lp.Forward2Fixed(stepsize)
    gamma = 1e-2
    projSplit.setDualScaling(gamma)
    lam = 0.01
    step = 1.0
    regObj = L1(lam, step)
    projSplit.addData(A, y, 2, processor, normalize=True, intercept=True,
                      embed=regObj)
    regObj = L1(lam, step)
    projSplit.addRegularizer(regObj)
    projSplit.run(maxIterations=1000, keepHistory=True, nblocks=5)
    ps_val = projSplit.getObjective()
    if getNewOptVals and (testNumber == 0):
        AwithIntercept = np.zeros((m, d + 1))
        AwithIntercept[:, 0] = np.ones(m)
        AwithIntercept[:, 1:(d + 1)] = A
        opt3, _ = runCVX_lasso(AwithIntercept, y, 2 * lam, True, True)
        cache['embed_opt3'] = opt3
    else:
        opt3 = cache['embed_opt3']
    print('cvx opt val = {}'.format(opt3))
    print('ps opt val = {}'.format(ps_val))
    assert abs(ps_val - opt3) < 1e-2
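
# ---------------------------------------------------------------------------
# Reference sketch (not collected by pytest): the common pattern every test in
# this file follows -- fit with ProjSplitFit, then check the objective against
# an independent CVXPY solve of the same problem. This is a minimal sketch for
# orientation only; it assumes the module-level names used throughout these
# tests (ps, lp, L1, cvx, and getLSdata from utils), and the helper name
# _reference_pattern is ours, not part of the library.
def _reference_pattern():
    m, d = 40, 10
    A, y = getLSdata(m, d)
    lam = 0.01

    fit = ps.ProjSplitFit()
    fit.setDualScaling(1e0)
    fit.addData(A, y, 2, lp.Forward2Fixed(1e-1), normalize=False, intercept=False)
    fit.addRegularizer(L1(lam, 1.0))
    fit.run(maxIterations=1000, keepHistory=True, nblocks=1)
    ps_val = fit.getObjective()

    # Independent baseline: (1/(2m))||Ax - y||^2 + lam*||x||_1 solved by CVXPY.
    x = cvx.Variable(d)
    f = (1 / (2 * m)) * cvx.sum_squares(A @ x - y) + lam * cvx.norm(x, 1)
    prob = cvx.Problem(cvx.Minimize(f))
    prob.solve()
    return ps_val, prob.value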