def test_linear_op_data_term_wrong():
    """addData should reject a data-term linear operator whose row dimension
    does not match the number of columns of A."""
    m = 40
    d = 10
    if getNewOptVals:
        A, y = getLSdata(m, d)
        cache['Awrongdata'] = A
        cache['ywrongdata'] = y
    else:
        A = cache['Awrongdata']
        y = cache['ywrongdata']

    projSplit = ps.ProjSplitFit()
    stepsize = 1e-1
    processor = lp.Forward2Fixed(stepsize)
    gamma = 1e0
    projSplit.setDualScaling(gamma)

    # H is d2 x p with d2 = 11, but A has d = 10 columns, so the composition
    # A @ H is undefined and addData is expected to raise.
    p = 15
    d2 = 11
    H = np.random.normal(0, 1, [d2, p])
    try:
        projSplit.addData(A, y, 2, processor, normalize=False, intercept=False,
                          linearOp=aslinearoperator(H))
        notExcept = True
    except Exception:
        notExcept = False

    assert notExcept == False

def test_backward(nblk, inter, norm, processor):
    m = 80
    d = 20
    if getNewOptVals:
        A = cache.get('Aback')
        y = cache.get('yback')
        if A is None:
            A, y = getLSdata(m, d)
            cache['Aback'] = A
            cache['yback'] = y
    else:
        A = cache.get('Aback')
        y = cache.get('yback')

    projSplit = ps.ProjSplitFit()
    gamma = 1e-3
    #if nblk==10:
    #    gamma = 1e3
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=norm, intercept=inter)
    projSplit.run(maxIterations=5000, keepHistory=True, nblocks=nblk,
                  blockActivation="random", primalTol=1e-7, dualTol=1e-7)
    #psvals = projSplit.getHistory()[0]
    #plt.plot(psvals)
    #plt.show()
    ps_opt = projSplit.getObjective()
    print('ps func opt = {}'.format(ps_opt))

    if getNewOptVals:
        LSval = cache.get((inter, 'optback'))
        if LSval is None:
            if inter:
                AwithIntercept = np.zeros((m, d + 1))
                AwithIntercept[:, 0] = np.ones(m)
                AwithIntercept[:, 1:(d + 1)] = A
                result = np.linalg.lstsq(AwithIntercept, y, rcond=None)
                xhat = result[0]
                LSval = 0.5 * np.linalg.norm(AwithIntercept.dot(xhat) - y, 2)**2 / m
            else:
                result = np.linalg.lstsq(A, y, rcond=None)
                xhat = result[0]
                LSval = 0.5 * np.linalg.norm(A.dot(xhat) - y, 2)**2 / m
            cache[(inter, 'optback')] = LSval
    else:
        LSval = cache.get((inter, 'optback'))

    print('LSval = {}'.format(LSval))
    assert ps_opt - LSval < 1e-2

def test_blockIs1bug(processor):
    m = 40
    d = 10
    if getNewOptVals:
        A = cache.get('AblockBug')
        y = cache.get('yblockBug')
        if A is None:
            A, y = getLSdata(m, d)
            cache['AblockBug'] = A
            cache['yblockBug'] = y
    else:
        A = cache.get('AblockBug')
        y = cache.get('yblockBug')

    projSplit = ps.ProjSplitFit()
    gamma = 1e1
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=False, intercept=False)
    projSplit.run(maxIterations=1000, keepHistory=True, nblocks=1,
                  blockActivation="random")
    ps_opt = projSplit.getObjective()
    print('ps func opt = {}'.format(ps_opt))

    if getNewOptVals:
        LSval = cache.get('optBug')
        if LSval is None:
            result = np.linalg.lstsq(A, y, rcond=None)
            xhat = result[0]
            LSval = 0.5 * np.linalg.norm(A.dot(xhat) - y, 2)**2 / m
            cache['optBug'] = LSval
    else:
        LSval = cache.get('optBug')

    print('LSval = {}'.format(LSval))
    assert abs(LSval - ps_opt) < 1e-2

def test_l1_lasso_blocks(processor, testNumber):
    m = 40
    d = 10
    if getNewOptVals and (testNumber == 0):
        A, y = getLSdata(m, d)
        cache['lassoA'] = A
        cache['lassoy'] = y
    else:
        A = cache['lassoA']
        y = cache['lassoy']

    projSplit = ps.ProjSplitFit()
    gamma = 1e0
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=False, intercept=False)
    lam = 0.01
    step = 1.0
    regObj = L1(lam, step)
    projSplit.addRegularizer(regObj)
    projSplit.run(maxIterations=1000, keepHistory=True, nblocks=1)
    ps_val = projSplit.getObjective()

    if getNewOptVals and (testNumber == 0):
        opt, xopt = runCVX_lasso(A, y, lam)
        cache['optlasso'] = opt
        cache['xlasso'] = xopt
    else:
        opt = cache['optlasso']
        xopt = cache['xlasso']

    print('cvx opt val = {}'.format(opt))
    print('ps opt val = {}'.format(ps_val))
    assert abs(ps_val - opt) < 1e-2

    for numBlocks in range(2, 10):
        projSplit.run(maxIterations=2000, keepHistory=True, nblocks=numBlocks)
        ps_val = projSplit.getObjective()
        #print('cvx opt val = {}'.format(opt))
        #print('ps opt val = {}'.format(ps_val))
        assert abs(ps_val - opt) < 1e-2

def test_linear_op_l1(norm, inter):
    """L1 regularizer composed with a random linear operator H, checked
    against the equivalent CVXPY problem."""
    m = 40
    d = 10
    p = 15
    if getNewOptVals:
        A = cache.get('AlinL1')
        y = cache.get('ylinL1')
        H = cache.get('HlinL1')
        if A is None:
            A, y = getLSdata(m, d)
            H = np.random.normal(0, 1, [p, d])
            cache['AlinL1'] = A
            cache['ylinL1'] = y
            cache['HlinL1'] = H
    else:
        A = cache['AlinL1']
        y = cache['ylinL1']
        H = cache['HlinL1']

    projSplit = ps.ProjSplitFit()
    stepsize = 1e-1
    processor = lp.Forward2Fixed(stepsize)
    gamma = 1e0
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=norm, intercept=inter)
    lam = 0.01
    step = 1.0
    regObj = L1(lam, step)
    projSplit.addRegularizer(regObj, linearOp=aslinearoperator(H))
    projSplit.run(maxIterations=5000, keepHistory=True, nblocks=1,
                  primalTol=1e-3, dualTol=1e-3)
    ps_val = projSplit.getObjective()

    if getNewOptVals:
        opt = cache.get((norm, inter, 'optlinL1'))
        if opt is None:
            (m, d) = A.shape
            if norm:
                Anorm = A
                scaling = np.linalg.norm(Anorm, axis=0)
                scaling += 1.0 * (scaling < 1e-10)
                Anorm = np.sqrt(m) * Anorm / scaling
                A = Anorm
            if inter:
                AwithIntercept = np.zeros((m, d + 1))
                AwithIntercept[:, 0] = np.ones(m)
                AwithIntercept[:, 1:(d + 1)] = A
                A = AwithIntercept
                # the intercept column is left out of the regularizer
                HwithIntercept = np.zeros((p, d + 1))
                HwithIntercept[:, 0] = np.zeros(p)
                HwithIntercept[:, 1:(d + 1)] = H
                H = HwithIntercept
                x_cvx = cvx.Variable(d + 1)
            else:
                x_cvx = cvx.Variable(d)

            f = (1 / (2 * m)) * cvx.sum_squares(A @ x_cvx - y)
            f += lam * cvx.norm(H @ x_cvx, 1)
            prob = cvx.Problem(cvx.Minimize(f))
            prob.solve(verbose=True)
            opt = prob.value
            cache[(norm, inter, 'optlinL1')] = opt
    else:
        opt = cache[(norm, inter, 'optlinL1')]

    primViol = projSplit.getPrimalViolation()
    dualViol = projSplit.getDualViolation()
    print("primal violation = {}".format(primViol))
    print("dual violation = {}".format(dualViol))
    print("ps val = {}".format(ps_val))
    print("cvx val = {}".format(opt))
    assert ps_val - opt < 1e-2

def test_linear_op_data_term(norm, inter, addL1, add2L1, processor, testNumber):
    """Linear operator composed with the data term, optionally with one or two
    L1 regularizers, checked against the equivalent CVXPY problem."""
    m = 40
    d = 10
    p = 15
    d2 = 10
    if getNewOptVals and (testNumber == 0):
        A, y = getLSdata(m, d)
        H = np.random.normal(0, 1, [d2, p])
        cache['AdataTerm'] = A
        cache['ydataTerm'] = y
        cache['HdataTerm'] = H
    else:
        A = cache['AdataTerm']
        y = cache['ydataTerm']
        H = cache['HdataTerm']

    projSplit = ps.ProjSplitFit()
    processor.setStep(5e-1)
    gamma = 1e0
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=norm, intercept=inter,
                      linearOp=aslinearoperator(H))
    lam = 0.01
    step = 1.0
    if addL1:
        regObj = L1(lam, step)
        projSplit.addRegularizer(regObj)
    if add2L1:
        regObj2 = L1(lam, step)
        projSplit.addRegularizer(regObj2)

    projSplit.run(maxIterations=10000, keepHistory=True, nblocks=3,
                  primalTol=1e-3, dualTol=1e-3)
    ps_val = projSplit.getObjective()
    primViol = projSplit.getPrimalViolation()
    dualViol = projSplit.getDualViolation()
    print("primal violation = {}".format(primViol))
    print("dual violation = {}".format(dualViol))

    if getNewOptVals:
        opt = cache.get((addL1, add2L1, inter, norm, 'optdata'))
        if opt is None:
            if norm:
                scaling = np.linalg.norm(A, axis=0)
                scaling += 1.0 * (scaling < 1e-10)
                A = np.sqrt(A.shape[0]) * A / scaling
            if inter:
                AwithIntercept = np.zeros((m, d + 1))
                AwithIntercept[:, 0] = np.ones(m)
                AwithIntercept[:, 1:(d + 1)] = A
                A = AwithIntercept

                HwithIntercept = np.zeros((d2 + 1, p + 1))
                HwithIntercept[:, 0] = np.zeros(d2 + 1)
                HwithIntercept[0] = np.ones(p + 1)
                HwithIntercept[0, 0] = 1.0
                HwithIntercept[1:(d2 + 1), 1:(p + 1)] = H
                H = HwithIntercept

            (m, _) = A.shape
            if inter:
                x_cvx = cvx.Variable(p + 1)
            else:
                x_cvx = cvx.Variable(p)

            f = (1 / (2 * m)) * cvx.sum_squares(A @ H @ x_cvx - y)
            if addL1:
                f += lam * cvx.norm(x_cvx, 1)
            if add2L1:
                f += lam * cvx.norm(x_cvx, 1)
            prob = cvx.Problem(cvx.Minimize(f))
            prob.solve(verbose=True)
            opt = prob.value
            cache[(addL1, add2L1, inter, norm, 'optdata')] = opt
    else:
        opt = cache[(addL1, add2L1, inter, norm, 'optdata')]

    print("ps opt = {}".format(ps_val))
    print("cvx opt = {}".format(opt))
    assert ps_val - opt < 1e-2

def test_multi_linear_op_l1(norm, inter, testNumber, numblocks):
    """Several L1 regularizers, each composed with its own random linear
    operator, checked against the equivalent CVXPY problem."""
    m = 40
    d = 10
    numregs = 5
    if getNewOptVals and (testNumber == 0):
        A, y = getLSdata(m, d)
        cache['AmutliLinL1'] = A
        cache['ymutliLinL1'] = y
        H = []
        for i in range(numregs):
            p = np.random.randint(1, 100)
            H.append(np.random.normal(0, 1, [p, d]))
        cache['HmultiLinL1'] = H
    else:
        H = cache['HmultiLinL1']
        A = cache['AmutliLinL1']
        y = cache['ymutliLinL1']

    projSplit = ps.ProjSplitFit()
    stepsize = 1e-1
    processor = lp.Forward2Fixed(stepsize)
    gamma = 1e0
    if norm and inter:
        gamma = 1e2
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=norm, intercept=inter)

    lam = []
    for i in range(numregs):
        lam.append(0.001 * (i + 1))
        step = 1.0
        regObj = L1(lam[-1], step)
        projSplit.addRegularizer(regObj, linearOp=aslinearoperator(H[i]))

    projSplit.run(maxIterations=5000, keepHistory=True, nblocks=numblocks,
                  primalTol=1e-6, dualTol=1e-6)
    ps_val = projSplit.getObjective()

    if getNewOptVals:
        if norm:
            Anorm = A
            m = Anorm.shape[0]
            scaling = np.linalg.norm(Anorm, axis=0)
            scaling += 1.0 * (scaling < 1e-10)
            Anorm = np.sqrt(m) * Anorm / scaling
            A = Anorm
        if inter:
            AwithIntercept = np.zeros((m, d + 1))
            AwithIntercept[:, 0] = np.ones(m)
            AwithIntercept[:, 1:(d + 1)] = A
            A = AwithIntercept

        (m, d) = A.shape
        x_cvx = cvx.Variable(d)
        f = (1 / (2 * m)) * cvx.sum_squares(A @ x_cvx - y)
        for i in range(numregs):
            if inter:
                # the regularizers do not act on the intercept coordinate
                f += lam[i] * cvx.norm(H[i] @ x_cvx[1:d], 1)
            else:
                f += lam[i] * cvx.norm(H[i] @ x_cvx, 1)
        prob = cvx.Problem(cvx.Minimize(f))
        prob.solve(verbose=True)
        opt = prob.value
        cache[(norm, inter, 'opt')] = opt
    else:
        opt = cache[(norm, inter, 'opt')]

    print("ps val = {}".format(ps_val))
    print("cvx val = {}".format(opt))
    assert ps_val - opt < 1e-2

def test_user_defined_embedded(processor, testNumber):
    """User-defined regularizers supplied as (prox, value) pairs, with the
    box-indicator regularizer embedded in the data-term blocks."""
    def val1(x):
        return 0.5 * np.linalg.norm(x, 2)**2

    def prox1(x, scale):
        return (1 + scale)**(-1) * x

    def val2(x):
        return np.linalg.norm(x, 2)

    def prox2(x, scale):
        normx = np.linalg.norm(x, 2)
        if normx <= scale:
            return 0 * x
        else:
            return (normx - scale) * x / normx

    tau = 0.2

    def val3(x):
        if ((x <= tau) & (x >= -tau)).all():
            return 0
        else:
            return float('inf')

    def prox3(x, scale):
        # projection onto the box [-tau, tau]; the clamped pieces use strict
        # inequalities so the boundary is not counted twice
        ones = np.ones(x.shape)
        return tau * (x > tau) * ones - tau * (x < -tau) * ones + (
            (x <= tau) & (x >= -tau)) * x

    m = 40
    d = 10
    if getNewOptVals and (testNumber == 0):
        A, y = getLSdata(m, d)
        cache['Aembed'] = A
        cache['yembed'] = y
    else:
        A = cache['Aembed']
        y = cache['yembed']

    projSplit = ps.ProjSplitFit()
    gamma = 1e0
    projSplit.setDualScaling(gamma)

    # calling getScale at this point is expected to raise
    try:
        scaling = projSplit.getScale()
        exceptMade = False
    except Exception:
        exceptMade = True
    if exceptMade == False:
        raise Exception

    regObj = []
    nu = [0.01, 0.03, 0.1]
    step = [1.0, 1.0, 1.0]
    regObj.append(Regularizer(prox1, val1, nu[0], step[0]))
    regObj.append(Regularizer(prox2, val2, nu[1], step[1]))
    regObj.append(Regularizer(prox3, val3, nu[2], step[2]))

    projSplit.addData(A, y, 2, processor, normalize=False, intercept=True,
                      embed=regObj[2])
    projSplit.addRegularizer(regObj[0])
    projSplit.addRegularizer(regObj[1])
    projSplit.run(maxIterations=1000, keepHistory=True, nblocks=5,
                  resetIterate=True)

    if getNewOptVals and (testNumber == 0):
        AwithIntercept = np.zeros((m, d + 1))
        AwithIntercept[:, 0] = np.ones(m)
        AwithIntercept[:, 1:(d + 1)] = A
        (m, d) = AwithIntercept.shape

        x_cvx = cvx.Variable(d)
        f = (1 / (2 * m)) * cvx.sum_squares(AwithIntercept @ x_cvx - y)
        constraints = [-tau <= x_cvx[1:d], x_cvx[1:d] <= tau]
        f += 0.5 * nu[0] * cvx.norm(x_cvx[1:d], 2)**2
        f += nu[1] * cvx.norm(x_cvx[1:d], 2)
        obj = cvx.Minimize(f)
        prob = cvx.Problem(obj, constraints)
        prob.solve(verbose=False)
        #opt = prob.value
        xopt = x_cvx.value
        xopt = np.squeeze(np.array(xopt))
        cache['xoptembedded'] = xopt
    else:
        xopt = cache['xoptembedded']

    xps = projSplit.getSolution()
    print("Norm error = {}".format(np.linalg.norm(xopt - xps, 2)))
    assert np.linalg.norm(xopt - xps, 2) < 1e-2

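# Side note on the user-defined (value, prox) pairs used above and in
# test_user_defined below: val1/prox1 is 0.5*||x||_2^2 with prox x/(1+scale),
# val2/prox2 is the 2-norm with the block soft-threshold as its prox, and
# val3/prox3 is the indicator of the box [-tau, tau], whose prox is the
# Euclidean projection onto that box. The sketch below is illustrative only:
# it is not part of the test suite and the helper name is made up. It checks
# the box-projection formula against np.clip on a few points.
def _box_projection_sanity_check(tau=0.2):
    x = np.array([-0.5, -0.2, -0.1, 0.0, 0.15, 0.2, 0.9])
    ones = np.ones(x.shape)
    # same formula as prox3 above; the scale argument of a prox plays no role
    # for an indicator function, so it is omitted here
    proj = tau * (x > tau) * ones - tau * (x < -tau) * ones + (
        (x <= tau) & (x >= -tau)) * x
    assert np.allclose(proj, np.clip(x, -tau, tau))
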
def test_l1_intercept_and_normalize(processor, inter, norm):
    m = 40
    d = 10
    if getNewOptVals:
        A = cache.get('Al1intAndNorm')
        y = cache.get('yl1intAndNorm')
        if A is None:
            A, y = getLSdata(m, d)
            cache['Al1intAndNorm'] = A
            cache['yl1intAndNorm'] = y
    else:
        A = cache['Al1intAndNorm']
        y = cache['yl1intAndNorm']

    projSplit = ps.ProjSplitFit()
    if inter and norm:
        gamma = 1e-2
    elif (inter == False) and norm:
        gamma = 1e-4
    else:
        gamma = 1e0
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=norm, intercept=inter)
    lam = 1e-3
    step = 1.0
    regObj = L1(lam, step)
    projSplit.addRegularizer(regObj)
    projSplit.run(maxIterations=5000, keepHistory=True, nblocks=10,
                  primalTol=1e-3, dualTol=1e-3)
    ps_val = projSplit.getObjective()
    primViol = projSplit.getPrimalViolation()
    dualViol = projSplit.getDualViolation()
    print("primal violation = {}".format(primViol))
    print("dual violation = {}".format(dualViol))

    if getNewOptVals:
        opt = cache.get((inter, norm, 'l1opt'))
        if opt is None:
            if norm:
                Anorm = np.copy(A)
                n = A.shape[0]
                scaling = np.linalg.norm(Anorm, axis=0)
                scaling += 1.0 * (scaling < 1e-10)
                Anorm = np.sqrt(n) * Anorm / scaling
            else:
                Anorm = A
            AwithIntercept = np.zeros((m, d + 1))
            if inter:
                AwithIntercept[:, 0] = np.ones(m)
            else:
                AwithIntercept[:, 0] = np.zeros(m)
            AwithIntercept[:, 1:(d + 1)] = Anorm
            opt, _ = runCVX_lasso(AwithIntercept, y, lam, True)
            cache[(inter, norm, 'l1opt')] = opt
    else:
        opt = cache[(inter, norm, 'l1opt')]

    print('cvx opt val = {}'.format(opt))
    print('ps opt val = {}'.format(ps_val))
    assert abs(ps_val - opt) < 1e-2

def test_user_defined(processor, testNumber):
    """User-defined regularizers given as (value, prox) pairs, first one at a
    time and then combined, checked against CVXPY."""
    def val1(x):
        return 0.5 * np.linalg.norm(x, 2)**2

    def prox1(x, scale):
        return (1 + scale)**(-1) * x

    def val2(x):
        return np.linalg.norm(x, 2)

    def prox2(x, scale):
        normx = np.linalg.norm(x, 2)
        if normx <= scale:
            return 0 * x
        else:
            return (normx - scale) * x / normx

    tau = 0.2

    def val3(x):
        if ((x <= tau) & (x >= -tau)).all():
            return 0
        else:
            return float('inf')

    def prox3(x, scale):
        # projection onto the box [-tau, tau]; the clamped pieces use strict
        # inequalities so the boundary is not counted twice
        ones = np.ones(x.shape)
        return tau * (x > tau) * ones - tau * (x < -tau) * ones + (
            (x <= tau) & (x >= -tau)) * x

    funcList = [(val3, prox3), (val1, prox1), (val2, prox2)]
    i = 0
    m = 40
    d = 10
    if getNewOptVals and (testNumber == 0):
        A, y = getLSdata(m, d)
        cache['Auser'] = A
        cache['yuser'] = y
    else:
        A = cache['Auser']
        y = cache['yuser']

    for (val, prox) in funcList:
        projSplit = ps.ProjSplitFit()
        gamma = 1e0
        projSplit.setDualScaling(gamma)
        projSplit.addData(A, y, 2, processor, normalize=False, intercept=False)
        nu = 5.5
        step = 1e0
        regObj = Regularizer(prox, val, scaling=nu, step=step)
        projSplit.addRegularizer(regObj)
        projSplit.run(maxIterations=1000, keepHistory=True, nblocks=1,
                      resetIterate=True, primalTol=1e-12, dualTol=1e-12)
        ps_val = projSplit.getObjective()

        (m, d) = A.shape
        if getNewOptVals and (testNumber == 0):
            x_cvx = cvx.Variable(d)
            f = (1 / (2 * m)) * cvx.sum_squares(A @ x_cvx - y)
            if i == 0:
                constraints = [-tau <= x_cvx, x_cvx <= tau]
            elif i == 1:
                f += 0.5 * nu * cvx.norm(x_cvx, 2)**2
                constraints = []
            elif i == 2:
                f += nu * cvx.norm(x_cvx, 2)
                constraints = []
            obj = cvx.Minimize(f)
            prob = cvx.Problem(obj, constraints)
            prob.solve(verbose=True)
            opt = prob.value
            xopt = x_cvx.value
            xopt = np.squeeze(np.array(xopt))
            cache[(i, 'optuser')] = opt
            cache[(i, 'xuser')] = xopt
        else:
            opt = cache[(i, 'optuser')]
            xopt = cache[(i, 'xuser')]

        if i == 0:
            xps = projSplit.getSolution()
            print(np.linalg.norm(xopt - xps, 2))
            assert np.linalg.norm(xopt - xps, 2) < 1e-2
        else:
            print('cvx opt val = {}'.format(opt))
            print('ps opt val = {}'.format(ps_val))
            assert abs(ps_val - opt) < 1e-2
        i += 1

    # test combined
    m = 40
    d = 10
    if getNewOptVals and (testNumber == 0):
        A, y = getLSdata(m, d)
        cache['Acombined'] = A
        cache['ycombined'] = y
    else:
        A = cache['Acombined']
        y = cache['ycombined']

    projSplit = ps.ProjSplitFit()
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=False, intercept=False)
    nu1 = 0.01
    step = 1e0
    regObj = Regularizer(prox1, val1, scaling=nu1, step=step)
    projSplit.addRegularizer(regObj)
    nu2 = 0.05
    step = 1e0
    regObj = Regularizer(prox2, val2, scaling=nu2, step=step)
    projSplit.addRegularizer(regObj)
    step = 1e0
    regObj = Regularizer(prox3, val3, step=step)
    projSplit.addRegularizer(regObj)
    projSplit.run(maxIterations=1000, keepHistory=True, nblocks=1,
                  resetIterate=True, primalTol=1e-12, dualTol=1e-12)
    ps_val = projSplit.getObjective()
    xps = projSplit.getSolution()

    if getNewOptVals and (testNumber == 0):
        x_cvx = cvx.Variable(d)
        f = (1 / (2 * m)) * cvx.sum_squares(A @ x_cvx - y)
        constraints = [-tau <= x_cvx, x_cvx <= tau]
        f += 0.5 * nu1 * cvx.norm(x_cvx, 2)**2
        f += nu2 * cvx.norm(x_cvx, 2)
        obj = cvx.Minimize(f)
        prob = cvx.Problem(obj, constraints)
        prob.solve(verbose=True)
        opt = prob.value
        xopt = x_cvx.value
        xopt = np.squeeze(np.array(xopt))
        cache['optcombined'] = opt
        cache['xcombined'] = xopt
    else:
        opt = cache['optcombined']
        xopt = cache['xcombined']

    assert np.linalg.norm(xopt - xps, 2) < 1e-2

def test_embedded(processor, testNumber):
    """L1 regularizer embedded in the data-term processor, alone and together
    with a second (non-embedded) L1 regularizer."""
    m = 40
    d = 10
    if getNewOptVals and (testNumber == 0):
        A, y = getLSdata(m, d)
        cache['A_embed'] = A
        cache['y_embed'] = y
    else:
        A = cache['A_embed']
        y = cache['y_embed']

    projSplit = ps.ProjSplitFit()
    gamma = 1e0
    projSplit.setDualScaling(gamma)
    lam = 0.01
    step = 1.0
    regObj = L1(lam, step)
    projSplit.addData(A, y, 2, processor, normalize=False, intercept=False,
                      embed=regObj)

    if getNewOptVals and (testNumber == 0):
        opt, _ = runCVX_lasso(A, y, lam)
        cache['embed_opt1'] = opt
    else:
        opt = cache['embed_opt1']

    for nblocks in range(1, 11, 3):
        projSplit.run(maxIterations=1000, keepHistory=True, nblocks=nblocks)
        ps_val = projSplit.getObjective()
        print('cvx opt val = {}'.format(opt))
        print('ps opt val = {}'.format(ps_val))
        assert abs(ps_val - opt) < 1e-2

    # adding the same L1 regularizer again doubles the penalty
    projSplit.addRegularizer(regObj)
    projSplit.run(maxIterations=1000, keepHistory=True, nblocks=5)
    ps_val = projSplit.getObjective()

    if getNewOptVals and (testNumber == 0):
        opt2, _ = runCVX_lasso(A, y, 2 * lam)
        cache['embed_opt2'] = opt2
    else:
        opt2 = cache['embed_opt2']

    print('cvx opt val = {}'.format(opt2))
    print('ps opt val = {}'.format(ps_val))
    assert abs(ps_val - opt2) < 1e-2

    # repeat with normalization and an intercept
    projSplit = ps.ProjSplitFit()
    stepsize = 1e-1
    processor = lp.Forward2Fixed(stepsize)
    gamma = 1e-2
    projSplit.setDualScaling(gamma)
    lam = 0.01
    step = 1.0
    regObj = L1(lam, step)
    projSplit.addData(A, y, 2, processor, normalize=True, intercept=True,
                      embed=regObj)
    regObj = L1(lam, step)
    projSplit.addRegularizer(regObj)
    projSplit.run(maxIterations=1000, keepHistory=True, nblocks=5)
    ps_val = projSplit.getObjective()

    if getNewOptVals and (testNumber == 0):
        AwithIntercept = np.zeros((m, d + 1))
        AwithIntercept[:, 0] = np.ones(m)
        AwithIntercept[:, 1:(d + 1)] = A
        opt3, _ = runCVX_lasso(AwithIntercept, y, 2 * lam, True, True)
        cache['embed_opt3'] = opt3
    else:
        opt3 = cache['embed_opt3']

    print('cvx opt val = {}'.format(opt3))
    print('ps opt val = {}'.format(ps_val))
    assert abs(ps_val - opt3) < 1e-2

def test_l1_multi_lasso(processor, testNumber, equalize):
    """Adding the same L1 regularizer several times is equivalent to adding it
    once with the parameter scaled up by that factor."""
    m = 40
    d = 10
    if getNewOptVals and (testNumber == 0):
        A, y = getLSdata(m, d)
        cache['Amulti'] = A
        cache['ymulti'] = y
    else:
        A = cache['Amulti']
        y = cache['ymulti']

    projSplit = ps.ProjSplitFit()
    gamma = 1e0
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=False, intercept=False)
    lam = 0.01
    step = 1.0
    regObj = L1(lam, step)
    fac = 5
    # add the same regularizer fac times, same as using
    # it once with fac times the parameter
    for _ in range(fac):
        projSplit.addRegularizer(regObj)
    projSplit.run(maxIterations=1000, keepHistory=True, nblocks=1,
                  equalizeStepsizes=equalize)
    ps_val = projSplit.getObjective()

    if getNewOptVals and (testNumber == 0):
        opt, _ = runCVX_lasso(A, y, fac * lam)
        cache['opt_multi'] = opt
    else:
        opt = cache['opt_multi']

    print('cvx opt val = {}'.format(opt))
    print('ps opt val = {}'.format(ps_val))
    assert abs(ps_val - opt) < 1e-2

    # test with intercept
    projSplit.addData(A, y, 2, processor, normalize=False, intercept=True)
    projSplit.run(maxIterations=1000, keepHistory=True, nblocks=1)
    ps_val = projSplit.getObjective()

    if getNewOptVals and (testNumber == 0):
        AwithIntercept = np.zeros((m, d + 1))
        AwithIntercept[:, 0] = np.ones(m)
        AwithIntercept[:, 1:(d + 1)] = A
        opt_multi_inter, _ = runCVX_lasso(AwithIntercept, y, fac * lam, True)
        cache['opt_multi_inter'] = opt_multi_inter
    else:
        opt_multi_inter = cache['opt_multi_inter']

    #print('cvx opt val = {}'.format(opt))
    #print('ps opt val = {}'.format(ps_val))
    assert abs(ps_val - opt_multi_inter) < 1e-2

    # test multiple data blocks
    for numBlocks in range(2, 11):
        projSplit.run(maxIterations=2000, keepHistory=True, nblocks=numBlocks)
        ps_val = projSplit.getObjective()
        print('cvx opt val = {}'.format(opt_multi_inter))
        print('ps opt val = {}'.format(ps_val))
        assert abs(ps_val - opt_multi_inter) < 1e-2

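# The tests in this file take their `processor` (and, where present,
# `testNumber`, `inter`, `norm`, `nblk`, `equalize`) arguments from an external
# driver. A minimal sketch of one way such a driver might look with pytest
# parametrization follows; it is an assumption for illustration only --
# Forward2Fixed with a fixed stepsize is the only loss processor constructed
# explicitly in this file, and the wrapper name below is made up rather than
# part of the test suite.
import pytest

@pytest.mark.parametrize("testNumber", [0, 1])
def test_l1_multi_lasso_forward2fixed(testNumber):
    # drive the multi-L1 lasso test above with a fixed-step two-forward-step
    # loss processor and step-size equalization turned off
    processor = lp.Forward2Fixed(1e-1)
    test_l1_multi_lasso(processor, testNumber, False)
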