Example #1
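All of the snippets below are excerpted from the projSplitFit test suite and rely on module-level imports that are not repeated in each example. The preamble below is a minimal sketch of what they assume; projSplitFit, regularizers, numpy, cvxpy, scipy, and pytest appear explicitly in the examples, while the losses and lossProcessors module names are assumptions made here for illustration.

import numpy as np
from numpy.random import normal
import pytest
import cvxpy as cvx
from scipy.sparse.linalg import aslinearoperator

import projSplitFit as ps
import regularizers
from regularizers import L1
import losses as ls            # assumed module providing LossPlugIn
import lossProcessors as lp    # assumed module providing Forward2Fixed, Forward1Backtrack, etc.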
def test_one_sided():
    projSplit = ps.ProjSplitFit()

    def deriv(x,y):
        return (x>=y)*(x-y)

    def val(x,y):
        return (x>=y)*(x-y)**2

    # LossPlugIn needs only a derivative; the second call also supplies the value
    # function and replaces the first loss object.
    loss = ls.LossPlugIn(deriv)
    loss = ls.LossPlugIn(deriv, val)
    m = 20
    d = 50
    A = normal(0,1,[m,d])
    y = normal(0,1,m)

    projSplit.addData(A,y,loss=loss,intercept=False,normalize=False)
    projSplit.addRegularizer(L1())
    projSplit.run(keepHistory=False,nblocks=10)


    primTol = projSplit.getPrimalViolation()
    dualTol = projSplit.getDualViolation()

    assert primTol < 1e-6
    assert dualTol < 1e-6
Example #2
def test_linear_op_data_term_wrong():
    m = 40
    d = 10
    if getNewOptVals:
        A,y = getLSdata(m,d)
        cache['Awrongdata']=A
        cache['ywrongdata']=y
    else:
        A=cache['Awrongdata']
        y=cache['ywrongdata']

    projSplit = ps.ProjSplitFit()
    stepsize = 1e-1
    processor = lp.Forward2Fixed(stepsize)
    gamma = 1e0
    projSplit.setDualScaling(gamma)
    p = 15
    d2 = 11
    H = np.random.normal(0,1,[d2,p])
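    # H maps a 15-dimensional variable into an 11-dimensional space, but A has d = 10
    # columns, so the composition is dimension-mismatched and addData is expected to raise.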
    try:
        projSplit.addData(A,y,2,processor,normalize=False,intercept=False,
                      linearOp = aslinearoperator(H))
        notExcept = True

    except Exception:
        notExcept = False

    assert notExcept == False
Example #3
def test_ls_PrimDual(processor, inter, norm, nblk, firsttest):
    processor.setStep(1e-1)
    projSplit = ps.ProjSplitFit()
    m = 10
    d = 20
    if getNewOptVals and firsttest:
        A = np.random.normal(0, 1, [m, d])
        y = np.random.normal(0, 1, m)
        cache['AprimDual'] = A
        cache['yprimDual'] = y
    else:
        A = cache['AprimDual']
        y = cache['yprimDual']

    projSplit.setDualScaling(1e-1)
    projSplit.addData(A, y, 2, processor, intercept=inter, normalize=norm)
    projSplit.run(maxIterations=None,
                  keepHistory=True,
                  primalTol=1e-3,
                  dualTol=1e-3,
                  nblocks=nblk,
                  historyFreq=1)

    print("Primal violation = {}".format(projSplit.getPrimalViolation()))
    print("Dual violation = {}".format(projSplit.getDualViolation()))

    assert projSplit.getPrimalViolation() < 1e-3
    assert projSplit.getDualViolation() < 1e-3
Example #4
def test_bad_getParams():
    projSplit = ps.ProjSplitFit()
    # None of these getters is valid before data have been added and run() has been
    # called, so the block below is expected to raise.
    try:
        testing = projSplit.numPrimalVars()
        testing = projSplit.numObservations()
        testing = projSplit.getObjective()
        testing = projSplit.getSolution()
        testing = projSplit.getDualViolation()
        testing = projSplit.getHistory()
        testing = projSplit.getPrimalViolation()
        testing = projSplit.getScale()
        noExcept = True
    except Exception:
        noExcept = False

    assert noExcept == False
Example #5
def test_a_reg(builtInReg, cvxf, testNum):
    projSplit = ps.ProjSplitFit()
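    # m, d, and the CVXPY variable xcvx used below are presumably defined at module
    # level in the original test file; they are not part of this excerpt.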

    if getNewOptVals:
        A = cache.get('Areg')
        y = cache.get('yreg')
        if A is None:
            A = np.random.normal(0, 1, [m, d])
            y = np.random.normal(0, 1, m)
            cache['Areg'] = A
            cache['yreg'] = y
    else:
        A = cache['Areg']
        y = cache['yreg']

    projSplit.addData(A, y, intercept=False, normalize=False, loss=2)
    projSplit.addRegularizer(builtInReg)
    projSplit.run(nblocks=5)
    psval = projSplit.getObjective()

    if getNewOptVals:
        opt = cache.get(('optreg', testNum))
        if opt is None:
            cvxf += (1 / (2 * m)) * cvx.sum_squares(A @ xcvx - y)
            prob = cvx.Problem(cvx.Minimize(cvxf))
            prob.solve(verbose=False)
            opt = prob.value
            cache[('optreg', testNum)] = opt
    else:
        opt = cache[('optreg', testNum)]

    print(f"psval = {psval}")
    print(f"CVX opt = {opt}")

    assert psval - opt < 1e-5
Example #6
def test_add_linear_ops():
    projSplit = ps.ProjSplitFit()
    m = 10
    d = 20
    A = np.random.normal(0, 1, [m, d])
    y = np.random.normal(0, 1, m)
    processDummy = ProcessDummy()
    projSplit.addData(A, y, 2, processDummy)

    p = 11
    H = np.random.normal(0, 1, [p, d])
    lam = 0.01
    step = 1.0
    regObj = L1(lam, step)

    projSplit.addRegularizer(regObj, linearOp=aslinearoperator(H))

    d2 = 9
    H = np.random.normal(0, 1, [p, d2])
    try:
        # this H has the wrong input dimension (d2 = 9 columns instead of d = 20),
        # so addRegularizer should raise
        projSplit.addRegularizer(regObj, linearOp=aslinearoperator(H))
        noExcept = True
    except Exception:
        noExcept = False

    assert noExcept == False
Example #7
def test_ls_Int_Norm(processor, norm, inter, firsttest):
    processor.setStep(5e-1)
    projSplit = ps.ProjSplitFit()
    m = 20
    d = 10
    print(f"firsttest = {firsttest}")
    print(f"getNewOptVals={getNewOptVals}")
    if getNewOptVals and firsttest:
        A = np.random.normal(0, 1, [m, d])
        y = np.random.normal(0, 1, m)
        cache['AlsintNorm'] = A
        cache['ylsintNorm'] = y
    else:
        A = cache['AlsintNorm']
        y = cache['ylsintNorm']

    gamma = 1e-2
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=norm, intercept=inter)
    projSplit.run(maxIterations=5000,
                  keepHistory=True,
                  nblocks=10,
                  primalTol=0.0,
                  dualTol=0.0)
    ps_sol = projSplit.getSolution()
    ps_sol_simp = projSplit.getSolution(ergodic="simple")
    ps_sol_weight = projSplit.getSolution(ergodic="weighted")

    if getNewOptVals:
        LSval = cache.get((inter, norm, 'lsIntNormOpt'))
        if LSval is None:
            if inter:
                AwithIntercept = np.zeros((m, d + 1))
                AwithIntercept[:, 0] = np.ones(m)
                AwithIntercept[:, 1:(d + 1)] = A
                result = np.linalg.lstsq(AwithIntercept, y, rcond=None)
                xhat = result[0]
            else:
                result = np.linalg.lstsq(A, y, rcond=None)
                xhat = result[0]

            if norm == False:
                assert (np.linalg.norm(xhat - ps_sol) < 1e-2)
                assert (np.linalg.norm(xhat - ps_sol_simp) < 1e-2)
                assert (np.linalg.norm(xhat - ps_sol_weight) < 1e-2)

            if inter:
                LSval = 0.5 * np.linalg.norm(AwithIntercept.dot(xhat) - y,
                                             2)**2 / m
            else:
                LSval = 0.5 * np.linalg.norm(A.dot(xhat) - y, 2)**2 / m
            cache[(inter, norm, 'lsIntNormOpt')] = LSval

    else:
        LSval = cache.get((inter, norm, 'lsIntNormOpt'))

    psSolVal = projSplit.getObjective()

    assert (abs(psSolVal - LSval) < 1e-2)
Example #8
def test_add_regularizer():
    projSplit = ps.ProjSplitFit()
    scale = 11.5
    regObj = L1(scale)
    projSplit.addRegularizer(regObj)
    scale2 = 15.7
    regObj.setScaling(scale2)
    assert (projSplit.allRegularizers[0].getScaling() == scale2)
Example #9
def test_backward(nblk, inter, norm, processor):
    m = 80
    d = 20
    if getNewOptVals:
        A = cache.get('Aback')
        y = cache.get('yback')
        if A is None:

            A, y = getLSdata(m, d)
            cache['Aback'] = A
            cache['yback'] = y
    else:
        A = cache.get('Aback')
        y = cache.get('yback')

    projSplit = ps.ProjSplitFit()
    gamma = 1e-3
    #if nblk==10:
    #    gamma = 1e3
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=norm, intercept=inter)

    projSplit.run(maxIterations=5000,
                  keepHistory=True,
                  nblocks=nblk,
                  blockActivation="random",
                  primalTol=1e-7,
                  dualTol=1e-7)

    #psvals = projSplit.getHistory()[0]
    #plt.plot(psvals)
    #plt.show()
    ps_opt = projSplit.getObjective()
    print('ps func opt = {}'.format(ps_opt))

    if getNewOptVals:
        LSval = cache.get((inter, 'optback'))
        if LSval is None:
            if inter:
                AwithIntercept = np.zeros((m, d + 1))
                AwithIntercept[:, 0] = np.ones(m)
                AwithIntercept[:, 1:(d + 1)] = A
                result = np.linalg.lstsq(AwithIntercept, y, rcond=None)
                xhat = result[0]
                LSval = 0.5 * np.linalg.norm(AwithIntercept.dot(xhat) - y,
                                             2)**2 / m
            else:
                result = np.linalg.lstsq(A, y, rcond=None)
                xhat = result[0]
                LSval = 0.5 * np.linalg.norm(A.dot(xhat) - y, 2)**2 / m
            cache[(inter, 'optback')] = LSval
    else:
        LSval = cache.get((inter, 'optback'))

    print('LSval = {}'.format(LSval))

    assert ps_opt - LSval < 1e-2
Example #10
def test_all_adds_together(sparse_type):
    psObj = ps.ProjSplitFit()
    obs = sparse_type([[1, 2, 3], [4, 5, 6]])
    y = [1, 1]
    H = sparse_type([[1, 1, 7, 8], [7, 9, 7, 8], [4, 4, 3, 4]])
    psObj.addData(obs, y, 2, linearOp=H)
    regObj = regularizers.L1()
    G = sparse_type([[1, 1, 1, 11], [7, 7, 11, 42]])
    psObj.addRegularizer(regObj, linearOp=G)
Example #11
def test_wrong_obs_matrix():
    psObj = ps.ProjSplitFit()
    obs = [[1, 2, 3], [4, 5, 6]]
    y = [1, 1]
    with pytest.raises(Exception):
        psObj.addData(obs, y, 2)
    obs = "hello world"
    with pytest.raises(Exception):
        psObj.addData(obs, y, 2)
Example #12
def test_good_embed():
    projSplit = ps.ProjSplitFit()
    m = 10
    d = 20
    A = np.random.normal(0, 1, [m, d])
    y = np.random.normal(0, 1, m)
    processDummy = ProcessDummy()
    regObj = L1()
    projSplit.addData(A, y, 2, processDummy, embed=regObj)
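    # An embedded regularizer is handled inside the loss-processing step rather than
    # being added to the list of separate regularizers, so numRegs stays at 0.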
    assert projSplit.numRegs == 0
Example #13
def test_L1LR(processor, nrm, inter, testNumber):

    m = 40
    d = 10
    if getNewOptVals and (testNumber == 0):
        A, y = getLRdata(m, d)
        cache['A'] = A
        cache['y'] = y
    else:
        A = cache['A']
        y = cache['y']

    projSplit = ps.ProjSplitFit()

    gamma = 1e0
    projSplit.setDualScaling(gamma)
    projSplit.addData(A,
                      y,
                      'logistic',
                      processor,
                      normalize=nrm,
                      intercept=inter)
    lam = 5e-2
    step = 1.0
    regObj = L1(lam, step)
    projSplit.addRegularizer(regObj)
    projSplit.run(maxIterations=1000, keepHistory=True, nblocks=1)
    ps_val = projSplit.getObjective()

    if getNewOptVals:
        opt = cache.get((nrm, inter, 'opt'))
        if opt is None:
            if nrm:
                Anorm = A
                n = A.shape[0]
                scaling = np.linalg.norm(Anorm, axis=0)
                scaling += 1.0 * (scaling < 1e-10)
                A = np.sqrt(n) * Anorm / scaling

            if inter:
                AwithIntercept = np.zeros((m, d + 1))
                AwithIntercept[:, 0] = np.ones(m)
                AwithIntercept[:, 1:(d + 1)] = A
                A = AwithIntercept

            opt, _ = runCVX_LR(A, y, lam, inter)
            cache[(nrm, inter, 'opt')] = opt
    else:
        opt = cache[(nrm, inter, 'opt')]

    print('cvx opt val = {}'.format(opt))
    print('ps opt val = {}'.format(ps_val))
    assert abs(ps_val - opt) < 1e-2
Example #14
def test_getParams():
    projSplit = ps.ProjSplitFit()
    m = 10
    d = 20
    A = np.random.normal(0, 1, [m, d])
    y = np.random.normal(0, 1, m)
    processDummy = ProcessDummy()
    projSplit.addData(A, y, 2, processDummy)
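    # addData includes an intercept column by default, so there are d + 1 primal variables.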

    nvar = projSplit.numPrimalVars()
    nobs = projSplit.numObservations()
    assert (nvar == d + 1), "test failed, nvar!=d+1"
    assert (nobs == m), "test failed, nobs != m"
Example #15
def test_cyclic(processor, firsttest):
    processor.setStep(5e-1)
    projSplit = ps.ProjSplitFit()
    m = 20
    d = 10
    if getNewOptVals and firsttest:
        A = np.random.normal(0, 1, [m, d])
        y = np.random.normal(0, 1, m)
        cache['Acyclic'] = A
        cache['ycyclic'] = y
    else:
        A = cache['Acyclic']
        y = cache['ycyclic']

    if getNewOptVals and firsttest:
        result = np.linalg.lstsq(A, y, rcond=None)
        xhat = result[0]
        LSval = 0.5 * np.linalg.norm(A.dot(xhat) - y, 2)**2 / m
        cache['optCyclic'] = LSval
    else:
        LSval = cache['optCyclic']

    gamma = 1e-1
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=False, intercept=False)

    #projSplit.addRegularizer(regObj)
    projSplit.run(maxIterations=2000,
                  keepHistory=True,
                  nblocks=5,
                  blockActivation="cyclic")
    #fps = projSplit.getHistory()[0]
    #plt.plot(fps)
    #plt.show()

    ps_opt = projSplit.getObjective()
    print("PS opt = {}".format(ps_opt))
    print("LS opt = {}".format(LSval))
    assert abs(ps_opt - LSval) < 1e-2

    for blks in range(2, 7):
        projSplit.run(maxIterations=2000,
                      keepHistory=True,
                      nblocks=5,
                      blockActivation="cyclic",
                      resetIterate=True,
                      blocksPerIteration=blks)

        ps_opt = projSplit.getObjective()
        assert abs(ps_opt - LSval) < 1e-2
Example #16
def test_other_p(p, process, testNumber):
    process.setStep(1.0)
    gamma = 1e0
    projSplit = ps.ProjSplitFit(gamma)
    m = 40
    d = 20
    if getNewOptVals:
        A = np.random.normal(0, 1, [m, d])
        y = np.random.normal(0, 1, m)
        cache_otherLosses[(testNumber, 'A')] = A
        cache_otherLosses[(testNumber, 'y')] = y
    else:
        A = cache_otherLosses[(testNumber, 'A')]
        y = cache_otherLosses[(testNumber, 'y')]

    projSplit.addData(A, y, p, process, normalize=False, intercept=False)

    lam = 0.01
    regObj = L1(lam, 1.0)
    projSplit.addRegularizer(regObj)

    projSplit.run(primalTol=1e-3,
                  dualTol=1e-3,
                  keepHistory=True,
                  nblocks=2,
                  maxIterations=1000,
                  historyFreq=1)

    ps_val = projSplit.getObjective()

    #ps_vals = projSplit.getHistory()[0]
    #plt.plot(ps_vals)
    #plt.show()

    if getNewOptVals:
        x_cvx = cvx.Variable(d)
        f = (1 / (m * p)) * cvx.pnorm(A @ x_cvx - y, p)**p
        f += lam * cvx.norm(x_cvx, 1)
        prob = cvx.Problem(cvx.Minimize(f))
        prob.solve(verbose=True)
        opt = prob.value
        cache_otherLosses[(testNumber, 'opt')] = opt
    else:
        opt = cache_otherLosses[(testNumber, 'opt')]

    print("ps val  = {}".format(ps_val))
    print("cvx val  = {}".format(opt))

    assert abs(ps_val - opt) < 1e-2
Example #17
def test_lr(processor, norm, inter):
    processor.setStep(1e0)
    projSplit = ps.ProjSplitFit()
    m = 50
    d = 10
    if getNewOptVals:
        A = cache.get('Alr')
        y = cache.get('ylr')
        if A is None:
            A = np.random.normal(0, 1, [m, d])
            y = 2.0 * (np.random.normal(0, 1, m) > 0) - 1.0
            cache['Alr'] = A
            cache['ylr'] = y
    else:
        A = cache.get('Alr')
        y = cache.get('ylr')

    lam = 0.0
    gamma = 1e-4
    projSplit.setDualScaling(gamma)
    projSplit.addData(A,
                      y,
                      'logistic',
                      processor,
                      normalize=norm,
                      intercept=inter)
    projSplit.run(maxIterations=3000, keepHistory=True, nblocks=10)
    ps_opt_val = projSplit.getObjective()

    if getNewOptVals:
        opt = cache.get((inter, 'optlr'))
        if opt is None:
            if inter:
                AwithIntercept = np.zeros((m, d + 1))
                AwithIntercept[:, 0] = np.ones(m)
                AwithIntercept[:, 1:(d + 1)] = A
                A = AwithIntercept

            opt, _ = runCVX_LR(A, y, lam, intercept=inter)
            cache[(inter, 'optlr')] = opt
    else:
        opt = cache.get((inter, 'optlr'))

    print("ps opt is {}".format(ps_opt_val))
    print("cvx opt is {}".format(opt))

    assert abs(opt - ps_opt_val) < 1e-2
Example #18
def test_blockIs1bug(processor):
    m = 40
    d = 10
    if getNewOptVals:
        A = cache.get('AblockBug')
        y = cache.get('yblockBug')
        if A is None:

            A, y = getLSdata(m, d)
            cache['AblockBug'] = A
            cache['yblockBug'] = y
    else:
        A = cache.get('AblockBug')
        y = cache.get('yblockBug')

    projSplit = ps.ProjSplitFit()

    gamma = 1e1
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=False, intercept=False)

    projSplit.run(maxIterations=1000,
                  keepHistory=True,
                  nblocks=1,
                  blockActivation="random")

    ps_opt = projSplit.getObjective()
    print('ps func opt = {}'.format(ps_opt))

    if getNewOptVals:
        LSval = cache.get('optBug')
        if LSval is None:
            result = np.linalg.lstsq(A, y, rcond=None)
            xhat = result[0]
            LSval = 0.5 * np.linalg.norm(A.dot(xhat) - y, 2)**2 / m
            cache['optBug'] = LSval
    else:
        LSval = cache.get('optBug')

    print('LSval = {}'.format(LSval))
    assert abs(LSval - ps_opt) < 1e-2
Example #19
def test_f1backtrack(gf):

    projSplit = ps.ProjSplitFit()
    m = 10
    d = 20
    if getNewOptVals and (gf == 1.0):
        A = np.random.normal(0, 1, [m, d])
        y = np.random.normal(0, 1, m)
        cache['Af1bt'] = A
        cache['yf1bt'] = y
    else:
        A = cache['Af1bt']
        y = cache['yf1bt']

    processor = lp.Forward1Backtrack(growFactor=gf, growFreq=10)

    projSplit.setDualScaling(1e-1)
    projSplit.addData(A, y, 2, processor, intercept=True, normalize=True)

    vec = projSplit.getScaling()
    assert len(vec) == d

    projSplit.run(maxIterations=None,
                  keepHistory=True,
                  primalTol=1e-3,
                  dualTol=1e-3,
                  nblocks=5)
    ps_val = projSplit.getObjective()

    if getNewOptVals and (gf == 1.0):
        AwithIntercept = np.zeros((m, d + 1))
        AwithIntercept[:, 0] = np.ones(m)
        AwithIntercept[:, 1:(d + 1)] = A
        result = np.linalg.lstsq(AwithIntercept, y, rcond=None)
        xhat = result[0]
        LSval = 0.5 * np.linalg.norm(AwithIntercept.dot(xhat) - y, 2)**2 / m
        cache['optf1bt'] = LSval
    else:
        LSval = cache['optf1bt']

    assert ps_val - LSval < 1e-2
Example #20
def test_l1_lasso_blocks(processor, testNumber):
    m = 40
    d = 10
    if getNewOptVals and (testNumber == 0):
        A, y = getLSdata(m, d)
        cache['lassoA'] = A
        cache['lassoy'] = y
    else:
        A = cache['lassoA']
        y = cache['lassoy']

    projSplit = ps.ProjSplitFit()
    gamma = 1e0
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=False, intercept=False)
    lam = 0.01
    step = 1.0
    regObj = L1(lam, step)
    projSplit.addRegularizer(regObj)
    projSplit.run(maxIterations=1000, keepHistory=True, nblocks=1)
    ps_val = projSplit.getObjective()

    if getNewOptVals and (testNumber == 0):
        opt, xopt = runCVX_lasso(A, y, lam)
        cache['optlasso'] = opt
        cache['xlasso'] = xopt
    else:
        opt = cache['optlasso']
        xopt = cache['xlasso']

    print('cvx opt val = {}'.format(opt))
    print('ps opt val = {}'.format(ps_val))
    assert abs(ps_val - opt) < 1e-2

    for numBlocks in range(2, 10):
        projSplit.run(maxIterations=2000, keepHistory=True, nblocks=numBlocks)
        ps_val = projSplit.getObjective()
        #print('cvx opt val = {}'.format(opt))
        #print('ps opt val = {}'.format(ps_val))
        assert abs(ps_val - opt) < 1e-2
Example #21
def test_linear_op_l1(norm,inter):


    m = 40
    d = 10
    p = 15
    if getNewOptVals:
        A = cache.get('AlinL1')
        y = cache.get('ylinL1')
        H = cache.get('HlinL1')
        if A is None:
            A,y = getLSdata(m,d)
            H = np.random.normal(0,1,[p,d])
            cache['AlinL1']=A
            cache['ylinL1']=y
            cache['HlinL1']=H
    else:
        A=cache['AlinL1']
        y=cache['ylinL1']
        H=cache['HlinL1']


    projSplit = ps.ProjSplitFit()
    stepsize = 1e-1
    processor = lp.Forward2Fixed(stepsize)
    gamma = 1e0
    projSplit.setDualScaling(gamma)
    projSplit.addData(A,y,2,processor,normalize=norm,intercept=inter)


    lam = 0.01
    step = 1.0
    regObj = L1(lam,step)
    projSplit.addRegularizer(regObj,linearOp = aslinearoperator(H))
    projSplit.run(maxIterations=5000,keepHistory = True, nblocks = 1,
                  primalTol=1e-3,dualTol=1e-3)
    ps_val = projSplit.getObjective()



    if getNewOptVals:
        opt = cache.get((norm,inter,'optlinL1'))
        if opt is None:
            (m,d) = A.shape
            if norm:
                Anorm = A
                scaling = np.linalg.norm(Anorm,axis=0)
                scaling += 1.0*(scaling < 1e-10)
                Anorm = np.sqrt(m)*Anorm/scaling
                A = Anorm
            if inter:
                AwithIntercept = np.zeros((m,d+1))
                AwithIntercept[:,0] = np.ones(m)
                AwithIntercept[:,1:(d+1)] = A
                A = AwithIntercept

                HwithIntercept = np.zeros((p,d+1))
                HwithIntercept[:,0] = np.zeros(p)
                HwithIntercept[:,1:(d+1)] = H
                H = HwithIntercept
                x_cvx = cvx.Variable(d+1)

            else:
                x_cvx = cvx.Variable(d)

            f = (1/(2*m))*cvx.sum_squares(A@x_cvx - y)
            f += lam*cvx.norm(H @ x_cvx,1)
            prob = cvx.Problem(cvx.Minimize(f))
            prob.solve(verbose=True)
            opt = prob.value
            cache[(norm,inter,'optlinL1')]=opt


    else:
        opt=cache[(norm,inter,'optlinL1')]


    primViol = projSplit.getPrimalViolation()
    dualViol = projSplit.getDualViolation()
    print("primal violation = {}".format(primViol))
    print("dual violation = {}".format(dualViol))

    print("ps val = {}".format(ps_val))
    print("cvx val = {}".format(opt))
    assert ps_val - opt < 1e-2
Example #22
def test_linear_op_data_term(norm,inter,addL1,add2L1,processor,testNumber):


    m = 40
    d = 10
    p = 15
    d2 = 10

    if getNewOptVals and (testNumber==0):

        A,y = getLSdata(m,d)
        H = np.random.normal(0,1,[d2,p])
        cache['AdataTerm']=A
        cache['ydataTerm']=y
        cache['HdataTerm']=H
    else:
        A = cache['AdataTerm']
        y = cache['ydataTerm']
        H = cache['HdataTerm']


    projSplit = ps.ProjSplitFit()

    processor.setStep(5e-1)
    gamma = 1e0
    projSplit.setDualScaling(gamma)

    projSplit.addData(A,y,2,processor,normalize=norm,intercept=inter,
                      linearOp = aslinearoperator(H))

    lam = 0.01
    step = 1.0
    if addL1:
        regObj = L1(lam,step)
        projSplit.addRegularizer(regObj)

    if add2L1:
        regObj2 = L1(lam,step)
        projSplit.addRegularizer(regObj2)

    projSplit.run(maxIterations=10000,keepHistory = True,
                  nblocks = 3,primalTol=1e-3,dualTol=1e-3)
    ps_val = projSplit.getObjective()

    primViol = projSplit.getPrimalViolation()
    dualViol = projSplit.getDualViolation()
    print("primal violation = {}".format(primViol))
    print("dual violation = {}".format(dualViol))



    if getNewOptVals:

        opt = cache.get((addL1,add2L1,inter,norm,'optdata'))

        if opt is None:

            if norm == True:
                scaling = np.linalg.norm(A,axis=0)
                scaling += 1.0*(scaling < 1e-10)
                A = np.sqrt(A.shape[0])*A/scaling
            if inter == True:
                AwithIntercept = np.zeros((m,d+1))
                AwithIntercept[:,0] = np.ones(m)
                AwithIntercept[:,1:(d+1)] = A
                A = AwithIntercept
                HwithIntercept = np.zeros((d2+1,p+1))
                HwithIntercept[:,0] = np.zeros(d2+1)
                HwithIntercept[0] = np.ones(p+1)
                HwithIntercept[0,0] = 1.0
                HwithIntercept[1:(d2+1),1:(p+1)] = H
                H = HwithIntercept


            (m,_) = A.shape
            if inter:
                x_cvx = cvx.Variable(p+1)
            else:
                x_cvx = cvx.Variable(p)

            f = (1/(2*m))*cvx.sum_squares(A@H@x_cvx - y)
            if addL1:
                f += lam*cvx.norm(x_cvx,1)

            if add2L1:
                f += lam*cvx.norm(x_cvx,1)

            prob = cvx.Problem(cvx.Minimize(f))
            prob.solve(verbose=True)
            opt = prob.value
            cache[(addL1,add2L1,inter,norm,'optdata')]=opt

    else:
        opt=cache[(addL1,add2L1,inter,norm,'optdata')]


    print("ps opt = {}".format(ps_val))
    print("cvx opt = {}".format(opt))
    assert(ps_val-opt<1e-2)
Example #23
def test_multi_linear_op_l1(norm,inter,testNumber,numblocks):


    m = 40
    d = 10
    numregs = 5
    if getNewOptVals and (testNumber==0):
        A,y = getLSdata(m,d)
        cache['AmutliLinL1']=A
        cache['ymutliLinL1']=y
        H = []
        for i in range(numregs):
            p = np.random.randint(1,100)
            H.append(np.random.normal(0,1,[p,d]))

        cache['HmultiLinL1']=H
    else:
        H=cache['HmultiLinL1']
        A=cache['AmutliLinL1']
        y=cache['ymutliLinL1']


    projSplit = ps.ProjSplitFit()
    stepsize = 1e-1
    processor = lp.Forward2Fixed(stepsize)
    gamma = 1e0
    if norm and inter:
        gamma = 1e2
    projSplit.setDualScaling(gamma)
    projSplit.addData(A,y,2,processor,normalize=norm,intercept=inter)


    lam = []
    for i in range(numregs):
        lam.append(0.001*(i+1))
        step = 1.0
        regObj = L1(lam[-1],step)
        projSplit.addRegularizer(regObj,linearOp = aslinearoperator(H[i]))

    projSplit.run(maxIterations=5000,keepHistory = True, nblocks = numblocks,
                  primalTol=1e-6,dualTol=1e-6)
    ps_val = projSplit.getObjective()

    if getNewOptVals:
        if norm:
            Anorm = A
            m = Anorm.shape[0]
            scaling = np.linalg.norm(Anorm,axis=0)
            scaling += 1.0*(scaling < 1e-10)
            Anorm = np.sqrt(m)*Anorm/scaling
            A = Anorm

        if inter:
            AwithIntercept = np.zeros((m,d+1))
            AwithIntercept[:,0] = np.ones(m)
            AwithIntercept[:,1:(d+1)] = A
            A = AwithIntercept


        (m,d) = A.shape
        x_cvx = cvx.Variable(d)
        f = (1/(2*m))*cvx.sum_squares(A@x_cvx - y)
        for i in range(numregs):
            if inter:
                f += lam[i]*cvx.norm(H[i] @ x_cvx[1:d],1)
            else:
                f += lam[i]*cvx.norm(H[i] @ x_cvx,1)
        prob = cvx.Problem(cvx.Minimize(f))
        prob.solve(verbose=True)
        opt = prob.value
        cache[(norm,inter,'opt')]=opt
    else:
        opt=cache[(norm,inter,'opt')]


    print("ps val = {}".format(ps_val))
    print("cvx val = {}".format(opt))


    assert ps_val - opt < 1e-2
Example #24
def test_getGamma2(gammain):
    projSplit = ps.ProjSplitFit(gammain)
    gamma = projSplit.getDualScaling()
    assert gamma == gammain, "failed, gamma != gammain"
Example #25
def test_ls_blocks(processor):
    processor.setStep(5e-1)
    projSplit = ps.ProjSplitFit()
    m = 20
    d = 10
    if getNewOptVals:
        A = cache.get('AlsBlocks')
        y = cache.get('ylsBlocks')
        if A is None:
            A = np.random.normal(0, 1, [m, d])
            y = np.random.normal(0, 1, m)
        cache['AlsBlocks'] = A
        cache['ylsBlocks'] = y
    else:
        A = cache.get('AlsBlocks')
        y = cache.get('ylsBlocks')

    gamma = 1e-1
    projSplit.setDualScaling(gamma)
    projSplit.addData(A, y, 2, processor, normalize=False)
    projSplit.run(maxIterations=1500,
                  keepHistory=True,
                  nblocks=10,
                  primalTol=1e-3,
                  dualTol=1e-3)
    assert projSplit.getObjective() >= 0, "objective is not >= 0"
    sol = projSplit.getSolution()
    assert sol.shape == (d + 1, )

    if getNewOptVals:
        LSresid = cache.get('optlsblocks')
        if LSresid is None:
            AwithIntercept = np.zeros((m, d + 1))
            AwithIntercept[:, 0] = np.ones(m)
            AwithIntercept[:, 1:(d + 1)] = A
            result = np.linalg.lstsq(AwithIntercept, y, rcond=None)
            xhat = result[0]

            LSresid = 0.5 * np.linalg.norm(AwithIntercept.dot(xhat) - y,
                                           2)**2 / m
            cache['optlsblocks'] = LSresid
    else:
        LSresid = cache.get('optlsblocks')

    PSresid = projSplit.getObjective()
    assert abs(LSresid - PSresid) < 1e-2

    PSresidErg = projSplit.getObjective(ergodic="simple")
    assert abs(LSresid - PSresidErg) < 1e-2

    PsresidErgWeight = projSplit.getObjective(ergodic="weighted")
    assert abs(LSresid - PsresidErgWeight) < 1e-2

    projSplit.run(maxIterations=1000,
                  keepHistory=True,
                  resetIterate=True,
                  blockActivation="random",
                  nblocks=10)
    PSrandom = projSplit.getObjective()
    assert abs(PSresid - PSrandom) < 1e-2

    projSplit.run(maxIterations=1000,
                  keepHistory=True,
                  resetIterate=True,
                  blockActivation="cyclic",
                  nblocks=10)
    PScyclic = projSplit.getObjective()
    assert abs(PSresid - PScyclic) < 1e-2
Example #26
def test_bad_gamma(gammain):
    projSplit = ps.ProjSplitFit(gammain)
    gamma = projSplit.getDualScaling()
    assert gamma == 1.0,"failed, gamma != 1.0"
Example #27
def test_bad_dims(y):
    psObj = ps.ProjSplitFit()
    obs = np.array([[1,2,3],[4,5,6]])
    with pytest.raises(Exception):
        psObj.addData(obs,y,2)
        print("================")

    if loss == "log":
        loss2use = "logistic"
        backProcess = lp.BackwardLBFGS()
    else:
        loss2use = 2
        backProcess = lp.BackwardCG()

    embed = False

    if run2f:
        print("2f")
        gamma2f = gamma2fs[i]
        t0 = time.time()
        psObj = ps.ProjSplitFit(gamma2f)

        if embed:
            psObj.addData(X,
                          y,
                          loss2use,
                          linearOp=H,
                          normalize=False,
                          process=lp.Forward2Backtrack(),
                          embed=regularizers.L1(scaling=(1 - mu) * lam))
        else:
            # assumed completion: mirrors the embed branch above, without the embedded regularizer
            psObj.addData(X,
                          y,
                          loss2use,
                          linearOp=H,
                          normalize=False,
                          process=lp.Forward2Backtrack())

        # [excerpt truncated here: the Tseng-style baseline run that produces outtseng,
        #  called with iter=maxIter, gamma1=tuned, gamma2=tuned, G=G, Gt=Gt, is not shown]
        results[(i,'tseng')] = outtseng.finalFuncVal

        if False:
            outfrb = algo.for_reflect_back(theFunc,proxfstar_4_tseng,proxg,theGrad,init,iter=maxIter,
                                       gamma0=tuned,gamma1=tuned,G=G,Gt=Gt,verbose=False,getFuncVals=False)
            results[(i,'frb')] =  outfrb.finalFuncVal

        if loss == "log":
            loss2use = "logistic"
        else:
            loss2use = 2

        gamma = 1.0
        if True:
            psObj = ps.ProjSplitFit(gamma)
            proc = lp.Forward2Backtrack()
            psObj.addData(X,y,loss2use,linearOp=H,normalize=False,process=proc,
                          embed=regularizers.L1(scaling=(1-mu)*lam))
            (nbeta,ngamma) = H.shape
            shape = (ngamma-1,ngamma)
            G_for_ps = sl.LinearOperator(shape,matvec=lambda x: x[:-1],rmatvec = lambda x : np.concatenate((x,np.array([0]))))
            psObj.addRegularizer(regularizers.L1(scaling = mu*lam,step=tuned),linearOp=G_for_ps)
            psObj.run(nblocks=10,maxIterations=maxIter,verbose=False,keepHistory=False,
                      primalTol=0.0,dualTol=0.0,blockActivation="greedy")
            results[(i,'ps2fembed_g')] = psObj.getObjective()


        if False:
            psObj = ps.ProjSplitFit(gamma)
            psObj.addData(X,y,loss2use,linearOp=H,normalize=False,process=lp.Forward1Backtrack(),
Example #30
import sys
sys.path.append('../')
import numpy as np
import projSplitFit as ps
### Basic Setup with a Quadratic Loss

### Test on random data
m = 500
d = 1000
np.random.seed(1)
A = np.random.normal(0, 1, [m, d])
r = np.random.normal(0, 1, m)

projSplit = ps.ProjSplitFit()
projSplit.addData(A, r, loss=2, intercept=False)
projSplit.run()
optimalVal = projSplit.getObjective()
z = projSplit.getSolution()
print(f"Objective value LS prob = {optimalVal}")

### changing the dual scaling
gamma = 1e2
projSplit.setDualScaling(gamma)
projSplit.run()
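# Illustrative: re-read the objective after re-running with the new dual scaling
# (getObjective() is the same call used above).
print(f"Objective value with dual scaling {gamma} = {projSplit.getObjective()}")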

### adding a regularizer
from regularizers import L1
lam1 = 0.1
regObj = L1(scaling=lam1)
projSplit.addRegularizer(regObj)
projSplit.run()
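# Illustrative: report the regularized (lasso-type) objective after the run.
lassoObjective = projSplit.getObjective()
print(f"Objective value with L1 regularizer = {lassoObjective}")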