Example #1
def Adam(SubsetData, W2, wnn, alpha, RegW, sigma, K=None):
    import numpy as np
    from grad import gradcoswx1coswx2, Lossfunction2
    import copy

    if K is None:
        from sklearn.metrics import pairwise
        K = pairwise.rbf_kernel(SubsetData, gamma=sigma**2 / 2)

    Nfeat = np.shape(W2)[0]
    err2 = 2
    tol = 1e-5
    WTMP = copy.copy(wnn)      # history of iterates, one column per step
    woldnn = copy.copy(wnn)
    loss = []
    it = 0
    m = np.zeros((Nfeat, 1))   # first-moment (mean) estimate
    v = np.zeros((Nfeat, 1))   # second-moment (uncentred variance) estimate
    b1 = 0.9                   # beta_1: decay rate of the first moment
    b2 = 0.999                 # beta_2: decay rate of the second moment
    eps = 1e-8
    while err2 > tol:
        it = it + 1
        GNew = gradcoswx1coswx2(SubsetData, W2, wnn, sigma, K)

        # Exponential moving averages of the gradient and its square
        m = b1 * m + (1 - b1) * GNew
        v = b2 * v + (1 - b2) * (GNew**2)
        # Bias-corrected step size: alpha * sqrt(1 - b2^t) / (1 - b1^t)
        alphaN = alpha * np.sqrt(1 - b2**it) / (1 - b1**it)

        # Adam step plus an L2 (weight-decay) term scaled by RegW
        wnn = wnn - alphaN * (RegW * wnn + m / (np.sqrt(v) + eps))
        WTMP = np.concatenate((WTMP, wnn), axis=1)
        # Lossfunction2 takes a different argument order when W2 has a
        # single column
        r, s = np.shape(W2)
        if s == 1:
            loss.append(Lossfunction2(SubsetData, wnn, sigma, K))
        else:
            loss.append(Lossfunction2(SubsetData, W2, sigma, K, wnn))
        print('Loss is', loss[it - 1])

        # Stop once the iterate barely moves, or after 250 iterations
        err2 = np.linalg.norm(woldnn - wnn)
        if err2 <= tol:
            break
        elif it > 250:
            break
        woldnn = wnn

    return wnn, loss
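
A minimal usage sketch for this optimizer, assuming the project's grad.py (with gradcoswx1coswx2 and Lossfunction2) is importable; the data shapes and hyperparameter values below are hypothetical stand-ins:

import numpy as np

rng = np.random.default_rng(0)
SubsetData = rng.standard_normal((200, 5))  # hypothetical sample matrix
W2 = rng.standard_normal((5, 1))            # single-column frequency matrix
wnn0 = rng.standard_normal((5, 1))          # initial iterate being optimised

wnn_fit, loss = Adam(SubsetData, W2, wnn0, alpha=0.1, RegW=1e-3, sigma=1.0)
print('final loss:', loss[-1])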
Example #2
def Adagrad(SubsetData, W2, wnn, alpha, RegW, sigma, K=None):
    import numpy as np
    from grad import gradcoswx1coswx2, Lossfunction2
    import copy

    if K is None:
        from sklearn.metrics import pairwise
        K = pairwise.rbf_kernel(SubsetData, gamma=sigma**2 / 2)

    Nfeat = np.shape(W2)[0]
    err2 = 2
    tol = 1e-5
    WTMP = copy.copy(wnn)      # history of iterates, one column per step
    woldnn = copy.copy(wnn)
    gradnorm = []
    loss = []
    it = 0
    CumGrad = np.zeros((Nfeat, 1))  # running sum of squared gradients
    eps = 1e-5

    while err2 > tol:
        it = it + 1
        GNew = gradcoswx1coswx2(SubsetData, W2, wnn, sigma, K)
        gradnorm.append(np.linalg.norm(GNew))

        # Per-coordinate step size shrinks as squared gradients accumulate
        CumGrad = CumGrad + GNew**2
        alphaN = alpha / (eps + np.sqrt(CumGrad))

        wnn = wnn - alphaN * (RegW * wnn + GNew)
        WTMP = np.concatenate((WTMP, wnn), axis=1)
        r, s = np.shape(W2)
        if s == 1:
            loss.append(Lossfunction2(SubsetData, wnn, sigma, K))
        else:
            loss.append(Lossfunction2(SubsetData, W2, sigma, K, wnn))
        print('Loss is', loss[it - 1])

        # Stop once the iterate barely moves, or after 250 iterations
        err2 = np.linalg.norm(woldnn - wnn)
        if err2 <= tol:
            break
        elif it > 250:
            break
        woldnn = wnn


    return wnn, gradnorm, loss
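
To see Adagrad's per-coordinate scaling in isolation, here is a self-contained toy run of the same accumulation rule on a badly scaled quadratic; it has no dependence on grad.py and every value is illustrative:

import numpy as np

A = np.diag([10.0, 0.1])        # f(w) = 0.5 * w^T A w, gradient A w
w = np.array([[1.0], [1.0]])
CumGrad = np.zeros((2, 1))
alpha, eps = 0.1, 1e-5
for _ in range(200):
    g = A @ w
    CumGrad += g**2             # accumulate squared gradients
    w = w - (alpha / (eps + np.sqrt(CumGrad))) * g
print(w.ravel())                # both coordinates shrink despite the 100x scale gap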
Example #3
def BoldDrive(SubsetData, W2, wnn, alphaN, RegW, sigma, K=None):
    import numpy as np
    from grad import gradcoswx1coswx2, Lossfunction2

    if K is None:
        from sklearn.metrics import pairwise
        K = pairwise.rbf_kernel(SubsetData, gamma=sigma**2 / 2)

    err2 = 2
    tol = 1e-5
    WTMP = wnn
    woldnn = wnn
    gradnorm = []
    loss = []
    it = 0
    while err2 > tol:
        r, s = np.shape(W2)
        if s == 1:
            loss.append(Lossfunction2(SubsetData, wnn, sigma, K))
        else:
            loss.append(Lossfunction2(SubsetData, W2, sigma, K, wnn))
        print('Loss is', loss[it])

        GNew = gradcoswx1coswx2(SubsetData, W2, wnn, sigma, K)
        gradnorm.append(np.linalg.norm(GNew))
        # Decaying step size: alpha_t = alphaN / (1 + alphaN * RegW * t)
        alpha = alphaN / (1 + (alphaN * RegW * it))
        wnn = wnn - alpha * (RegW * wnn + GNew)
        WTMP = np.concatenate((WTMP, wnn), axis=1)

        err2 = np.linalg.norm(woldnn - wnn)
        if err2 <= tol:
            break
        elif it > 250:
            break
        woldnn = wnn
        it = it + 1

    return wnn, loss
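
Despite its name, the function above uses a deterministic 1/(1 + alphaN*RegW*t) decay rather than the classic bold-driver rule, which grows the rate after a loss decrease and cuts it (rejecting the step) after an increase. For comparison, a minimal sketch of that classic rule on a toy quadratic; every name and constant here is illustrative:

import numpy as np

def f(w):                            # toy objective: 0.5 * ||w||^2
    return 0.5 * (w.T @ w).item()

w = np.ones((3, 1))
alpha, old_loss = 0.5, np.inf
for _ in range(50):
    w_try = w - alpha * w            # gradient of the toy objective is w
    new_loss = f(w_try)
    if new_loss < old_loss:          # success: accept and grow the rate
        w, old_loss, alpha = w_try, new_loss, alpha * 1.05
    else:                            # failure: reject the step, halve the rate
        alpha *= 0.5
print(old_loss, alpha)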
Example #4
def AdaptiveRateBatchCheck(SubsetData, W2, wnn, alphaN, RegW, sigma, K=None):
    import numpy as np
    from grad import gradcoswx1coswx2, Lossfunction2
    import copy

    if K is None:
        from sklearn.metrics import pairwise
        K = pairwise.rbf_kernel(SubsetData, gamma=sigma**2 / 2)

    err2 = 2
    tol = 1e-5
    niter = 251
    WTMP = wnn                    # history of iterates, one column per step
    woldnn = 0
    loss = np.zeros((niter, 1))
    it = 0
    E_old = 500                   # loss at the previous checkpoint
    E = 0
    alpha = np.zeros((niter, 1))  # step size used at every iteration
    Ncheck = 20                   # check progress every Ncheck iterations
    end_ncheck = 0
    n_check = 0
    while err2 > tol:
        r, s = np.shape(W2)
        if s == 1:
            loss[it] = Lossfunction2(SubsetData, wnn, sigma, K)
        else:
            loss[it] = Lossfunction2(SubsetData, W2, sigma, K, wnn)
        print('Loss is ', loss[it])

        # Every Ncheck iterations compare the loss against the previous
        # checkpoint; if it rose, roll back to the best iterate (and its
        # step size) from the window that just ended.
        if np.mod(it, Ncheck) == 0 and it > 0:
            print('Error check', it)
            E = loss[it]
            st_ncheck = end_ncheck
            n_check = n_check + 1
            end_ncheck = Ncheck * n_check
            if E > E_old:
                print('Error greater', it)
                index = np.argmin(loss[st_ncheck:end_ncheck - 1])
                index = index + st_ncheck
                print('Index', index)
                E = loss[index]
                wnn = WTMP[:, index]
                wnn = wnn[:, np.newaxis]
                alphaN = alpha[index]
                print('alphaN ', alphaN)
            else:
                alphaN = alpha[it - 1]

        GNew = gradcoswx1coswx2(SubsetData, W2, wnn, sigma, K)
        GNew = (RegW * wnn) + GNew      # add the L2 regularisation term
        alpha[it] = alphaN / (1 + (alphaN * RegW * it))

        wnn = wnn - alpha[it] * GNew
        WTMP = np.concatenate((WTMP, wnn), axis=1)

        if it > 0:
            err2 = np.linalg.norm(woldnn - wnn)

        if err2 <= tol:
            break
        elif it >= niter - 1:
            break

        woldnn = copy.copy(wnn)
        E_old = copy.copy(E)
        it = it + 1
        print('Iteration', it)

    # Return the best iterate seen; restrict the argmin to entries that were
    # actually filled, otherwise the zero-initialised tail always wins.
    minindex = np.argmin(loss[:it + 1])
    wnn = WTMP[:, minindex]
    wnn = wnn[:, np.newaxis]
    return wnn, loss
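
The checkpoint-and-rollback idea above can be sketched independently of this codebase: keep every iterate, and every Ncheck steps restart from the best one in the last window whenever the loss has risen. The function and objective below are hypothetical:

import numpy as np

def rollback_descent(grad, loss_fn, w, alpha=0.9, Ncheck=20, niter=250):
    history, losses = [w], [loss_fn(w)]
    E_old = np.inf
    for it in range(1, niter):
        w = w - alpha * grad(w)
        history.append(w)
        losses.append(loss_fn(w))
        if it % Ncheck == 0:                       # periodic error check
            if losses[it] > E_old:                 # got worse: roll back
                best = it - Ncheck + int(np.argmin(losses[it - Ncheck:it]))
                w = history[best]
            E_old = losses[it]
    return history[int(np.argmin(losses))]         # best iterate overall

w_best = rollback_descent(lambda w: w, lambda w: 0.5 * float(w @ w), np.ones(4))
print(w_best)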
Example #5
def IPprop(SubsetData, W2, wnn, alphaN, RegW, sigma, K=None):
    import numpy as np
    from grad import gradcoswx1coswx2, Lossfunction2
    import copy

    if K is None:
        from sklearn.metrics import pairwise
        K = pairwise.rbf_kernel(SubsetData, gamma=sigma**2 / 2)

    err2 = 2
    tol = 1e-5
    WTMP = wnn
    woldnn = 0
    gradnorm = []
    loss = []
    it = 0
    deltaold = 0.01                              # initial per-coordinate step
    delta = deltaold
    deltaMin = np.tile(0.0, (np.size(wnn), 1))   # lower bound on step sizes
    deltaMax = np.tile(1.0, (np.size(wnn), 1))   # upper bound on step sizes
    nplus = 1.1                                  # growth factor on sign agreement
    nminus = 0.25                                # shrink factor on sign flip
    GOld = 0.0
    deltaW = 0.0
    old_E = 500
    while err2 > tol:
        # After 51 warm-up steps, restart from the best iterate seen so far
        # and switch to the sign-based (Rprop-style) updates below.
        if it == 51:
            index = np.argmin(loss)
            wnn = WTMP[:, index]
            wnn = wnn[:, np.newaxis]
            deltaold = 0.01
            old_E = 10000
            print('Index', index)

        r, s = np.shape(W2)
        if s == 1:
            loss.append(Lossfunction2(SubsetData, wnn, sigma, K))
        else:
            loss.append(Lossfunction2(SubsetData, W2, sigma, K, wnn))

        E = loss[it]
        GNew = gradcoswx1coswx2(SubsetData, W2, wnn, sigma, K)
        GNew = (RegW * wnn) + GNew      # add the L2 regularisation term

        if it < 51:
            # Warm-up phase: plain gradient descent with a decaying step size
            gradnorm.append(np.linalg.norm(GNew))
            alpha = alphaN / (1 + (alphaN * RegW * it))
            wnn = wnn - alpha * GNew
            WTMP = np.concatenate((WTMP, wnn), axis=1)
        else:
            # Rprop phase: grow a coordinate's step while successive gradients
            # agree in sign, shrink it after a sign flip, keep it otherwise.
            sigGrad = GNew * GOld
            deltaPlus = np.minimum(deltaold * nplus, deltaMax) * (sigGrad > 0)
            deltaminus = np.maximum(deltaold * nminus,
                                    deltaMin) * (sigGrad < 0)
            deltaequal = deltaold * (sigGrad == 0)
            delta = deltaPlus + deltaminus + deltaequal

            # Step against the current gradient's sign; where the sign flipped
            # and the loss rose, undo the previous step (iRprop+-style).
            deltaW = -np.sign(GNew) * delta * (sigGrad >= 0) - deltaW * (
                (sigGrad < 0) * (E > old_E))
            wnn = wnn + deltaW

        print('Loss is ', loss[it])

        if it > 0:
            err2 = np.linalg.norm(woldnn - wnn)

        if err2 <= tol:
            break
        elif it > 250:
            break

        woldnn = copy.copy(wnn)
        deltaold = copy.copy(delta)
        GOld = copy.copy(GNew)
        old_E = copy.copy(E)
        it = it + 1
        print('Iteration', it)

    return wnn, loss
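
For reference, the sign-agreement rule that the post-warm-up phase is built on (Rprop) can be run stand-alone; this is a minimal sketch on a toy quadratic, with illustrative constants matching nplus and nminus above:

import numpy as np

A = np.array([[3.0, 0.0], [0.0, 0.5]])   # toy quadratic: gradient is A w
w = np.array([2.0, -2.0])
delta = np.full(2, 0.01)                 # per-coordinate step sizes
g_old = np.zeros(2)
for _ in range(150):
    g = A @ w
    agree = g * g_old                    # sign agreement with the last gradient
    delta = np.where(agree > 0, np.minimum(delta * 1.1, 1.0),
            np.where(agree < 0, np.maximum(delta * 0.25, 1e-6), delta))
    w = w - np.sign(g) * delta           # step against the gradient's sign
    g_old = g
print(w)                                 # oscillates ever closer to the origin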