# Example 1
def train_and_test(G,y1,y2,ind_train,ind_test,maxiter_final,hparams1,hparams2):
    """Train two mlogreg classifiers on the training columns of G and
    return their accuracies on the test columns.

    G is sliced along its second axis by ind_train/ind_test; y1 and y2 are
    sliced the same way along their second axis. f-values from training are
    discarded. Returns the tuple (rate1, rate2).
    """
    # Slice the feature matrix once for each split; both classifiers share it.
    G_train = G[:, ind_train].squeeze()
    G_test = G[:, ind_test].squeeze()

    # Fully train one classifier per label set (no warm start).
    tW1, _ = mlogreg.train(G_train, y1[:, ind_train].squeeze(),
                           hparams1, None, maxiter_final)
    tW2, _ = mlogreg.train(G_train, y2[:, ind_train].squeeze(),
                           hparams2, None, maxiter_final)

    # Held-out accuracy for each classifier.
    rate1, _ = mlogreg.accuracy(tW1, G_test, y1[:, ind_test].squeeze())
    rate2, _ = mlogreg.accuracy(tW2, G_test, y2[:, ind_test].squeeze())

    return (rate1, rate2)
# Example 2
def selftest2():
    """Self-test: minimax filter learning with a simple neural-network filter.

    Generates synthetic Gaussian data carrying two independent label sets
    (target y1 with K1 classes, private/subject y2 with K2 classes), then
    runs the outer optimization twice — once with Kiwiel's method and once
    with plain alternating optimization — printing held-out accuracy of
    both classifiers after each outer iteration.

    Fixes over the previous revision:
      * Ntrain was ``np.floor(N / 2.)`` (a float) and then used as a slice
        index, which raises TypeError on modern numpy/Python; it is now an
        integer via floor division.
      * The header string ``'\n\nKiwiel' 's method'`` concatenated to
        "Kiwiels method" (missing apostrophe); corrected.
      * The duplicated evaluate-and-print code for the two methods is
        factored into a local helper.
    """
    from filterAlg_NN import NN1
    from learningAlg import mlogreg

    # Generate data: each (k1, k2) cell is a Gaussian cloud whose mean is
    # the sum of a per-k1 bias and a per-k2 bias, so both label sets are
    # learnable from X.
    D0 = 5            # raw input dimension
    K1 = 2            # number of target classes
    K2 = 3            # number of subject/private classes
    NperClass = 100
    N = NperClass * K1 * K2
    X = np.zeros((D0, NperClass, K1, K2))
    y1 = np.zeros((NperClass, K1, K2), dtype=int)
    y2 = np.zeros((NperClass, K1, K2), dtype=int)
    bias1 = np.random.normal(scale=1.0, size=(D0, K1))
    bias2 = np.random.normal(scale=1.0, size=(D0, K2))
    for k1 in range(K1):
        for k2 in range(K2):
            X[:, :, k1, k2] = (
                np.random.normal(scale=1.0, size=(D0, NperClass))
                + np.tile(bias1[:, k1].reshape((D0, 1)), (1, NperClass))
                + np.tile(bias2[:, k2].reshape((D0, 1)), (1, NperClass)))
            y1[:, k1, k2] = k1
            y2[:, k1, k2] = k2

    X = X.reshape((D0, N))
    y1 = y1.reshape((N, ))
    y2 = y2.reshape((N, ))
    # Integer train/test split point (np.floor would give a float, which is
    # not a valid slice index).
    Ntrain = N // 2
    # Random permutation of sample indices; first half trains, second tests.
    ind = np.random.choice(range(N), size=(N, ), replace=False)
    ind_train = ind[:Ntrain]
    ind_test = ind[Ntrain:]

    ###########################################################################

    maxiter = 30        # outer iterations per method
    maxiter_main = 1    # inner iterations per outer step
    maxiter_final = 50  # iterations for the full evaluation training
    rho = 10.
    lambda0 = 1e-8
    lambda1 = 1e-8
    lambda2 = 1e-8

    d = 2               # filter output dimension

    hparams0 = {
        'D': D0,
        'nhs': [3, 3, d],
        'activation': 'sigmoid',
        'l': lambda0
    }
    hparams1 = {'K': K1, 'l': lambda1, 'd': d}
    hparams2 = {'K': K2, 'l': lambda2, 'd': d}

    # Shared initial weights so both methods start from the same point.
    w0_init = NN1.init(hparams0)
    w1_init = mlogreg.init(hparams1)
    w2_init = mlogreg.init(hparams2)

    def _evaluate(w0):
        # Fully train both classifiers on the current filter output and
        # print their held-out accuracies.
        G_train = NN1.g(w0, X[:, ind_train], hparams0)
        tW1, _ = mlogreg.train(G_train, y1[ind_train], hparams1, None,
                               maxiter_final)
        tW2, _ = mlogreg.train(G_train, y2[ind_train], hparams2, None,
                               maxiter_final)
        G_test = NN1.g(w0, X[:, ind_test], hparams0)
        rate1, _ = mlogreg.accuracy(tW1, G_test, y1[ind_test])
        rate2, _ = mlogreg.accuracy(tW2, G_test, y2[ind_test])
        print('rate_tar= %.2f, rate_subj= %.2f' % (rate1, rate2))

    # Run both optimization methods from the same initialization.
    for method, header in (('kiwiel', "\n\nKiwiel's method"),
                           ('alt', '\n\nAlternating optimization')):
        print(header)
        w0, w1, w2 = w0_init, w1_init, w2_init
        for it in range(maxiter):  # 'it', not 'iter': avoid shadowing builtin
            _evaluate(w0)
            # One outer optimization step on the training split.
            w0, w1, w2 = run(w0, w1, w2, rho, method, maxiter_main,
                             X[:, ind_train], y1[ind_train], y2[ind_train],
                             NN1, mlogreg, mlogreg,
                             hparams0, hparams1, hparams2)
# Example 3 (fragment: begins and ends mid-function)
            if True:  #iter==maxiter_minimax-1:
                G_train = NN1.g(W0, X[:, ind_train_dom1[trial][0]].squeeze(),
                                hparams0)
                #% Full training
                tW1, f1 = mlogreg.train(
                    G_train, y1[:, ind_train_dom1[trial][0]].squeeze(),
                    hparams1, None, maxiter_final)
                tW2, f2 = mlogreg.train(
                    G_train, y2[:, ind_train_dom1[trial][0]].squeeze(),
                    hparams2, None, maxiter_final)

                #% Testing error
                G_test = NN1.g(W0, X[:, ind_test_dom1[trial][0]].squeeze(),
                               hparams0)

                rate1, _ = mlogreg.accuracy(
                    tW1, G_test, y1[:, ind_test_dom1[trial][0]].squeeze())
                rate2, _ = mlogreg.accuracy(
                    tW2, G_test, y2[:, ind_test_dom1[trial][0]].squeeze())

                print 'minimax (NN): rho=%f, d=%d, trial=%d, rate1=%f, rate2=%f\n' % \
                    (rho,d,trial,rate1,rate2)

                rates1_minimax2[j, trial] = rate1
                rates2_minimax2[j, trial] = rate2

                W0_minimax2[j][trial] = W0

            W0,W1,W2 = minimaxFilter.run(W0,W1,W2,rho,'alt',1,\
                X[:,ind_train_dom1[trial][0]].squeeze(), \
                y1[:,ind_train_dom1[trial][0]].squeeze(),\
                y2[:,ind_train_dom1[trial][0]].squeeze(),\