import numpy as np  # needed throughout; `run` and `_f` are the minimax drivers defined elsewhere in this module


def selftest2():
    ## Simple neural network filter
    from filterAlg_NN import NN1
    from learningAlg import mlogreg

    # Generate data: two label sets (y1 with K1 classes, y2 with K2 classes),
    # each class adding its own Gaussian bias to the features.
    D0 = 5
    K1 = 2
    K2 = 3
    NperClass = 100
    N = NperClass * K1 * K2
    #l = 1.0e-3

    X = np.zeros((D0, NperClass, K1, K2))
    y1 = np.zeros((NperClass, K1, K2), dtype=int)
    y2 = np.zeros((NperClass, K1, K2), dtype=int)
    bias1 = np.random.normal(scale=1.0, size=(D0, K1))
    bias2 = np.random.normal(scale=1.0, size=(D0, K2))
    for k1 in range(K1):
        for k2 in range(K2):
            X[:, :, k1, k2] = \
                np.random.normal(scale=1.0, size=(D0, NperClass)) \
                + np.tile(bias1[:, k1].reshape((D0, 1)), (1, NperClass)) \
                + np.tile(bias2[:, k2].reshape((D0, 1)), (1, NperClass))
            y1[:, k1, k2] = k1 * np.ones((NperClass,))
            y2[:, k1, k2] = k2 * np.ones((NperClass,))
    X = X.reshape((D0, N))
    y1 = y1.reshape((N,))
    y2 = y2.reshape((N,))

    # Random train/test split
    Ntrain = N // 2  # integer, so it can be used as a slice index
    #Ntest = N - Ntrain
    ind = np.random.choice(range(N), size=(N,), replace=False)
    ind_train = ind[:Ntrain]
    ind_test = ind[Ntrain:]

    ###########################################################################

    maxiter = 30
    maxiter_main = 1
    maxiter_final = 50
    rho = 10.
    lambda0 = 1e-8
    lambda1 = 1e-8
    lambda2 = 1e-8
    d = 2

    hparams0 = {'D': D0, 'nhs': [3, 3, d], 'activation': 'sigmoid', 'l': lambda0}
    hparams1 = {'K': K1, 'l': lambda1, 'd': d}
    hparams2 = {'K': K2, 'l': lambda2, 'd': d}

    w0_init = NN1.init(hparams0)
    w1_init = mlogreg.init(hparams1)
    w2_init = mlogreg.init(hparams2)

    print "\n\nKiwiel's method"
    w0 = w0_init
    w1 = w1_init
    w2 = w2_init
    for iter in range(maxiter):
        #print (W0**2).sum()
        G_train = NN1.g(w0, X[:, ind_train], hparams0)
        # Full training
        tW1, f1 = mlogreg.train(G_train, y1[ind_train], hparams1, None,
                                maxiter_final)
        tW2, f2 = mlogreg.train(G_train, y2[ind_train], hparams2, None,
                                maxiter_final)
        # Testing error
        G_test = NN1.g(w0, X[:, ind_test], hparams0)
        rate1, _ = mlogreg.accuracy(tW1, G_test, y1[ind_test])
        rate2, _ = mlogreg.accuracy(tW2, G_test, y2[ind_test])
        print 'rate_tar= %.2f, rate_subj= %.2f' % (rate1, rate2)

        # Run one iteration of the minimax solver
        w0, w1, w2 = run(w0, w1, w2, rho, 'kiwiel', maxiter_main,
                         X[:, ind_train], y1[ind_train], y2[ind_train],
                         NN1, mlogreg, mlogreg,
                         hparams0, hparams1, hparams2)
        #val = _f(w0, (w1, w2),
        #         rho, X[:, ind_train], y1[ind_train], y2[ind_train],
        #         NN1, mlogreg, mlogreg,
        #         hparams0, hparams1, hparams2)
        #print 'val=', val, '\n'

    print '\n\nAlternating optimization'
    w0 = w0_init
    w1 = w1_init
    w2 = w2_init
    for iter in range(maxiter):
        #print (W0**2).sum()
        G_train = NN1.g(w0, X[:, ind_train], hparams0)
        # Full training
        tW1, f1 = mlogreg.train(G_train, y1[ind_train], hparams1, None,
                                maxiter_final)
        tW2, f2 = mlogreg.train(G_train, y2[ind_train], hparams2, None,
                                maxiter_final)
        # Testing error
        G_test = NN1.g(w0, X[:, ind_test], hparams0)
        rate1, _ = mlogreg.accuracy(tW1, G_test, y1[ind_test])
        rate2, _ = mlogreg.accuracy(tW2, G_test, y2[ind_test])
        print 'rate_tar= %.2f, rate_subj= %.2f' % (rate1, rate2)

        # Run one iteration of the alternating solver
        w0, w1, w2 = run(w0, w1, w2, rho, 'alt', maxiter_main,
                         X[:, ind_train], y1[ind_train], y2[ind_train],
                         NN1, mlogreg, mlogreg,
                         hparams0, hparams1, hparams2)
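
# The test above drives two opaque modules, filterAlg_NN.NN1 (the filter
# network) and learningAlg.mlogreg (the per-task classifier), through the
# interface init / g / train / accuracy. Neither module appears in this
# section. The class below is a minimal, illustrative stand-in for the
# mlogreg side only, with signatures inferred from the calls in selftest2();
# the class name, optimizer choice, and weight layout are assumptions, not
# the actual implementation.
from scipy.optimize import minimize


class mlogreg_sketch(object):
    @staticmethod
    def init(hparams):
        # One weight column per class, plus a bias row, on d-dim features.
        return 0.01 * np.random.randn(hparams['d'] + 1, hparams['K'])

    @staticmethod
    def _nll(w_flat, G, y, K, l):
        # Regularized negative log-likelihood of softmax regression.
        W = w_flat.reshape((G.shape[0] + 1, K))
        Z = W[:-1].T.dot(G) + W[-1][:, None]   # (K, N) logits
        Z -= Z.max(axis=0)                     # numerical stability
        logp = Z - np.log(np.exp(Z).sum(axis=0))
        return -logp[y, np.arange(G.shape[1])].mean() + l * (W ** 2).sum()

    @staticmethod
    def train(G, y, hparams, w0=None, maxiter=50):
        K, l = hparams['K'], hparams['l']
        if w0 is None:
            w0 = mlogreg_sketch.init(hparams)
        res = minimize(mlogreg_sketch._nll, w0.ravel(),
                       args=(G, y, K, l), method='L-BFGS-B',
                       options={'maxiter': maxiter})
        return res.x.reshape(w0.shape), res.fun

    @staticmethod
    def accuracy(W, G, y):
        # Returns (fraction correct, predicted labels), matching the
        # `rate, _ = mlogreg.accuracy(...)` call pattern above.
        pred = (W[:-1].T.dot(G) + W[-1][:, None]).argmax(axis=0)
        return (pred == y).mean(), pred

# Substituting mlogreg_sketch for mlogreg in selftest2() exercises the same
# call pattern, though the real module's weight format may differ.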
for trial in range(ntrials):
    for j in range(len(ds)):
        # Sweep over projection dimensions d, one filter per trial/dimension
        d = ds[j]
        nhs = [20, d]
        hparams0 = {'D': D, 'nhs': nhs, 'activation': 'sigmoid', 'l': lambda0}
        hparams1 = {'K': K1, 'l': lambda1, 'd': d}
        hparams2 = {'K': K2, 'l': lambda2, 'd': d}

        if False:
            # Random initialization of the filter weights
            W0 = NN1.init(hparams0)
        else:
            print 'Pre-training by autoencoder'
            W0 = NN1.initByAutoencoder(
                X[:, ind_train_dom1[trial][0]].squeeze(), hparams0)
        W1 = mlogreg.init(hparams1)
        W2 = mlogreg.init(hparams2)

        for iter in range(maxiter_minimax):
            if True:  # iter == maxiter_minimax - 1:
                G_train = NN1.g(W0, X[:, ind_train_dom1[trial][0]].squeeze(),
                                hparams0)
                #% Full training
                tW1, f1 = mlogreg.train(
                    G_train, y1[:, ind_train_dom1[trial][0]].squeeze(),
                    hparams1, None, maxiter_final)
                tW2, f2 = mlogreg.train(
                    G_train, y2[:, ind_train_dom1[trial][0]].squeeze(),
                    hparams2, None, maxiter_final)
                #% Testing error
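
# NN1.initByAutoencoder above pre-trains the filter network on unlabeled
# inputs before the minimax iterations begin; its implementation is not part
# of this section. The function below sketches one standard scheme such a
# routine could use: greedy layer-wise training of tied-weight sigmoid
# autoencoders. The function name, learning rate, and epoch count are
# assumptions made for illustration only, not the module's actual code.
import numpy as np


def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))


def pretrain_by_autoencoder(X, hparams, lr=0.1, epochs=100):
    """X: (D, N) data matrix; hparams['nhs']: hidden sizes, e.g. [20, d]."""
    weights = []
    H = X
    for nh in hparams['nhs']:
        D, N = H.shape
        W = 0.01 * np.random.randn(nh, D)   # encoder weights, decoder tied
        for _ in range(epochs):
            Z = sigmoid(W.dot(H))           # encode: (nh, N)
            E = W.T.dot(Z) - H              # reconstruction error: (D, N)
            dZ = W.dot(E) * Z * (1.0 - Z)   # backprop through the encoder
            # decoder term Z E^T plus encoder term dZ H^T, averaged over N
            W -= lr * (dZ.dot(H.T) + Z.dot(E.T)) / N
        H = sigmoid(W.dot(H))               # encoding feeds the next layer
        weights.append(W)
    return weights

# A W0 built this way would play the role of NN1.initByAutoencoder's output,
# up to whatever weight format NN1 actually stores internally.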