Code Example #1
# Assumed imports for this snippet (PyBrain, NumPy, scikit-learn); `eval`
# below is a project-local metrics module, not the Python builtin.
import numpy as np
from sklearn.metrics import explained_variance_score
from pybrain.structure import RecurrentNetwork, LinearLayer, FullConnection
from pybrain.supervised.trainers import BackpropTrainer


def trainFunc(params):
    # Unpack one hyperparameter configuration for this training run.
    iter, trainds, validds, input_size, hidden, func, eta, lmda, epochs = params
    print('Iter:', iter, 'Epochs:', epochs, 'Hidden_size:', hidden, 'Eta:',
          eta, 'Lamda:', lmda, 'Activation:', func)

    # Build network
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(func(hidden, name='hidden'))
    n.addModule(LinearLayer(hidden, name='context'))
    n.addOutputModule(LinearLayer(1, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='in_to_hidden'))
    n.addConnection(FullConnection(n['hidden'], n['out'],
                                   name='hidden_to_out'))
    # Elman-style recurrent copy of the hidden state into the context layer.
    n.addRecurrentConnection(FullConnection(n['hidden'], n['context']))
    n.sortModules()

    trainer = BackpropTrainer(n,
                              trainds,
                              learningrate=eta,
                              weightdecay=lmda,
                              momentum=0.1,
                              shuffle=False)
    trainer.trainEpochs(epochs)
    # Score on the validation set; NaNs from diverged runs are zeroed out.
    pred = np.nan_to_num(n.activateOnDataset(validds))
    validerr = eval.calc_RMSE(validds['target'], pred)
    varscore = explained_variance_score(validds['target'], pred)
    return validerr, varscore, n
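
A minimal sketch of how trainFunc might be driven; the dataset construction and every hyperparameter value below are illustrative assumptions, not taken from the original project, and the project's local eval metrics module must be importable for the call to succeed.

# Hypothetical driver for trainFunc (all values are made up).
from pybrain.datasets import SupervisedDataSet
from pybrain.structure import TanhLayer

trainds = SupervisedDataSet(3, 1)   # 3 inputs, 1 target
validds = SupervisedDataSet(3, 1)
for x, y in [((0.1, 0.2, 0.3), 0.4), ((0.2, 0.3, 0.4), 0.5)]:
    trainds.addSample(x, y)
    validds.addSample(x, y)

params = (0, trainds, validds, 3, 8, TanhLayer, 0.01, 0.0001, 50)
validerr, varscore, net = trainFunc(params)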
Code Example #3
File: pyb_ann_const.py  Project: pcolo/regret
# print len(tstdata)
# print len(trndata)

trainer = BackpropTrainer(n, DS, learningrate=0.1, momentum=0.5, weightdecay=0.0001)
trainer.trainUntilConvergence(verbose=True, maxEpochs=100)

# print trainer.trainUntilConvergence()
# trainer.trainOnDataset(trndata, 100)

#print n.activate((2, 1, 3, 0))
#print n.activate((2, 1, 3, 90))

## ----------------------- Results & Performance measurements ---------------------------- ##

yhat = n.activateOnDataset(tstdata)

#print yhat
#print tstdata['target']

def vect_se(X, y):
    # Per-sample squared errors between predictions y and dataset targets.
    return [(float(y[i]) - float(X['target'][i])) ** 2 for i in range(len(X))]


def mse(X, y):
    se = vect_se(X, y)
    return float(sum(se)) / float(len(se))

print(mse(tstdata, yhat))
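
The hand-rolled mse above duplicates a standard metric; assuming scikit-learn is available to this project, the same value can be checked with:

from sklearn.metrics import mean_squared_error

# Equivalent to mse(tstdata, yhat) above.
print(mean_squared_error(tstdata['target'], yhat))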
Code Example #4
File: rnn.py  Project: wikii122/nemi
class RecurrentNeuralNetwork:
    """
    Recurrent neural network.
    """
    def __init__(self, nin, nout):
        singleton.append(self)  # module-level registry, assumed defined elsewhere in rnn.py

        self.inn = nin
        self.outn = nout

        # Build the recurrent topology explicitly.
        self.n = RecurrentNetwork()
        self.n.addInputModule(LinearLayer(nin, name='in'))
        self.n.addOutputModule(LinearLayer(nout, name='out'))

        self.n.addModule(SigmoidLayer(8, name='hidden2'))
        self.n.addModule(TanhLayer(nin + nout // 2, name='hidden1'))  # note: binds as nin + (nout // 2)
        self.n.addModule(BiasUnit(name='bias'))
        self.n.addModule(LSTMLayer(5, name='memory'))

        self.n.addConnection(FullConnection(self.n['in'], self.n['hidden1']))
        self.n.addConnection(FullConnection(self.n['bias'], self.n['hidden1']))
        self.n.addConnection(FullConnection(self.n['hidden1'], self.n['hidden2']))
        self.n.addConnection(FullConnection(self.n['hidden2'], self.n['out']))
        self.n.addConnection(FullConnection(self.n['hidden1'], self.n['memory']))
        self.n.addConnection(FullConnection(self.n['memory'], self.n['hidden2']))
        self.n.addConnection(FullConnection(self.n['in'], self.n['hidden2']))
        
        self.n.addRecurrentConnection(FullConnection(self.n['hidden1'], self.n['hidden1']))
        self.n.addRecurrentConnection(FullConnection(self.n['memory'], self.n['hidden1']))
        self.n.sortModules()

    def set_learning_data(self, dataset):
        """
        Set dataset used to train network.
        """
        self.ds_learn = dataset

    def train(self, epochs=100):
        """
        Train the network
        """
        #self.n.reset()
        trainer = BackpropTrainer(self.n, self.ds_learn, verbose=True)
        # trainer.setData(self.ds_learn)
        return trainer.trainEpochs(epochs=epochs)

    def validate_error(self, dataset):
        """
        Return error value for given dataset
        """
        # Validator.MSE expects output/target arrays; ModuleValidator.MSE
        # (from pybrain.tools.validation) takes a (module, dataset) pair.
        #self.n.reset()
        return ModuleValidator.MSE(self.n, dataset)

    def calculate(self, dataset):
        """
        Return network response for given dataset
        """
        #self.n.reset()
        return self.n.activateOnDataset(dataset)
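
A minimal usage sketch for this class; the dimensions and sequence data below are invented for illustration, and singleton is assumed to be a module-level list defined near the top of rnn.py.

from pybrain.datasets import SequentialDataSet

singleton = []  # registry assumed by __init__

net = RecurrentNeuralNetwork(nin=4, nout=2)

ds = SequentialDataSet(4, 2)
ds.newSequence()
ds.addSample((0.1, 0.2, 0.3, 0.4), (1.0, 0.0))
ds.addSample((0.2, 0.3, 0.4, 0.5), (0.0, 1.0))

net.set_learning_data(ds)
net.train(epochs=10)
print(net.validate_error(ds))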
Code Example #5
har_train_test_data = har_data.iloc[0:2000, 0:17]
har_train_test_label = har_data.iloc[0:2000, 17:18]

alldata = ClassificationDataSet(17, nb_classes=5)

for i in range(len(har_train_test_data)):
    t = int(har_train_test_label.iloc[i]) - 1
    alldata.addSample(har_train_test_data.iloc[i], [t])

# One-hot encode the class labels before splitting into test/train halves.
alldata._convertToOneOfMany(bounds=[0, 1])
tstdata, trndata = alldata.splitWithProportion(0.5)

# RecurrentNetwork() takes no layer sizes; buildNetwork is the PyBrain helper
# that accepts them, so the intended call was presumably:
rnn = buildNetwork(17, 20, 30, 14, 13, 12, 11, 7, 5, recurrent=True)
#fnn = buildNetwork(17,11,8,6,5,outclass=SoftmaxLayer);
#fnn = buildNetwork(17,11,8,6,5,bias=True);

neural_trainer = BackpropTrainer(rnn, trndata, verbose=True, learningrate=0.01)
neural_trainer.trainUntilConvergence(maxEpochs=100)

out = rnn.activateOnDataset(tstdata)
out = out.argmax(axis=1)

# Compare predicted class indices against the one-hot targets.
out2 = tstdata.getField('target').argmax(axis=1)
length = len(out)
count = 0
for i in range(len(out)):
    if out[i] != out2[i]:
        count += 1
errorrate = float(count) / float(length)
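
The counting loop above reproduces a utility PyBrain already ships; assuming pybrain.utilities is importable, the same quantity (as a percentage) is:

from pybrain.utilities import percentError

# Fraction of misclassified samples, scaled to percent.
print(percentError(out, out2))  # equals errorrate * 100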

Code Example #6

def FitNeuralNetworkDeptAnimate(dept=1, num=1000):
	# Relies on module-level globals defined elsewhere in the source file:
	# input_file_path, train_file_name, test_file_name, hidden_size_ratio,
	# num_hidden_layer, weightdecay, learningrate, momentum, epochs, and
	# SDS (an alias for PyBrain's SupervisedDataSet).

	train_file = input_file_path + train_file_name[0] + str(dept) + train_file_name[1]
	test_file = input_file_path + test_file_name[0] + str(dept) + test_file_name[1]

	train = np.loadtxt( train_file, delimiter = ' ' )
	test = np.loadtxt( test_file, delimiter = ' ' )
	print(len(train))
	x_train = train[0:num, 0 : -1]
	y_train = train[0:num, -1]

	y_max = max(y_train)
	y_min = min(y_train)
	y_train = (y_train - y_min) / (y_max-y_min)
	y_train = y_train.reshape(-1,1)

	input_size = x_train.shape[1]
	target_size = y_train.shape[1]

	x_test = test[0:num // 4, 0:-1]
	y_test = test[0:num // 4, -1]
	y_test = y_test.reshape(-1,1)

	
	ds_test = SDS( input_size, target_size )
	ds_test.setField( 'input', x_test )
	ds_test.setField( 'target', y_test )

	ds = SDS( input_size, target_size )
	ds.setField( 'input', x_train )
	ds.setField( 'target', y_train )


	hidden_size = input_size*hidden_size_ratio


	n = RecurrentNetwork()


	n.addInputModule(LinearLayer(input_size, name='in'))
	n.addModule(BiasUnit('bias'))
	for i in range(0, num_hidden_layer+1):
		hidden_name = 'hidden'+str(i)
		n.addModule(SigmoidLayer(hidden_size, name=hidden_name))
	n.addOutputModule(LinearLayer(target_size, name='out'))

	n.addConnection(FullConnection(n['in'], n['hidden0'], name='c1'))
	next_hidden = 'hidden0'

	for i in range(0,num_hidden_layer ):
		current_hidden = 'hidden'+str(i)
		next_hidden = 'hidden'+str(i+1)
		n.addConnection(FullConnection(n[current_hidden], n[next_hidden], name='c'+str(i+2)))

	n.addConnection(FullConnection(n[next_hidden], n['out'], name='c'+str(num_hidden_layer+2)))

	n.addConnection(FullConnection(n['bias'], n['hidden0'], name='c'+str(num_hidden_layer+7)))


	n.sortModules()
	print(n)


	trainer = BackpropTrainer(n,ds ,weightdecay=weightdecay, learningrate=learningrate, lrdecay=1.0, momentum = momentum)
	
	
	plt.ion()
	fig = plt.figure()
	ax = fig.add_subplot(111)

	plt.annotate("Dept1", (10,-15000))
	plt.annotate("Dept2", (180,-30000))
	plt.annotate("Dept3", (300,-15000))
	plt.annotate("Dept4", (450,-30000))
	plt.annotate("Dept5", (600,-15000))
	plt.annotate("Dept6", (700,-30000))
	plt.annotate("Dept7", (900,-15000))
	
	line1, = ax.plot([],[],'-b',label='train')
	line2, = ax.plot([],[],'-r',label='test')
	ax.legend()

	dummy = input("Plot the graph?")

	for i in range(epochs):
		error = trainer.train()
		print "Epoch: %d, Error: %7.4f" % (i, error)


		p_train = n.activateOnDataset( ds )
		p_test = n.activateOnDataset( ds_test )
		plot_result = np.vstack((p_train*(y_max-y_min) + y_min, p_test*(y_max-y_min) + y_min ))


		p_test_print = p_test.reshape(-1,len(p_test))
		p_test_print = p_test_print*(y_max-y_min) + y_min

		line1.set_ydata(y_train*(y_max-y_min) + y_min)
		line1.set_xdata(range(len(y_train)))
		line2.set_ydata(plot_result)
		line2.set_xdata(range(len(plot_result)))
		ax.relim()
		ax.autoscale_view()
		plt.draw()
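
A sketch of the module-level setup this function assumes before it can be called; every path and value below is a hypothetical placeholder, not taken from the original project.

import numpy as np
import matplotlib.pyplot as plt
from pybrain.datasets import SupervisedDataSet as SDS
from pybrain.structure import (RecurrentNetwork, LinearLayer, SigmoidLayer,
                               BiasUnit, FullConnection)
from pybrain.supervised.trainers import BackpropTrainer

# Hypothetical globals; the real values live elsewhere in the project.
input_file_path = './data/'
train_file_name = ('train_dept', '.txt')
test_file_name = ('test_dept', '.txt')
hidden_size_ratio = 2
num_hidden_layer = 1
weightdecay, learningrate, momentum, epochs = 0.0001, 0.01, 0.1, 100

FitNeuralNetworkDeptAnimate(dept=1, num=1000)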
Code Example #7
    1.0, 2.0, 3.0, 4.0, 3.0, 2.0
])
x2test = np.array([
    10.0, 11.0, 12.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.5, 8.0, 9.0, 10.0,
    12.0, 12.5, 11.0, 10.0, 9.0, 8.0, 7.5
])
ycorrect = iir1(x1test, x2test)
#ycorrect = func(x1train,x2train)

testds = SupervisedDataSet(2, 1)
for i in range(len(x1test)):
    testds.addSample((x1test[i], x2test[i]), (0.0,))  # dummy targets; only the inputs are used
#for i in range(len(x1train)):
#    testds.addSample((x1train[i], x2train[i]), (0.0))

ytest = net.activateOnDataset(testds)
diff = list_diff(ytest, ycorrect)
mse = np.multiply(diff, diff).mean()

print "Ycorrect=", ycorrect
print "Ypredicted=", ytest
#print "diff=", diff
print "mse=", mse

plotLists(ytest, ycorrect, diff)

# Reset the network, so that we can retrain
#net.reset()


# estimate the parameters of an FIR filter using linear regression
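
The closing comment points at estimating FIR filter taps by linear regression; below is a minimal NumPy sketch of that idea, with the filter order, data, and all names assumed for illustration.

import numpy as np

def estimate_fir(x, y, ntaps=4):
    """Least-squares estimate of taps h such that y[n] ~ sum_k h[k] * x[n-k]."""
    # Design matrix whose k-th column is x delayed by k samples (zero-padded).
    X = np.column_stack([np.concatenate([np.zeros(k), x[:len(x) - k]])
                         for k in range(ntaps)])
    h, *_ = np.linalg.lstsq(X, y, rcond=None)
    return h

# Example: recover the taps of a known FIR filter from input/output data.
rng = np.random.default_rng(0)
x = rng.standard_normal(200)
true_h = np.array([0.5, -0.3, 0.2, 0.1])
y = np.convolve(x, true_h)[:len(x)]
print(estimate_fir(x, y, ntaps=4))  # ~ [0.5, -0.3, 0.2, 0.1]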