Example #1
def main():
    lis = pimadataf.give_datainshared()

    rest_setx, rest_sety = lis[0]  # training set: tuple of two shared variables (features, labels)
    test_setx, test_sety = lis[1]  # test set: same tuple of shared variables

    y = T.ivector('y')

    newmlp = MLP(n_in, n_out, n_hid, rest_setx, rest_sety, test_setx,
                 test_sety)

    # alternative error measures, kept for reference:
    #error=0.5*T.mean((y-output.reshape((x.shape[0],)))**2)
    #finalerror=T.mean(abs(y-(output.reshape((x.shape[0],)))))
    fun = theano.function([], rest_sety)   # returns the training labels
    fund = theano.function([], test_sety)  # returns the test labels
    params = newmlp.params
    cost = newmlp.cost_func(y)
    learning_rate = 0.01
    # one gradient expression per parameter, then plain SGD updates
    gparams = [T.grad(cost, param) for param in params]
    updates = [(param, param - learning_rate * gparam)
               for param, gparam in zip(params, gparams)]

    train_model = theano.function([],
                                  cost,
                                  updates=updates,
                                  givens={y: rest_sety})
    #fun1=theano.function([x],output.reshape((x.shape[0],)))
    test_model = theano.function([],
                                 newmlp.find_error(y),
                                 givens={y: test_sety})
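    # Note: `givens` substitutes the shared label arrays for `y` when the
    # functions are compiled, so train_model() and test_model() take no
    # arguments and read data already stored in shared variables.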
    print(fun())  # show the training labels once before training starts
    epochs = 1000
    for i in range(1, epochs + 1):  # run exactly `epochs` iterations
        #p=train_model(rest_setx.get_value(),fun())

        p = train_model()

        if i % 100 == 0:
            # every 100 epochs, encode the weights into a chromosome and
            # decode them back (a no-op if the two methods are consistent)
            st = newmlp.turn_weights_into_chromosome()
            newmlp.set_weights_from_chromosome(st)
            print("been through here")

        #print(fun1(rest_setx.get_value()))
        #print(fun()-fun1(rest_setx.get_value()))
        #heyout=fun1(rest_setx.get_value())
        #print(np.where(heyout>0.5,1,0))
        #print(w1.get_value(),w2.get_value())
        #print("mid",printmidout(rest_setx.get_value()))
        #print("out",printout(rest_setx.get_value()))
        print(p)  # cost for this epoch
    print("here testing", test_model())
Example #2
	def __init__(self, rng, max_hidden_units, size=50, limittup=(-1, 1)):
		self.dimtup = pimadataf.get_dimension()
		rest_set, test_set = pimadataf.give_data()
		tup = pimadataf.give_datainshared()
		self.rng = rng
		self.size = size
		self.max_hidden_units = max_hidden_units
		self.list_chromo = self.aux_pop(size, limittup)  # a NumPy array of chromosomes
		self.fits_pops = []
				
		self.trainx = rest_set[0]
		self.trainy = rest_set[1]
		self.testx = test_set[0]
		self.testy = test_set[1]
		
		self.strainx, self.strainy = tup[0]
		self.stestx, self.stesty = tup[1]
		self.net_err = Network.Neterr(
			inputdim=self.dimtup[0], outputdim=self.dimtup[1],
			arr_of_net=self.list_chromo,
			trainx=self.trainx, trainy=self.trainy,
			testx=self.testx, testy=self.testy,
			strainx=self.strainx, strainy=self.strainy,
			stestx=self.stestx, stesty=self.stesty)
		self.net_dict = {}  # one back-propagation network per hidden-layer size, keyed by n_hid
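aux_pop is not shown either. A plausible minimal version follows, assuming each chromosome is a flat weight vector drawn uniformly from the interval in limittup; the chromosome-length formula is purely illustrative, not taken from the example.

def aux_pop(self, size, limittup):
    low, high = limittup
    n_in, n_out = self.dimtup
    # hypothetical encoding: weights and biases for up to max_hidden_units
    # hidden units, i.e. (n_in + n_out + 2) genes per unit
    chromo_len = self.max_hidden_units * (n_in + n_out + 2)
    # population: one chromosome per row, entries uniform in [low, high)
    return self.rng.uniform(low=low, high=high, size=(size, chromo_len))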
Example #3
import numpy as np
import theano
import theano.tensor as T

import pimadataf

n_in, n_out = pimadataf.get_dimension()  # dataset dimensions (cf. Example #2)
n_hid = 10                               # assumed hidden-layer size
rng = np.random.RandomState(1234)

# Glorot-style uniform initialisation for the input->hidden weights
W_values = np.asarray(rng.uniform(low=-np.sqrt(6. / (n_in + n_hid)),
                                  high=np.sqrt(6. / (n_in + n_hid)),
                                  size=(n_in, n_hid)),
                      dtype=theano.config.floatX)

w1 = theano.shared(value=W_values, name='w1', borrow=True)
b_values = np.zeros((n_hid, ), dtype=theano.config.floatX)
b1 = theano.shared(value=b_values, name='b1', borrow=True)

# hidden->output weights and biases are initialised to zero
w2 = theano.shared(value=np.zeros((n_hid, n_out), dtype=theano.config.floatX),
                   name='w2',
                   borrow=True)
b2 = theano.shared(value=np.zeros((n_out, ), dtype=theano.config.floatX),
                   name='b2',
                   borrow=True)

lis = pimadataf.give_datainshared()

rest_setx, rest_sety = lis[0]  # training set: tuple of two shared variables (features, labels)
test_setx, test_sety = lis[1]  # test set: same tuple of shared variables

x = T.matrix('x')
y = T.ivector('y')

# hidden layer: tanh(x . w1 + b1)
lin_midout = T.dot(x, w1) + b1
midout = T.tanh(lin_midout)
params1 = [w1, b1]
printmidout = theano.function([x], midout[0])  # debug: hidden activations of the first sample

# output layer: sigmoid(midout . w2 + b2)
lin_out = T.dot(midout, w2) + b2
output = T.nnet.sigmoid(lin_out)
printout = theano.function([x], output[0])  # debug: network output for the first sample
params2 = [w2, b2]
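The snippet stops after defining the forward pass; training params1 + params2 still requires a cost and an update rule. A minimal continuation in the spirit of Example #1 could look as follows, assuming binary 0/1 labels in y; the cross-entropy cost is an assumption that matches the sigmoid output, not part of the original example.

params = params1 + params2
pred = output.reshape((x.shape[0],))  # one sigmoid output per sample
cost = T.mean(T.nnet.binary_crossentropy(pred, y))

learning_rate = 0.01
updates = [(p, p - learning_rate * T.grad(cost, p)) for p in params]

train_model = theano.function([], cost, updates=updates,
                              givens={x: rest_setx, y: rest_sety})

for epoch in range(1000):
    print(train_model())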