Example #1
def runNN(load):

    saveNNFile = 'backup.p'

    # resume from a saved checkpoint if requested, else start fresh
    if load:
        a = loadNN(saveNNFile)
    else:
        a = nnet.nnet()

    # continue from the checkpointed epoch, saving after every epoch
    for e in range(a.currEpoch, 10):
        a.currEpoch = e
        a.train()
        saveNN(a, saveNNFile)
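# saveNN and loadNN are not shown in the excerpt; the .p filename suggests
# pickle, so a minimal sketch under that assumption:
import pickle

def saveNN(net, path):
    # serialize the whole network object, including its currEpoch counter
    with open(path, 'wb') as f:
        pickle.dump(net, f)

def loadNN(path):
    # restore the network exactly as it was last saved
    with open(path, 'rb') as f:
        return pickle.load(f)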
Example #2
File: dbn.py Project: ForrestPi/ML
 def dbn2nnet(self, n_out):
     # accept a single output size or a list of additional layer sizes
     if isinstance(n_out, list):
         size = self.size + n_out
     else:
         size = self.size + [n_out]
     ann = nnet(layers=size, active_fun='sigm', output_fun='softmax',
                momentum=0, show=True)
     # seed the feedforward net with the pretrained RBM weights (transposed)
     for ii in xrange(1, ann.n - 1):
         ann.layer[ii].w = self.lrbm[ii].w.T
         ann.layer[ii].b = self.lrbm[ii].c.T
     for ii in xrange(1, ann.n):
         print('w shape=%s' % (ann.layer[ii].w.shape,))
         print('b shape=%s' % (ann.layer[ii].b.shape,))
     return ann
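# A hypothetical call, assuming a pretrained DBN instance named dbn:
ann = dbn.dbn2nnet(10)          # append a single 10-unit softmax output layer
ann = dbn.dbn2nnet([50, 10])    # or append several layers at once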
Example #3
    def __init__(self):
        # shape of the network's flattened (serialized) parameter vector
        self.serialized_parameters_shape = (100,)
        self.config = get_config()

        self.nnet = nnet.nnet()
Example #4
    def build_and_train_nnet(self, X, Y):

        y_onehot = self.class_to_onehot(Y)
        n_in = X.shape[1]
        n_nodes = self.l1_size
        n_out = y_onehot.shape[1]

        x = T.dmatrix()
        y = T.imatrix()

        # parameter shapes, in order: bias1, bias2, weights1, weights2
        template = [(n_nodes,), (n_out,), (n_in, n_nodes), (n_nodes, n_out)]

        # initialize nnet and build the symbolic cost
        model = nnet(input=x, n_in=n_in, n_nodes=n_nodes, n_out=n_out)
        cost = model.neg_log_likelihood(y)

        g_b1 = T.grad(cost, model.b1)
        g_b2 = T.grad(cost, model.b2)
        g_w1 = T.grad(cost, model.w1)
        g_w2 = T.grad(cost, model.w2)

        # flatten the gradients into one vector, matching the template order
        g_params = T.concatenate([g_b1.flatten(), g_b2.flatten(),
                                  g_w1.flatten(), g_w2.flatten()])

        getcost = theano.function([x, y], outputs=cost)
        getdcost = theano.function([x, y], outputs=g_params)

        # climin expects plain functions of (params, inputs, targets)
        def cost_fcn(params, inputs, targets):
            model.set_params(params, template)
            return getcost(inputs, targets)

        def cost_grad(params, inputs, targets):
            model.set_params(params, template)
            return getdcost(inputs, targets)

        # minibatch stream for SGD; full-batch args for NCG and L-BFGS
        args = climin.util.iter_minibatches([X, y_onehot], self.batch_size,
                                            [0, 0])
        batch_args = itertools.repeat(([X, y_onehot], {}))
        args = ((i, {}) for i in args)
        init_params = model.get_params(template)

        opt_sgd = climin.GradientDescent(init_params, cost_fcn, cost_grad,
                                         steprate=0.01, momentum=0.99,
                                         args=args, momentum_type="nesterov")

        opt_ncg = climin.NonlinearConjugateGradient(init_params, cost_fcn,
                                                    cost_grad, args=batch_args)

        opt_lbfgs = climin.Lbfgs(init_params, cost_fcn, cost_grad,
                                 args=batch_args)

        # choose the optimizer
        if self.optimizer == 'sgd':
            optimizer = opt_sgd
        elif self.optimizer == 'ncg':
            optimizer = opt_ncg
        else:
            optimizer = opt_lbfgs

        # do the actual training
        costs = []
        for itr_info in optimizer:
            if itr_info['n_iter'] > self.max_iters:
                break
            costs.append(itr_info['loss'])

        # climin updates init_params in place, so this loads the trained values
        model.set_params(init_params, template)
        return model, costs
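# A hypothetical driver; the wrapper class name and hyperparameters below
# are illustrative, not from the source:
clf = NNetClassifier(l1_size=50, batch_size=100, max_iters=500,
                     optimizer='sgd')
model, costs = clf.build_and_train_nnet(X_train, Y_train)
print('final training loss: %f' % costs[-1])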
Example #5
# For NN >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

# Import images for train and test set ----------------------------------------------------------------

n_images_per_class = 100
train_images, train_labels = loadImages(train_set, n_images_per_class)
n_images_per_class = 10  # smaller test set: fewer images per class
test_images, test_labels = loadImages(test_set, n_images_per_class)

# -----------------------------------------------------------------------------------------------------

# Classification process ------------------------------------------------------------------------------

print('Classification with NN...')
predicted_nn = nnet(train_images, test_images, train_labels, test_labels,
                    classes_list)

# -----------------------------------------------------------------------------------------------------

# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

# Showing results images and predictions --------------------------------------------------------------

random.seed()
images_to_print = [
    random.randint(0, n_images_per_class * n_classes - 1) for i in range(4)
]
# predicted_knn presumably comes from an earlier KNN section of the full script
print(
    'Predicted by KNN: ', ' '.join('%20s' % classes_list[int(predicted_knn[i])]
                                   for i in images_to_print))
print(
    'Predicted by NN:  ', ' '.join('%20s' % classes_list[int(predicted_nn[i])]
                                   for i in images_to_print))
Example #6
    inPutz = raw_input("Command: ")
    # (reconstructed dispatch head; the original excerpt begins inside the
    # "train" branch below)
    if inPutz == "train":
        # outName = raw_input("Output Name: ")

        # epoch = raw_input("Epoch: ")
        # while not epoch.isdigit():
        #     epoch = raw_input("\tEnter Integer value for Epoch: ")
        # epoch = int(epoch)

        # rate = float(raw_input("Rate: "))

        netLoc = "data/nn.init"
        trainLoc = "data/training.csv"
        outName = "data/sahearts.trained"
        epoch = 500
        rate = 0.1

        # load the initial net, train it, and save the trained weights
        net = nnet(netLoc)
        net.train(trainLoc, epoch, rate)
        net.writeFile(outName)
    elif inPutz == "gen":
        genInit()
        exit()
    elif inPutz == "test":
        # netLoc = getFile("Trained net location: ")
        # testLoc = getFile("Testing Set: ")
        # outName = raw_input("Output Filename: ")

        netLoc = "data/sahearts.trained"
        testLoc = "data/test.csv"
        outName = "data/sahearts.res"

        net = nnet(netLoc)
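        # The excerpt ends here. A hypothetical completion, assuming the
        # nnet class exposes a test method alongside train and writeFile:
        net.test(testLoc, outName)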
Example #7
#!/usr/bin/env python
import numpy as np
import nnet
import preprocessing

# feature matrix and binary labels prepared by the preprocessing module
x = preprocessing.X
y = preprocessing.Y

# build the model for the per-sample input shape
model = nnet.nnet(x.shape[1:])
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy'],
)

model.fit(
    x,
    y,
    batch_size=32,
    epochs=10,
    validation_split=0.1,
)
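# compile and fit above follow the Keras Model API; if nnet.nnet returns a
# Keras model, a held-out evaluation could be added (x_test/y_test assumed):
loss, acc = model.evaluate(x_test, y_test, batch_size=32)
print('test accuracy: %.3f' % acc)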