Example no. 1
    def __init__(self, layerSizes, weight_range, filter_size, stride, padding, weight_initialization="He"):
        
        self.MLP = MultiLayerPerceptron(layerSizes,None,"He")
        self.stride = stride
        self.padding = padding

        filter_count = 32
        weight_variance = 2/(filter_size*filter_size*filter_count)       
        self.conv_filters = np.random.randn(filter_count,filter_size,filter_size) * math.sqrt(weight_variance)
        self.conv_filters_biases = np.random.randn(filter_count,) * math.sqrt(weight_variance)
        
        """
def train(dataPassed):
    shape = np.shape(dataPassed)
    # Creates the neural network; the number of features should stay the same.
    # You can change the number of hidden units if you want; I'm just using 3/4 of the input size.
    network = MLP.MLP(shape[1] - 1, int(shape[1] * .75))
    np.random.shuffle(dataPassed)
    # The number of epochs is 500; you can train for longer if you want.
    for i in range(500):
        for j in range(len(dataPassed)):
            random = ra.randint(0, shape[0] - 1)
            network.train([dataPassed[random][0:500]],
                          [[dataPassed[random][500]]])
    return network
Example no. 3
def cnn_mnist(dataset):
    learning_rate = 0.1
    n_epochs = 200
    batch_size = 500

    #nkerns : number of kernels on each layer
    nkerns = [20, 10]

    train_set_x, train_set_y = dataset[0]
    valid_set_x, valid_set_y = dataset[1]
    test_set_x, test_set_y = dataset[2]

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size

    index = T.lscalar()
    x = T.matrix('x')
    y = T.ivector('y')

    print('Building the model...')

    # rng is used below but was not defined in the original snippet; a NumPy
    # RandomState is assumed here (seed value arbitrary).
    rng = numpy.random.RandomState(1234)

    layer0_input = x.reshape((batch_size, 1, 28, 28))

    layer0 = CNN.ConvPoolLayer(rng,
                               input=layer0_input,
                               image_shape=(batch_size, 1, 28, 28),
                               filter_shape=(nkerns[0], 1, 5, 5),
                               poolsize=(2, 2))

    layer1 = CNN.ConvPoolLayer(rng,
                               input=layer0.output,
                               image_shape=(batch_size, nkerns[0], 12, 12),
                               filter_shape=(nkerns[1], nkerns[0], 5, 5),
                               poolsize=(2, 2))

    layer2_input = layer1.output.flatten(2)

    layer2 = MultiLayerPerceptron.HiddenLayer(rng,
                                              input=layer2_input,
                                              n_in=nkerns[1] * 4 * 4,
                                              n_out=80,
                                              activation=T.tanh)

    layer3 = LogisticRegression.LogisticRegression(input=layer2.output,
                                                   n_in=80,
                                                   n_out=10)

    cost = layer3.NLL(y)

    test_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        })
    print('1. Test built.')
    validate_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        })
    print('2. Valid built.')
    params = layer3.params + layer2.params + layer1.params + layer0.params

    grads = T.grad(cost, params)

    updates = [(param_i, param_i - learning_rate * grad_i)
               for param_i, grad_i in zip(params, grads)]
    print('3. Derivative calculated.')
    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size]
        })
    print('4. Train built.')
    print('Train model...')
    patience = 10000
    patience_increase = 2
    improvement_threshold = 0.995
    validation_frequency = min(n_train_batches, patience // 2)

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0
    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1

        for minibatch_index in range(n_train_batches):
            iter = (epoch - 1) * n_train_batches + minibatch_index

            #           if iter % 100 == 0:
            #               print('training @ iter = ',iter)
            cost_ij = train_model(minibatch_index)

            if (iter + 1) % validation_frequency == 0:
                validation_losses = [
                    validate_model(i) for i in range(n_valid_batches)
                ]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %.2f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100))

                if this_validation_loss < best_validation_loss:
                    if this_validation_loss < best_validation_loss * improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    test_losses = [
                        test_model(i) for i in range(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)

                    print(
                        '\tepoch %i, minibatch %i/%i, test error of best model %.2f %%'
                        % (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100))

            if patience <= iter:
                done_looping = True
                break

    end_time = timeit.default_timer()
    print('Optimization complete')
    print(
        'Best validation score of %f %% obtained at iteration %i, with test performance %.2f %%'
        % (best_validation_loss * 100, best_iter + 1, test_score * 100))
Example no. 4
def test_mlp(learning_rate=0.01,
             L1_reg=0.00,
             L2_reg=0.0001,
             n_epochs=1000,
             dataset='mnist.pkl.gz',
             batch_size=20,
             n_hidden=500):

    # Load dataset
    datasets = LoadData.load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    # Construct the model
    print '... building the model'

    index = T.lscalar()
    x = T.matrix('x')
    y = T.ivector('y')

    rng = np.random.RandomState(1234)

    classifier = MultiLayerPerceptron.MLP(rng, x, 28 * 28, n_hidden, 10)
    cost = classifier.negative_log_likelihood(
        y) + L1_reg * classifier.L1 + L2_reg * classifier.L2

    # Function to train the model
    gparams = [T.grad(cost, param) for param in classifier.params]
    updates = [(param, param - learning_rate * gparam)
               for param, gparam in zip(classifier.params, gparams)]
    train_model = theano.function(
        inputs=[index],
        outputs=[cost],
        updates=updates,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size]
        })

    # Functions to test and validate the model
    valid_model = theano.function(
        inputs=[index],
        outputs=[classifier.errors(y)],
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        })

    test_model = theano.function(
        inputs=[index],
        outputs=[classifier.errors(y)],
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        })

    # Train the model
    print 'Training the model ...'

    patience = 10000
    patience_increase = 2
    improvement_threshold = 0.995
    validation_frequency = min(n_train_batches, patience / 2)

    best_validation_loss = np.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            train_model(minibatch_index)
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                validation_losses = [
                    valid_model(i) for i in xrange(n_valid_batches)
                ]
                this_validation_loss = np.mean(validation_losses)

                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                if this_validation_loss < best_validation_loss:
                    if this_validation_loss < best_validation_loss * improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    test_losses = [
                        test_model(i) for i in xrange(n_test_batches)
                    ]
                    test_score = np.mean(test_losses)

                    print((
                        '     epoch %i, minibatch %i/%i, test error of best model %f %%'
                    ) % (epoch, minibatch_index + 1, n_train_batches,
                         test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print(('Optimization complete. Best validation score of %f %% '
           'obtained at iteration %i, with test performance %f %%') %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
Example no. 5
def test_cnn(dataset_matrix_r, label_vector_r, learning_rate=0.1, n_epochs=120, nkerns=[30, 90], batch_size=250):
    
    # Load dataset
    datasets = LoadData.load_data_multi(dataset_matrix_r, label_vector_r)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    # Construct the model
    print '... building the model'

    index = T.lscalar()  
    x = T.matrix('x')  
    y = T.ivector('y')  

    rng = np.random.RandomState(1234)
    
    layer0_input = x.reshape((batch_size, 5, 5, 10))
    layer0_input = layer0_input.dimshuffle(0, 3, 1, 2)

    layer0 = ConvPoolLayer.ConvPoolLayer(rng, 
                                         layer0_input, 
                                         filter_shape=(nkerns[0], 10, 3, 3),
                                         image_shape=(batch_size, 10, 5, 5))
                                        
    layer1 = ConvPoolLayer.ConvPoolLayer(rng,
                                         layer0.output,
                                         filter_shape=(nkerns[1], nkerns[0], 3, 3),
                                         image_shape=(batch_size, nkerns[0], 3, 3))
                                         
    layer3 = MultiLayerPerceptron.HiddenLayer(rng, 
                                              layer1.output.flatten(2),
                                              nkerns[1], 
                                              120, 
                                              activation=T.tanh)
                                                                                    
    layer5 = LogisticLayer.LogisticLayer(layer3.output, 120, 9)
    
    cost = layer5.negative_log_likelihood(y)
    
    # Function to train the model
    params = layer5.params + layer3.params + layer1.params + layer0.params
    gparams = T.grad(cost, params)
    updates = [(param, param - learning_rate * gparam) for param, gparam in zip(params, gparams)]
    train_model = theano.function(inputs=[index],
                                  outputs=[cost],
                                  updates=updates,
                                  givens={x:train_set_x[index * batch_size: (index+1) * batch_size],
                                          y:train_set_y[index * batch_size: (index+1) * batch_size]})
                                          
    # Functions to test and validate the model
    valid_model = theano.function(inputs=[index],
                                  outputs=[layer5.errors(y)],
                                  givens={x:valid_set_x[index * batch_size: (index+1) * batch_size],
                                          y:valid_set_y[index * batch_size: (index+1) * batch_size]})
                                          
    test_model = theano.function(inputs=[index],
                                 outputs=[layer5.errors(y)],
                                 givens={x:test_set_x[index * batch_size: (index+1) * batch_size],
                                         y:test_set_y[index * batch_size: (index+1) * batch_size]})
                                         
    print '... training the model'
    patience = 10000  
    patience_increase = 2  
    improvement_threshold = 0.995  
    validation_frequency = min(n_train_batches, patience / 2)
                                  
    best_validation_loss = np.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print 'training @ iter = ', iter
            
            train_model(minibatch_index)

            if (iter + 1) % validation_frequency == 0:

                validation_losses = [valid_model(i) for i in xrange(n_valid_batches)]
                this_validation_loss = np.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %  (epoch, 
                                                                              minibatch_index + 1, 
                                                                              n_train_batches,
                                                                              this_validation_loss * 100.))

                if this_validation_loss < best_validation_loss:

                    if this_validation_loss < best_validation_loss *  improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    test_losses = [
                        test_model(i)
                        for i in xrange(n_test_batches)
                    ]
                    test_score = np.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
                          
    return params
Example no. 6
def mlp_mnist(dataset):
    learning_rate = 0.01
    L1_reg = 0
    L2_reg = 0.0001
    n_epochs = 1000
    batch_size = 20
    n_hidden = 500

    train_set_x, train_set_y = dataset[0]
    valid_set_x, valid_set_y = dataset[1]
    test_set_x, test_set_y = dataset[2]

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size

    index = T.lscalar()
    x = T.matrix('x')
    y = T.ivector('y')

    rng = numpy.random.RandomState(1234)
    classifier = MultiLayerPerceptron.MLP(rng=rng,
                                          input=x,
                                          n_in=28 * 28,
                                          n_hidden=n_hidden,
                                          n_out=10)

    result = None

    cost = (classifier.NLL(y) + L1_reg * classifier.L1 +
            L2_reg * classifier.L2_sqr)

    print('building model...')
    test_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        })
    print('Test built.')
    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        })

    gparams = [T.grad(cost, param) for param in classifier.params]

    updates = [(param, param - learning_rate * gparam)
               for param, gparam in zip(classifier.params, gparams)]
    print('Valid built.')
    train_model = theano.function(
        inputs=[index],
        outputs=cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size]
        })
    print('Train built.')
    print('training model...')
    patience = 10000
    patience_increase = 2
    improvement_threshold = 0.995
    validation_frequency = min(n_train_batches, patience // 2)

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0
    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1

        for minibatch_index in range(n_train_batches):
            minibatch_avg_cost = train_model(minibatch_index)

            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                validation_losses = [
                    validate_model(i) for i in range(n_valid_batches)
                ]
                this_validation_loss = numpy.mean(validation_losses)

                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100))

                if this_validation_loss < best_validation_loss:
                    if this_validation_loss < best_validation_loss * improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    test_losses = [
                        test_model(i) for i in range(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)

                    print(
                        'epoch %i, minibatch %i/%i, test error of best model %f %%'
                        % (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100))

            if patience <= iter:
                done_looping = True
                break
    end_time = timeit.default_timer()
    print(
        'Optimization complete with best validation score of %f %%, with test performance %f %%'
        % (best_validation_loss * 100, test_score * 100))
    print('The code ran for %d epochs, with %f epochs/sec' %
          (epoch, epoch / (end_time - start_time)))
Example no. 7
def run():

    if combo_algotype.get() != 'Multi Layer Per.':
        if combo_algotype.current() == -1:
            tk.messagebox.showinfo("ERROR !", "Please Choose Algorithm")
            return
        if comboBox1.current() == -1 or comboBox2.current() == -1:
            tk.messagebox.showinfo("ERROR !", "Please Choose Features")
            return

        if comboBox3.current() == -1 or comboBox4.current() == -1:
            tk.messagebox.showinfo("ERROR !", "Please Choose Classes")
            return

        if entry_learningRate.get() == "":
            tk.messagebox.showinfo("ERROR !", "Please Enter Learning Rate")
            return

        if entry_epochs.get() == "":
            tk.messagebox.showinfo("ERROR !", "Please Enter Epochs")
            return

        if comboBox5.current() == -1:
            tk.messagebox.showinfo("ERROR !",
                                   "Please Choose is it Bias or not")
            return

        if combo_algotype.get() == 'Adaline':
            if entry_threshold.get() == "":
                tk.messagebox.showinfo("ERROR !", "Please Enter MSE threshold")
                return

        data.set_data(comboBox1.current(), comboBox2.current(),
                      comboBox3.current(), comboBox4.current())

        inputs = data.training_input
        outputs = data.training_output

        perceptron = SingleLayerPerceptron(float(entry_learningRate.get()),
                                           comboBox5.current())

        if (combo_algotype_sel.get() == 'Single Layer per.'):
            msg = perceptron.algorithm(inputs, outputs,
                                       int(entry_epochs.get()))
            tk.messagebox.showinfo("Event", msg)

        elif (combo_algotype_sel.get() == 'Adaline'):
            msg = perceptron.adaline(inputs, outputs, int(entry_epochs.get()),
                                     float(entry_threshold.get()))
            tk.messagebox.showinfo("Event", msg)
        perceptron.draw_line(data)

        inputs_test = data.testing_input
        outputs_test = data.testing_output

        msg2 = perceptron.testing(inputs_test, outputs_test)
        tk.messagebox.showinfo("Event", msg2)
        show_confusion_matrix(perceptron.confusion_matrix)

        if entry_entry_x1.get() == "" or entry_entry_x2.get() == "":
            return
        else:
            input_vector = np.array(
                [float(entry_entry_x1.get()),
                 float(entry_entry_x2.get())])
            net_value = perceptron.calc_net_value(input_vector)
            prediction = perceptron.signum(net_value)

            if (prediction == 1):
                label_result = tk.Label(Form,
                                        text="Flower Category is : " +
                                        comboBox3.get())
                label_result.place(x=220, y=300 + RD)
            else:
                label_result = tk.Label(Form,
                                        text="Flower Category is : " +
                                        comboBox4.get())
                label_result.place(x=220, y=300 + RD)

    if (combo_algotype_sel.get() == 'Multi Layer Per.'):
        # number_layer, size_layer, learning_rate, bias, epochs, activation_fn
        data.set_data_MLP()
        inputs = data.training_input
        outputs = data.training_output

        # msg = perceptron.adaline(inputs, outputs, int(entry_epochs.get()), float(entry_threshold.get()))

        MLP = MultiLayerPerceptron(int(entry_num_hidden_layer.get()),
                                   int(entry_num_nodes_layer.get()),
                                   float(entry_learningRate.get()),
                                   comboBox5.current())
        msg = MLP.algorithm(inputs, outputs, int(entry_epochs.get()),
                            comboBox6.current())
        tk.messagebox.showinfo("Event", msg)

        inputs_test = data.testing_input
        outputs_test = data.testing_output

        msg = MLP.testing(inputs_test, outputs_test, comboBox6.current())
        tk.messagebox.showinfo("Event", msg)

        show_confusion_matrix_MLP(MLP.confusion_matrix)
Example no. 8
import MultiLayerPerceptron as mlp
import numpy as np

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0, 1, 1, 0]]).T

model = mlp.MultiLayerPerceptron(4, 4, 10000)
model.fit(X, y)

print(model.predict(np.array([0, 0])))
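
A quick sanity check over all four XOR patterns (a sketch, assuming predict accepts a single input row, as in the call above):

# Expected outputs for XOR: 0, 1, 1, 0.
for row, target in zip(X, y):
    print(row, "->", model.predict(row), "expected", target)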
Example no. 9
print("[ " + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + " ]" +
      " Backpropagation, Training MLP..." + "")

momentumConstant = 1  # Not used in this step, but required for the constructor.
numberOfEpochs = 1000
numberOfHiddenUnits = 25

nn = mlp.MultiLayerPerceptron(n_output=3,
                              n_features=df_X.shape[1],
                              n_hidden=numberOfHiddenUnits,
                              l2=0.1,
                              l1=0.0,
                              epochs=numberOfEpochs,
                              eta=0.001,
                              alpha=momentumConstant,
                              decrease_const=0.00001,
                              minibatches=1,
                              shuffle=True,
                              random_state=1,
                              useMomentum=False,
                              useNguyenWidrow=False,
                              destroyWeights=True,
                              destroyAmount=40)

#   Fit the training data using the initialized MLP object.
nn.fit(df_X, df_y, print_progress=True)

#   Diagnostic plots
plt.plot(range(len(nn.cost_)), nn.cost_)
plt.ylim([0, 100])
plt.ylabel('Cost')
plt.xlabel('Epochs')
plt.show()
Example no. 10
class CNN(object):

    def __init__(self, layerSizes, weight_range, filter_size, stride, padding, weight_initialization="He"):
        
        self.MLP = MultiLayerPerceptron(layerSizes,None,"He")
        self.stride = stride
        self.padding = padding

        filter_count = 32
        weight_variance = 2/(filter_size*filter_size*filter_count)       
        self.conv_filters = np.random.randn(filter_count,filter_size,filter_size) * math.sqrt(weight_variance)
        self.conv_filters_biases = np.random.randn(filter_count,) * math.sqrt(weight_variance)
        
        """
        self.conv_filters = []
        for i in range(0,32):
            weight_variance = 2 / 1000          
            filterArray = math.sqrt(weight_variance) * np.random.randn(filter_size,filter_size)
            self.conv_filters.append(filterArray)
        """




    def feedforward(self, x, activation_function):

        first_x = x
        #CONVOLUTION
        height, width = x.shape
        filter_count, filter_height, filter_width = self.conv_filters.shape
           
        height_out = _compute_size(height, filter_height, self.padding, self.stride)
        width_out = _compute_size(width, filter_width, self.padding, self.stride)

        padding = (self.padding, self.padding)
        x_padded = np.pad(x, (padding, padding), mode='constant', constant_values=0)
        filters_reshaped = self.conv_filters.reshape(filter_count, -1).T

        out = np.zeros((filter_count, height_out, width_out))

        for h in range(height_out):
            for w in range(width_out):
                h_start, w_start = h * self.stride, w * self.stride
                h_end, w_end = h_start + filter_height, w_start + filter_width
                window = x_padded[h_start: h_end, w_start: w_end]
                
                out[:, h, w] = np.dot(
                    window.reshape(1, -1),
                    filters_reshaped,
                ) + self.conv_filters_biases.T
        
        
        
        x = activation_function(out)
        conv_out = out

        #MAXPOOLING
        filter_count, height, width = x.shape
        height_out = _compute_size(height, 2, 0, 2)
        width_out = _compute_size(width, 2, 0, 2)
        

        

        out = np.zeros((filter_count, height_out, width_out))
        mask = np.zeros_like(x, dtype=np.uint8)
        for h in range(height_out):
            for w in range(width_out):
                h_start, w_start = h * 2, w * 2
                h_end, w_end = h_start + 2, w_start + 2
                window = x[:, h_start: h_end, w_start: w_end]
                
                flat_window = window.reshape(filter_count, -1)
               
                window_mask = flat_window.argmax(-1)[..., None] == range(flat_window.shape[-1])
                
                out[:, h, w] = flat_window[window_mask].reshape(filter_count)
                mask[:, h_start: h_end, w_start: w_end] = window_mask.reshape((filter_count, 2, 2))

        return first_x, mask.astype(bool), conv_out, out

    def backpropagation(self,x,y,activation_function,derivative_function):
        
        first_x, mask, conv_out, mlpIN = self.feedforward(x,softplus_function)
        ynet,z_list,activations_list = self.MLP.feedforward(mlpIN.reshape(-1),softplus_function)
        fc_db, fc_dw = self.MLP.backpropagation(activations_list[0],y,softplus_function,sigmoid_function)
        
        delta =  self.MLP.weights[0].T@fc_db[0] * derivative_function(mlpIN.reshape(-1))
        delta = delta.reshape(mlpIN.shape)
        
        
        #upsample downstream
        d_downstream = np.zeros_like(conv_out)

        filter_count, height, width = delta.shape
        for h in range(height):
            for w in range(width):
                h_start, w_start = h * 2, w * 2
                h_end, w_end = h_start + 2, w_start + 2
                mask_window = mask[:, h_start: h_end, w_start: w_end]
                
                d_downstream[:,  h_start: h_end, w_start: w_end][mask_window] =  delta[:, h, w].flatten()

       
        #first layer error
        delta = d_downstream * conv_out
        

        filter_count, filter_height, filter_width = self.conv_filters.shape
        filters_reshaped = self.conv_filters.reshape(filter_count, -1)
        
        padding = (self.padding, self.padding)
        x_padded = np.pad(x, (padding, padding), mode='constant', constant_values=0)
        filters_reshaped = self.conv_filters.reshape(filter_count, -1).T
        d_downstream = np.zeros_like(x)
        d_weight = np.zeros_like(self.conv_filters)
        filter_count, height, width = delta.shape
       
        for h in range(height):
            for w in range(width):
                h_start, w_start = h * self.stride, w * self.stride
                h_end, w_end = h_start + filter_height, w_start + filter_width           
                d_weight += (delta[:, h, w].T).reshape(filter_count,1).dot(
                    x_padded[h_start: h_end, w_start: w_end].reshape(1, -1)
                ).reshape(self.conv_filters.shape)

        d_bias = delta.sum(axis=(1, 2))
        

        return d_weight,d_bias, fc_dw, fc_db

    def train(self, training_data, validation_data, epochs, learn_step, minibatch_size, activation_function, derivative_function, patience):

        train_data_length = len(training_data[0])
        if validation_data:
            validation_data_length = len(validation_data[0])
        max_accuracy = 0.0
        max_epoch = epochs

        reversed_accuracy_list = []
        reversed_epoch_list = []

        for i in range(0,epochs):

           
            minibatches = [
                (training_data[0][j:j+minibatch_size],
                training_data[1][j:j+minibatch_size])
                for j in range(0, train_data_length, minibatch_size)]
            
            for minibatch in minibatches:
                
                gradient_f_weights = np.zeros_like(self.conv_filters)
                gradient_f_biases = np.zeros(self.conv_filters.shape[0])
                gradient_b = [np.zeros(b.shape) for b in self.MLP.biases]
                gradient_w = [np.zeros(w.shape) for w in self.MLP.weights]
                #print("EnterX:{0}".format(time.time()))
                minibatch_len = len(minibatch[0])
                for l in range(minibatch_len):
                    
                    x = minibatch[0][l].reshape(28,28) /255
                    y = minibatch[1][l]
                    
                    d_weight,d_bias, fc_dw, fc_db = self.backpropagation(x,y,activation_function,derivative_function)
                    
                    
                    gradient_f_weights += d_weight
                    gradient_f_biases += d_bias
                    gradient_b = [nb+dnb for nb, dnb in zip(gradient_b, fc_db)]
                    gradient_w = [nw+dnw for nw, dnw in zip(gradient_w, fc_dw)]

                #print("EntUpd:{0}".format(time.time()))
                self.MLP.weights = [w-(learn_step/minibatch_len)*nw
                        for w, nw in zip(self.MLP.weights, gradient_w)]
                self.MLP.biases = [b-(learn_step/minibatch_len)*nb
                       for b, nb in zip(self.MLP.biases, gradient_b)]
                self.conv_filters -= (learn_step/minibatch_len)* gradient_f_weights  
                self.conv_filters_biases -=  (learn_step/minibatch_len)* gradient_f_biases  
                
               # print(gradient_f_weights)
               # print(gradient_f_biases)
                #print(gradient_b)
                #print(gradient_w)
                     
                     
                    
            # Evaluate on the validation set after each epoch.
            accuracy = self.accuracy(validation_data, validation_data_length, activation_function)
            accuracy = round(accuracy, 2)
            reversed_accuracy_list.insert(0, accuracy)
            reversed_epoch_list.insert(0, i)
            print("Epoch {0}: {1}".format(i, accuracy))

            if accuracy > max_accuracy:
                max_epoch = i
                max_accuracy = accuracy

            # Early stopping once no improvement has been seen for `patience` epochs.
            if i - max_epoch > patience:
                print("PATIENCE")
                return i, reversed_epoch_list, reversed_accuracy_list, max_accuracy

        return epochs, reversed_epoch_list, reversed_accuracy_list, max_accuracy

    def accuracy(self, validation_data, data_size, activation_function):

        hit_counter = 0

        for i in  range(data_size):
            x = validation_data[0][i].reshape(28,28) /255
            y = validation_data[1][i]
            _,_,_, mlpIN = self.feedforward(x,softplus_function)
            ynet,_,_ = self.MLP.feedforward(mlpIN.reshape(-1),softplus_function)
            
            if np.argmax(ynet) == y:
                hit_counter += 1
        return float(hit_counter) / data_size

    def feedforward_batch(self, x, activation_function):

        first_x = x
        #CONVOLUTION
        batch, height, width = x.shape
        filter_count, filter_height, filter_width = self.conv_filters.shape
           
        height_out = _compute_size(height, filter_height, self.padding, self.stride)
        width_out = _compute_size(width, filter_width, self.padding, self.stride)

        padding = (self.padding, self.padding)
        x_padded = np.pad(x, ((0,0), padding, padding), mode='constant', constant_values=0)
        
        
        filters_reshaped = self.conv_filters.reshape(filter_count, -1).T

        out = np.zeros((batch,filter_count, height_out, width_out))

        for h in range(height_out):
            for w in range(width_out):
                h_start, w_start = h * self.stride, w * self.stride
                h_end, w_end = h_start + filter_height, w_start + filter_width
                window = x_padded[:, h_start: h_end, w_start: w_end]
                
                out[..., h, w] = np.dot(
                    window.reshape(batch, -1),
                    filters_reshaped,
                ) + self.conv_filters_biases.T
        np.set_printoptions(threshold=np.inf)
        
        x = activation_function(out)
        conv_out = out

        #MAXPOOLING
        batch,filter_count, height, width = x.shape
        height_out = _compute_size(height, 2, 0, 2)
        width_out = _compute_size(width, 2, 0, 2)
        

        

        out = np.zeros((batch, filter_count, height_out, width_out))
        mask = np.zeros_like(x, dtype=np.uint8)
        for h in range(height_out):
            for w in range(width_out):
                h_start, w_start = h * 2, w * 2
                h_end, w_end = h_start + 2, w_start + 2
                window = x[..., h_start: h_end, w_start: w_end]
                
                flat_window = window.reshape(*window.shape[:2], -1)
                window_mask = flat_window.argmax(-1)[..., None] == range(flat_window.shape[-1])
                out[..., h, w] = flat_window[window_mask].reshape(window.shape[:2])
                mask[..., h_start: h_end, w_start: w_end] = window_mask.reshape((*window.shape[:2], 2, 2))    

        
        
        
        return first_x, mask.astype(bool), conv_out, out
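
The class above relies on a _compute_size helper and on softplus_function / sigmoid_function, none of which appear in the snippet (Example no. 17 imports them from MultiLayerPerceptron). A minimal sketch of plausible definitions, assuming the standard convolution output-size formula; the project's own implementations may differ:

import numpy as np

def _compute_size(size, filter_size, padding, stride):
    # Output size of a convolution / pooling window: (n + 2p - f) // s + 1.
    return (size + 2 * padding - filter_size) // stride + 1

def softplus_function(x):
    # Softplus activation, log(1 + e^x), written in a numerically stable form.
    return np.logaddexp(0.0, x)

def sigmoid_function(x):
    # Logistic sigmoid; it is also the derivative of softplus, which is why
    # Example no. 17 passes it as the derivative_function.
    return 1.0 / (1.0 + np.exp(-x))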
Example no. 11
def run_train_models(datasets, parameters):
    device = parameters["device"]

    # Per-Class-Error
    per_class_error = {
        "MLP": 0.0,
        "FCN": 0.0,
        "ResNet": 0.0
    }

    for dataset_number, (dataset, dataloader) in enumerate(datasets.items()):
        dataset_number += 1

        # setting up
        if parameters["verbose"]:
            print_dataset_info(dataset, dataloader)
        sleep(1)

        time_steps = dataloader['test'].dataset.inputs.shape[-1]
        n_classes = len(np.unique(dataloader['test'].dataset.targets))

        # MLP
        if parameters["run_mlp"]:
            model_name = "MLP"
            if parameters["verbose"]:
                print(model_name)
            model = MultiLayerPerceptron(time_steps, n_classes)
            optimizer = optim.Adadelta(
                model.parameters(),
                lr=parameters["mlp_lr"],
                rho=parameters["mlp_rho"],
                eps=parameters["mlp_eps"]
            )
            if torch.cuda.device_count() > 0:
                model = nn.DataParallel(model)
            model.to(device)
            test_error_rate, model, _ = train(
                model_name=model_name,
                dataset_name=dataset,
                dataloader_train=dataloader['train'],
                dataloader_test=dataloader['test'],
                device=device,
                model=model,
                optimizer=optimizer,
                epochs=parameters["mlp_epochs"],
                save=False
            )
            per_class_error[model_name] += test_error_rate / n_classes
            mean_per_class_error = per_class_error[model_name] / dataset_number
            neptune.log_metric("{}_mpce".format(model_name), mean_per_class_error)

        # ConvNet
        if parameters["run_fcn"]:
            model_name = "FCN"
            if parameters["verbose"]:
                print(model_name)
            model = ConvNet(time_steps, n_classes)
            if torch.cuda.device_count() > 0:
                model = nn.DataParallel(model)
            model.to(device)

            optimizer = optim.Adam(
                model.parameters(),
                lr=parameters["fcn_lr"],
                betas=parameters["fcn_betas"],
                eps=parameters["fcn_eps"]
            )
            test_error_rate, model, _ = train(
                model_name=model_name,
                dataset_name=dataset,
                dataloader_train=dataloader['train'],
                dataloader_test=dataloader['test'],
                device=device,
                model=model,
                optimizer=optimizer,
                epochs=parameters["fcn_epochs"],
                save=False
            )
            per_class_error[model_name] += test_error_rate / n_classes
            mean_per_class_error = per_class_error[model_name] / dataset_number
            neptune.log_metric("{}_mpce".format(model_name), mean_per_class_error)

        # ResNet
        if parameters["run_resnet"]:
            model_name = "ResNet"
            if parameters["verbose"]:
                print(model_name)
            model = ResNet(time_steps, n_classes)
            if torch.cuda.device_count() > 0:
                model = nn.DataParallel(model)
            model.to(device)
            optimizer = optim.Adam(
                model.parameters(),
                lr=parameters["fcn_lr"],
                betas=parameters["fcn_betas"],
                eps=parameters["fcn_eps"]
            )
            test_error_rate, model, _ = train(
                model_name=model_name,
                dataset_name=dataset,
                dataloader_train=dataloader['train'],
                dataloader_test=dataloader['test'],
                device=device,
                model=model,
                optimizer=optimizer,
                epochs=parameters["fcn_epochs"],
                save=False
            )
            per_class_error[model_name] += test_error_rate / n_classes
            mean_per_class_error = per_class_error[model_name] / dataset_number
            neptune.log_metric("{}_mpce".format(model_name), mean_per_class_error)
Example no. 12
def normalize_dataset(dataset):
    """Divides each value by the difference of the upper and the lower values in that feature."""
    lower_upper_bounds = dataset_boundaries(dataset)
    for row in dataset:
        for i in range(len(row) - 1):
            row[i] = (row[i] - lower_upper_bounds[i][0]) / (
                lower_upper_bounds[i][1] - lower_upper_bounds[i][0])
    return dataset


if __name__ == "__main__":
    filename = 'owls.csv'
    # Preprocess the dataset
    dataset = create_dataset(filename)
    # normalize input variables
    dataset = normalize_dataset(dataset)
    # Instantiate the Perceptron class
    # by data split
    #MLP = MultiLayerPerceptron.Perceptron(n_epochs=20, train_test_prop=2/3, learning_rate=0.4, n_hidden=5, activation_type="Sigmoid")
    # by cross-validation
    MLP = MultiLayerPerceptron.Perceptron(n_epochs=20,
                                          n_folds=10,
                                          learning_rate=0.4,
                                          n_hidden=5,
                                          activation_type="Sigmoid")
    # Evaluate the trained model
    scores = MLP.run(dataset)
    average = str(float("%0.2f" % (sum(scores) / len(scores))))
    scores = [str(float("%0.2f" % s)) for s in scores]
    print " ".join(scores) + " " + str(average)
Example no. 13
    trainingX = []
    trainingy = []
    with open("optdigits_training.csv") as t:
        for line in t:
            l = line.split(",")
            lineInts = []
            for num in l:
                lineInts.append(int(num))
            trainingX.append(lineInts[:-1])
            y = [0 for i in range(0, 10)]
            y[lineInts[-1]] = 1
            trainingy.append(y)

    nn = MultiLayerPerceptron.Net([64, 30, 10], 1.0)

    #Visualize.drawNN(nn)

    # for x in range(1, 100):
    #     nn.train(trainingX, trainingy)
    #     print(nn.errTot)
    err = 100
    i = 0
    errArr = [5 for x in range(0, 100)]
    while err > .55:
        err = nn.train(trainingX, trainingy)
        errArr[i % 100] = err
        nn.LR = sum(errArr) / len(errArr) * 2
        i = i + 1
        print(str(i) + ", " + str(nn.LR) + ": " + str(err))
Example no. 14
    z = float(z)

    #Convert the class to an integer
    cl = int(cl)

    #Add this datapoint (a list) to the dataset (also a list)
    data.append([w, x, y, z, cl])

#Shuffle dataset
random.shuffle(data)

#Divide dataset into training, validation, and testing subsets
training_data = data[:int(len(data) * .7)]
validation_data = data[int(len(data) * .7):int(len(data) * .85)]
testing_data = data[int(len(data) * .85):]

#Create MLP with default parameters
mlp = MLP.MultiLayerPerceptron(training_data,
                               validation_data,
                               testing_data,
                               alpha=.00035,
                               beta=.8,
                               weight_init=lambda x, y:
                               (2 * np.random.rand(x, y) - 1))

#Train mlp
num_iterations = mlp.do_training(max_iterations=15)

#Plot results
mlp.graph_results()
Example no. 15
def get_dataset2():
    """Retrieve the gcc dataset and process the data."""
    # Set defaults.
    global input_shape
    filename = "FinancialData.xlsx"
    df = pd.read_excel(filename, 'DataSet2', header=0, usecols="B:R")
    dm = pd.read_excel(filename, 'DataSet2', header=0, usecols="Y")
    X = df.values
    Y = dm.values
    input_shape = 17
    return X, Y, input_shape


X, Y, Input_shape = get_dataset1()
Layers = ("1", "2", "3")
MLPresult1, MLPresult2, MLPresult3, MLPf1, MLPf2, MLPf3 = MLP.MLP_evaluate(X, Y, Input_shape)
MLResults = (MLPresult1, MLPresult2, MLPresult3)
MLf1Results = (MLPf1, MLPf2, MLPf3)

X, Y, Input_shape = get_dataset2()
MLPresult1, MLPresult2, MLPresult3, MLPf1, MLPf2, MLPf3 = MLP.MLP_evaluate(X, Y, Input_shape)

D2_MLResults = (MLPresult1, MLPresult2, MLPresult3)
D2_MLf1Results = (MLPf1, MLPf2, MLPf3)


fig, ax = plt1.subplots()
plt1.title('F1-score of Deep Neural Network(MLP) on FDP')
ax.plot(Layers, MLf1Results, color='r', marker='o', linestyle='--', markersize=5, label='Dataset1')
ax.plot(Layers, D2_MLf1Results, color='b', marker='o', linestyle='--', markersize=5, label='Dataset2')
legend = ax.legend(loc='lower right', shadow=True)
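
MLP.MLP_evaluate is not shown in these examples; judging from the unpacking above, it returns an accuracy and an F1-score for networks with 1, 2 and 3 hidden layers. A rough sketch of such a helper, using scikit-learn's MLPClassifier as a stand-in for the project's own MLP module (the hyperparameters are assumptions):

from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score, f1_score

def MLP_evaluate(X, Y, input_shape, hidden_units=64):
    # input_shape is accepted only to mirror the original signature; scikit-learn
    # infers the input dimension from X.
    X_train, X_test, y_train, y_test = train_test_split(
        X, Y.ravel(), test_size=0.3, random_state=0)
    accuracies, f1_scores = [], []
    for depth in (1, 2, 3):
        clf = MLPClassifier(hidden_layer_sizes=(hidden_units,) * depth,
                            max_iter=500, random_state=0)
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        accuracies.append(accuracy_score(y_test, y_pred))
        f1_scores.append(f1_score(y_test, y_pred, average='weighted'))
    return (*accuracies, *f1_scores)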
Example no. 16
def predict_cnn(nkerns=[20, 40, 60], batch_size=200):

    # Load dataset
    datasets = LoadData.load_predict('VisionFeatures/dct12')

    #train_set_x, train_set_y = datasets[0]
    #valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    #n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    #n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    weights = sio.loadmat('weights20')
    layer0_W = weights['layer0_W']
    layer0_b = weights['layer0_b']
    layer0_b = np.reshape(layer0_b, (layer0_b.shape[1], ))
    layer1_W = weights['layer1_W']
    layer1_b = weights['layer1_b']
    layer1_b = np.reshape(layer1_b, (layer1_b.shape[1], ))
    layer2_W = weights['layer2_W']
    layer2_b = weights['layer2_b']
    layer2_b = np.reshape(layer2_b, (layer2_b.shape[1], ))
    layer3_W = weights['layer3_W']
    layer3_b = weights['layer3_b']
    layer3_b = np.reshape(layer3_b, (layer3_b.shape[1], ))
    layer5_W = weights['layer5_W']
    layer5_b = weights['layer5_b']
    layer5_b = np.reshape(layer5_b, (layer5_b.shape[1], ))

    # Construct the model
    print '... building the model'

    index = T.lscalar()
    x = T.matrix('x')
    y = T.ivector('y')

    rng = np.random.RandomState(1234)

    layer0_input = x.reshape((batch_size, 72, 88, 1))
    layer0_input = layer0_input.dimshuffle(0, 3, 1, 2)

    layer0 = ConvPoolLayer.ConvPoolLayer(rng,
                                         layer0_input,
                                         filter_shape=(nkerns[0], 1, 9, 9),
                                         image_shape=(batch_size, 1, 72, 88),
                                         W=layer0_W,
                                         b=layer0_b)

    layer1 = ConvPoolLayer.ConvPoolLayer(rng,
                                         layer0.output,
                                         filter_shape=(nkerns[1], nkerns[0], 9,
                                                       9),
                                         image_shape=(batch_size, nkerns[0],
                                                      32, 40),
                                         W=layer1_W,
                                         b=layer1_b)

    layer2 = ConvPoolLayer.ConvPoolLayer(rng,
                                         layer1.output,
                                         filter_shape=(nkerns[2], nkerns[1], 5,
                                                       5),
                                         image_shape=(batch_size, nkerns[1],
                                                      12, 16),
                                         W=layer2_W,
                                         b=layer2_b)

    layer3 = MultiLayerPerceptron.HiddenLayer(rng,
                                              layer2.output.flatten(2),
                                              nkerns[2] * 4 * 6,
                                              600,
                                              W=layer3_W,
                                              b=layer3_b,
                                              activation=T.tanh)

    layer5 = LogisticLayer.LogisticLayer(layer3.output,
                                         600,
                                         6,
                                         W=layer5_W,
                                         b=layer5_b)

    predict_model = theano.function(
        inputs=[index],
        outputs=[layer5.errors(y)],
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        })

    prediction_losses = [predict_model(i) for i in xrange(n_test_batches)]
    this_prediction_loss = np.mean(prediction_losses)

    print this_prediction_loss
Example no. 17
from MultiLayerPerceptron import *
import mnist_loader
import matplotlib.pyplot as plt
import numpy as np
training, validation, test = mnist_loader.load_data()
offset = 5000
t = training[0][0:offset], training[1][0:offset]
offsetv = 500
v = validation[0][0:offsetv], validation[1][0:offsetv]

m = MultiLayerPerceptron([784, 8, 10], (-0.4, 0.4), 'He')

b, c = m.backpropagation(t[0][12], t[1][0], softplus_function,
                         sigmoid_function)

#print(b)
#print(c)

hidden_layer_size = 100
weight_range = (-0.4, 0.4)
epochs = 100
learn_step = 0.05
batch_size = 64
activation_function = softplus_function
derivative_function = sigmoid_function
patience = 25
"""
#_,epochs_list,accuracy_list,max_accuracy = m.train(t,v,epochs,learn_step,batch_size,activation_function,derivative_function,patience)
b , w = m.backpropagation(t[0][0],t[1][0],sigmoid_function,sigmoid_derivative)
print(w[0].shape)
print(b[1].shape)
    updatenumber = 10000

    pictures = fetch_mldata('MNIST original', data_home=".")
    # Build the training data
    X = pictures.data
    y = pictures.target

    # Normalize pixel values to the range 0.0-1.0
    X = X.astype(np.float64)
    X /= X.max()

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
    # The output is a 10-dimensional vector: unit k corresponds to digit k, and the correct unit is set to 1
    labels_train = LabelBinarizer().fit_transform(y_train)

    mlp = MultiLayerPerceptron.MultiLayerPerceptron(28 * 28, [1000], 10, 3,
                                                    ["tanh", "sigmoid"])
    for i in range(10):
        # Learn the network weights from the training data
        mlp.fit(X_train, labels_train, learning_rate, updatenumber)

        # Compute prediction accuracy on the test data
        predictions = []
        for i in range(X_test.shape[0]):
            ### Mix noise into the input
            for k in range(len(X_test[i])):
                rand = np.random.rand()
                if rand < 0.25:
                    X_test[i][k] = np.random.rand()
            o = mlp.UseMLP(X_test[i])
            # Classify into the class with the largest output
            predictions.append(np.argmax(o))
Example no. 19
import numpy as np
import numpy.matlib
import MultiLayerPerceptron as mlp
from activations import relu, sigmoid

np.random.seed(1)

testMLP = mlp.MultiLayerPerceptron(inputSize = 3,
                                   numHiddenLayers = 0,
                                   numNeuronByHiddenLayer = [2],
                                   activationFunction = sigmoid,
                                   outputSize = 1,
                                   trainingIterations = 1)



X = np.array([[0,0,1],
              [1,1,1],
              [1,0,1],
              [0,1,1]])

Y = np.array([[0,1,1,0]]).T


print(testMLP.weightsByLayer)
testMLP.weightsByLayer[0] = 2 * testMLP.weightsByLayer[0] - 1
testMLP.biasByLayer[0] = 0 


Z = testMLP.feedforward(X)
print(Z)
Example no. 20
epochs = 100
learn_step = 0.05
mini_batch_size = 64
activation_function = sigmoid_function
derivative_function = sigmoid_derivative
patience = 40
momentum = 0.9

x_list = []
y_list = []

opti_names = ['Momentum', 'Nesterov', 'Adam', 'Adagrad', 'Adadelta']

for rep in range(0, reps):

    m = MultiLayerPerceptron([784, hidden_layer_size, 10], weight_range)
    _, avg_x, avg_y, _ = m.train_momentum(t, v, epochs, learn_step, 64,
                                          activation_function,
                                          derivative_function, patience,
                                          momentum)
    avg_x = [element / reps for element in avg_x]
    x_list.append(avg_x)
    avg_y = [element / reps for element in avg_y]
    y_list.append(avg_y)

    m = MultiLayerPerceptron([784, hidden_layer_size, 10], weight_range)
    _, avg_x, avg_y, _ = m.train_nesterov(t, v, epochs, learn_step, 64,
                                          activation_function,
                                          derivative_function, patience,
                                          momentum)
    avg_x = [element / reps for element in avg_x]
Example no. 21

def get_dataset1():
    """Retrieve the kuwait dataset and process the data."""
    # Set defaults.
    global input_shape
    filename = "Kuwait.xlsx"
    df = pd.read_excel(filename, 'DataSet2', header=0, usecols="B:X")
    dm = pd.read_excel(filename, 'DataSet2', header=0, usecols="AF")
    X = df.values
    Y = dm.values
    input_shape = 23
    return X, Y, input_shape


def get_dataset2():
    """Retrieve the gcc dataset and process the data."""
    # Set defaults.
    global input_shape
    filename = "FinancialData.xlsx"
    df = pd.read_excel(filename, 'DataSet2', header=0, usecols="B:R")
    dm = pd.read_excel(filename, 'DataSet2', header=0, usecols="Y")
    X = df.values
    Y = dm.values
    input_shape = 17
    return X, Y, input_shape


X, Y, Input_shape = get_dataset1()
MLP.MLP_evaluate(X, Y, Input_shape)