Example #1
0
File: MLP.py Project: dxmtb/nn
    def __init__(self, in_dim, hidden_dim, out_dim, activation, loss_type,
                 layer_num=0):
        """Stack an input layer, `layer_num` extra hidden layers and an output
        layer.

        The output layer is fully connected when the loss is 'mse' and a
        softmax layer otherwise.
        """
        NeuralNetwork.__init__(self, activation, loss_type)

        act_args = (self.activation, self.grad_activation)
        stacked = [FullyConnectedLayer(in_dim, hidden_dim, *act_args)]
        for _ in xrange(layer_num):
            stacked.append(
                FullyConnectedLayer(hidden_dim, hidden_dim, *act_args))
        if loss_type == 'mse':
            stacked.append(FullyConnectedLayer(hidden_dim, out_dim, *act_args))
        else:
            # Imported lazily so the softmax dependency is only paid when used.
            from SoftmaxLayer import SoftmaxLayer
            stacked.append(SoftmaxLayer(hidden_dim, out_dim, *act_args))
        self.layers = stacked
Example #2
0
    def __init__(self, input, output, output_act, eval_metric):
        """Single-layer base model: one weight matrix mapping input to output."""
        NeuralNetwork.__init__(self, input, output, output_act, eval_metric)

        # Length of the flattened weight vector used by the optimiser interface.
        self.w_size = self.get_weight_vector_length()
        self.initialise_cache()

        # Scaled Gaussian initialisation: weights divided by sqrt of fan-in.
        self.W1 = np.random.randn(self.input, self.output) / np.sqrt(self.input)
        # Bias for the first layer.
        # NOTE(review): bias is scaled by sqrt(output), while the deeper model
        # below scales biases by sqrt(hidden) — confirm this asymmetry is wanted.
        self.B1 = np.random.randn(1, self.output) / np.sqrt(self.output)

        self.out = np.zeros((1, self.output))        # output layer for base model
        self.final_out = np.zeros((1, self.output))  # final output for the model
    def __init__(self):
        """---------------------------------------------------------------------
        Desc.:   Class Constructor  
        Args:    -
        Returns: - 
        ---------------------------------------------------------------------"""

        NeuralNetwork.__init__(self, [28 * 28, 10 * 10, 10])

        print "----------------------------------------------------------------"
        print "Digit Classfier using pytorch nn module"
        print "Author: Ankit Manerikar"
        print "Written on: 09-21-2017"
        print "----------------------------------------------------------------"
        print "Loading MNIST Dataset ..."
        self.train_images = read_image_file(
            './data/raw/train-images-idx3-ubyte')
        self.target_val = read_label_file('./data/raw/train-labels-idx1-ubyte')
        print "Dataset Loaded"
        print "\nClass initialized"
Example #4
0
    def __init__(self,
                 X_data,
                 Y_data,
                 n_hidden_neurons=100,
                 epochs=10,
                 batch_size=100,
                 eta=0.1,
                 lmbd=0.0,
                 activation_func='relu',
                 activation_func_out='leaky_relu',
                 cost_func='MSE',
                 leaky_a=0.01):
        """Derive n_categories from Y_data and delegate to the NN base class.

        A 1-D target vector is promoted to a single-column matrix so that
        Y_data.shape[1] is always defined.
        """
        # NOTE(review): leaky_a is accepted but neither stored nor forwarded
        # to NN.__init__ — confirm against the base-class signature.
        if Y_data.ndim == 1:
            Y_data = Y_data.reshape(-1, 1)

        n_categories = Y_data.shape[1]

        NN.__init__(self, X_data, Y_data, n_hidden_neurons, n_categories,
                    epochs, batch_size, eta, lmbd, activation_func,
                    activation_func_out, cost_func)
    def __init__(self, input, hidden, output, max_depth, output_act,
                 eval_metric):
        """Base model with `max_depth` hidden-to-hidden layers of `hidden`
        units each, between an input layer and an output layer."""
        self.hidden = hidden
        self.max_depth = max_depth
        NeuralNetwork.__init__(self, input, output, output_act, eval_metric)

        # Length of the flattened weight vector used by the optimiser interface.
        self.w_size = self.get_weight_vector_length()
        self.initialise_cache()

        # Input -> first hidden layer (scaled Gaussian initialisation).
        self.W1 = np.random.randn(self.input, self.hidden) / np.sqrt(self.input)
        self.B1 = np.random.randn(1, self.hidden) / np.sqrt(self.hidden)

        # Last hidden layer -> output layer.
        self.W2 = np.random.randn(self.hidden, self.output) / np.sqrt(
            self.hidden)
        self.B2 = np.random.randn(1, self.output) / np.sqrt(self.hidden)

        self.out = np.zeros((1, self.output))  # output layer for base model

        # One weight matrix, bias row and activation buffer per hidden layer.
        self.h_weights = []
        self.h_biases = []
        self.h_out = []
        scale = np.sqrt(self.hidden)  # loop-invariant initialisation scale
        for _ in range(self.max_depth):
            self.h_weights.append(
                np.random.randn(self.hidden, self.hidden) / scale)
            self.h_biases.append(np.random.randn(1, self.hidden) / scale)
            self.h_out.append(np.zeros((1, self.hidden)))

        self.final_out = np.zeros((1, self.output))  # final output for the model
 def __init__(self):
     """Two-input OR gate: a [2, 1] network with the full truth table."""
     NeuralNetwork.__init__(self, [2, 1])
     # All four input combinations, in canonical (FF, FT, TF, TT) order.
     self.truth_table = [[a, b] for a in (False, True) for b in (False, True)]
     self.target_val = [a or b for a, b in self.truth_table]
 def __init__(self):
     """NOT gate: a [1, 1] network over the single-input truth table."""
     NeuralNetwork.__init__(self, [1, 1])
     self.truth_table = [False, True]
     self.target_val = [not value for value in self.truth_table]
 def __init__(self, input, output, output_act, eval_metric):
     """Thin constructor: all initialisation is delegated to NeuralNetwork."""
     NeuralNetwork.__init__(self, input, output, output_act, eval_metric)
Example #9
0
 def __init__(self, layers=None):
     """Sigmoid network trained with MSE loss.

     Args:
         layers: layer-size list forwarded to NeuralNetwork.__init__;
             defaults to an empty list.
     """
     # Fix: the original used a mutable default argument (layers=[]), which
     # is shared across calls; a None sentinel gives each call a fresh list.
     NeuralNetwork.__init__(self, [] if layers is None else layers)
     # Sigmoid activation on every hidden layer and on the output layer.
     for i in range(self.number_hidden_layers):
         self.activation_function[i] = self.ACTIVATION_FUNCTION_SIGMOID
     self.output_activation_function = self.ACTIVATION_FUNCTION_SIGMOID
     self.loss_function = self.LOSS_FUNCTION_MSE
Example #10
0
 def __init__(self):
     """Delegate all initialisation to the NeuralNetwork base-class defaults."""
     NeuralNetwork.__init__(self)