Example #1
import argparse
import os

# Model, Linear and ReLU are assumed to be defined elsewhere in this project.
def load():
    args = argparse.ArgumentParser()
    args.add_argument('-modelName')
    args.add_argument('-data')
    args.add_argument('-target')
    args = args.parse_args()

    if args.modelName:
        os.mkdir(args.modelName)

    model_filename = input('Model File: ')
    with open(model_filename, 'r') as model_file:
        # Drop the last two lines (presumably the weight and bias file paths,
        # cf. Example #6) and strip the trailing newline of every remaining line.
        layer_arr = model_file.readlines()[:-2]
        layer_arr = [x[:-1] for x in layer_arr]

    # The first line presumably holds the layer count, not a layer definition.
    layer_arr = layer_arr[1:]

    model = Model()

    # Each remaining line describes one layer: "linear <in> <out>" or "relu".
    for i in layer_arr:
        s = i.split()
        if s[0] == 'linear':
            layer = Linear(int(s[1]), int(s[2]))
            model.addLayer(layer)
        elif s[0] == 'relu':
            layer = ReLU()
            model.addLayer(layer)
    print('Model loaded successfully from file:', model_filename)
    return model
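Judging by the parsing above (and by the similar loader in Example #6 below), the model description file is expected to look roughly like the following; the layer sizes and file names here are hypothetical:

2
linear 784 256
relu
linear 256 10
model_weights.t7
model_biases.t7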
Example #2
    def test_forward(self):
        expected_tensor = np.zeros([self.batch_size, self.input_size])
        expected_tensor[self.half_batch_size:self.batch_size, :] = 1

        layer = ReLU.ReLU()
        output_tensor = layer.forward(self.input_tensor)
        self.assertEqual(np.sum(np.power(output_tensor - expected_tensor, 2)), 0)
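The tests in this section exercise a small layer object with forward/backward methods and a trainable flag (checked in the last snippet below). A minimal ReLU layer along those lines might look like the following sketch; the exact attribute and argument names are assumptions:

import numpy as np


class ReLU:
    """Minimal ReLU layer sketch matching the interface these tests assume."""

    def __init__(self):
        self.trainable = False      # ReLU has no learnable parameters
        self.input_tensor = None    # cached for the backward pass

    def forward(self, input_tensor):
        # Element-wise max(0, x): negative entries become zero, the rest pass through.
        self.input_tensor = input_tensor
        return np.maximum(0, input_tensor)

    def backward(self, error_tensor):
        # The gradient flows only where the forward input was positive.
        return error_tensor * (self.input_tensor > 0)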
Example #3
    def test_gradient(self):
        input_tensor = np.abs(np.random.random((self.batch_size, self.input_size)))
        input_tensor *= 2.
        input_tensor -= 1.
        layers = list()
        layers.append(ReLU.ReLU())
        layers.append(L2Loss())
        difference = Helpers.gradient_check(layers, input_tensor, self.label_tensor)
        self.assertLessEqual(np.sum(difference), 1e-5)
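Helpers.gradient_check itself is not shown here; conceptually it compares the analytical gradient from the backward passes with a finite-difference estimate. A rough sketch under assumed interfaces (the loss layer's forward takes prediction and label, its backward takes the label) could look like this:

import numpy as np


def gradient_check_sketch(layers, input_tensor, label_tensor, eps=1e-7):
    """Rough finite-difference gradient check; interface names are assumptions."""

    def loss(x):
        # Forward through all layers, the last one being the loss layer.
        for layer in layers[:-1]:
            x = layer.forward(x)
        return layers[-1].forward(x, label_tensor)

    # Analytical gradient w.r.t. the input via the backward passes.
    loss(input_tensor)
    grad = layers[-1].backward(label_tensor)
    for layer in reversed(layers[:-1]):
        grad = layer.backward(grad)

    # Numerical gradient via central differences, one element at a time.
    num_grad = np.zeros_like(input_tensor)
    for idx in np.ndindex(*input_tensor.shape):
        original = input_tensor[idx]
        input_tensor[idx] = original + eps
        loss_plus = loss(input_tensor)
        input_tensor[idx] = original - eps
        loss_minus = loss(input_tensor)
        input_tensor[idx] = original
        num_grad[idx] = (loss_plus - loss_minus) / (2 * eps)

    return np.abs(grad - num_grad)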
    def test_data_access(self):
        net = NeuralNetwork.NeuralNetwork(Optimizers.Sgd(1))
        categories = 3
        input_size = 4
        net.data_layer = Helpers.IrisData(50)
        net.loss_layer = Loss.CrossEntropyLoss()
        fcl_1 = FullyConnected.FullyConnected(input_size, categories)
        net.append_layer(fcl_1)
        net.append_layer(ReLU.ReLU())
        fcl_2 = FullyConnected.FullyConnected(categories, categories)
        net.append_layer(fcl_2)
        net.append_layer(SoftMax.SoftMax())

        out = net.forward()
        out2 = net.forward()

        self.assertNotEqual(out, out2)
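The assertion holds because each forward pass presumably draws a fresh random batch from the data layer, so the two losses differ. A hypothetical data layer in the spirit of Helpers.IrisData (class and method names below are made up for illustration) would behave like this:

import numpy as np


class RandomBatchData:
    """Hypothetical stand-in for Helpers.IrisData: serves a freshly sampled batch per call."""

    def __init__(self, data, labels, batch_size):
        self.data, self.labels, self.batch_size = data, labels, batch_size

    def next(self):
        # A different random subset each time, hence a different loss per forward pass.
        idx = np.random.choice(len(self.data), self.batch_size, replace=False)
        return self.data[idx], self.labels[idx]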
    def test_iris_data(self):
        net = NeuralNetwork.NeuralNetwork(Optimizers.Sgd(1e-3))
        categories = 3
        input_size = 4
        net.data_layer = Helpers.IrisData(50)
        net.loss_layer = Loss.CrossEntropyLoss()

        fcl_1 = FullyConnected.FullyConnected(input_size, categories)
        net.append_layer(fcl_1)
        net.append_layer(ReLU.ReLU())
        fcl_2 = FullyConnected.FullyConnected(categories, categories)
        net.append_layer(fcl_2)
        net.append_layer(SoftMax.SoftMax())

        net.train(4000)
        plt.figure(
            'Loss function for a Neural Net on the Iris dataset using SGD')
        plt.plot(net.loss, '-x')
        plt.show()

        data, labels = net.data_layer.get_test_set()

        results = net.test(data)
        index_maximum = np.argmax(results, axis=1)
        one_hot_vector = np.zeros_like(results)
        for i in range(one_hot_vector.shape[0]):
            one_hot_vector[i, index_maximum[i]] = 1

        correct = 0.
        wrong = 0.
        for column_results, column_labels in zip(one_hot_vector, labels):
            if column_results[column_labels > 0].all() > 0:
                correct += 1
            else:
                wrong += 1

        accuracy = correct / (correct + wrong)
        print('\nOn the Iris dataset, we achieve an accuracy of: ' +
              str(accuracy * 100) + '%')
        self.assertGreater(accuracy, 0.8)
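The one-hot comparison above can also be written more compactly by taking the argmax of both predictions and labels (assuming one-hot labels):

predictions = np.argmax(results, axis=1)
truth = np.argmax(labels, axis=1)
accuracy = np.mean(predictions == truth)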
Example #6
import torchfile

# Model, Linear and ReLU are assumed to be defined elsewhere in this project;
# `parser` is assumed to be the parsed command-line arguments object.
myModel = Model()

# Read the model description file, stripping the trailing '\n' from each line.
with open(parser.config) as f:
    arr = f.readlines()
    # The first line gives the layer count; the counter below is decremented
    # only when a "linear" line is consumed.
    num_layers = int(arr[0].replace('\n', ''))
    i = 1
    while num_layers > 0:
        arr[i] = arr[i].replace('\n', '')
        v = arr[i].split(' ')
        if v[0] == "relu":
            myModel.addLayer(ReLU())
        elif v[0] == "linear":
            num_layers -= 1
            inp = int(v[1])
            out = int(v[2])
            myModel.addLayer(Linear(inp, out))
        i += 1
    # The two lines after the layer definitions hold the weight and bias file paths.
    layer_weights_path = arr[i].replace('\n', '')
    layer_bias_path = arr[i + 1].replace('\n', '')

weights = torchfile.load(layer_weights_path)
# weights = torch.from_numpy(weights)
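What happens with the loaded arrays is not shown; a hypothetical continuation might copy them into the linear layers, assuming the model keeps its layers in a list and each Linear layer exposes weight and bias attributes (all names below are assumptions):

# Hypothetical continuation: copy the loaded arrays into the Linear layers.
# The .layers attribute and the W/B attribute names are assumptions.
biases = torchfile.load(layer_bias_path)
linear_layers = [layer for layer in myModel.layers if isinstance(layer, Linear)]
for layer, w, b in zip(linear_layers, weights, biases):
    layer.W = w
    layer.B = b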
    def test_trainable(self):
        layer = ReLU.ReLU()
        self.assertFalse(layer.trainable)