Example No. 1
def prepare_models(x, y):
    return [
        neural_network.NeuralNetwork(x, y, "sigmoid", "relu"),
        neural_network.NeuralNetwork(x, y, "relu", "relu"),
        neural_network.NeuralNetwork(x, y, "sigmoid", "sigmoid"),
        neural_network.NeuralNetwork(x, y, "relu", "sigmoid")
    ]
Example No. 2
    def __init__(self):
        self.nNumbers   = NN.NeuralNetwork()
        self.nOperators = NN.NeuralNetwork()
        self.nLetters   = NN.NeuralNetwork()
        self.nNumbers.loadFrom(SettingsController.numbersNetworkPath)
        self.nOperators.loadFrom(SettingsController.operatorsNetworkPath)
        self.nLetters.loadFrom(SettingsController.lettersNetworkPath)
Example No. 3
def main():
    N = 100 # number of points per class
    num_dims = 2 # dimensionality
    num_classes = 3 # number of classes

    X, y = data_generator.generate_spiral_data(N, num_dims, num_classes, seed=0)

    # Hyperparameters
    hidden_layers_sizes = [100]
    learning_rate = 1e-0
    regularization_strength = 1e-3
    num_epochs = 10000
    activation_function = 'relu'

    neural_net = neural_network.NeuralNetwork(
            num_dims, num_classes, hidden_layers_sizes, activation_function,
            num_epochs, learning_rate, regularization_strength, seed=1)
    neural_net.train(X, y, print_loss=1000)
    # evaluate training set accuracy
    predicted_class = neural_net.predict(X)
    print('Training accuracy: {0}'.format(np.mean(predicted_class == y)))

    # plot the resulting classifier
    neural_net.plot_loss()
    neural_net.plot_classification_surfaces(X, y)
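One detail worth flagging in the accuracy line above: predicted_class == y produces a boolean array, and np.mean counts each True as 1, so the expression is exactly the fraction of correct predictions. A minimal standalone check:

import numpy as np

# np.mean over a boolean mask yields the fraction of matching entries.
predicted = np.array([0, 1, 2, 1])
actual = np.array([0, 1, 1, 1])
print(np.mean(predicted == actual))  # 0.75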
Example No. 4
    def generate_new_genomes(self) -> None:
        new_pop = []
        for i in range(len(self.population_a)):
            choose = randrange(math.floor(len(self.population_a)*0.2))  # randomly choose from the top 20%
            dc = copy.deepcopy(self.population_a[choose])

            r = random()
            if r < self._MUTATION_RATE:
                dc.brain.mutate()  # apply mutation to the new genome

            # dc.brain.crosover() -> bugged

            # create the new genome
            genome = neural_network.NeuralNetwork(6, dc.brain.hidden_neurons,
                                                  8, dc.brain.network_connections,
                                                  dc.brain.network_neurons,
                                                  dc.brain.innovation,
                                                  dc.brain.node_index, 0)

            pos = agent.Position()
            a = agent.Agent(pos, genome)
            new_pop.append(a)

        # initialize the population with the next generation of genomes
        self.population_a = new_pop
Example No. 5
    def setUp(self):
        weights, biases, layer_sizes, L, mini_batch = neural_network.load_from_file(
            "test_data.pkl")

        # mini_batch = mini_batch[:1]

        self.neural_network = neural_network.NeuralNetwork(layer_sizes)
        self.neural_network.weights = [None] + [
            np.copy(weight) for weight in weights[1:]
        ]
        self.neural_network.biases = [None] + [
            np.copy(bias) for bias in biases[1:]
        ]
        self.mini_batch = list(mini_batch)

        self.neural_network_2 = neural_network_2.NeuralNetwork(layer_sizes)
        self.neural_network_2.weights = [None] + [
            np.copy(weight) for weight in weights[1:]
        ]
        self.neural_network_2.biases = [None] + [
            bias.reshape((len(bias), 1)) for bias in biases[1:]
        ]
        self.mini_batch_2 = [(input.reshape(
            (len(input), 1)), target.reshape((len(target), 1)))
                             for input, target in mini_batch]

        acs2 = self.neural_network_2.weights[1:]
        acs1 = self.neural_network.weights[1:]
        diff = [a2 - a1 for a2, a1 in zip(acs2, acs1)]
        print "!"
Example No. 6
    def test_cost_function_on_simple_model(self):
        # The network has two layers, with two input nodes and two output nodes. Both output nodes
        # compute the same function.
        model = nn.NeuralNetwork([
            np.array([[0.0, 0.0], [1.0, 1.0], [1.0, 1.0]]),
        ])

        data = np.array([
            [2.0, 0.0, 0],
            [0.0, -1.0, 1],
        ])

        log, σ = np.log, utils.sigmoid
        expected_cost = np.mean([
            # We always predict 0, so the first example suffers:
            #  - a cost for being underconfident in the first output node.
            #  - a cost for being overconfident in the second output node.
            -log(σ(2)) + -log(1 - σ(2)),

            # We always predict 0, so the second example suffers:
            #  - a cost for being overconfident in the first output node.
            #  - a cost for being underconfident in the second output node.
            -log(1 - σ(-1)) + -log(σ(-1)),
        ])
        cost = nn.J(data, model)

        self.assertAlmostEqual(expected_cost, cost, places=3)
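The expected values are consistent with the usual per-example sigmoid cross-entropy, averaged over the m examples (assuming that is what nn.J computes; the test's numbers imply it):

J = \frac{1}{m} \sum_{i=1}^{m} \sum_{k} \left[ -y_{ik} \log \sigma(z_{ik}) - (1 - y_{ik}) \log\left(1 - \sigma(z_{ik})\right) \right]

Here z_{ik} is the pre-sigmoid output of node k on example i: with the bias row first, the first example gives z = 0 + 1*2 + 1*0 = 2 at both output nodes and the second gives z = 0 + 1*0 + 1*(-1) = -1, reproducing the two entries in expected_cost.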
Example No. 7
def run_neural_network(splits, neural_spec, learning_rate, momentum_rate):
    neural_net = neural_network.NeuralNetwork(neural_spec, learning_rate,
                                              momentum_rate)
    (trainX, trainY, validX, validY, testX, testY) = splits
    iterations, mse = train_neural(neural_net, trainX, trainY, validX, validY)
    acc = test_neural(neural_net, testX, testY)
    return acc, iterations, mse
Example No. 8
def k_fold(X, E, T, h, f2, eta=1e-3, K=10):
    '''K-fold cross-validation resampling based on a neural network.'''
    
    MSE_train = 0
    MSE_test = 0
    R2_train = 0
    R2_test = 0
    
    Xmat = np.reshape(X, (K, int(len(X)/K), len(X[0])))
    Emat = np.reshape(E, (K, int(len(X)/K)))
    
    for i in range(K):
        Xnew = np.delete(Xmat, i, 0)
        Enew = np.delete(Emat, i, 0)
        
        X_train = np.reshape(Xnew, (len(Xnew)*len(Enew[0]), len(X[0])))
        E_train = np.reshape(Enew, (len(Xnew)*len(Enew[0])))
        
        obj = nn.NeuralNetwork(X_train, E_train, T, h, eta, f2=f2)
        W = obj.solver()
        E_train_tilde = obj.recall(X_train)
        E_test_tilde = obj.recall(Xmat[i])
        
        MSE_train += MSE(E_train_tilde, E_train)
        MSE_test += MSE(E_test_tilde, Emat[i])
        
        R2_train += R2(E_train_tilde, E_train)
        R2_test += R2(E_test_tilde, Emat[i])

    return MSE_train/K, MSE_test/K, R2_train/K, R2_test/K
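Assuming MSE and R2 are the standard metrics, the per-fold quantities being accumulated are:

\mathrm{MSE} = \frac{1}{n} \sum_{i=1}^{n} (\tilde{E}_i - E_i)^2,
\qquad
R^2 = 1 - \frac{\sum_{i} (\tilde{E}_i - E_i)^2}{\sum_{i} (E_i - \bar{E})^2}

where the tilde marks the network's prediction (recall) and \bar{E} is the mean of the true targets; dividing the accumulated sums by K then gives the averages the function returns.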
Example No. 9
def query_img():
    input_nodes = 28 * 28
    hidden_nodes = 100
    output_nodes = 10
    learning_rate = 0.2

    n = nn.NeuralNetwork(input_nodes, hidden_nodes, output_nodes,
                         learning_rate)

    n.load("mnist_dataset/w_input_hidden.txt",
           "mnist_dataset/w_hidden_output.txt")

    img_dir = "img/mnist/"

    files = os.listdir(img_dir)
    wrong_list = []
    for img_name in files:
        img_data = dataset.load_img(img_dir + img_name)
        result = n.query(img_data)
        label = int(os.path.splitext(img_name)[0].split("_")[1])
        if result == label:
            print(img_name, "->", result, "-> √")
        else:
            print(img_name, "->", result, "-> x")
            wrong_list.append(img_name)

    print("right rate:",
          str(int((1 - len(wrong_list) / len(files)) * 100)) + "%",
          "wrong list:", wrong_list)
Example No. 10
def compare_batch_sizes(train_x, train_y, test_x, test_y, n_input, n_hidden, n_output, plot_costs=False):
    # 2. Run the network with the same settings as (1), except with
    # batch sizes [1, 5, 10, 20, 100]

    # To store the accuracy, cross entropy, and quadratic cost
    # of a network for each epoch, for each network
    accuracies = []
    q_costs = []
    ce_costs = []
    labels = []

    print("Part 3: training with batch sizes [1, 5, 10, 20, 100]")

    for batch_size in [1, 5, 10, 20, 100]:
        network = nn.NeuralNetwork(sizes=[n_input, n_hidden, n_output], l_rate=3.0,
                                   batch_size=batch_size)
        network.SGD(train_x, train_y, test_x, test_y)

        # Record costs
        accuracies.append(network.accuracy_per_epoch)
        q_costs.append(network.quadratic_cost_per_epoch)
        ce_costs.append(network.cross_entropy_per_epoch)
        labels.append("batch_size=" + str(batch_size))

    if plot_costs:
        plot_data(bs_y=accuracies, bs_labels=labels, cost_type="Accuracies")
        plot_data(bs_y=q_costs, bs_labels=labels, cost_type="Quadratic Cost")
        plot_data(bs_y=ce_costs, bs_labels=labels, cost_type="Cross Entropy Cost")

    return labels, accuracies, q_costs, ce_costs
Example No. 11
    def __init__(self):
        self.model = neural_network.NeuralNetwork([2, 2, 1])
        tmp = self.model.getLayer(0)
        tmp[0][0], tmp[1][0], tmp[2][0] = -150, 300, -300
        tmp[0][1], tmp[1][1], tmp[2][1] = -150, -300, 300
        tmp = self.model.getLayer(1)
        tmp[0][0], tmp[1][0], tmp[2][0] = -100, 100, 100
Example No. 12
    def __create_models(self, offspring: list):
        models = []
        for i in range(0, len(offspring)):
            new_model = nn.NeuralNetwork().create_from_weights_list(offspring[i], self.network_shapes)
            models.append(new_model)

        return models
Example No. 13
    def create_next_generation(self):
        """
        Create the next-generation list, made up of elites, random newcomers, and crossbred children.
        :return:
        """
        network_data_list = []
        # 1. Elitism: put the data models of the elite individuals straight into the list
        for i in range(round(config.population * config.elite)):
            network_data_list.append(self.genomes[i].data)
        # 2. Random newcomers: create some random individuals and add their data models to the list
        for i in range(round(config.population * config.new_bron)):
            network = neural_network.NeuralNetwork(config.network[0],
                                                   config.network[1],
                                                   config.network[2])
            network_data_list.append(network.getNetwork())
        # 3. Breeding: cross a parent from the top half with one from the bottom half
        #    and add the child to the next generation
        while True:
            if len(network_data_list) == config.population:
                break
            father = self.genomes[random.randint(
                0,
                round(config.population / 2) - 1)]
            mother = self.genomes[random.randint(round(config.population / 2),
                                                 config.population - 1)]
            child = self.breed(father, mother)
            network_data_list.append(child.data)
        return network_data_list
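As a worked example with hypothetical settings config.population = 50, config.elite = 0.2 and config.new_bron = 0.1: the elite loop copies 10 genomes, the newcomer loop adds 5 random networks, and the while loop breeds the remaining 35 children from top-half/bottom-half parent pairs.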
Example No. 14
def compute_approximate_gradient_by_finite_difference(data: np.ndarray, net: nn.NeuralNetwork) -> np.ndarray:
  """
  This is a "brute force" way of computing the gradient we check against our more efficient, but
  trickier, implementation.
  """
  ϵ = 0.001

  result = []
  for l, Θ in enumerate(net.Θs):
    m, n = Θ.shape
    dΘ = np.zeros((m, n))
    for i in range(m):
      for j in range(n):
        # We want to compute d/dΘ_ij numerically. What we do is vary Θ_ij slightly, compute the
        # cost before and after, and look at the difference between them.

        # In math, we are doing d/dΘ_ij = (J(..., Θ_ij + ϵ, ...) - J(..., Θ_ij, ...)) / ϵ.

        # Our cost function doesn't take a list of theta matrices directly, so we construct a
        # plus_epsilon model in order to calculate the above. We do this with a lot of copying.
        # Happily this is just a test method, so hopefully the inefficiency won't bite us too badly.

        Θs_copy = copy.deepcopy(net.Θs)
        Θs_copy[l][i][j] += ϵ
        net_plus_epsilon = nn.NeuralNetwork(Θs_copy)

        dΘ[i][j] = (nn.J(data, net_plus_epsilon) - nn.J(data, net)) / ϵ

    result.append(dΘ)

  return np.array(result)
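For reference, the one-sided difference described in the comments is

\frac{\partial J}{\partial \Theta_{ij}}
\approx \frac{J(\ldots, \Theta_{ij} + \epsilon, \ldots) - J(\ldots, \Theta_{ij}, \ldots)}{\epsilon}

whose truncation error is O(ϵ); a central difference, (J(Θ_ij + ϵ) − J(Θ_ij − ϵ)) / (2ϵ), would tighten that to O(ϵ²) at the cost of one extra evaluation of J per entry.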
Example No. 15
    def test_xor(self):
        input = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
        output = np.array([[0.], [1.], [1.], [0.]])
        net = nn.NeuralNetwork(10000, 1, 2, 1, 1, input, output, [], [])
        net.train()
        decimal = 1
        np.testing.assert_array_almost_equal(
            net.forward_pass(input)[-1], output, decimal)
Example No. 16
    def __init__(self, row, cols):
        super(Grid, self).__init__()
        self.row = row
        self.cols = cols
        self.grid = self.makeGrid()
        self.brain = nn.NeuralNetwork([2, 5, 1], 0.1)
        self.data_x = [[0, 0], [1, 1], [1, 0], [0, 1]]
        self.data_y = [[0], [0], [1], [1]]
Example No. 17
def test_nn():
    # Initial parameters: the MNIST dataset of 28*28 handwritten digit images,
    # a hidden layer of 100 nodes, an output layer of 10 nodes for the digits 0-9,
    # and an initial learning rate of 0.2
    input_nodes = 28 * 28
    hidden_nodes = 100
    output_nodes = 10
    learning_rate = 0.2

    n = nn.NeuralNetwork(input_nodes, hidden_nodes, output_nodes,
                         learning_rate)

    # Step 1: train the network
    print("start to train")
    train = True
    if train is True:
        # Training option 1: train on data, using the smaller training set
        training_data_list = dataset.get_data_list(
            "mnist_dataset/mnist_train.csv")
        count = 0  # used for printing progress
        size = len(training_data_list)  # used for printing progress
        for index, record in enumerate(training_data_list):
            label, inputs = dataset.get_scaled_data(record)
            targets = numpy.zeros(output_nodes) + 0.01
            targets[label] = 0.99
            n.train(inputs, targets)
            # print progress
            print_process(index, size)

        # save the final weight matrices
        numpy.savetxt("w_input_hidden.txt", n.w_input_hidden)
        numpy.savetxt("w_hidden_output.txt", n.w_hidden_output)
    else:
        print("load data done")
        # Training option 2: load a previous training result directly
        # (for when the weight matrices already exist)
        n.load("mnist_dataset/w_input_hidden.txt",
               "mnist_dataset/w_hidden_output.txt")

    # Step 2: test the trained neural network
    print("start to test")
    test_data_list = dataset.get_data_list("mnist_dataset/mnist_test.csv")
    scorecard = []  # stores the result for each test record
    right = 0  # total number of correct predictions
    size = len(test_data_list)  # used for printing progress
    for index, record in enumerate(test_data_list):
        label, inputs = dataset.get_scaled_data(record)
        result = n.query(inputs)
        # compare the network's prediction with the label
        if label == result:
            scorecard.append(1)
            right += 1
        else:
            scorecard.append(0)
        # print progress
        print_process(index, size)

    # save the scorecard
    numpy.savetxt("scorecard.txt", scorecard)
    # print the accuracy
    print("accuracy =", right / len(test_data_list) * 100, "%")
Example No. 18
    def recombine(self):
        """Recombine the passed subset of the population."""
        children = []
        child_number = 0
        num_parents = len(self.population)
        for _ in range(self.population_size):
            first, second = np.random.choice(num_parents, 2, replace=False)
            left_parent = self.population[first]
            right_parent = self.population[second]
            l_name = left_parent.name
            r_name = right_parent.name
            left = left_parent.get_weights()
            right = right_parent.get_weights()
            child = []
            for matrix in range(4):  # hardcoding 1 hidden layer
                # dealing with weight matrix
                w_l = left[matrix].T
                w_r = right[matrix].T
                height = w_l.shape[0]
                assert w_l.shape == w_r.shape
                # how many rows come from left
                split = int(np.random.uniform(0, height))
                # randomly select rows
                indices = np.random.choice(height, split, replace=False)
                child_matrix = []
                for row in range(height):
                    if row in indices:
                        child_matrix.append(w_l[row])
                    else:
                        child_matrix.append(w_r[row])
                child_matrix = np.array(child_matrix).T
                child.append(child_matrix)
            name = l_name + r_name + str(child_number)
            children.append((child, name, l_name, r_name))
            child_number += 1

        new_population = []
        for weights, name, l_name, r_name in children:
            if l_name == r_name:
                raise ValueError("No Cloning!")
            new_population.append(
                neural_network.NeuralNetwork(
                    self.input_size,
                    self.hidden_layer_size,
                    self.output_size,
                    self.learning_rate,
                    weights=weights,
                    epochs=self.epochs,
                    name=name,
                    verbose=self.verbose,
                ))
            self.graph.add_node(name)
            self.graph.add_edges_from([(l_name, name), (r_name, name)])
        self.population = new_population
        self.generation += 1
        self.print_graph(False)
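The crossover above builds each child matrix row by row, taking a random subset of rows from the left parent and the rest from the right. The same idea as a self-contained sketch (standalone names, not the class's API):

import numpy as np

rng = np.random.default_rng(0)
w_l = rng.normal(size=(4, 3))  # left parent's weight matrix
w_r = rng.normal(size=(4, 3))  # right parent's weight matrix
height = w_l.shape[0]
split = int(rng.uniform(0, height))  # how many rows come from the left
indices = set(rng.choice(height, split, replace=False))
child = np.array([w_l[r] if r in indices else w_r[r] for r in range(height)])
print(child.shape)  # (4, 3)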
Example No. 19
def chart_job(s, i, j):
    learning_data, testing_data = load_wine(test_count=20)
    net = nn.NeuralNetwork(0.01, 100, [*s], 1.04, 1.05, 0.7,
                           0.020)  # int((s[0]*s[1])*5)
    net.feed_training_data(*learning_data)
    net.feed_test_data(*testing_data)
    net.start_learning()
    prediction, cost = net.test(*testing_data)

    return (i, j, cost, s)
Example No. 20
    def init_networks(self, population_size, hidden, innovations):
        for i in range(population_size):
            genome = neural_network.NeuralNetwork(6, hidden, 8, [], [], innovations, 0, 0)
            genome.construct()
            pos = agent.Position()
            a = agent.Agent(pos, genome)
            if not hidden:
                self.population_a.append(a)
            else:
                self.population_b.append(a)
Example No. 21
    def set_params(self, **parameters):
        for parameter, value in parameters.items():
            setattr(self, parameter, value)

        self.NN = neural_network.NeuralNetwork(self.input_size,
                                               self.output_size,
                                               self.learning_rate,
                                               self.node_per_layer,
                                               self.layer_count, self.seed)
        return self
Example No. 22
def main():
    print("Neural Network")

    nn = neural_network.NeuralNetwork([3, 5, 2])

    data = MNIST.MNIST("Dataset/")

    X = np.array([0.5, 1, 0.25]).T
    out = nn.predict(X)
    print(out)
Example No. 23
    def __init__(self):
        self.key_state = 0
        self.x_val, self.y_val = None, None
        self.window = None
        self.drawing_area = None

        self.process = process.Process()
        self.neural_net = neural_network.NeuralNetwork()

        self.character_option = None
        self.initWindow()
Example No. 24
def main():
    # Load mnist dataset
    mnist = np.load('mnist_dataset.npz')
    # Create a three-layer network
    # The first layer (input layer) contains 784 neurons
    # The second layer contains 200 neurons
    # The third layer (output layer) contains 10 neurons
    nn = neural_network.NeuralNetwork([784, 200, 10])
    nn.train(mnist["train_images"], mnist["train_labels"], 10)
    accuracy = nn.evaluate(mnist["test_images"], mnist["test_labels"])
    print(accuracy)
Example No. 25
    def __init__(self, ai):
        self.birds = []
        self.ai = ai
        network_data_list = self.ai.manager.create_generation()
        for network_data in network_data_list:
            network = neural_network.NeuralNetwork(config.network[0],
                                                   config.network[1],
                                                   config.network[2])
            network.setNetwork(network_data)
            bird = Bird(network)
            self.birds.append(bird)
Example No. 26
def main():
    print("Running test data")
    nn = neural_network.NeuralNetwork([2, 3, 1])

    # XOR data
    X = [[1, 1], [1, 0], [0, 1], [0, 0]]
    y = [0, 1, 1, 0]

    nn.train(X, y)

    for i in range(len(X)):
        print(X[i], y[i], nn.predict(X[i]))
Example No. 27
def make_nn_grid(data):
    estimator = neural_network.NeuralNetwork()
    param_grid = {
        "input_size": [calculate_input_size(data)],
        "hidden_layer_size": HIDDEN_LAYER_SIZES,
        "output_size": OUTPUT_SIZE,
        "learning_rate": LEARNING_RATES,
        "epochs": [10],
        "verbose": [0],  # ****** REMEMBER TO TOGGLE VERBOSITY ******
    }
    print("\n\n***** Training Neural Networks *****\n\n")
    return cross_validate_on_data(estimator, param_grid, data)
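cross_validate_on_data is not shown here, but a param_grid of this shape conventionally expands into the Cartesian product of its value lists, one candidate setting per combination. A minimal sketch of that expansion (hypothetical grid values):

import itertools

grid = {"hidden_layer_size": [16, 32], "learning_rate": [0.1, 0.01], "epochs": [10]}
candidates = [dict(zip(grid, combo)) for combo in itertools.product(*grid.values())]
print(len(candidates))  # 4 settings to cross-validate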
Example No. 28
    def __first_generation(self):
        '''
        Create the first generation.
        :return: the neural network data for every individual in the generation
        '''
        network_data_list = []
        for i in range(config.population):
            network = neural_network.NeuralNetwork(config.network[0],
                                                   config.network[1],
                                                   config.network[2])
            network_data_list.append(network.getNetwork())
        return network_data_list
Example No. 29
    def __first_generation(self):
        """
        Create the first generation: randomly create `population` neural models and add them to a list.
        :return: the neural network data for every individual in the generation
        """
        network_data_list = []
        for i in range(config.population):
            network = neural_network.NeuralNetwork(config.network[0],
                                                   config.network[1],
                                                   config.network[2])
            network_data_list.append(network.getNetwork())
        return network_data_list
Example No. 30
    def __init__(self,
                 paddingSize=1,
                 step=1,
                 filterSize=3,
                 numberOfFilters=32,
                 inputSize=28,
                 featureMapSize=28,
                 activationFun=helpers.relu,
                 activationDeriv=helpers.reluDeriv,
                 poolingWindowSize=2,
                 hiddenLayerSize=128,
                 weightMin=-0.001,
                 weightMax=0.001,
                 weightInitializer=helpers.heInitializeFilters):
        self.weightInitializer = weightInitializer
        self.filterTensor = self.initializeFilterTensor(
            filterSize, numberOfFilters)
        self.featureMapTensor = np.empty(
            (numberOfFilters, featureMapSize, featureMapSize))
        self.featureMapNetTensor = np.empty(self.featureMapTensor.shape)
        self.poolingFeatureMapTensor = np.empty(
            (numberOfFilters, int(featureMapSize / poolingWindowSize),
             int(featureMapSize / poolingWindowSize)))
        self.poolingIndicesTensorX = np.empty(
            self.poolingFeatureMapTensor.shape)
        self.poolingIndicesTensorY = np.empty(
            self.poolingFeatureMapTensor.shape)
        self.poolingIndicesTensorZ = np.empty(
            self.poolingFeatureMapTensor.shape)
        self.mlpNetwork = mlp.NeuralNetwork(
            hiddenLayerSize,
            numberOfFilters * int(featureMapSize / poolingWindowSize) *
            int(featureMapSize / poolingWindowSize), 10, -0.1, 0.1, -0.1, 0.1,
            mlp_helpers.relu)
        self.layer3Weights = helpers.initializeWeightsUniform(
            numberOfFilters * int(featureMapSize / poolingWindowSize) *
            int(featureMapSize / poolingWindowSize), weightMin, weightMax)
        self.poolingErrorsTensor = np.empty(self.featureMapTensor.shape)
        self.convolutionErrorsTensor = np.empty(self.featureMapTensor.shape)
        self.convolutionErrorsFilters = np.zeros(self.filterTensor.shape)
        self.layer3Net = []
        self.layer3Activation = []
        self.layer3Errors = []
        self.paddingSize = paddingSize
        self.step = step
        self.filterSize = filterSize
        self.numberOfFilters = numberOfFilters
        self.inputSize = inputSize
        self.featureMapSize = featureMapSize
        self.activationFun = activationFun
        self.activationDeriv = activationDeriv
        self.poolingWindowSize = poolingWindowSize
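The defaults are self-consistent under the usual convolution output-size formula (assuming the padding is meant to preserve the 28x28 input):

\text{featureMapSize} = \frac{\text{inputSize} + 2\,\text{paddingSize} - \text{filterSize}}{\text{step}} + 1
= \frac{28 + 2 \cdot 1 - 3}{1} + 1 = 28

After 2x2 pooling each of the 32 feature maps shrinks to 14x14, which is why mlpNetwork and layer3Weights are both sized for numberOfFilters * 14 * 14 = 6272 inputs.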