Code example #1
def main():
    load_from_file = True
    # number of input, hidden and output nodes
    input_nodes = 784
    hidden_nodes = 200
    output_nodes = 10
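    # 784 inputs and 10 outputs: presumably the standard 28x28 MNIST digit setup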

    # learning rate
    learning_rate = 0.1
    epochs = 5
    datasets = 60000

    n = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

    if not load_from_file:
        train(n, output_nodes, datasets, epochs)
        wih, who = n.get_weights()
        pandas.DataFrame(wih).to_csv(os.path.join(os.getcwd(), 'wih.csv'),
                                     sep=';',
                                     header=None,
                                     index=False)
        pandas.DataFrame(who).to_csv(os.path.join(os.getcwd(), 'who.csv'),
                                     sep=';',
                                     header=None,
                                     index=False)
    else:
        wih = pandas.read_csv(os.path.join(os.getcwd(), 'wih.csv'),
                              sep=';',
                              header=None)
        who = pandas.read_csv(os.path.join(os.getcwd(), 'who.csv'),
                              sep=';',
                              header=None)
        n.weights(wih.values, who.values)
    test(n)
    show(n)
Code example #2
def test_nn_blanks():
    x, y = load_tictactoe_csv("tic-tac-toeWBlanksTraining.csv")
    nn = NeuralNetwork(x, y, 11, .00066)
    nn.train(100000)
    boards = []
    labels = []
    with open("tic-tac-toeWBlanksValidation.csv") as file:
        for line in file:
            cols = line.strip().split(",")
            board = []
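            # encode each cell: "o" -> 0, "x" -> 1, anything else (a blank) -> 2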
            for s in cols[:-1]:
                if s == "o":
                    board += [0]
                elif s == "x":
                    board += [1]
                else:
                    board += [2]
            label = [0] if cols[-1] == "Owin" else [1]
            labels.append(label)
            boards.append(board)
    lines = np.array(boards)
    outputs = np.array(labels)
    count = 0
    right = 0
    wrong = 0
    for line in lines:
        actual_output = outputs[count]
        calc_output = int(nn.inference(line) + .5)  # rounds to 0 or 1
        if actual_output == calc_output:
            right += 1
        else:
            wrong += 1
        count += 1
    print("Accuracy: " + str(right / (right + wrong)))
Code example #3
def test_weight_matrix_without_hidden_layers():
    weights = np.random.uniform(size=200)
    matrix = weights.reshape(50, 4)
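    # 200 weights reshape to (50, 4): presumably 49 inputs plus a bias node feeding the 4 outputs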
    nn = NeuralNetwork(49, 4, [], range(1))
    nn.weights = weights

    assert np.all(np.equal(list(nn.weight_matrices)[0], matrix))
Code example #4
class NeuralNetworkEvaluator(Evaluator):
    def __init__(self, weights=None):
        self.weights = weights
        super().__init__(weights)
        self.network = NeuralNetwork(input_shape=(1, 22))
        self.genome_len = self.network.num_nodes
        if self.weights is not None:
            self.network.weights = self.network.weights_to_ndarray(self.weights)
        else:
            self.weights = self.network.weights_to_list()
            self.genome = self.weights

    def eval(self, engine):
        self.engine = engine
        state_eval = 0
        if self.engine.game_state is not None:
            if self.engine.game_state == 0:
                return 0
            else:
                return 10000 if self.engine.game_state == 1 else -10000
        state_eval = self.gene_eval()
        return state_eval

    def gene_eval(self):
        feature_vec = []
        for player in [1, 2]:
            player_vec = []
            if self.engine.game_state is None:
                self.player = player
            for feature in self.features:
                player_vec.append(feature())
            feature_vec.extend(player_vec)
        return self.network.feedforward(feature_vec)
Code example #5
def validation(
        nn: NeuralNetwork,
        training_set: Sequence[Pattern],
        validation_set: Sequence[Pattern],
        error_calculator: ErrorCalculator = ErrorCalculator.MSE
) -> ValidationResult:
    """
    Fits the neural network on the training set and applies the error
    calculator to the resulting validation curve.

    :param nn: network to validate
    :param training_set: patterns used for fitting
    :param validation_set: patterns used to build the validation curve
    :param error_calculator: metric applied to the validation curve
    :return: the best score and the epoch at which that score was reached.
    """
    old_error = nn.error_calculator

    nn.error_calculator = error_calculator

    nn.fit(training_set, validation_set, training_curve=False)
    learning_curve_validation = nn.validation_curve

    idx, score = error_calculator.choose(learning_curve_validation)

    nn.error_calculator = old_error

    return ValidationResult(
        epoch=idx + 1,
        score_validation=score,
    )
Code example #6
def test_weight_matrix_without_hidden_layers():
    weights = np.random.uniform(size=200)
    matrix = weights.reshape(50, 4)
    nn = NeuralNetwork(49, 4, [], range(1))
    nn.weights = weights

    assert np.all(np.equal(list(nn.weight_matrices)[0], matrix))
Code example #7
File: ga.py Project: dror1212/Neat-Python-Library
class ga:
    def __init__(self, inputs, hidden, outputs, mutation_rate, mutation_power):
        self.nn = NeuralNetwork(inputs, hidden, outputs)
        self.fitness = 0
        self.mutation_rate = mutation_rate
        self.mutation_power = mutation_power

    def predict(self, inputs_array):
        return self.nn.predict(inputs_array)

    def mutate(self, func=basic_mutate):
        self.nn.weights_inputs_hidden.BigMap(func, self.mutation_rate, self.mutation_power)
        self.nn.weights_hidden_outputs.BigMap(func, self.mutation_rate, self.mutation_power)
        self.nn.bias_hidden.BigMap(func, self.mutation_rate, self.mutation_power)
        self.nn.bias_output.BigMap(func, self.mutation_rate, self.mutation_power)

    def copy(self):
        cop = self.nn.copy()
        g = ga(self.nn.input_nodes, self.nn.hidden_nodes, self.nn.output_nodes, self.mutation_rate, self.mutation_power)
        g.nn = cop

        return g

    def crossOver(self, partner):
        child = self.copy()
        child.nn.weights_inputs_hidden.VariableMap(myCrossOver, partner.nn.weights_inputs_hidden)
        child.nn.weights_hidden_outputs.VariableMap(myCrossOver, partner.nn.weights_hidden_outputs)
        child.nn.bias_hidden.VariableMap(myCrossOver, partner.nn.bias_hidden)
        child.nn.bias_output.VariableMap(myCrossOver, partner.nn.bias_output)

        return child
Code example #8
File: agent.py Project: rishavb123/FlappyBirdBot
class BirdAgent(Bird):
    def __init__(self, brain=None):
        super().__init__()
        self.brain = NeuralNetwork([7, 4, 4, 1]) if brain is None else brain
        self.fitness = 0
        self.color = BLUE

    def take_action(self, pipes):
        features = self.create_features(pipes)
        # print(self.brain.predict(np.transpose(features))[0][0])
        if self.brain.predict(np.transpose(features))[0][0] > 0.5:
            self.jump()
        self.fitness += 1

    def create_features(self, pipes):

        for i in range(len(pipes) - 1):
            if pipes[i].x + pipes[i].width > self.x:
                return (pipes[i].x - self.x, pipes[i].uy - self.y, pipes[i].ly - self.y, pipes[i + 1].x - self.x, pipes[i + 1].uy - self.y, pipes[i + 1].ly - self.y, self.v)

    def die(self, score):
        self.fitness += score * 10000

    def mutate(self, mutation_rate=0.01):
        self.brain.mutate(mutation_rate)
        return self
    
    @staticmethod
    def crossover(bird1, bird2):
        return BirdAgent(NeuralNetwork.crossover(bird1.brain, bird2.brain))
Code example #9
 def crossover(self):
     a = self.pick_one()
     b = self.pick_one()
     w, bias = a.DNA.crossover(b.DNA)
     childDNA = NeuralNetwork(5, 8, 1, weights=w, bias=bias)
     childDNA.mutate(self.mr)
     return childDNA
Code example #10
    def __init__(self, y, u, k, omega, Retau, verbose=False, model=None):
        self.y = np.copy(y)
        ny = np.size(self.y)
        self.verbose = verbose
        self.q = np.zeros(3*ny, dtype=float)
        self.Retau = Retau
        self.nu = 1e-4
        self.q[0:3*ny:3] = u[:]
        self.q[1:3*ny:3] = k[:]
        self.q[2:3*ny:3] = omega[:]
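        # q interleaves the three profiles: q[0::3] = u, q[1::3] = k, q[2::3] = omega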

        self.writedir = "."
        self.tol = 1e-11
        self.ny = ny
        self.n = self.ny*3
        self.maxiter = 10
        self.dt = 1e6
        self.force_boundary = False
        
        self.neq = 1
        self.rho = 1.0
        self.dp = calc_dp(self.Retau, self.nu)
        self.sigma_w = 0.5
        self.beta_0 = 0.0708
        self.gamma_w = 13.0/25.0
        self.sigma_k = 0.6
        self.beta_s = 0.09
        self.model = model
        if self.model is None or self.model == "linear":
            self.beta = np.ones(ny, dtype=float)
        elif self.model == 'nn':
            self.nn = NeuralNetwork(sizes = [1, 3, 1])
            self.beta = np.random.randn(self.nn.n)*1e-2
            self.nn.set_from_vector(self.beta)
Code example #11
    def train(self,
              neuralNetwork: NeuralNetwork,
              dataset,
              acceptable_error=0,
              examples_in_epoch=1000):
        counter = 0
        error = 0
        epoch = 0
        counter_history = []
        error_history = []

        for x, y in dataset:
            x = np.array(x)
            neuralNetwork.run(x)
            error += (y - neuralNetwork.output)**2
            counter += 1
            neuralNetwork.correction(np.array([y]))
            if counter % examples_in_epoch == 0 and counter != 0:
                relative_error = error / counter
                error_history.append(relative_error)
                counter_history.append(epoch)
                epoch += 1
                print("Epoch: ", epoch, " | relative error: ", relative_error)
                if error <= acceptable_error:
                    print('Learning finished at epoch: ', epoch)
                    break

                counter = 0
                error = 0
        return counter_history, error_history
Code example #12
def train(XTrain, YTrain, args):
    """
    This function is used for the training phase.

    Parameters
    ----------
    XTrain : numpy matrix
        The matrix containing sample features (not indices) for training.
    YTrain : numpy matrix
        The array containing labels for training.
    args : dict
        The parameters used to set up the NN model.

    Returns
    -------
    nn : NeuralNetwork object
        The trained NN object.
    """
    # 1. Initializes a network object with given args.
    nn = NeuralNetwork(args["NNodes"], args["activate"], args["deltaActivate"],
                       args["task"])

    # 2. Train the model with the function "fit".
    # (hint: use the plotDecisionBoundary function to visualize after training)
    # Parameters TODO: arguments or script
    # Neural Network Execution
    nn.fit(XTrain, YTrain, args["learningRate"], args["epochs"],
           args["regLambda"], args["batchSize"])

    # 3. Return the model.
    return nn
Code example #13
def task1():
    # binary classification
    net = NeuralNetwork([2, 4, 1], activation='line', softmax_=False)

    train_N = 200
    test_N = 100

    x = np.random.normal(loc=0.0, scale=2.0, size=(train_N, 2))

    a = 1.0
    b = 0.15
    f = lambda x: a * x + b

    plt.figure(1)
    plt.plot(x, f(x), 'g', label='true dividing line')

    # linearly separate the points generated above
    y = np.zeros([train_N, 1])

    for i in range(train_N):
        if f(x[i, 0]) >= x[i, 1]:
            # point lies below the line
            y[i] = 1
            plt.plot(x[i, 0], x[i, 1], 'bo', markersize=8, label='class one')
        else:
            # point lies above the line
            y[i] = -1
            plt.plot(x[i, 0], x[i, 1], 'ro', markersize=8, label='class two')

    plt.legend(labels=['true dividing line'], loc=1)
    plt.title('generated random data')
    plt.show()

    wb = net.train(x, y, epochs=100, lr=0.001, batchsize=8)

    newx = np.random.normal(loc=0.0, scale=2.0, size=(test_N, 2))
    y_preds = np.array(
        list(map(net.forward, newx, (wb for _ in range(len(newx))))))

    plt.figure(2)
    plt.plot(x, f(x), 'g', label='true dividing line')
    for i in range(test_N):
        if y_preds[i][0] > 0:
            plt.plot(newx[i, 0],
                     newx[i, 1],
                     'b^',
                     markersize=8,
                     label='class one (predicted)')
        else:
            plt.plot(newx[i, 0],
                     newx[i, 1],
                     'r^',
                     markersize=8,
                     label='class two (predicted)')

    plt.legend(labels=['true dividing line'], loc=1)
    # plt.plot(x, f(x), 'y')
    # plt.legend()
    plt.show()
Code example #14
def test_activation_funcs():
    x_plus_one = lambda x: x + 1
    nn = NeuralNetwork(50, 2, (20, 10, 5), [x_plus_one])
    assert list(nn.activation_funcs) == [x_plus_one] * 4

    x_plus_two = lambda x: x + 2
    nn = NeuralNetwork(50, 2, (20, ), [x_plus_one, x_plus_two])
    assert list(nn.activation_funcs) == [x_plus_one, x_plus_two]
Code example #15
def test():
    a = NeuralNetwork(3, [[2, 'sigmoid'], [1, 'relu']])
    b = NeuralNetwork(3, [[2, 'sigmoid'], [1, 'sigmoid']])

    print(a.layers)
    print(b.layers)

    print(crossover(a, b).layers)
Code example #16
    def __init__(self, xPos, yPos, xPos_range, yPos_range, initEmpty,
                 vertical_fuel_depletion_rate=0.05, horizontal_fuel_depletion_rate=0.05,
                 name='agent', color=(0, 0, 0, 50)):
        # Call the parent's constructor
        super().__init__()

        self.gravity  = 0.0
        self.drag     = 0.0
        self.lift     = -10
        self.push     = 2
        self.maxLim_y_velocity   = 20
        self.minLim_y_velocity   = -20
        self.maxLim_x_velocity   = 4
        self.minLim_x_velocity   = -4
        self.velocity_y = 0
        self.velocity_x = 0
        self.radius   = 20
        self.color = color
        self.current_closest_block = None
        self.fuel = 1.0
        self.failure_meter = 0.0
        self.vertical_fuel_depletion_rate = vertical_fuel_depletion_rate
        self.horizontal_fuel_depletion_rate = horizontal_fuel_depletion_rate

        if xPos_range is not None:
            xPos = np.random.randint(xPos_range[0], xPos_range[1])

        if yPos_range is not None:
            yPos = np.random.randint(yPos_range[0], yPos_range[1])


        self.name = name
        self.image = pygame.Surface([self.radius, self.radius], pygame.SRCALPHA)
        self.image.fill(self.color)
        self.rect = self.image.get_rect()
        self.rect.x = xPos
        self.rect.y = yPos
        self.previous_xPos = self.rect.right
        self.starting_xPos = xPos
        self.starting_yPos = yPos

        self.timeSamplesExperianced       = 1
        self.totalDistanceFromGapOverTime = 0

        self.fitness        = 0
        self.avgDistFromGap = 0


        msLayeruUnits = [12, 7, 2]
        msActFunctions = ["relu", "tanh"]
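        # presumably 12 input features, 7 hidden units and 2 outputs, with relu on the hidden layer and tanh on the output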

        self.functional_system = NeuralNetwork(layer_units=msLayeruUnits, activation_func_list=msActFunctions)

        if not initEmpty:
            self.functional_system.init_layers(init_type="he_normal")

        else:
            self.functional_system.init_layers(init_type="zeros")
Code example #17
File: charc.py Project: m0bbin/charc
    def do_train(self, command):
        train_parser = argparse.ArgumentParser(prog=self.prog+' train')

        train_parser.add_argument('-d', '--data', type=str, required=True, dest="datasets", nargs="*",
                                        help="path to the dataset file")
        train_parser.add_argument('-i', '--inodes', type=int,
                                        help="Number of input nodes")
        train_parser.add_argument('-o', '--onodes', type=int,
                                        help="Number of output nodes")
        train_parser.add_argument('-n', '--hnodes', type=int, required=True,
                                        help="Number of hidden nodes")
        train_parser.add_argument('-l', '--l_rate', type=float, default=0.1,
                                        help="Learning rate")
        train_parser.add_argument('-e', '--epoch', type=int, default=5,
                                        help="Numbers of iteration data set will be used in training")
        train_parser.add_argument('--out', type=str, default="out")

        args = train_parser.parse_args(command)

        self.print_("Creating Network object..", c="magenta", c_attrs=["bold"], trailing=True)
        sys.stdout.flush()
        temp_ds = Dataset.load(args.datasets[0])

        n = NeuralNetwork(temp_ds.input_nodes, args.hnodes, temp_ds.output_nodes, args.l_rate)
        self.print_(" OK", c="blue", c_attrs=["bold"], use_prog=False)

        #self.print_("Converting Images to arrays...", c="magenta", c_attrs=["bold"])
        #data = []
        #dataset = Dataset(n.input_nodes, n.output_nodes)
        #for i, csv in enumerate(args.fn):
        #    dataset.input_csv(csv, training=True)
        #    sys.stdout.flush()
        #    self.print_("\r\t({}/{})".format(i+1, len(args.fn)), trailing=True, use_prog=False)
        #self.print_(" OK", c="blue", c_attrs=["bold"], use_prog=False)



        self.print_("Training Network...", c="magenta", c_attrs=["bold"])
        errors = 0
        for dataset_fn in args.datasets:
            dataset = Dataset.load(dataset_fn)
            for j, record in enumerate(dataset.records):
                #for i in xrange(0, len(record.inputs)):

                percent = str((float(j+1)/len(dataset.records))*100)[:5]
                data = n.train(record, args.epoch)

                sys.stdout.flush()
                self.print_("\r\t{}% Error: {}".format(percent, data.output_errors[len(data.output_errors)-1][0]),  c_attrs=["bold"], use_prog=False, trailing=True)

            self.print_(" OK", c="blue", c_attrs=["bold"], use_prog=False)


        self.print_("Saving Network ({}.nn.pkl)...".format(args.out), c="magenta", c_attrs=["bold"], trailing=True)
        n.save(args.out)
        self.print_(" OK", c="blue", c_attrs=["bold"], use_prog=False)
        self.print_("ERRORS: {}".format(errors))
Code example #18
def task3():
    train_N = 100
    test_N = 100

    x1 = np.random.normal(loc=0.0, scale=4.0, size=(train_N, 2)) + [-10, 10]
    x2 = np.random.normal(loc=0.0, scale=4.0, size=(train_N, 2)) + [10, 10]
    x3 = np.random.normal(loc=0.0, scale=4.0, size=(train_N, 2)) + [-10, -10]
    x4 = np.random.normal(loc=0.0, scale=4.0, size=(train_N, 2)) + [10, -10]

    y1 = np.array([[1., 0., 0., 0.] for _ in range(train_N)])
    y2 = np.array([[0., 1., 0., 0.] for _ in range(train_N)])
    y3 = np.array([[0., 0., 1., 0.] for _ in range(train_N)])
    y4 = np.array([[0., 0., 0., 1.] for _ in range(train_N)])

    plt.plot(x1[:, 0], x1[:, 1], 'ro')
    plt.plot(x2[:, 0], x2[:, 1], 'yo')
    plt.plot(x3[:, 0], x3[:, 1], 'bo')
    plt.plot(x4[:, 0], x4[:, 1], 'go')
    plt.show()

    x = np.vstack((x1, x2, x3, x4))
    y = np.vstack((y1, y2, y3, y4))

    net = NeuralNetwork([2, 4, 4], activation='relu', softmax_=True)

    wb = net.train(x,
                   y,
                   loss='cross_entropy',
                   epochs=200,
                   lr=0.01,
                   batchsize=2)
    # print("over")
    newx1 = np.random.normal(loc=0.0, scale=4.0, size=(test_N, 2)) + [-10, 10]
    newx2 = np.random.normal(loc=0.0, scale=4.0, size=(test_N, 2)) + [10, 10]
    newx3 = np.random.normal(loc=0.0, scale=4.0, size=(test_N, 2)) + [-10, -10]
    newx4 = np.random.normal(loc=0.0, scale=4.0, size=(test_N, 2)) + [10, -10]

    newx = np.vstack((newx1, newx2, newx3, newx4))

    y_preds = np.array(
        list(map(net.forward, newx, (wb for _ in range(len(newx))))))
    # print(y_preds.shape)
    # y_preds = np.array([softmax(a) for a in np.squeeze(y_preds)])
    print(y_preds)
    # print(y_preds)

    sty = ['r^', 'y^', 'b^', 'g^']

    plt.figure(2)

    for i in range(test_N):
        plt.plot(newx[i, 0],
                 newx[i, 1],
                 sty[int(np.argmax(y_preds[i]))],
                 markersize=8,
                 label='class one (predicted)')
    plt.show()
Code example #19
    def createNN(self):
        inputCount = self.size * self.size
        outputCount = self.size * self.size

        self.neural_network = NeuralNetwork(
            inputCount, inputCount * 6, outputCount, [
                inputCount * 5, inputCount * 4, inputCount * 3,
                inputCount * 2
            ])
Code example #20
File: nn_test.py Project: mattyw/ml-playground
 def test_simple(self):
     nn = NeuralNetwork(3, 3, 3, 0.3)
     nn.weights_input_hidden = np.array([[0.5, 0.5, 0.5],
                                         [0.5, 0.5, 0.5],
                                         [0.5, 0.5, 0.5]], ndmin=2).T
     nn.weights_hidden_output = np.array([[0.5, 0.6, 0.5],
                                          [0.5, 0.6, 0.5],
                                          [0.5, 0.6, 0.5]], ndmin=2).T
     result = nn.query([1.0, 0.5, -1.5])
     expected = np.array([0.6791787, 0.7109495, 0.6791787], ndmin=2).T
     self.assertEqual(str(result), str(expected)) # TODO: Comparing strings is terrible
Code example #21
 def __init__(self, weights=None):
     self.weights = weights
     super().__init__(weights)
     self.network = NeuralNetwork(input_shape=(1, 22))
     self.genome_len = self.network.num_nodes
     if self.weights is not None:
         self.network.weights = self.network.weights_to_ndarray(self.weights)
     else:
         self.weights = self.network.weights_to_list()
         self.genome = self.weights
Code example #22
File: predictor.py Project: Nagerrack/ISP
 def __init__(self):
     self.logger = logging.getLogger('predictor.Predictor')
     self.nn = NeuralNetwork()
     with open(self.BRANDS, 'r') as f:
         self.brands = np.array(json.load(f))
     with open(self.CATEGORIES, 'r') as f:
         self.categories = np.array(json.load(f))
     with open(self.TOKENIZER, 'r') as f:
         self.tokenizer = tokenizer_from_json(f.read())
     self.nn.load([self.MODEL_ARCH, self.MODEL_WEIGHTS])
Code example #23
File: predictor.py Project: Nagerrack/ISP
class Predictor():

    MODEL_WEIGHTS = 'prediction-server/model/model.h5'
    MODEL_ARCH = 'prediction-server/model/model.json'
    TOKENIZER = 'prediction-server/model/tokenizer.json'
    BRANDS = 'prediction-server/model/brands.json'
    CATEGORIES = 'prediction-server/model/categories.json'

    MAX_NAME_SEQ = 10
    MAX_ITEM_DESC_SEQ = 75

    def __init__(self):
        self.logger = logging.getLogger('predictor.Predictor')
        self.nn = NeuralNetwork()
        with open(self.BRANDS, 'r') as f:
            self.brands = np.array(json.load(f))
        with open(self.CATEGORIES, 'r') as f:
            self.categories = np.array(json.load(f))
        with open(self.TOKENIZER, 'r') as f:
            self.tokenizer = tokenizer_from_json(f.read())
        self.nn.load([self.MODEL_ARCH, self.MODEL_WEIGHTS])

    def predict(self, data):
        prepared_data = self.prepare_data(data)
        prediction = self.nn.predict(prepared_data)[0]
        price = self.prediction_to_price(prediction)
        return price

    def prediction_to_price(self, prediction):
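        # presumably inverts the normalization applied to log-prices during training, then maps back to a price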
        scale = 0.26295411
        minimum = -1
        prediction -= minimum
        prediction /= scale
        return np.exp(prediction) + 1

    def prepare_data(self, data):
        name = pad_sequences(
            self.tokenizer.texts_to_sequences([data["name"].lower()]),
            self.MAX_NAME_SEQ)
        description = pad_sequences(
            self.tokenizer.texts_to_sequences([data["description"].lower()]),
            self.MAX_ITEM_DESC_SEQ)
        brand_name = np.where(self.brands == data["brandName"])[0]
        category = np.where(self.categories == data["category"])[0]
        item_condition = [data["itemCondition"]]
        shipping = [data["shipping"]]
        return {
            'name': name,
            'item_desc': description,
            'brand_name': np.array(brand_name),
            'category_name': np.array(category),
            'item_condition': np.array(item_condition),
            'num_vars': np.array(shipping)
        }
Code example #24
def test_weight_matrix_with_hidden_layers():
    weights_a = np.random.uniform(0, 1, 55)
    weights_b = np.random.uniform(0, 1, 12)
    matrix_a = weights_a.reshape((11, 5))
    matrix_b = weights_b.reshape((6, 2))

    nn = NeuralNetwork(10, 2, (5, ), range(2))
    nn.weights = list(it.chain(weights_a, weights_b))

    for m1, m2 in zip(nn.weight_matrices, [matrix_a, matrix_b]):
        assert np.all(np.equal(m1, m2))
Code example #25
def test_weight_matrix_with_hidden_layers():
    weights_a = np.random.uniform(0, 1, 55)
    weights_b = np.random.uniform(0, 1, 12)
    matrix_a = weights_a.reshape((11, 5))
    matrix_b = weights_b.reshape((6, 2))

    nn = NeuralNetwork(10, 2, (5, ), range(2))
    nn.weights = list(it.chain(weights_a, weights_b))

    for m1, m2 in zip(nn.weight_matrices, [matrix_a, matrix_b]):
        assert np.all(np.equal(m1, m2))
Code example #26
File: nn_test_01.py Project: lansiz/eqpt
def seek_fp(x):
    nn = NeuralNetwork(connection_matrix, transmission_history_len=10**4)
    nn.set_strengthen_functions(strengthen_functions.__dict__['PF' + str(pf)])
    nn.initialize_synapses_strength(x, .1)
    nn.strengthen_rate = delta
    strength_stats = []
    for _ in range(200000):
        neurons_stimulated = set(np.where(neurons_stimulated_probs > np.random.rand(N))[0])
        nn.propagate_once(neurons_stimulated, transform_func)
        strength_stats.append(nn.stats()['strength'])
    return strength_stats
Code example #27
def run_training(set, sep, args, num_classes):
    exist_tr = os.path.isfile("training_images/" + set + "/" + set + sep +
                              "train.pkl")
    exist_test = os.path.isfile("training_images/" + set + "/" + set + sep +
                                "test.pkl")
    dataset_train = None
    dataset_test = None
    if exist_tr and exist_test:
        dataset_train = load_from_pickle(set, sep + "train")
        dataset_test = load_from_pickle(set, sep + "test")
    else:
        dataset_train = load_from_csv(set, sep + "train")
        dataset_test = load_from_csv(set, sep + "test")

    img_size = 28
    img_pixels = img_size * img_size

    print("Training neural network...")
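    # optional CLI flags: -it=<max iterations>, -n=<hidden nodes>, -a=<accuracy threshold>, -r resumes from saved parameters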
    iter_limit = 100
    for arg in args:
        if '-it=' in arg:
            iter_limit = int(arg.split("=")[1])

    nodes = 100
    for arg in args:
        if '-n=' in arg:
            nodes = int(arg.split("=")[1])

    acc_limit = 0.85
    for arg in args:
        if '-a=' in arg:
            acc_limit = float(arg.split("=")[1])
            if acc_limit >= 1.0:
                print("Accuracy threshold is too high. Reset to 0.85...")
                acc_limit = 0.85

    nn = NeuralNetwork(img_pixels, num_classes, nodes)

    params = None
    if '-r' in args:
        print("Resuming from previous training...")
        params = np.load("trained/" + set + "_params.npy")
    acc = 0.0
    iter = 0
    while acc < acc_limit and iter < iter_limit:
        data_train, labels_train = build_dataset(dataset_train, num_classes)
        data_test, labels_test = build_dataset(dataset_test, num_classes)
        params = nn.train(data_train, labels_train, params)
        np.save("trained/" + set + "_params", params)
        acc = nn.test(data_test, labels_test)
        print("Iteration {} completed with {}% accuracy".format(
            iter, round(acc * 100, 4)))
        iter += 1
Code example #28
File: nn_training.py Project: alexmirov95/RunAwAI
 def __init__(self,
              hidden_size=HIDDEN_NET_SIZE,
              hfunc_type='identity',
              test_function='quadratic'):
     self.nn = NeuralNetwork(LEARNING_CONSTANT)
     self.input_neuron_x = self.nn.add_input(None, 'x-coord')
     self.input_neuron_y = self.nn.add_input(None, 'y-coord')
     self.output_neuron = self.nn.add_output('above f(x)',
                                             function='binary')
     self.hidden_size = hidden_size
     self.hidden_layer = []
     self.hfunc = hfunc_type
     self.test_function = TestFunction(test_function)
Code example #29
def test_ttt_nn(verbose=0):
    x, y = load_tictactoe_csv("tic-tac-toe-1.csv")
    nn = NeuralNetwork(x, y, 4, .1)
    nn.load_4_layer_ttt_network()
    nn.feedforward()
    if verbose > 0:
        print("NN 1 " + str(nn.loss()))
        print("NN output " + str(nn._output))
        print(nn.accuracy_precision())
    assert nn.loss() < .02
Code example #30
File: car.py Project: sakshamarora1/Road-Fighter-AI
 def __init__(self, x, y, window, brain=None):
     self.x = x
     self.y = y
     self.vel = 6
     self.width = 44
     self.height = 100
     self.window = window
     self.car = CAR
     self.fitness = 0
     self.score = 0
     if brain:
         self.brain = brain.copy()
         self.brain.mutate(mutate)
     else:
         self.brain = NeuralNetwork(4, 8, 2)
Code example #31
        def generate_random(self):
            """ Generate a "seed" network. We create a two input, one output
            network which is the basis for our XOR evolution. We can manipulate
            the magic numbers in the initialization to get different behavior
            out of our network."""

            self.network = NeuralNetwork(2, 1,
                                         ['a', 'b'], ['o'],
                                         struct_mut_new_rate=0.2,
                                         struct_mut_con_rate=0.2,
                                         n_struct_mut_rate=0.2)

            self.network.build_network()

            return
Code example #32
File: eda.py Project: rkaus/np-neuralnet
def create_NN(X, y):	
	''' Initialize neural network and train
		
		Args
		----
		X: 2D features array
		y: 1D target array
	'''
	X_train, X_test, y_train, y_test = train_test_split(X,y)
	feature_count = X_train.shape[1] 
	
	nn = NeuralNetwork(feature_count,13,1,1.0,2500)		## HYPERPARAMETERS
	# nn = NeuralNetwork(feature_count)		
	nn.iterate(X_train, X_test, y_train, y_test)

	return nn
Code example #33
def predict(set, sep, num_classes, args):
    exist_test = os.path.isfile("training_images/" + set + "/" + set + sep + "test.pkl")
    if exist_test:
        dataset = load_from_pickle(set, sep + "test")
    else:
        dataset = load_from_csv(set, sep + "test")
    data, labels = build_dataset(dataset, num_classes)
    nn = NeuralNetwork()
    params = np.load("trained/" + set + "_params.npy")
    if '-a' in args:
        acc = nn.test(data, labels, params)
        print("Testing completed with {}% accuracy".format(round(acc*100,4)))
    else:
        ans = np.argmax(labels[0])
        guess = np.argmax(nn.predict(params, data[0]))
        print("Prediction was number {}, actual number was {}".format(guess, ans))
Code example #34
def k_fold_CV(
        nn: NeuralNetwork,
        dataset: Sequence[Pattern],
        cv: int = 5,
        error_calculator: ErrorCalculator = ErrorCalculator.MSE,
        to_shuffle: bool = False,
        seed: Optional[int] = None,
) -> KFoldCVResult:
    if to_shuffle:
        dataset = shuffle(dataset, seed)

    len_training = int(np.round(len(dataset) * (cv - 1) / cv))
    shift_size = int(np.round(len_training))

    scores: MutableSequence[ValidationResult] = []

    for i in range(cv):
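        # rotate the dataset so each fold validates on a different slice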
        training_set, validation_set = split_dataset(np.roll(dataset, shift_size * i), size=len_training)
        scores.append(validation(nn.set(), training_set, validation_set, error_calculator=error_calculator))

    scores_1 = list(map(lambda x: x.score_validation, scores))
    epochs = list(map(lambda x: x.epoch, scores))

    score = float(np.mean(scores_1))
    std = float(np.std(scores_1))
    epoch_mean = int(np.mean(epochs))

    return KFoldCVResult(
        score=score,
        std=std,
        epoch_mean=epoch_mean
    )
Code example #35
File: NNbot.py Project: Gerryflap/FOARnn
class NNtrainer(object):
    def __init__(self, foarConn, playernum):
        self.game = Game(foarConn, playernum)
        self.foarConn = foarConn
        self.nn = NeuralNetwork([5*6,3*4,2*3], 6*7, 2)

    def moveDone(self, index, playerNum):
        self.game.doMove(index, playerNum)

    def moveReq(self):
        maxI = 0
        maxScore = 0
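        # score every reachable next state with the network and play the highest-scoring move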
        possibleStates = self.game.getPossibleStates()
        for i in possibleStates:
            score = self.nn.process(possibleStates[i].flatten())[0]
            if maxScore < score:
                maxI = i
                maxScore = score

        self.foarConn.send("MOVE " + str(maxI))
Code example #36
File: run.py Project: LucasSimpson/2048_MDP_NN
states, rewards = sm.get_complete_pairs_prepped()

bm = BatchManager(states, rewards)

print('')
print('Completed states: %s' % len(states))
print('Percent of all states: %s' % (1.0 * len(states) / StateManager.dif_states))
print('')

# train neural network
nn = NeuralNetwork()
spm = SpeciesManager(nn)
nn.train(bm)

states, rewards = sm.get_complete_pairs()

print(states[40])
print(rewards[40])

# insert pairs
for i in range(len(states)):
    spm.insert(states[i], rewards[i])

Code example #37
File: NNbot.py Project: Gerryflap/FOARnn
 def __init__(self, foarConn, playernum):
     self.game = Game(foarConn, playernum)
     self.foarConn = foarConn
     self.nn = NeuralNetwork([5*6,3*4,2*3], 6*7, 2)
Code example #38
def evolve():
    # TODO: speed this up
    def select_parent(sortedPop):
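        # roulette-wheel selection: accumulate normalized fitness until it exceeds a random threshold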
        tmp1 = np.random.random()
        tmp2 = 0
        for item in sortedPop:
            tmp2 += item[1]
            if tmp2 >= tmp1:
                #print("selected", item[0])
                return item[0]

    shape = [5, 10, 4]
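    # network topology: 5 inputs, 10 hidden units, 4 outputs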

    popSize = 100
    genNb = 1000
    mutRate = 0.05
    crossRate = 1.0
    elitism = 2

    popList = []

    nnTemplate = NeuralNetwork(shape, 0, 0)
    genesNb = sum([x.size for x in nnTemplate.weights])
    print("\n"
          "############################################\n"
          "# Nb of genes           : {}".format(genesNb) + "\n"
          "# Population size       : {}".format(popSize) + "\n"
          "# Number of generations : {}".format(genNb) + "\n"
          "# Crossover rate        : {0:.0f}%".format(crossRate*100) + "\n"
          "# Mutation rate         : {0:.0f}%".format(mutRate*100) + "\n"
          "# Elitism               : {}".format(elitism) + "\n"
          "############################################\n"
          )

    print("Creating initial population of size {}".format(popSize))

    for i in range(popSize):
        # initial fitness of None
        popList.append([NeuralNetwork(shape, minStartWeight=-3.0, maxStartWeight=3.0), None])
    print("Population created.\n")

    for g in range(genNb):
        # print("Evaluating fitness.")
        popList = getFitness(popList)

        sortedPop = sorted(popList, key=itemgetter(1))

        if (g+1) == genNb:
            # return the fittest individual of the final generation
            return sortedPop[-1][0]

        print("Generation", g)
        print("Fitness min : ", sortedPop[0][1])
        print("Fitness max : ", sortedPop[-1][1])

        with open('./bestNN', 'wb') as file:
            pickle.dump(sortedPop[-1][0], file)

        with open('./csv', 'a') as file:
            for item in sortedPop:
                file.write(str(item[1]))
                file.write(',')
            file.write('\n')


        # Normalizing fitness
        sumFit = 0
        for item in sortedPop:
            sumFit += item[1]

        for item in sortedPop:
            try:
                item[1] = item[1] / sumFit
            except ZeroDivisionError:
                item[1] = 1/popSize

        sortedPop = list(reversed(sortedPop))
        #print(sortedPop)
        # print("Fitness evaluated.\n")

        # print("Generating next generation.")
        newPop = []

        # Elitism step : copying bests
        for i in range(elitism):
            newPop.append(sortedPop[i])

        # For the rest
        for i in range(popSize-elitism):
            if np.random.random() < crossRate:  # CROSSOVER TIME
                parent1 = select_parent(sortedPop).weights
                parent2 = select_parent(sortedPop).weights

                child = NeuralNetwork(shape)
                for l in range(len(child.weights)):
                    for n in range(len(child.weights[l])):
                        for w in range(len(child.weights[l][n])):
                            if np.random.random() >= 0.5:
                                parent = parent1
                            else:
                                parent = parent2
                            child.weights[l][n][w] = parent[l][n][w]

                            if np.random.random() <= mutRate:  # MUTATION !
                                child.weights[l][n][w] += ((1.0 - (-1.0)) * np.random.random() + (-1.0))
            else:
                # CLONING TIME
                parent = select_parent(sortedPop).weights
                child = NeuralNetwork(shape)
                child.weights = parent

            newPop.append([child, None])
        popList = newPop