def __init__(self, learning_rate=0.1, momentum=0.3, gradient_descent=True):
    self.param = None
    self.learning_rate = learning_rate
    self.momentum = momentum
    self.gradient_descent = gradient_descent
    self.sigmoid = Sigmoid()
    self.log_loss = LogisticLoss()
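
Every example on this page assumes a `Sigmoid` object that exposes the activation and its derivative. The exact library implementation isn't shown here, but a minimal sketch with the interface these snippets rely on (`function`, `gradient`, and being callable) could look like:

import numpy as np

class Sigmoid:
    def __call__(self, x):
        return self.function(x)

    def function(self, x):
        # Standard logistic function; clip x first if inputs can be very large
        return 1 / (1 + np.exp(-x))

    def gradient(self, x):
        # Derivative of the sigmoid: p * (1 - p)
        p = self.function(x)
        return p * (1 - p)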
Example #2
def __init__(self, n_hidden, n_iterations=3000, learning_rate=0.01):
    self.n_hidden = n_hidden
    self.n_iterations = n_iterations
    self.learning_rate = learning_rate
    self.hidden_activation = Sigmoid()
    self.output_activation = Softmax()
    self.loss = CrossEntropy()
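
Example #2 additionally assumes a `Softmax` helper for the output layer. A minimal, numerically stable sketch in the same style as the `Sigmoid` class above (the class name and method layout are assumptions, not the library's exact API):

class Softmax:
    def __call__(self, x):
        # Subtract the row-wise max before exponentiating to avoid overflow
        e_x = np.exp(x - np.max(x, axis=-1, keepdims=True))
        return e_x / np.sum(e_x, axis=-1, keepdims=True)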
Example #3
def __init__(self, activation_function):
    Layer.__init__(self)

    # Instantiate the chosen activation function; strings should be
    # compared with `==`, not `is`
    if activation_function == "relu":
        self.activation_function = Relu()
    elif activation_function == "sigmoid":
        self.activation_function = Sigmoid()
Example #4
def forward(self, inputs):
    # Reshape the input into a single row vector
    self.x = inputs.reshape(1, X_DIM)
    # Two LeakyReLU hidden layers followed by a sigmoid output
    self.y1 = np.matmul(self.x, self.w1) + self.b1
    self.y1 = LeakyReLU(self.y1)
    self.y2 = np.matmul(self.y1, self.w2) + self.b2
    self.y2 = LeakyReLU(self.y2)
    self.y3 = np.matmul(self.y2, self.w3) + self.b3
    self.y = Sigmoid(self.y3)
    return self.y
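
Unlike the class-based activations in the earlier examples, this forward pass calls `LeakyReLU` and `Sigmoid` as plain functions. Under that assumption they could be defined as follows (the 0.01 slope is a common default, not taken from this source):

def LeakyReLU(x, alpha=0.01):
    # Pass positive values through; scale negative values by a small slope
    return np.where(x > 0, x, alpha * x)

def Sigmoid(x):
    return 1 / (1 + np.exp(-x))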
Example #5
def __init__(self, grad_wrt_theta=True):
    sigmoid = Sigmoid()
    self.log_func = sigmoid.function
    self.log_grad = sigmoid.gradient

    # Choose which quantity the gradient is taken with respect to
    if grad_wrt_theta:
        self.gradient = self._grad_wrt_theta
    else:
        self.gradient = self._grad_wrt_pred
        self.hess = self._hess_wrt_pred
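
For reference, with p = sigmoid(X·theta) the logistic-loss gradient with respect to theta is -(y - p)·X. A sketch of what `_grad_wrt_theta` could look like under that assumption (the signature is illustrative, not the library's):

def _grad_wrt_theta(self, y, X, theta):
    # p = sigmoid(X theta); d(loss)/d(theta) = -(y - p) X
    p = self.log_func(X.dot(theta))
    return -(y - p).dot(X)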
Example #6
import random

import numpy as np

from activation_functions import Sigmoid, LeakyRelu
from deep_network import DeepNetwork

# Train the network to give us the XOR on neuron 0 and the OR on neuron 1
training_data = [[[0, 0], [0, 0]], [[0, 1], [1, 1]], [[1, 0], [1, 1]],
                 [[1, 1], [0, 1]]]

# network = DeepNetwork(2, 4, 1, LeakyRelu(), 0.03)
network = DeepNetwork(2, 4, 2, Sigmoid(), 0.5)

for training_session in range(20000):
    training_set = random.choice(training_data)
    inputs = training_set[0]
    target_output = training_set[1]
    outputs = network.feed_forward(inputs)
    network.back_propagate(inputs, outputs, target_output)
    error = np.subtract(outputs, target_output)
    print('error:',
          ['{:.4f}'.format(abs(error[0])), '{:.4f}'.format(abs(error[1]))],
          'target_output', target_output, 'output:', outputs)
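
A quick sanity check after the loop, evaluating the trained network on the full truth table (this reuses the `network` and `training_data` objects defined above):

for inputs, target_output in training_data:
    print(inputs, '->', network.feed_forward(inputs),
          'expected', target_output)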
Example #7
def __init__(self):
    sigmoid = Sigmoid()
    # Unlike Example #5, this assigns the instance itself, which relies on
    # Sigmoid being callable (see the __call__ sketch above)
    self.log_func = sigmoid
    self.log_grad = sigmoid.gradient
Example #8
def __init__(self, learning_rate=0.1, gradient_descent=True):
    self.param = None
    self.learning_rate = learning_rate
    self.gradient_descent = gradient_descent
    self.sigmoid = Sigmoid()
Example #9
import random

import numpy as np

from activation_functions import Sigmoid, LeakyRelu
from shallow_network import ShallowNetwork

# Train the network to behave like a binary "AND" function
training_data = [[[0, 0], [0]], [[0, 1], [0]], [[1, 0], [0]], [[1, 1], [1]]]

# network = ShallowNetwork(2, 1, LeakyRelu(), 0.03)
network = ShallowNetwork(2, 1, Sigmoid(), 0.5)

for training_session in range(10000):
    training_set = random.choice(training_data)
    inputs = training_set[0]
    target_output = training_set[1]
    outputs = network.feed_forward(inputs)
    network.back_propagate(inputs, outputs, target_output)
    error = np.subtract(outputs, target_output)
    print('error:', '{:.4f}'.format(abs(error[0])), 'target_output',
          target_output, 'output:', outputs)
Example #10
        score_sum = sum(np.array(self.game.gamegrid.matrix).flatten().tolist())
        penalty = self.fitness_penalty
        return score_max + score_sum + penalty


GENERATION_SIZE = 4
GENERATION_COUNT = 2
PRINT_STEPS = True
WEIGHTS_METHOD = 'random'

nn_parameters = {
    'neurons_per_hidden_layer': [17, 17, 17],
    'input_layer_size': 17,
    'output_layer_size': 4,
    'input_af': Log2(),
    'hidden_af': [TanH(), ReLU(), Sigmoid()],
    'output_af': TanH()
}

game_parameters = {
    'manual_input': True,
    'random': False,
    'steps': 0,
    'sleep': 0
}

ga = GeneticAlgorithm(generation_size=GENERATION_SIZE, **nn_parameters)
ga.add_new_generation(weights_method=WEIGHTS_METHOD)
ga.populate_new_generation(ga[0], ga[0], weights_method=WEIGHTS_METHOD)

for k in range(GENERATION_COUNT):