Example No. 1
    def train_single(self, input_vector, target_vector):
        """"
            Forward Propagation
            input:  input_vector  [784], target_vector [10]
            H1:     output_vector [100], weights_in_hidden[100, 784], output_hidden[100]
            output: output_vector2 [10], weight_hidden_output[10, 100], output_network[10]
            loss scalar 2.3

            Backward Propagation
            gradient [10], derived [10], tmp2 [10], hidden_errors [100], tmp5 [100]
            
        """
        input_vector = np.array(input_vector, ndmin=2).T
        target_vector = np.array(target_vector, ndmin=2).T

        output_vector1 = np.dot(self.weights_in_hidden, input_vector)
        output_hidden = Activation.leakyReLU(output_vector1)

        output_vector2 = np.dot(self.weights_hidden_output, output_hidden)
        output_network = Activation.leakyReLU(output_vector2)

        loss = Cross_Entropy.calc(output_network, target_vector)
        gradient = Cross_Entropy.derived_calc(output_network, target_vector)
        # update the weights:
        derived1 = Derivative.leakyReLU(gradient)
        tmp2 = derived1 * (loss * gradient)
        # calculate hidden errors:
        hidden_errors = np.dot(self.weights_hidden_output.T, loss * derived1)
        # update the weights:
        tmp5 = hidden_errors * Derivative.leakyReLU(output_hidden)
        self.weights_hidden_output += self.learning_rate * np.dot(
            tmp2, output_hidden.T)
        self.weights_in_hidden += self.learning_rate * np.dot(
            tmp5, input_vector.T)
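
A minimal driver sketch for train_single, assuming the Activation, Cross_Entropy and Derivative helpers from the source project are importable, and calling the method above as a plain function with an explicit stand-in for self (all shapes and values below are made up):

import numpy as np
from types import SimpleNamespace

rng = np.random.default_rng(0)
net = SimpleNamespace(
    learning_rate=0.01,
    weights_in_hidden=rng.normal(scale=0.05, size=(100, 784)),
    weights_hidden_output=rng.normal(scale=0.05, size=(10, 100)),
)

x = np.random.rand(784)                 # one flattened 28x28 image
target = np.zeros(10); target[3] = 1.0  # one-hot target for class 3
train_single(net, x, target)            # call the method above as a plain function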
Example No. 2
class Net:
    def __init__(self, method=Method.Sigmoid):
        self.weights = []  # Current weights
        self.old_weights = []  # Last time weights
        self.output = 0.0  # Neuron output
        self.inputted_features = []  # Inputted features
        self.summed_signal = 0.0  # Summed signal (the summation of the inputs)
        self.learning_rate = 0.8  # Learning rate
        self.activition = Activation()  # Activation function

    # Sum the input signals
    def summarize_inputs(self, features=[]):
        if not features:
            return 0.0
        self.inputted_features = copy.deepcopy(features)
        self.summed_signal = np.dot(self.inputted_features,
                                    self.weights)  # a = X * W
        self.output = self.activition.activate(self.summed_signal)  # b = f(a)
        return self.output

    # Update the weights
    def update_weights(self, error_value=0.0):
        self.old_weights = copy.deepcopy(self.weights)
        for index, old_weight in enumerate(self.old_weights):
            # new weight          = old weight + learning rate * error_value(i) * input
            new_weight = old_weight + self.learning_rate * error_value * self.inputted_features[
                index]
            self.weights[index] = new_weight

    def differential_activition(self):
        return self.activition.differentiate(self.output)
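
A small usage sketch for Net, assuming its copy, numpy, Activation and Method dependencies are available (the weights and inputs below are made up):

net = Net()                                    # defaults to Method.Sigmoid
net.weights = [0.3, -0.2]
output = net.summarize_inputs([1.0, 0.5])      # b = f(X . W)
net.update_weights(error_value=1.0 - output)   # nudge weights toward a target of 1.0
print(net.weights)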
Example No. 3
    def __init__(self, num_neurons, input_shape):
        print(
            'Adding Layer: input_shape: {}, number of neurons: {}, output_shape: {}'
            .format(input_shape, num_neurons, num_neurons))
        # Let's initialize the weights in interval [0,1) for respective synaptic inputs
        self.weights = np.random.uniform(low=0, high=1, size=input_shape)

        # Let's initialize the biases all with value '1' for every neuron in the current layer
        self.biases = np.ones(num_neurons)

        # Let's initialize the activation_potentials all with value '0' for every neuron in the current layer
        self.activation_potentials = np.zeros(num_neurons)

        # Outputs of this layer
        self.outputs = np.zeros(num_neurons)

        # Local Gradients of all the neurons in current layer
        self.local_gradients = np.zeros(num_neurons)

        # And finally the activation function, for non-linearity of outputs
        self.activation = Activation()

        # Inputs to this layer -> Outputs from previous layer
        self.previous_layers_outputs = []
        print('Added Layer ... ')
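
For reference, a short numpy sketch contrasting what size=input_shape produces with the per-neuron weight matrix a fully connected layer usually keeps (the numbers are hypothetical):

import numpy as np

num_neurons, input_shape = 3, 4
w_flat = np.random.uniform(low=0, high=1, size=input_shape)                  # shape (4,)
w_full = np.random.uniform(low=0, high=1, size=(num_neurons, input_shape))  # shape (3, 4)
print(w_flat.shape, w_full.shape)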
Example No. 4
    def back_prop(self,incoming_grad,lr = 0.01):
        """
        :param incoming_grad: should be a 1d vector
        :return:
        """
        if self.activation !="softmax":
            if self.activation == "sigmoid":
                self.local_grad = A.grad_sigmoid(self.sum_of_incoming)
            elif self.activation == "relu":
                self.local_grad = A.grad_relu(self.sum_of_incoming)
            self.local_grad *= incoming_grad #element wise multiplication
        else:
            self.local_grad = incoming_grad

        temp_to_pass_back = [self.local_grad for _ in range(self.weights.shape[0])]
        temp_to_pass_back = np.asarray(temp_to_pass_back)

        bias_grad = self.local_grad
        weight_grad = np.matmul(self.input.reshape((self.input.shape[0],1)),
                                self.local_grad.reshape((1,self.local_grad.shape[0])))

        back_grad = self.weights * temp_to_pass_back
        self.biases =self.biases - lr * bias_grad
        self.weights = self.weights - lr * weight_grad
        ## propagate the gradient to the previous layer

        return np.sum(back_grad,axis=1)
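
The reshape-and-matmul above is the outer product of the layer input with the local gradient, and summing weights * temp_to_pass_back over axis 1 collapses to a plain matrix-vector product; a small numpy check with made-up sizes:

import numpy as np

rng = np.random.default_rng(0)
inp = rng.normal(size=5)              # layer input, length 5
grad = rng.normal(size=3)             # local gradient, length 3
W = rng.normal(size=(5, 3))           # weights, shape (inputs, outputs)

weight_grad = inp.reshape(5, 1) @ grad.reshape(1, 3)
assert np.allclose(weight_grad, np.outer(inp, grad))

stacked = np.asarray([grad for _ in range(W.shape[0])])    # shape (5, 3)
assert np.allclose(np.sum(W * stacked, axis=1), W @ grad)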
Example No. 5
  def __init__(self, start=0, end=1, frequency = 50, duty_cycle = 0.4, scaling = 1, non_linearity = -1, shape_="monophasic"):
      self.start = start
      self.end = end
    
      self.lit_data = DataLoader()
            
      self.a = Activation(frequency, duty_cycle, scaling, non_linearity)
      self.a.get_activation_signal(self.lit_data.activation_function(), shape=shape_)

      self.a_sol = Activation(frequency, duty_cycle, scaling, non_linearity)
      self.a_sol.get_activation_signal(self.lit_data.activation_function_soleus(), shape=shape_)
     
      rest_length_soleus = self.soleus_length(23.7*np.pi/180)*1.015
      rest_length_tibialis = self.tibialis_length(-37.4*np.pi/180)*0.9158 # lower is earlier activation
      print(rest_length_soleus)
      print(rest_length_tibialis)
      soleus_f0m = 2600.06
      self.soleus = HillTypeMuscle(soleus_f0m, .1342*rest_length_soleus, .8658*rest_length_soleus)
      self.tibialis = HillTypeMuscle(605.3465, .2206*rest_length_tibialis, .7794*rest_length_tibialis)

      # theta, velocity, initial CE length of soleus, initial CE length of TA
      self.initial_state = np.array([self.lit_data.ankle_angle(self.start)[0]*np.pi/180,
                                     self.lit_data.ankle_velocity(self.start)[0]*np.pi/180,
                                     0.827034,
                                     1.050905])
      print(self.initial_state)    
      self.time = None
      self.x1 = None
      self.x2 = None
      self.x3 = None
      self.x4 = None
Example No. 6
    def train_single(self, input_vector, target_vector):
        """
        input_vector and target_vector can be tuple, 
        list or ndarray
        """

        input_vector = np.array(input_vector, ndmin=2).T
        target_vector = np.array(target_vector, ndmin=2).T

        output_vector1 = np.dot(self.weights_in_hidden, input_vector)
        output_hidden = Activation.reLU(output_vector1)

        output_vector2 = np.dot(self.weights_hidden_output, output_hidden)
        #output_network = Activation.sigmoid(output_vector2)
        output_network = Activation.reLU(output_vector2)

        output_errors = target_vector - output_network
        # update the weights:
        #tmp = output_errors * Derivative.sigmoid(output_network)
        tmp = output_errors * Derivative.reLU(output_network)
        tmp = self.learning_rate * np.dot(tmp, output_hidden.T)
        self.weights_hidden_output += tmp
        # calculate hidden errors:
        hidden_errors = np.dot(self.weights_hidden_output.T, output_errors)
        # ----------------------------------------------------------------------
        # update the weights:
        tmp = hidden_errors * Derivative.reLU(output_hidden)
        # -----------------------------------------------------------------------
        self.weights_in_hidden += self.learning_rate * np.dot(
            tmp, input_vector.T)
Example No. 7
 def __init__(self, method=Method.Sigmoid):
     self.weights = []  # Current weights
     self.old_weights = []  # Last time weights
     self.output = 0.0  # Neuron output
     self.inputted_features = []  # Inputted features
     self.summed_signal = 0.0  # Summed signal (the summation of the inputs)
     self.learning_rate = 0.8  # Learning rate
     self.activition = Activation()  # Activation function
Example No. 8
 def __init__(self):
     self.tag = self.__class__.__name__
     self.samples = []  # All training samples (feature values)
     self.targets = []  # Target outputs of the training samples
     self.weights = []  # Weights
     self.bias = 0.0  # Bias
     self.learning_rate = 1.0  # Learning rate
     self.max_iteration = 1  # Maximum number of iterations
     self.convergence = 0.001  # Convergence error
     self.activation = Activation()
Example No. 9
 def __init__(self, has_recurrent=False):
     self.weights = []  # <number>
     self.recurrent_weights = []  # <number>
     self.bias = 0.0
     self.delta_value = 0.0  # Current delta value will be next delta value.
     self.has_recurrent = has_recurrent  # Has recurrent inputs? (hidden nets do, output nets do not)
     self.activation = Activation()  # The activation function's getter/setter live here: net.activation.method.
     # A separate self.activation_method is also provided for easier access.
     self.output = NetOutput()
     self.timesteps = []  # <Timestep Object>
Example No. 10
    def run(self, input_vector):
        # input_vector can be tuple, list or ndarray
        input_vector = np.array(input_vector, ndmin=2).T
        # 1st layer
        output_vector = np.dot(self.weights_in_hidden, input_vector)
        output_vector = Activation.leakyReLU(output_vector)
        # 2nd layer
        output_vector = np.dot(self.weights_hidden_output, output_vector)
        output_vector = Activation.leakyReLU(output_vector)

        return output_vector
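
A minimal inference sketch for run, assuming Activation.leakyReLU is importable and the network object carries the usual 784-100-10 weight matrices (the names and shapes are assumptions), calling the method above as a plain function:

import numpy as np
from types import SimpleNamespace

rng = np.random.default_rng(0)
net = SimpleNamespace(
    weights_in_hidden=rng.normal(scale=0.05, size=(100, 784)),
    weights_hidden_output=rng.normal(scale=0.05, size=(10, 100)),
)
scores = run(net, np.random.rand(784))
print(scores.shape)                      # (10, 1)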
Example No. 11
def test_activation_functions():
    # Ensure the correct values are calculated by the member functions

    # We compare against sklearn functions
    from sklearn.neural_network._base import tanh, relu
    from scipy.special import expit as sigmoid

    N = 100

    act = Activation(function='sigmoid')
    x = np.random.uniform(-10.0, 10.0, size=(N, 1))
    assert act(x) == pytest.approx(sigmoid(x))

    act.set(function='tanh')
    x = np.random.uniform(-10.0, 10.0, size=(N, 1))
    assert act(x) == pytest.approx(tanh(x))

    act.set(function='relu')
    x = np.random.uniform(-10.0, 10.0, size=(N, 1))
    assert act(x) == pytest.approx(relu(x))

    alpha = 2.5082958
    act.set(function='leakyrelu', alpha=alpha)
    x = np.random.uniform(-10.0, 10.0, size=(N, 1))
    assert act(x) == pytest.approx((x >= 0.0) * x + (x < 0.0) * alpha * x)
Example No. 12
    def compute_gradients(self, test_input, test_output):
        test_input = np.array(test_input)
        test_output = np.array(test_output)
        deltas = list()
        slopes = list()
        error = -(test_output - self.predict(test_input))
        for i, (activation, inputs) in enumerate(zip(reversed(self._activations[:-1]), reversed(self._layer_inputs))):
            if i == 0:
                deltas.append(np.multiply(error, Activation.sigmoid_prime(inputs)))
            else:
                deltas.append(np.dot(deltas[-1], self._layer_weights[-i].T) * Activation.sigmoid_prime(inputs))
            slopes.append(np.multiply(activation.T, deltas[-1]))

        slopes = [slope.ravel() for slope in reversed(slopes)]
        return np.concatenate(slopes)
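
compute_gradients returns one flat vector; a generic helper like this sketch (not part of the original class) can split it back into per-layer arrays once the weight shapes are known:

import numpy as np

def unflatten(flat, shapes):
    """Split a flat gradient vector into arrays with the given shapes."""
    out, start = [], 0
    for shape in shapes:
        size = int(np.prod(shape))
        out.append(flat[start:start + size].reshape(shape))
        start += size
    return out

# e.g. for a 2-3-1 network: unflatten(gradient_vector, [(2, 3), (3, 1)])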
Example No. 13
    def feed_forward(self, x):  # aka forward propagation

        l_actvtns = x
        for l_ix in range(1, self.l_number):
            l_values = np.dot(l_actvtns, self.l_weights[l_ix]) \
                    + self.l_biases[l_ix]
            l_actvtns = aa.fn(self.actvtn_types[l_ix], l_values)
            self.l_actvtns[l_ix] = l_actvtns
Example No. 14
 def predict(self, input_matrix):
     input_matrix = np.array(input_matrix)
     self._activations = [input_matrix]
     self._layer_inputs = [input_matrix]
     for layer_weight in self._layer_weights:
         self._layer_inputs.append(np.dot(self._activations[-1], layer_weight))
         self._activations.append(Activation.sigmoid(self._layer_inputs[-1]))
     return self._activations[-1]
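
The same forward pass written out in plain numpy, which can help when checking shapes; the 2-3-1 layout and weights are made up, and sigmoid stands in for Activation.sigmoid:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.default_rng(0)
layer_weights = [rng.normal(size=(2, 3)), rng.normal(size=(3, 1))]   # a 2-3-1 net

activations = [np.array([[0.2, -1.0]])]         # one sample with two features
for W in layer_weights:
    activations.append(sigmoid(activations[-1] @ W))
print(activations[-1].shape)                    # (1, 1)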
Example No. 15
def test_activation_init():
    # Ensure the setup is handled correctly when initializing an instance
    # of the activation class
    act = Activation()

    # Default values
    assert act.function == act._sigmoid
    assert act.alpha == pytest.approx(0.01)

    # String to correct function conversion
    act = Activation(function='tanh')
    assert act.function == act._tanh

    act = Activation(function='relu')
    assert act.function == act._relu

    act = Activation(function='leakyrelu')
    assert act.function == act._leakyrelu

    act = Activation(function='sigmoid')
    assert act.function == act._sigmoid

    # Check wrong string error is handled correctly
    caught = False
    try:
        act = Activation(function='this_is_not_an_allowed_string')
    except ValueError as e:
        caught = True
    assert caught == True

    # Check alpha value specification is handled correctly
    alpha = 0.867
    act = Activation(function='relu', alpha=alpha)
    assert act.alpha == pytest.approx(alpha)
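
The wrong-string check above can also be written with pytest.raises, the more common idiom; a sketch assuming the same Activation class:

import pytest

def test_activation_invalid_name():
    with pytest.raises(ValueError):
        Activation(function='this_is_not_an_allowed_string')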
Example No. 16
    def prediction(self, x):

        l_actvtns = x
        for l_ix in range(1, self.l_number):
            l_values = np.dot(l_actvtns, self.l_weights[l_ix]) \
                    + self.l_biases[l_ix]
            l_actvtns = aa.fn(self.actvtn_types[l_ix], l_values)

        return l_actvtns
Example No. 17
def predict(w, b, X):
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)
    A = Activation.sigmoid(np.dot(w.T, X) + b)
    for i in range(A.shape[1]):
        Y_prediction[0, i] = 1 if A[0, i] > 0.5 else 0

    return Y_prediction
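
A small usage sketch for predict with a stand-in for the Activation.sigmoid helper it relies on (the data below are made up):

import numpy as np

class Activation:                          # stand-in with the interface used above
    @staticmethod
    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

X = np.array([[0.5, -1.2, 3.0],            # 2 features x 3 samples
              [1.5,  0.3, -0.7]])
w = np.array([[0.4], [-0.6]])
b = 0.1
print(predict(w, b, X))                    # [[0. 0. 1.]]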
Example No. 18
 def predict(self, input_matrix):
     input_matrix = np.array(input_matrix)
     self._activations = [input_matrix]
     self._layer_inputs = [input_matrix]
     for layer_weight in self._layer_weights:
         self._layer_inputs.append(
             np.dot(self._activations[-1], layer_weight))
         self._activations.append(Activation.sigmoid(
             self._layer_inputs[-1]))
     return self._activations[-1]
Example No. 19
 def propagate(w, b, x, y):
     m = x.shape[1]
     A = Activation.sigmoid(np.dot(w.T, x) + b)
     cost = (-1 / m) * np.sum(y * np.log(A) + (1 - y) * (np.log(1 - A)))
     dz = A - y
     dw = (1 / m) * np.dot(x, dz.T)
     db = (1 / m) * np.sum(dz)
     cost = np.squeeze(cost)
     grads = {"dw": dw, "db": db}
     return grads, cost
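
A minimal gradient-descent loop built around propagate, assuming it is callable as a plain function and that Activation.sigmoid is available (toy data):

import numpy as np

X = np.array([[0.0, 1.0, 2.0, 3.0]])       # 1 feature x 4 samples
y = np.array([[0, 0, 1, 1]])               # binary labels
w, b = np.zeros((1, 1)), 0.0

for _ in range(1000):
    grads, cost = propagate(w, b, X, y)
    w -= 0.5 * grads["dw"]
    b -= 0.5 * grads["db"]
print(cost)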
Example No. 20
    def compute_gradients(self, test_input, test_output):
        test_input = np.array(test_input)
        test_output = np.array(test_output)
        deltas = list()
        slopes = list()
        error = -(test_output - self.predict(test_input))
        for i, (activation, inputs) in enumerate(
                zip(reversed(self._activations[:-1]),
                    reversed(self._layer_inputs))):
            if i == 0:
                deltas.append(
                    np.multiply(error, Activation.sigmoid_prime(inputs)))
            else:
                deltas.append(
                    np.dot(deltas[-1], self._layer_weights[-i].T) *
                    Activation.sigmoid_prime(inputs))
            slopes.append(np.multiply(activation.T, deltas[-1]))

        slopes = [slope.ravel() for slope in reversed(slopes)]
        return np.concatenate(slopes)
Example No. 21
 def __init__(self) -> None:
     super().__init__()
     # hyper params
     self.alpha: float = 1
     self.lambda_: float = 0
     self.c: float = 0
     self.gamma: float = 0
     # model params
     self.layers = []
     # engine params
     self.act: Activation = None
     self.reg: Regularization = None
     self.opt: Optimizer = None
Example No. 22
    def __init__(self,
                 architecture=[784, 100, 10],
                 activation='sigmoid',
                 learning_rate=0.1,
                 momentum=0.5,
                 weight_decay=1e-4,
                 dropout=0.5,
                 early_stopping=True,
                 seed=99):
        """
        Neural network model initializer.
        """

        # Attributes
        self.architecture = architecture
        self.activation = activation
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.dropout = dropout
        self.early_stopping = early_stopping
        self.seed = seed

        # Turn `activation` and `learning_rate` to class instances
        if not isinstance(self.activation, Activation):
            self.activation = Activation(self.activation)
        if not isinstance(self.learning_rate, LearningRate):
            self.learning_rate = LearningRate(self.learning_rate)

        # Initialize a list of layers
        self.layers = []
        for i, (n_in,
                n_out) in enumerate(zip(architecture[:-2],
                                        architecture[1:-1])):
            l = HiddenLayer('layer{}'.format(i), n_in, n_out, self.activation,
                            self.learning_rate, self.momentum,
                            self.weight_decay, self.dropout, self.seed + i)
            self.layers.append(l)
        # Output layer
        n_in, n_out = architecture[-2], architecture[-1]
        l = OutputLayer('output_layer', n_in, n_out, self.learning_rate,
                        self.momentum, self.weight_decay, self.dropout,
                        self.seed + i + 1)
        self.layers.append(l)

        # Training updates
        self.epoch = 0
        self.training_error = []
        self.validation_error = []
        self.training_loss = []
        self.validation_loss = []
Example No. 23
    def store_grad(self,incoming_grad,lr = 0.01):
        """
        :param incoming_grad: should be a 1d vector
        :return:
        """
        if self.activation !="softmax":
            if self.activation == "sigmoid":
                self.local_grad = A.grad_sigmoid(self.sum_of_incoming)
            elif self.activation == "relu":
                self.local_grad = A.grad_relu(self.sum_of_incoming)
            elif self.activation == "swish":
                self.local_grad = A.grad_swish(self.sum_of_incoming)
            elif self.activation == "tanh":
                self.local_grad = A.grad_tanh(self.sum_of_incoming)
            # print("local grad is:",self.local_grad)
            self.local_grad *= incoming_grad #element wise multiplication
            # print("incoming grad is:",self.local_grad)
        else:
            self.local_grad = incoming_grad
            # print("for softmax incoming grad is:",self.local_grad)
    
        
        temp_to_pass_back = [self.local_grad for _ in range(self.weights.shape[0])]
        temp_to_pass_back = np.asarray(temp_to_pass_back)

        temp = np.matmul(self.input.reshape((self.input.shape[0],1)),
                                self.local_grad.reshape((1,self.local_grad.shape[0])))
        self.bias_grad += self.local_grad
        # print("temp is ",temp)
        # print("weight grad is ",self.weight_grad)
        self.weight_grad += temp
        # print("weight grad is ",self.weight_grad)
        ## propagate the gradient to the previous layer

        back_grad = self.weights * temp_to_pass_back
        # if np.linalg.norm(back_grad)>1.0:
        #     print("grad explosion , inside store_grad function of layer")
        return np.sum(back_grad,axis=1)
Example No. 24
    def train_single(self, input_vector, target_vector):
        """
        input_vector and target_vector can be tuple, 
        list or ndarray
        """

        input_vector = np.array(input_vector, ndmin=2).T
        target_vector = np.array(target_vector, ndmin=2).T

        output_vector1 = np.dot(self.weights_in_hidden, input_vector)
        output_hidden = Activation.reLU(output_vector1)
        output_hidden *= Dropout.get_mask(output_vector1)
        output_vector2 = np.dot(self.weights_hidden_output, output_hidden)
        output_network = Activation.reLU(output_vector2)
        output_network *= Dropout.get_mask(output_vector2)
        output_errors = target_vector - output_network
        # update the weights:
        #tmp = output_errors * Derivative.sigmoid(output_network)
        try:
            tmp = output_errors * Derivative.reLU(output_network)
            tmp = self.learning_rate * np.dot(tmp, output_hidden.T)
            self.weights_hidden_output += tmp
        except Exception:
            print("Output-layer weight update failed")

        # calculate hidden errors:
        try:
            hidden_errors = np.dot(self.weights_hidden_output.T, output_errors)
        except Exception:
            print("Hidden-error computation failed")
        # ----------------------------------------------------------------------
        # update the weights:
        tmp1 = Derivative.reLU(output_hidden)
        tmp = hidden_errors * tmp1
        # -----------------------------------------------------------------------
        self.weights_in_hidden += self.learning_rate * np.dot(
            tmp, input_vector.T)
Example No. 25
    def feed_forward(self, layer):
        """Feeds forward the layers values"""
        for i in range(len(self.bias.weights)):
            layer.nodes[i].value = 0

        for i in range(len(self.nodes)):
            for weight in range(len(self.nodes[i].weights)):
                layer.nodes[weight].value += self.nodes[i].value * self.nodes[
                    i].weights[weight]

        for weight in range(len(self.bias.weights)):
            layer.nodes[weight].value += self.bias.weights[weight]

        for w in range(len(layer.nodes)):
            # use tanh as our activation function
            layer.nodes[w].value = Activation.tanh(layer.nodes[w].value)
Example No. 26
    def back_propagtn(self, x, y):

        learng_coeff = self.learng_eta / len(x)  # scaling

        l_losses = cc.dv(self.cost_type, y, self.l_actvtns[self.l_number-1]) \
                + rr.dv(self.reg_type, y, self.l_actvtns[self.l_number-1])

        for l_ix in range(self.l_number - 1, 0, -1):
            l_slopes = aa.dv(self.actvtn_types[l_ix], self.l_actvtns[l_ix])
            l_deltas = l_losses * l_slopes
            weights_nabla = self.l_actvtns[l_ix-1].T.dot(l_deltas)
            biases_nabla  = np.sum(l_deltas, axis=0, keepdims=True)[0]
            self.l_weights[l_ix] = self.l_weights[l_ix] \
                    + weights_nabla * learng_coeff
            self.l_biases[l_ix]  = self.l_biases[l_ix] \
                    + biases_nabla * learng_coeff
            l_losses = l_deltas.dot(self.l_weights[l_ix].T)
Example No. 27
def activate_tag_callback(tag):
    if display_manager.active_window() != prompt:
        return
    print "Activating tag: %s" % tag
    display_manager.launch(load_window)
    res = urllib2.urlopen("http://192.168.1.10:8000/api/activate?tag=%s" %
                          tag).read()
    print res
    res_dict = json.loads(res)
    if (res_dict["status"] == "error"):
        load_window.finish()
    else:
        name = res_dict["player"]["first_name"] + " " + res_dict["player"][
            "last_name"]
        team = res_dict["player"]["team"]
        rule = res_dict["player"]["rule_text"]
        activation = Activation(name, team, rule)
        display_manager.launch_consume(activation)
Example No. 28
def apply_activation_fun(data,activation="relu"):
    if activation=="relu":
        return A.relu(data)
    elif activation == "softmax":
        return A.softmax(data)
    elif activation == "tanh":
        return A.tanh(data)
    elif activation == "softplus":
        return A.softplus(data)
    elif activation == "swish":
        return A.swish(data)
    elif activation == "sigmoid":
        return A.sigmoid(data)
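
Note that an activation name outside the chain above falls through and the call returns None; a call-site guard such as this sketch may help (assumes the activation-helper module A is importable):

import numpy as np

data = np.array([-1.0, 0.0, 2.0])
out = apply_activation_fun(data, activation="relu")
if out is None:
    raise ValueError("unknown activation name")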
Example No. 29
class Perceptron(Neuron):
    def __init__(self, learning_rate: float, X: np.array, Y: np.array,
                 func: str):
        super().__init__(learning_rate, X, Y)
        self._activer = Activation(func)

    def run(self, steps=1001, show=100):
        for step in range(steps):
            cost = 0

            for x_n, y_n in zip(self._data, self._label):
                y_pred = self._predict(x_n)
                y_pred = self._activer.chooser(y_pred)
                diff = y_n - int(y_pred)
                self._update_w_online(x_n, diff)
                self._update_bias(diff)
                cost += diff**2

            if step % show == 0:
                print('step {0}: {1}'.format(step, cost))
        self.print_status(step)
Example No. 30
class Sigmoid(Neuron):
    def __init__(self, learning_rate: float, X: np.array, Y: np.array,
                 func: str):
        super().__init__(learning_rate, X, Y)
        self._activer = Activation(func)

    def run(self, steps=1001, show=100):

        for step in range(steps):
            z = np.dot(self._data, self._w.T) + self._bias
            y_pred = self._activer.chooser(z)
            error = self._label - y_pred

            self._update_w_offline(self._data, error.T)
            self._update_bias(error.sum())

            if step % show == 0:
                cost = np.mean(-self._label * np.log(y_pred) -
                               (1 - self._label) * np.log(1 - y_pred))
                print('step {0}: {1}'.format(step, cost))
        self.print_status(step)
Example No. 31
 def __init__(self,
              input_layer,
              num_units,
              init_stddev,
              activation_fun=Activation('relu')):
     self.num_units = num_units
     self.activation_fun = activation_fun
     # the input shape will be of size (batch_size, num_units_prev)
     # where num_units_prev is the number of units in the input
     # (previous) layer
     self.input_shape = input_layer.output_size()
     # TODO ################################
     # TODO: implement weight initialization
     # TODO ################################
     # this is the weight matrix it should have shape: (num_units_prev, num_units)
     # use normal distrbution with mean 0 and stdv = init_stddev
     self.W = np.random.normal(0, init_stddev,
                               (self.input_shape[1], num_units))  #FIXME
     # and this is the bias vector of shape: (num_units)
     self.b = np.random.normal(0, init_stddev, (num_units, ))  #FIXME
     # create dummy variables for parameter gradients
     # no need to change these here!
     self.dW = None
     self.db = None
Example No. 32
 def set_activation(self, frequency, duty_cycle, scaling, non_linearity, shape_):
     self.a = Activation(frequency, duty_cycle, scaling, non_linearity)
     self.a.get_activation_signal(self.lit_data.activation_function(), shape=shape_)
     self.a.plot()