Example No. 1
import numpy as np

def forward_propagate(feature_array, weights_biases_dict):
    # Hidden layer: affine transform followed by sigmoid activation.
    hidden_layer_values = np.dot(weights_biases_dict['hidden_weights'], feature_array) + weights_biases_dict['hidden_biases']
    hidden_layer_outputs = sigmoid(hidden_layer_values)

    # Output layer: same pattern applied to the hidden activations.
    output_layer_values = np.dot(weights_biases_dict['output_weights'], hidden_layer_outputs) + weights_biases_dict['output_biases']
    output_layer_outputs = sigmoid(output_layer_values)

    output_vals = {
        'hidden_layer_outputs': hidden_layer_outputs,
        'output_layer_outputs': output_layer_outputs
    }

    return output_vals
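
A minimal usage sketch follows; the sigmoid helper, the layer sizes, and the parameter shapes are assumptions, since the original snippet does not show them.

import numpy as np

def sigmoid(z):
    # Assumed elementwise logistic activation; not shown in the original.
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.default_rng(0)
params = {
    'hidden_weights': rng.standard_normal((4, 3)),  # (n_hidden, n_features)
    'hidden_biases': rng.standard_normal((4, 1)),
    'output_weights': rng.standard_normal((1, 4)),  # (n_outputs, n_hidden)
    'output_biases': rng.standard_normal((1, 1)),
}
features = rng.standard_normal((3, 5))  # (n_features, n_samples)
outputs = forward_propagate(features, params)
print(outputs['output_layer_outputs'].shape)  # (1, 5)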
Example No. 2

def one_layer_forward_propagation(A_prev, W, b, activation):
    """
    Parameters
    ----------
    A_prev : Matrix or Vector
        Activation matrix or vector from previous layer neuron
        of shape (n_nodes, n_samples)
    W : Matrix
        weight matrix of shape (n_layer, n_prev_layer)
    b : vector
        bias vector of shape (n_layer, 1)

    Returns
    -------
    None.

    """
    Z = np.dot(W, A_prev) + b

    assert Z.shape == (W.shape[0], A_prev.shape[1])

    if activation == 'sigmoid':
        A = hf.sigmoid(Z)
    elif activation == 'relu':
        A = hf.relu(Z)
    else:
        raise ValueError("activation must be 'sigmoid' or 'relu'")
    cache = (Z, A_prev, W, b)
    return A, cache
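
A usage sketch under stated assumptions: `hf` is the example's unshown helper module, so a stand-in namespace is provided here, and the shapes follow the docstring.

import numpy as np
from types import SimpleNamespace

# Stand-in for the example's unshown `hf` helper module (an assumption).
hf = SimpleNamespace(
    sigmoid=lambda z: 1.0 / (1.0 + np.exp(-z)),
    relu=lambda z: np.maximum(0.0, z),
)

A_prev = np.random.randn(3, 10)  # (n_prev_layer, n_samples)
W = np.random.randn(5, 3)        # (n_layer, n_prev_layer)
b = np.zeros((5, 1))             # (n_layer, 1)
A, cache = one_layer_forward_propagation(A_prev, W, b, 'relu')
print(A.shape)  # (5, 10)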
Example No. 3
def forward_propagate(network):
    """ Forward propagate values from input layer to output layer """
    for layer in network[1:]:
        for node in layer:
            # weights[0] is the bias weight, paired with a constant -1 input.
            node.inputValue = -1 * node.weights[0]
            for i, input_node in enumerate(node.inputs):
                node.inputValue += input_node.value * node.weights[i + 1]
            node.value = sigmoid(node.inputValue)
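
This version walks a graph of node objects rather than matrices. The Node class is not shown; a minimal stand-in with the attribute names the code expects (inputs, weights, value, inputValue) might look like this:

import math

def sigmoid(x):
    return 1.0 / (1.0 + math.exp(-x))

class Node:
    def __init__(self, inputs=None, weights=None):
        self.inputs = inputs or []    # upstream Node objects
        self.weights = weights or []  # weights[0] is the bias weight
        self.inputValue = 0.0
        self.value = 0.0

# Tiny 2-input, 1-output network; the input layer's values are set directly.
x1, x2 = Node(), Node()
x1.value, x2.value = 0.5, -0.2
out = Node(inputs=[x1, x2], weights=[0.1, 0.4, -0.3])  # [bias, w1, w2]
network = [[x1, x2], [out]]
forward_propagate(network)
print(out.value)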
Example No. 4

def activate_forward(self, A_prev, W, b, activation):
    if activation == "sigmoid":
        Z, linear_cache = self.linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        Z, linear_cache = self.linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)
    else:
        raise ValueError('activation must be "sigmoid" or "relu"')
    cache = (linear_cache, activation_cache)
    return A, cache
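
The method depends on a `linear_forward` helper and on `sigmoid`/`relu` variants that return both the activation and a cache; none of these are shown in the snippet. The following is a sketch of what they conventionally look like in this two-cache pattern, not the original implementations:

import numpy as np

def linear_forward(self, A_prev, W, b):
    # Affine step Z = W @ A_prev + b; cache the operands for backpropagation.
    Z = np.dot(W, A_prev) + b
    return Z, (A_prev, W, b)

def sigmoid(Z):
    A = 1.0 / (1.0 + np.exp(-Z))
    return A, Z  # activation plus a cache of the pre-activation

def relu(Z):
    A = np.maximum(0.0, Z)
    return A, Z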
Example No. 5
def feed_forward(X, thetas, i_size, h_sizes, o_size):
    Thetas = build_Thetas(thetas, i_size, h_sizes, o_size)
    A = []
    Z = []
    # Prepend a column of ones to X for the bias unit, then transpose so
    # samples are columns: a1 has shape (i_size + 1, n_samples).
    a1 = np.ones((X.shape[0], X.shape[1] + 1))
    a1[:, 1:] = X
    a1 = np.transpose(a1)
    A.append(a1)
    for i in range(len(Thetas) - 1):
        theta = Thetas[i]
        z = np.dot(theta, A[-1])
        Z.append(z)
        # Stack a bias row of ones on top of the activations for the next layer.
        a = np.vstack((np.ones((1, z.shape[1])), sigmoid(z)))
        A.append(a)
    # Output layer: no bias row is added to the final activations.
    z_last = np.dot(Thetas[-1], A[-1])
    #Z.append(z_last)
    A.append(sigmoid(z_last))
    return A, Z, Thetas
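
`build_Thetas` is not shown. From the shapes used above, it presumably slices the flat parameter vector `thetas` into one (n_out, n_in + 1) matrix per layer, with the extra column multiplying the bias unit. A sketch under that assumption:

import numpy as np

def build_Thetas(thetas, i_size, h_sizes, o_size):
    # Assumed reshaping of the flat vector; layer sizes run
    # input -> hidden layers -> output.
    layer_sizes = [i_size] + list(h_sizes) + [o_size]
    Thetas, offset = [], 0
    for n_in, n_out in zip(layer_sizes[:-1], layer_sizes[1:]):
        size = n_out * (n_in + 1)
        Thetas.append(np.asarray(thetas)[offset:offset + size].reshape(n_out, n_in + 1))
        offset += size
    return Thetas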
Example No. 6

def forwardPropagate(self):

    self.resetValues()
    ones = np.ones((self.m, 1))

    for i in range(self.layers - 1):

        # A column of ones is prepended so it multiplies the bias weights.
        self.a[i] = np.hstack((ones, self.a[i]))

        # self.z is assumed to be seeded with a placeholder entry for the
        # input layer, so self.z[i + 1] is the value appended just below.
        self.z.append(self.a[i] @ self.weights[i])
        self.a.append(sigmoid(self.z[i + 1]))
        self.aPrimes.append(sigmoidPrime(self.a[i + 1]))

    return self.a[self.layers - 1]
Example No. 7
def __activate(self, X):
    # This sigmoid variant returns (activation, cache); the cache is discarded.
    activation, _ = sigmoid(np.dot(self.__W.T, X) + self.__b)
    return activation
Example No. 8
import time

import numpy as np
import pandas as pd

import helper_functions

# `data` is assumed to be a pandas DataFrame loaded earlier, with eight
# feature columns followed by an 'Outcome' label column.
print(data.head())

X = data.iloc[:, 0:8]
print(X.head())

y = data.iloc[:, -1]
print(y.head())

start_time = time.time()

num_iter = 10000
learning_rate = 0.01

# Prepend an intercept column of ones to the feature matrix.
intercept = np.ones((X.shape[0], 1))
X = np.concatenate((intercept, X), axis=1)
theta = np.zeros(X.shape[1])
# X = X.astype(float)
for i in range(num_iter):
    h = helper_functions.sigmoid(X, theta)
    gradient = helper_functions.gradient_descent(X, h, y)
    theta = helper_functions.update_weight_loss(theta, learning_rate, gradient)

print("Training time (Log Reg using Gradient descent): " +
      str(time.time() - start_time) + " seconds")
print("Learning rate: {}\nIteration: {}".format(learning_rate, num_iter))

result = helper_functions.sigmoid(X, theta)
f = pd.DataFrame(np.around(result, decimals=6)).join(y)
f['pred'] = f[0].apply(lambda x: 0 if x < 0.5 else 1)
print("Accuracy (Loss minimization):")
print(f.loc[f['pred'] == f['Outcome']].shape[0] / f.shape[0] * 100)
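
`helper_functions` is not shown. From the call sites, `sigmoid(X, theta)` returns the predicted probabilities, `gradient_descent(X, h, y)` returns the gradient of the log-loss, and `update_weight_loss` applies one descent step. A sketch under those assumptions:

import numpy as np

def sigmoid(X, theta):
    # Predicted probability h = 1 / (1 + exp(-X·theta))
    return 1.0 / (1.0 + np.exp(-np.dot(X, theta)))

def gradient_descent(X, h, y):
    # Gradient of the average log-loss with respect to theta
    return np.dot(X.T, h - y) / y.shape[0]

def update_weight_loss(theta, learning_rate, gradient):
    return theta - learning_rate * gradient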