def feed_forward(X, theta, n_hidden_layers=1):
    """Applies forward propagation to calculate model's hypothesis.

    Args:
        X (numpy.array): Features' dataset.
        theta (numpy.array): Column vector of model's parameters.
        n_hidden_layers (int): Number of hidden layers in network.

    Returns:
        (numpy.array(numpy.array), numpy.array(numpy.array)): A 2-tuple
            consisting of an array of parameters prior to activation by
            layer and an array of activation matrices by layer.
    """
    n_layers = n_hidden_layers + 2
    z = empty(n_layers, dtype=object)
    a = empty(n_layers, dtype=object)

    # Input layer: the activations are the features themselves.
    a[0] = X

    # Hidden layers: affine transform, activation, then prepend a bias
    # (intercept) column of ones for the next layer's weights.
    for layer in range(1, n_layers - 1):
        z[layer] = a[layer - 1].dot(theta[layer - 1].T)
        activated = g(z[layer])
        bias = ones((len(activated), 1), float64)
        a[layer] = append(bias, activated, axis=1)

    # Output layer: the hypothesis; no bias column is appended here.
    last = n_layers - 1
    z[last] = a[last - 1].dot(theta[last - 1].T)
    a[last] = g(z[last])

    return z, a
def h(X, w, b):
    """Logistic regression hypothesis.

    Args:
        X (numpy.array): Transposed features' dataset.
        w (numpy.array): Column vector of model's parameters.
        b (float): Model's intercept parameter.

    Returns:
        numpy.array: The probability that each entry belong to class 1.
    """
    # Linear score first, then squash through the sigmoid.
    scores = dot(w.T, X) + b
    return g(scores)
def feed_forward(X, theta, n_hidden_layers=1):
    """Applies forward propagation to calculate model's hypothesis.

    :param X: Features' dataset.
    :type X: numpy.array
    :param theta: Column vector of model's parameters.
    :type theta: numpy.array
    :param n_hidden_layers: Number of hidden layers in network.
    :type n_hidden_layers: int
    :returns:
        - z - array of parameters prior to activation by layer.
        - a - array of activation matrices by layer.
    :rtype:
        - z (:py:class: numpy.array(numpy.array))
        - a (:py:class: numpy.array(numpy.array))
    """
    total_layers = n_hidden_layers + 2
    z = empty(total_layers, dtype=object)
    a = empty(total_layers, dtype=object)

    a[0] = X  # the input layer is the raw feature matrix

    # Each hidden layer: pre-activation, sigmoid, then an intercept
    # column of ones glued on the left for the next layer.
    layer = 1
    while layer < total_layers - 1:
        z[layer] = a[layer - 1].dot(theta[layer - 1].T)
        hidden = g(z[layer])
        intercept = ones((len(hidden), 1), float64)
        a[layer] = append(intercept, hidden, axis=1)
        layer += 1

    # Final (output) layer produces the hypothesis — no intercept added.
    out = total_layers - 1
    z[out] = a[out - 1].dot(theta[out - 1].T)
    a[out] = g(z[out])

    return z, a
def h(X, theta):
    """Logistic regression hypothesis.

    Args:
        X (numpy.array): Features' dataset plus bias column.
        theta (numpy.array): Column vector of model's parameters.

    Raises:
        ValueError

    Returns:
        numpy.array: The probability that each entry belong to class 1.
    """
    # Project onto the parameter vector, then map to (0, 1) via sigmoid.
    linear = X.dot(theta)
    return g(linear)
def predict_prob(X, theta):
    """Produces the probability that the entries belong to class 1.

    Args:
        X (numpy.array): Features' dataset plus bias column.
        theta (numpy.array): Column vector of model's parameters.

    Raises:
        ValueError

    Returns:
        numpy.array: The probability that each entry belong to class 1.
    """
    return g(X.dot(theta))
def h(X, theta):
    """Logistic regression hypothesis.

    :param X: Features' dataset plus bias column.
    :type X: numpy.array
    :param theta: Column vector of model's parameters.
    :type theta: numpy.array
    :raises: ValueError
    :returns: The probability that each entry belong to class 1.
    :rtype: numpy.array
    """
    # Sigmoid of the linear combination of features and parameters.
    margin = X.dot(theta)
    return g(margin)
def predict_prob(X, theta):
    """
    Produces the probability that the entries belong to class 1.

    :param X: Features' dataset plus bias column.
    :type X: numpy.array
    :param theta: Column vector of model's parameters.
    :type theta: numpy.array
    :raises: ValueError
    :returns: The probability that each entry belong to class 1.
    :rtype: numpy.array
    """
    # Equivalent to the model hypothesis: sigmoid of X @ theta.
    raw_scores = X.dot(theta)
    return g(raw_scores)