Example #1
def cost(theta, X, y):
    '''Return the cost and gradient for the given hypothesis function
       @Params: theta, X, y'''
    m = float(len(X))                            # Number of training examples. The float is essential.
    cost = (-1/m) * np.sum((y * np.log(sigmoid(X.dot(theta)))) + ((1 - y) * np.log(1 - sigmoid(X.dot(theta)))))
    grad = (1/m) * (X.T).dot(sigmoid(X.dot(theta)) - y)
    return cost, grad
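Every snippet on this page calls a sigmoid helper that the listing itself never shows; a minimal sketch, assuming NumPy is imported as np:

import numpy as np

def sigmoid(z):
    # Element-wise logistic function; works for scalars, vectors and matrices.
    return 1.0 / (1.0 + np.exp(-z))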
Example #2
def gradients(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lam):
    theta1_params = nn_params[0: (hidden_layer_size * (input_layer_size + 1))] 
    theta2_params = nn_params[(hidden_layer_size * (input_layer_size + 1)):] 

    theta_1 = theta1_params.reshape(hidden_layer_size, input_layer_size + 1)
    theta_2 = theta2_params.reshape(num_labels, (hidden_layer_size + 1))

    m = X.shape[0]

    Z2 = np.c_[np.ones(m), X].dot(theta_1.T)
    A2 = sigmoid(Z2)

    Z3 = np.c_[np.ones(A2.shape[0]), A2].dot(theta_2.T)
    A3 = HX = sigmoid(Z3)

    d3 = A3 - y  # y is assumed to already be one-hot encoded (m x num_labels)
    d2 = d3.dot(theta_2[:, 1:]) * sigmoidGradient(Z2)

    Delta1 = d2.T.dot(np.c_[np.ones(m), X])
    Delta2 = d3.T.dot(np.c_[np.ones(A2.shape[0]), A2])

    # zero the bias columns on copies so the caller's nn_params is not modified in place
    theta_1 = theta_1.copy()
    theta_2 = theta_2.copy()
    theta_1[:, 0] = 0
    theta_2[:, 0] = 0

    Theta1Grad = ((1.0/m) * Delta1) + ((lam/m) * theta_1)
    Theta2Grad = ((1.0/m) * Delta2) + ((lam/m) * theta_2)
    grads = np.append(Theta1Grad.flatten(), Theta2Grad.flatten())
    return grads
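A backpropagation routine like gradients above is usually verified with a numerical gradient check before training. The helper below is a minimal sketch (not part of the original project) that approximates the gradient of any cost function of the unrolled parameter vector by central differences:

import numpy as np

def numerical_gradient(cost_fn, params, eps=1e-4):
    # cost_fn takes the unrolled parameter vector and returns a scalar cost
    num_grad = np.zeros_like(params)
    for i in range(params.size):
        step = np.zeros_like(params)
        step[i] = eps
        num_grad[i] = (cost_fn(params + step) - cost_fn(params - step)) / (2 * eps)
    return num_grad

# usage sketch (data, layer sizes and a one-hot Y assumed):
# approx = numerical_gradient(lambda p: costFunction(p, input_sz, hidden_sz, labels, X, Y, lam), nn_params)
# analytic = gradients(nn_params, input_sz, hidden_sz, labels, X, Y, lam)
# print(np.linalg.norm(approx - analytic) / np.linalg.norm(approx + analytic))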
Example #3
def costFunction(theta, X, y):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    return np.sum(first - second) / (len(X))
Example #4
def costFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X,
                 y, lam):
    theta1_params = nn_params[0:(hidden_layer_size * (input_layer_size + 1))]
    theta2_params = nn_params[(hidden_layer_size * (input_layer_size + 1)):]

    theta_1 = theta1_params.reshape(hidden_layer_size, input_layer_size + 1)
    theta_2 = theta2_params.reshape(num_labels, (hidden_layer_size + 1))

    m = X.shape[0]

    Z2 = np.c_[np.ones(m), X].dot(theta_1.T)
    A2 = sigmoid(Z2)

    Z3 = np.c_[np.ones(A2.shape[0]), A2].dot(theta_2.T)
    A3 = HX = sigmoid(Z3)

    firstPartOfCost = -((y) * np.log(HX))
    secondPartOfCost = ((1.0 - y) * np.log(1.0 - HX))

    allThetas = np.append(theta_1[:, 1:].flatten(), theta_2[:, 1:].flatten())  # exclude the bias columns from the penalty
    regularizationTerm = (lam / (2.0 * m)) * np.sum(np.power(allThetas, 2))

    J = ((1.0 / m) * np.sum(
        np.sum(firstPartOfCost - secondPartOfCost))) + regularizationTerm

    return J
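costFunction above multiplies y element-wise against HX, which has shape (m, num_labels), so it assumes the label vector has already been expanded into a one-hot matrix. A minimal sketch of that conversion (the helper name is ours; labels are assumed to run from 1 to num_labels as in the course data):

import numpy as np

def one_hot(y, num_labels):
    # y holds integer labels 1..num_labels; returns an (m, num_labels) 0/1 matrix
    m = y.size
    Y = np.zeros((m, num_labels))
    Y[np.arange(m), y.flatten().astype(int) - 1] = 1
    return Y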
Example #5
def predict(theta1, theta2, x):
    # Useful values
    m = x.shape[0]
    num_labels = theta2.shape[0]
    print('shape x {}, theta1 {}, theta2 {}'.format(x.shape, theta1.shape,
                                                    theta2.shape))
    # You need to return the following variable correctly
    p = np.zeros(m)

    # ===================== Your Code Here =====================
    # Instructions : Complete the following code to make predictions using
    #                your learned neural network. You should set p to a
    #                1-D array containing labels from 1 to num_labels.
    #
    x = np.c_[np.ones((m, 1)), x]

    a2 = sigmoid(np.dot(x, theta1.T))

    a2 = np.c_[np.ones((m, 1)), a2]

    a3 = sigmoid(np.dot(a2, theta2.T))

    p = np.argmax(a3, axis=1) + 1

    return p
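predict above expects pre-trained theta1 and theta2; in the original exercise they come from a saved MATLAB weights file, roughly as below (the file name and dictionary keys are assumptions about that dataset, and x is assumed to be the already-loaded (m, 400) image matrix):

import scipy.io as sio

weights = sio.loadmat('ex3weights.mat')                  # assumed file name
theta1, theta2 = weights['Theta1'], weights['Theta2']    # assumed keys
print(predict(theta1, theta2, x)[:10])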
Example #6
 def forward_pass(self, inputs):
     # We will calculate the output(s) by feeding the inputs forward through the network

     # If a forward pass has occurred before (i.e., the bias term has been appended to y_hidden), remove the bias from the hidden neurons
     if len(self.y_hidden)==(self.n_hidden+1):
         self.y_hidden = self.y_hidden[1:]
     
     # set hidden states and output states to zero
     self.reset_activations()
     
     # append term to be multiplied with the hidden layer's bias
     inputs = np.append(1, inputs)
     
     # activate hidden neurons
     for i in range(self.n_hidden):
         hidden_neuron = 0.0
         for j in range(len(inputs)):
             hidden_neuron += inputs[j] * self.w_hidden[j,i]
         self.y_hidden[i] = sigmoid(hidden_neuron)
     
     # append term to be multiplied with the output layer's bias
     self.y_hidden = np.append(1.0, self.y_hidden)
     
     # activate output neurons
     for i in range(self.n_out):
         output_neuron = 0.0
         for j in range(len(self.y_hidden)):
             output_neuron += self.y_hidden[j] * self.w_out[j,i]
         self.y_out[i] = sigmoid(output_neuron)
     
     predictions = self.y_out.copy()
     
     return predictions
Example #7
def return_cost(theta, X, y, lmd):
    J=0
    m=y.size
    J=(-1/m)*(y.dot(np.log(sigmoid(X.dot(theta))))+\
        (1-y).dot(np.log(1-sigmoid(X.dot(theta)))))+\
            (lmd/(2*m))*theta[1:].dot(theta[1:])
    return J
Example #9
def cost_function_reg(theta, X, y, lamda=0):

    cost = np.mean(-y * np.log(sigmoid(np.dot(X, theta))) -
                   (1 - y) * np.log(1 - sigmoid(np.dot(X, theta)))) + lamda / (
                       2 * X.shape[0]) * sum(theta[1:] * theta[1:])
    # theta[0] is not included in the regularization term

    return cost
Example #10
def costFunctionReg(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    reg = (learningRate /
           (2 * len(X))) * np.sum(np.power(theta[:, 1:theta.shape[1]], 2))
    return np.sum(first - second) / len(X) + reg
Example #11
def predict(theta1, theta2, x):
    m = x.shape[0]

    x = np.c_[np.ones(m), x]
    h1 = sigmoid(np.dot(x, theta1.T))
    h1 = np.c_[np.ones(h1.shape[0]), h1]
    h2 = sigmoid(np.dot(h1, theta2.T))
    p = np.argmax(h2, axis=1) + 1

    return p
Example #12
    def value(self, x):
        layer1 = sigmoid(np.dot(x, self.weights1))
        output = sigmoid(np.dot(layer1, self.weights2))

        if output[0][0] > 0.5:
            print("White")
        else:
            print("Black")
Example #13
def feedForward(theta, X):
    t1, t2 = deserialize(theta)
    m = X.shape[0]
    a1 = X

    z2 = a1 @ t1.T
    a2 = np.insert(sigmoid(z2), 0, np.ones(m), axis=1)

    z3 = a2 @ t2.T
    h = sigmoid(z3)

    return a1, z2, a2, z3, h
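feedForward above relies on a deserialize helper that is not shown in this listing, and it expects X to already carry the leading column of ones. A minimal sketch of the helper under the usual ex4 layer sizes (400 inputs, 25 hidden units, 10 labels), which are assumptions here:

import numpy as np

def deserialize(theta, input_size=400, hidden_size=25, num_labels=10):
    # split the unrolled parameter vector back into Theta1 and Theta2
    cut = hidden_size * (input_size + 1)
    t1 = theta[:cut].reshape(hidden_size, input_size + 1)
    t2 = theta[cut:].reshape(num_labels, hidden_size + 1)
    return t1, t2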
Example #14
def sigmoid_gradient(z):
    g = np.zeros(z.shape)

    # ===================== Your Code Here =====================
    # Instructions : Compute the gradient of the sigmoid function evaluated at
    #                each value of z (z can be a matrix, vector or scalar)
    #

    g = sigmoid(z) * (1 - sigmoid(z))
    # ===========================================================

    return g
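A quick sanity check on the formula, assuming the sigmoid helper sketched under Example #1: the gradient peaks at z = 0 and vanishes for large |z|.

print(sigmoid_gradient(np.array([-10.0, 0.0, 10.0])))  # roughly [0., 0.25, 0.]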
Example #15
def costFunction(theta, X, y):
    m = len(y)
    grad = np.zeros(np.shape(theta))
    J = 0
    thetat = theta.transpose()  # theta and X are assumed to be np.matrix, so '*' below is a matrix product
    for i in range(0,m):
        J = J + 1.0/ m * ( -y[i] * np.log(sigmoid(thetat*X[i, :].transpose())) - (1 - y[i]) * np.log(1-sigmoid(thetat*X[i, :].transpose())))

    for j in range(0, len(theta)):
        for i in range(0, m):
            grad[j] = grad[j] + 1.0 / m * (sigmoid(thetat*X[i,:].transpose())-y[i])* X[i,j]

    return [J, grad]
Example #16
def predict(theta1, theta2, x):
    m = x.shape[0]
    num_label = theta2.shape[0]
    x = np.c_[np.ones(m), x]  # 5000*401
    p = np.zeros(m)
    z2 = x.dot(theta1.T)  # 5000*25
    a2 = sigmoid(z2)  # 5000*25
    a2 = np.c_[np.ones(m), a2]  #5000*26
    z3 = a2.dot(theta2.T)  # 5000*10
    a3 = sigmoid(z3)
    p = np.argmax(a3, axis=1)
    p += 1
    return p
Example #17
def predict(theta1, theta2, X):
    # Useful values
    m = X.shape[0]
    a1 = np.c_[np.ones(m), X]  # input layer

    z2 = a1.dot(theta1.T)
    a2 = np.insert(sigmoid(z2), 0, np.ones(m),
                   axis=1)  # a2 = sigmoid(z2)  # hidden layer
    z3 = a2.dot(theta2.T)
    a3 = sigmoid(z3)  # output layer

    p = np.argmax(a3, axis=1) + 1

    return p
Example #18
def regularized_cost(theta, X, y, l):
    """
    don't penalize theta_0
    args:
        X: feature matrix, (m, n+1) # the x0 = 1 column has already been inserted
        y: target vector, (m, )
        l: lambda constant for regularization
    """
    thetaReg = theta[1:]
    first = (-y*np.log(sigmoid(X@theta))) + (y-1)*np.log(1-sigmoid(X@theta))
    reg = (thetaReg@thetaReg)*l / (2*len(X))
    cost = np.mean(first) + reg

    return cost
Example #19
def cost(theta, X, y):

    m = len(y)
    grad = np.zeros(np.shape(theta))
    J = 0.0
    thetat = theta.transpose()
    for i in range(0,m):
        if y[i]==1:
            J = J + 1.0/ m * ( -y[i] * np.log(sigmoid(thetat*X[i, :].transpose())))
        else:
            J = J + 1.0/ m * (-(1 - y[i]) * np.log(1-sigmoid(thetat*X[i, :].transpose())))
        #J = J + 1.0/ m * ( -y[i] * np.log(sigmoid(thetat*X[i, :].transpose())) - (1 - y[i]) * np.log(1-sigmoid(thetat*X[i, :].transpose())))
    print(J)
    return J
Example #20
def costFunctionReg(theta, X, y, RegParam):
    '''
    J = COSTFUNCTIONREG(theta, X, y, RegParam) computes the cost of using
    theta as the parameter for regularized logistic regression and the
    gradient of the cost w.r.t. the parameters.
    '''
    (m, n) = X.shape
    theta = theta.reshape((n, 1))
    h = sigmoid(np.dot(X, theta))

    # Regularize Cost function:
    # NB. Regularization term starts at theta1
    J = 1/m * (-np.dot(np.transpose(np.vstack(y)), np.log(h))
               - np.dot(np.transpose(np.vstack(1-y)), np.log(1-h))) \
        + (RegParam/(2*m)) * np.sum(np.power(theta[1:n], 2))

    # NB. Regularization term starts at theta1
    # compute regularization term for all
    grad = np.dot(np.transpose(X), (h-np.vstack(y)))/m \
        + np.dot((RegParam/m), theta)

    # adjust for the first term, theta0
    grad[0] = np.dot(np.transpose(np.vstack(X.iloc[:, 0])),
                     (h - np.vstack(y))) / m

    return J, grad.flatten()
Example #21
 def backprop(self, x, y):
     """Return a tuple ``(nabla_b, nabla_w)`` representing the
     gradient for the cost function C_x.  ``nabla_b`` and
     ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
     to ``self.biases`` and ``self.weights``."""
     nabla_b = [np.zeros(b.shape) for b in self.biases]
     nabla_w = [np.zeros(w.shape) for w in self.weights]
     # feedforward
     activation = x
     activations = [x] # list to store all the activations, layer by layer
     zs = [] # list to store all the z vectors, layer by layer
     for b, w in zip(self.biases, self.weights):
         z = np.dot(w, activation)+b
         zs.append(z)
         activation = sigmoid(z)
         activations.append(activation)
     # backward pass
     delta = self.cost_derivative(activations[-1], y) * \
         sigmoid_prime(zs[-1])
     nabla_b[-1] = delta
     nabla_w[-1] = np.dot(delta, activations[-2].transpose())
     # Note that the variable l in the loop below is used a little
     # differently to the notation in Chapter 2 of the book.  Here,
     # l = 1 means the last layer of neurons, l = 2 is the
     # second-last layer, and so on.  It's a renumbering of the
     # scheme in the book, used here to take advantage of the fact
     # that Python can use negative indices in lists.
     for l in range(2, self.num_layers):
         z = zs[-l]
         sp = sigmoid_prime(z)
         delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
         nabla_b[-l] = delta
         nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
     return (nabla_b, nabla_w)
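backprop above also depends on sigmoid_prime and self.cost_derivative from the same project (Nielsen's network.py); they are not reproduced in this listing, but behave essentially like the sketches below, stated here as assumptions (quadratic cost assumed):

def sigmoid_prime(z):
    # derivative of the sigmoid activation
    return sigmoid(z) * (1 - sigmoid(z))

def cost_derivative(output_activations, y):
    # dC/da for the quadratic cost C = 0.5 * ||a - y||^2
    return output_activations - y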
Example #22
def gradient_reg(theta, X, y, lamda=0):

    temp = np.zeros(X.shape[1])
    temp[1:] = lamda / X.shape[0] * theta[1:]  # do not penalize the first term, theta[0]
    grad = (1 / X.shape[0]) * np.dot(X.T, sigmoid(np.dot(X, theta)) - y) + temp

    return grad
Example #23
def lr_cost_function(theta, X, y, lmd):
    m = y.size

    # You need to return the following values correctly
    cost = 0
    grad = np.zeros(theta.shape)

    # ===================== Your Code Here =====================
    # Instructions : Compute the cost of a particular choice of theta
    #                You should set cost and grad correctly.
    #

    hypothesis = sigmoid(np.dot(X, theta))

    reg_theta = theta[1:]

    cost = np.sum(-y * np.log(hypothesis) - np.subtract(1, y) * np.log(np.subtract(1, hypothesis))) / m \
           + (lmd / (2 * m)) * np.sum(reg_theta * reg_theta)

    error = np.subtract(hypothesis, y)

    grad = np.dot(X.T, error) / m
    grad[1:] = grad[1:] + reg_theta * (lmd / m)

    # =========================================================

    return cost, grad
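Because lr_cost_function returns both the cost and its gradient, it can be handed directly to scipy's optimizers with jac=True; a minimal usage sketch (X, y and lmd are assumed to be defined as in the examples above):

import numpy as np
from scipy.optimize import minimize

initial_theta = np.zeros(X.shape[1])   # one parameter per column of X, including the intercept
res = minimize(lr_cost_function, initial_theta, args=(X, y, lmd),
               jac=True, method='TNC', options={'maxiter': 400})
theta_opt, final_cost = res.x, res.fun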
Example #24
def cost_function_reg(theta, X, y, lmd):
    m = y.size

    # You need to return the following values correctly
    cost = 0
    grad = np.zeros(theta.shape)

    # ===================== Your Code Here =====================
    # Instructions : Compute the cost of a particular choice of theta
    #                You should set cost and grad correctly.
    #

    h = sigmoid(np.dot(X, theta))

    # no penalty for theta0

    cost = 1. / m * np.sum(-y * np.log(h) - (1 - y) * np.log(1 - h))
    cost = cost + lmd / (2. * m) * np.dot(theta,
                                          theta) - lmd / (2. * m) * theta[0]**2

    loss = h - y
    grad = 1. / m * np.dot(np.transpose(X), loss) + lmd / (1. * m) * theta
    grad[0] -= lmd / (1. * m) * theta[0]

    # ===========================================================

    return cost, grad
Example #25
def cost_function_reg(theta, X, y, lmd):
    m = y.size

    # You need to return the following values correctly
    cost = 0
    grad = np.zeros(theta.shape)

    # ===================== Your Code Here =====================
    # Instructions : Compute the cost of a particular choice of theta
    #                You should set cost and grad correctly.
    #

    hypothesis = sigmoid(np.dot(X, theta))

    reg_theta = theta[1:]

    cost = np.sum(-y * np.log(hypothesis) - (1 - y) * np.log(1 - hypothesis)) / m \
           + (lmd / (2 * m)) * np.sum(reg_theta * reg_theta)

    normal_grad = (np.dot(X.T, hypothesis - y) / m).flatten()

    grad[0] = normal_grad[0]
    grad[1:] = normal_grad[1:] + reg_theta * (lmd / m)

    # ===========================================================

    return cost, grad
Example #26
def predict_one_vs_all(all_theta, X):
    m = X.shape[0]
    num_labels = all_theta.shape[0]

    # You need to return the following variable correctly;
    p = np.zeros(m)

    # Add ones to the X data matrix
    X = np.c_[np.ones(m), X]

    # ===================== Your Code Here =====================
    # Instructions : Complete the following code to make predictions using
    #                your learned logistic regression parameters (one vs all).
    #                You should set p to a vector of predictions (from 1 to
    #                num_labels)
    #
    # Hint : This code can be done all vectorized using the max function
    #        In particular, the max function can also return the index of the
    #        max element, for more information see 'np.argmax' function.
    #
    y = sigmoid(np.matmul(X, all_theta.T))
    p = np.argmax(y, axis=1)
    p[p == 0] = 10

    return p
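predict_one_vs_all assumes an all_theta matrix with one row of learned logistic-regression parameters per label. A minimal training sketch that builds it, reusing lr_cost_function from Example #23 and placing the classifier for label c in row c-1 (that row ordering is an assumption of this sketch, not of the project above):

import numpy as np
from scipy.optimize import minimize

def one_vs_all(X, y, num_labels, lmd):
    # trains one regularized logistic-regression classifier per label
    m, n = X.shape
    X = np.c_[np.ones(m), X]                      # intercept column
    all_theta = np.zeros((num_labels, n + 1))
    for c in range(1, num_labels + 1):
        res = minimize(lr_cost_function, np.zeros(n + 1),
                       args=(X, (y == c).astype(float), lmd),
                       jac=True, method='TNC', options={'maxiter': 100})
        all_theta[c - 1] = res.x
    return all_theta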
Example #27
def costReg(lam, theta, X, y):

	m = len(y)
	grad = np.zeros(np.shape(theta))
	J = 0.0
	thetat = theta.transpose()
	for i in range(0,m):
		if y[i]==1:
			J = J + 1.0/ m * ( -y[i] * np.log(sigmoid(thetat*X[i, :].transpose())))
		else:
			J = J + 1.0/ m * (-(1 - y[i]) * np.log(1-sigmoid(thetat*X[i, :].transpose())))
			#J = J + 1.0/ m * ( -y[i] * np.log(sigmoid(thetat*X[i, :].transpose())) - (1 - y[i]) * np.log(1-sigmoid(thetat*X[i, :].transpose())))
	for j in range(1, len(theta)):
		J = J + lam/ (2*m) *theta[j]**2

	print(J)
	return J
Example #28
def cross_entropy_loss(theta, X, y):
    m, n = X.shape
    y = y.reshape((m, 1))
    theta = theta.reshape((n, 1))
    h = sigmoid(np.dot(X, theta))
    J = np.sum(-y * np.log(h) - (1 - y) * np.log(1 - h)) / m

    return J
Example #29
def cross_entropy_loss_reg(theta, X, y, reg_lambda):
    m, n = X.shape
    y = y.reshape((m, 1))
    theta = theta.reshape((n, 1))
    h = sigmoid(np.dot(X, theta))
    J = (2 * np.sum(-y * np.log(h) - (1 - y) * np.log(1 - h)) + reg_lambda * np.sum(theta[1:] ** 2)) / (2 * m)  # theta[0] is not penalized, matching the gradient below

    return J
Example #30
def CostFunction(theta, X, y):
    m = len(y)
    J = 0
    grad = np.zeros(np.shape(theta))
    g = sigmoid(np.dot(X, theta))
    J = np.mean(((-y) * (np.log(g))) - ((1 - y) * (np.log(1 - g))))
    grad = np.mean((g.reshape(m, 1) - y.reshape(m, 1)) * X, axis=0)
    return J, grad
Example #31
    def feedForward(self, activation):
        #return output of network
        #iterate over all layers and feed activation forward

        activation = np.array(activation)
        for b, w in zip(self.biases, self.weights):
            activation = sigmoid(np.matmul(w, activation) + b[0])
        return activation
Example #32
def plot_sigmoid():
    x = np.linspace(1, 2000) / 100.0 - 10
    y = sigmoid(x)
    fig, ax1 = plt.subplots()
    ax1.plot(x, y)
    # set label of horizontal axis
    ax1.set_xlabel('x')
    # set label of vertical axis
    ax1.set_ylabel('sigmoid(x)')
Example #33
def grad(theta, X, y):
    m = len(y)
    grad = np.zeros(np.shape(theta))
    thetat = theta.transpose()
    for j in range(0, len(theta)):
        for i in range(0, m):
            grad[j] = grad[j] + 1.0 / m * (sigmoid(thetat*X[i,:].transpose())-y[i])* X[i,j]

    return grad
Example #34
def costFunction(theta, X, y, lamda):
	m = shape(X)[0]
	hypo = sigmoid(X.dot(theta))
	term1 = log(hypo).T.dot(-y)
	term2 = log(1.0 - hypo).T.dot(1-y)

	left = (term1 - term2)/m
	right = theta.T.dot(theta)*lamda/(2*m)
	return left + right
Example #35
def cross_entropy_gradient(theta, X, y):
    m, n = X.shape
    y = y.reshape((m, 1))
    theta = theta.reshape((n, 1))
    h = sigmoid(np.dot(X, theta))
    grad = np.dot(np.transpose(X), h - y) / m
    grad = np.ndarray.flatten(grad)

    return grad
Example #36
def cross_entropy_gradient_reg(theta, X, y, reg_lambda):
    m, n = X.shape
    y = y.reshape((m, 1))
    theta = theta.reshape((n, 1))
    h = sigmoid(np.dot(X, theta))
    grad = np.dot(np.transpose(X), h - y) / m  # average over the m examples
    grad[1:] = grad[1:] + (reg_lambda / m) * theta[1:]
    grad = np.ndarray.flatten(grad)

    return grad
Example #37
def gradReg(lam, theta, X, y):
	m = len(y)
	grad = np.zeros(np.shape(theta))
	thetat = theta.transpose()
	for j in range(0, len(theta)):
		if (j!=0):
			grad[j] = grad[j] + lam/m*theta[j]  # gradient of the penalty term is (lam/m)*theta[j]
		for i in range(0, m):
			grad[j] = grad[j] + 1.0 / m * (sigmoid(thetat*X[i,:].transpose())-y[i])* X[i,j]


	return grad
Example #39
def costFunctionReg(theta, X, y, _lambda):
    '''costFunctionReg() - Regularized Logistic Regression Cost'''

    m = len(X)
    z = np.dot(X, theta.T)
    h = sigmoid(z)

    # cost
    pos = np.dot(-y, np.log(h))
    neg = np.dot(1 - y, np.log(1 - h))
    cost = (pos - neg)

    # regularization term
    reg_para = (_lambda / (2. * m)) * sum([th ** 2 for th in theta[1:]])

    # cost with regularization
    costReg = (1. / m) * (cost + reg_para)

    return costReg
Example #40
def gradient(theta, X, y):
    m = float(len(y))
    print('shape sigmoid(X.dot(theta)):', shape(sigmoid(X.dot(theta))))
    grad = (1/m) * (X.T).dot((sigmoid(X.dot(theta)) - y))
    return grad
Example #41
def sigSq(X):
    
    return np.square(sigmoid(X))
Example #42
def costFunction(theta, X, y):
	m = shape(X)[0]
	hypo = sigmoid(X.dot(theta))
	term1 = log(hypo).T.dot(-y)
	term2 = log(1.0 - hypo).T.dot(1-y)
	return ((term1 - term2)/m).flatten()
Example #43
File: ex2.py Project: iamaziz/ml
def ex2():
    #%% Load Data
    #%  The first two columns contains the exam scores and the third column
    #%  contains the label.
    data = np.loadtxt('data/ex2data1.txt', delimiter=',')
    x = data[:, :2]
    y = data[:, 2]
    #%% ==================== Part 1: Plotting ====================
    #%  We start the exercise by first plotting the data to understand the
    #%  problem we are working with.
    print(
        'Plotting data with o indicating (y = 1) examples and x indicating (y = 0) examples.\n')
    plotData(x, y)

    plt.xlabel('Exam 1 Score')
    plt.ylabel('Exam 2 Score')
    plt.legend(['Admitted', 'Not admitted'], bbox_to_anchor=(1.5, 1))
    plt.show()
    #%% ============ Part 2: Compute Cost and Gradient ============
    #%  In this part of the exercise, you will implement the cost and gradient
    #%  for logistic regression. You need to complete the code in costFunction()

    #%  Setup the data matrix appropriately, and add ones for the intercept term
    [m, n] = x.shape

    #% Add intercept term to x and X_test
    ones = np.ones(m)
    X = np.array([ones, x[:, 0], x[:, 1]]).T

    #% Initialize fitting parameters
    initial_theta = np.zeros(n + 1)

    #% Compute and display initial cost and gradient
    cost = costFunction(initial_theta, X, y)
    print('Cost at initial theta (zeros):\n{}'.format(cost))
    # ============= Part 3: Optimizing using fmin() or minimize()
    print('Gradient at initial theta (zeros):\n{}'.format(initial_theta))
    # %  In this exercise, you will use a built-in function (scipy.optimize.fmin) to find the
    # %  optimal parameters theta.
    f = lambda t: costFunction(t, X, y)  # %  Set options for fmin()
    fmin_opt = {'full_output': True, 'maxiter': 400, 'retall': True}
    # %  Run fmin to obtain the optimal theta
    theta, cost, iters, calls, warnflag, allvecs = fmin(
        f, initial_theta, **fmin_opt)
    print('Cost at theta found by fmin(): {}'.format(cost))
    print('theta: {}'.format(theta))

    # %  Set options for minimize()
    # mini_opt = {'maxiter': 400, 'disp': True}
    # %  Run minimize to obtain the optimal theta
    # results = minimize(f, initial_theta, method='Nelder-Mead', options=mini_opt)
    # cost = results['fun']
    # theta = results['x']
    # print('Cost at theta found by minimize(): {}'.format(cost))
    # print('theta: {}'.format(theta))

    cost_change = [costFunction(allvecs[i], X, y) for i in range(len(allvecs))]
    plt.plot(cost_change)
    plt.grid()
    plt.title('cost function $y$ per iteration $x$')
    plt.show()  # % Print theta to screen
    print('Cost at theta found by fmin:\n{}\n'.format(cost))
    print('theta: \n')
    print('{}\n'.format(theta))

    # % Plot Boundary
    plotDecisionBoundary(theta, X, y)

    # % Show plot
    plt.show()

    # %% ============== Part 4: Predict and Accuracies ==============
    # %  After learning the parameters, you'll want to use them to predict the outcomes
    # %  on unseen data. In this part, you will use the logistic regression model
    # %  to predict the probability that a student with score 45 on exam 1 and
    # %  score 85 on exam 2 will be admitted.
    # %
    # %  Furthermore, you will compute the training and test set accuracies of
    # %  our model.
    # %
    # %  Your task is to complete the code in predict.m

    # %  Predict probability for a student with score 45 on exam 1
    # %  and score 85 on exam 2

    scores = np.array([1, 45, 85])
    prob = sigmoid(np.dot(scores, theta))
    print(
        'For a student with scores 45 and 85, we predict an admission probability of:\n{}\n\n'.format(prob))

    # % Compute accuracy on our training set
    p = predict(theta, X)

    print('Train Accuracy: {}\n'.format(np.mean(np.double(p == y)) * 100))
Example #44
def sigSqgrad(X):
    
    return 2*(sigmoid(X))*sigmoidGrad(X)
Example #45
def sigmoidGradient(z):
    return sigmoid(z) * (1 - sigmoid(z))
Example #46
def predict(theta, X):
    p = sigmoid(X.dot(theta)) >= 0.5
    return p
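A common follow-up to predict is measuring training accuracy against the known 0/1 labels (theta, X and y are assumed to be defined as in the earlier examples, with X already carrying the intercept column):

p = predict(theta, X)
print('Train Accuracy: {:.2f}%'.format(np.mean(p == y) * 100))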
Example #47
x0_abs = []
x0_ord = []
x1_abs = []
x1_ord = []
for i in range(0,len(label)):
    if label[i]==1:
        x1_abs.append(list_abs[i])
        x1_ord.append(list_ord[i])
    else:
        x0_abs.append(list_abs[i])
        x0_ord.append(list_ord[i])
sets = [[x0_abs,x0_ord,'o'],[x1_abs,x1_ord,'+']]
MultiScatter(sets)

zz = np.mat([[1,2,3],[4,5,6]])
zz  = np.mat(1)
zz = np.mat([-723.2])
gg = sigmoid(zz)
print(gg)

[m, n] = np.shape(X)
initial_theta  = np.zeros((n, 1))

result_jg = costFunction(initial_theta, X ,y)
print(result_jg)

#xx = map(lambda t: 0.206+t/25.0,range(-25,25, 1))
#yy = map(lambda t: 0.201+t/25.0,range(-25,25, 1))

#zz = [0.0]*len(xx)
#for i in range(0,len(xx)):
#Plot(xx,zz)