Example #1
 def test_gradientDescent(self):
     theta = np.zeros((2,1))
     iterations = 1500
     alpha = 0.01
     converged_theta = gradientDescent(self.X, self.y, theta, alpha, iterations)
     self.assertAlmostEqual(converged_theta[0], -3.63, places=2)
     self.assertAlmostEqual(converged_theta[1], 1.17, places=2)
     self.assertAlmostEqual(hypothesis(np.array([1,3.5]),converged_theta), 0.45, places=2)
     self.assertAlmostEqual(hypothesis(np.array([1,7]),converged_theta), 4.53, places=2)
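
This test drives batch gradient descent for univariate linear regression; the expected theta of roughly (-3.63, 1.17) matches the classic food-truck profit dataset from the ex1 exercise. A minimal sketch of the hypothesis and gradientDescent being tested, assuming X already carries the bias column and hypothesis is the plain linear model x . theta:

 import numpy as np

 def hypothesis(x, theta):
     # Linear model: h(x) = x . theta, returned as a plain Python float
     return np.dot(x, theta).item()

 def gradientDescent(X, y, theta, alpha, iterations):
     # Batch gradient descent on the mean squared error cost
     m = len(y)
     for _ in range(iterations):
         error = X.dot(theta) - y          # (m, 1) residuals
         theta = theta - alpha / m * X.T.dot(error)
     return theta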
Example #2
 def test_prediction(self):
     self.X = np.concatenate([np.ones((self.m, 1)), self.X], axis=1)
     theta = np.zeros((self.n + 1, 1))
     theta_optimized, _ = gradient_descent(self.X, self.y, theta)
     test_data = np.array([1, 45, 85]).reshape((1, 3))
     prediction = hypothesis(test_data, theta_optimized)
     self.assertAlmostEqual(prediction, 0.776, places=3)
     self.assertEqual(classify(test_data, self.X, theta_optimized), 1)
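
Here hypothesis is the logistic rather than the linear model: 0.776 is the admission probability the ex2 exam-score data gives for the point (45, 85). A hedged sketch of the sigmoid hypothesis and a 0.5-threshold classify with the same signatures as the test (the training matrix passed to classify is not needed for a single prediction and is ignored in this sketch):

 import numpy as np

 def sigmoid(z):
     return 1.0 / (1.0 + np.exp(-z))

 def hypothesis(x, theta):
     # Logistic model: estimated probability that y = 1 for one example
     return sigmoid(x.dot(theta)).item()

 def classify(x, X, theta):
     # Predict class 1 when the estimated probability reaches 0.5
     return 1 if hypothesis(x, theta) >= 0.5 else 0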
Example #3
 def use_function(self, vector):
     vector = np.insert(vector, 0, 1)
     new_vector = []
     if self.feature_scaling_coeficient is not None:
         for i in range(len(vector)):
             new_vector.append(
                 utils.feature_scaling_corrector(
                     vector[i], self.feature_scaling_coeficient[i]))
     else:
         new_vector = vector
     print(new_vector)
     return utils.hypothesis(self.theta_values, new_vector)
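
use_function re-applies the scaling learned during training to a raw input vector before evaluating the hypothesis. feature_scaling_corrector is not shown in the excerpt; a plausible sketch, assuming each stored coefficient is a (mean, spread) pair per feature:

 def feature_scaling_corrector(value, coefficient):
     # Hypothetical helper: rescale one raw feature with the statistics
     # (mean, spread) that were recorded when the model was trained
     mean, spread = coefficient
     return (value - mean) / spread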
Example #4
 def test_classify_all(self):
     m,n = self.X.shape
     _,K = self.y.shape
     X = np.hstack(( np.ones((m,1)), self.X ))
     initial_theta = np.zeros((n+1,K))
     lamda = 0.1
     theta = classifyall(initial_theta, X, self.y, lamda)
     hypo = hypothesis(X, theta)
     predicted_y = hypo.argmax(axis=1)
     expected_y = np.array([ d if d!=10 else 0 for d in self.data['y'].reshape(-1)])
     acc = accuracy(predicted_y,expected_y)
     self.assertAlmostEqual(acc, 94.9, places=0) # I can't get 94.9, only 94.64.... close enough I guess
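
classifyall presumably trains one regularized logistic classifier per column of y (one-vs-all over the ten digit classes, with label 10 standing in for 0 as in the ex3 data), so hypothesis(X, theta) yields an (m, K) matrix of probabilities whose row-wise argmax is the predicted digit. A sketch of the accuracy helper the assertion relies on, assuming it reports percent correct:

 import numpy as np

 def accuracy(predicted_y, expected_y):
     # Share of examples whose predicted class matches the label, in percent
     return np.mean(predicted_y == expected_y) * 100.0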
Example #6
 def test_gradient_descent(self):
     iterations = 400
     alpha = 0.01 #[0.01, 0.03, 0.1, 0.3, 1.0]
     m,n = self.X.shape
     theta = np.zeros((n+1,1))
     
     # add x_0 and do feature normalization on the rest of the columns
     self.X = np.concatenate([np.ones((self.m,1)),self.X],axis=1)
     self.X[:,1:n+1], mu, sigma = feature_normalization(self.X[:,1:n+1])
     
     theta = gradientDescent(self.X, self.y, theta, alpha, iterations)
     
     test = np.array([1.0, 1650.0, 3.0]).reshape((3,1))
     test[1:,:] = ( test[1:,:] - mu ) / sigma
     test = test.reshape((1,3)) # m=1, n=2, because there is 1 test case, with 2 features in it
     self.assertAlmostEqual(hypothesis(test, theta), 289314.62, places=2)
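
feature_normalization standardizes each column and hands back the statistics so the 1650 sq ft, 3 bedroom test point can be rescaled the same way before prediction. A sketch consistent with the broadcasting in the test, which needs mu and sigma as (n, 1) column vectors:

 import numpy as np

 def feature_normalization(X):
     # Standardize every feature column to zero mean and unit spread.
     # mu and sigma are returned as column vectors so a single (n, 1)
     # test point can be rescaled with the same statistics.
     mu = X.mean(axis=0).reshape(-1, 1)
     sigma = X.std(axis=0).reshape(-1, 1)   # the original repo may use ddof=1
     X_norm = (X - mu.T) / sigma.T
     return X_norm, mu, sigma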
Example #7
    def test_plot_decision_boundary(self):
        negatives = self.data[self.data[:, 2] == 0]  # SELECT * FROM self.data WHERE col2 == 0
        positives = self.data[self.data[:, 2] == 1]  # SELECT * FROM self.data WHERE col2 == 1
        plt.xlabel("Microchip Test 1")
        plt.ylabel("Microchip Test 2")
        plt.scatter(negatives[:, 0],
                    negatives[:, 1],
                    c='y',
                    marker='o',
                    s=40,
                    linewidths=1,
                    label="y=0")
        plt.scatter(positives[:, 0],
                    positives[:, 1],
                    c='b',
                    marker='+',
                    s=40,
                    linewidths=2,
                    label="y=1")

        dimension = 6
        X_mapped = map_feature(self.X[:, 0], self.X[:, 1], dimension)
        m, n = X_mapped.shape
        X_mapped = np.hstack((np.ones((m, 1)), X_mapped))
        theta = np.zeros((n + 1, 1))
        lamda = 1.0
        theta_optimized, min_cost = regularized_gradient_descent(
            X_mapped, self.y, theta, lamda)

        x1 = np.linspace(-1, 1.5, 50)
        x2 = np.linspace(-1, 1.5, 50)

        X1, X2 = np.meshgrid(x1, x2)
        hypo = np.zeros((len(x1), len(x2)))
        for i in range(0, len(x1)):
            for j in range(0, len(x2)):
                mapped = map_feature(
                    np.array([X1[i][j]]).reshape((1, 1)),
                    np.array([X2[i][j]]).reshape((1, 1)), dimension)
                mapped = np.hstack((np.ones((1, 1)), mapped))
                hypo[i][j] = hypothesis(mapped, theta_optimized)[0]

        plt.contour(X1, X2, hypo, [0.5], label='Decision Boundary')
        plt.legend()
        plt.show()
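
map_feature expands the two microchip test scores into all polynomial terms up to the given degree, which is what lets a linear classifier draw the non-linear boundary contoured at h(x) = 0.5 above. A sketch consistent with the shapes used in the test (27 columns for degree 6, with the bias column added by the caller):

 import numpy as np

 def map_feature(x1, x2, degree):
     # All monomials x1^(i-j) * x2^j for 1 <= i <= degree, 0 <= j <= i,
     # stacked as columns (27 of them for degree 6)
     x1 = np.asarray(x1, dtype=float).reshape(-1, 1)
     x2 = np.asarray(x2, dtype=float).reshape(-1, 1)
     columns = []
     for i in range(1, degree + 1):
         for j in range(i + 1):
             columns.append((x1 ** (i - j)) * (x2 ** j))
     return np.hstack(columns)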
Example #8
 def test_normal_equation(self):
     self.X = np.concatenate([np.ones((self.m,1)),self.X],axis=1)
     theta = normal_equation(self.X, self.y)
     test = np.array([1.0, 1650.0, 3.0]).reshape((1,3))
     self.assertAlmostEqual(hypothesis(test, theta), 293081.46, places=2)
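
The closed-form solution needs no learning rate or iteration count; 293,081.46 for a 1650 sq ft, 3 bedroom house is the usual normal-equation answer on the ex1 housing data. A minimal sketch of normal_equation, using the pseudo-inverse in case X^T X is singular:

 import numpy as np

 def normal_equation(X, y):
     # Closed-form least squares: theta = pinv(X^T X) X^T y
     return np.linalg.pinv(X.T.dot(X)).dot(X.T).dot(y)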