import numpy as np

from prediction import simple_predict


def test_correct():
    x = np.arange(1, 6)
    # Example 1:
    theta1 = np.array([5, 0])
    assert np.equal(simple_predict(x, theta1),
                    np.array([5., 5., 5., 5., 5.])).all()
    # Do you understand why y_hat contains only 5's here?

    # Example 2:
    theta2 = np.array([0, 1])
    assert (simple_predict(x, theta2) == np.array([1., 2., 3., 4., 5.])).all()
    # Do you understand why y_hat == x here?

    # Example 3:
    theta3 = np.array([5, 3])
    assert (simple_predict(x, theta3) == np.array([8., 11., 14., 17., 20.])).all()

    # Example 4:
    theta4 = np.array([-3, 1])
    assert (simple_predict(x, theta4) == np.array([-2., -1., 0., 1., 2.])).all()
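# The tests above assume a univariate simple_predict implementing
# y_hat = theta[0] + theta[1] * x for a 1-D x. Below is a minimal sketch
# consistent with the expected arrays; the real prediction.py may differ
# (e.g. by building an explicit design matrix).
import numpy as np


def simple_predict(x, theta):
    """Return y_hat = theta[0] + theta[1] * x, or None on invalid input."""
    if x is None or theta is None or x.size == 0 or theta.size != 2:
        return None
    return theta[0] + theta[1] * x.astype(float)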
import matplotlib.pyplot as plt

from prediction import simple_predict


def plot(x, y, theta):
    """Plot the data and prediction line from three non-empty numpy.ndarray.

    Args:
        x: has to be a numpy.ndarray, a vector of dimension m * 1.
        y: has to be a numpy.ndarray, a vector of dimension m * 1.
        theta: has to be a numpy.ndarray, a vector of dimension 2 * 1.
    Returns:
        Nothing.
    Raises:
        This function should not raise any Exception.
    """
    y_hat = simple_predict(x, theta)
    plt.plot(x, y_hat, 'r')   # prediction line in red
    plt.plot(x, y, 'bo')      # data points as blue dots
    plt.show()
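# A quick manual check of plot(). The x, y, and theta values below are made up
# for illustration only; they are not part of the exercise data.
import numpy as np

if __name__ == "__main__":
    x = np.arange(1, 6)
    y = np.array([3.5, 5.0, 7.2, 9.1, 10.8])   # arbitrary noisy-looking targets
    theta = np.array([1.5, 2.0])                # arbitrary parameters
    plot(x, y, theta)   # draws blue data points and the red prediction line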
import numpy as np

from prediction import simple_predict
from tools import add_intercept  # assumption: add_intercept comes from the earlier tools exercise


def gradient(x, y, theta):
    """Computes a gradient vector from three non-empty numpy.ndarray,
    without any for-loop. The three arrays must have compatible dimensions.

    Args:
        x: has to be a numpy.ndarray, a matrix of dimension m * n.
        y: has to be a numpy.ndarray, a vector of dimension m * 1.
        theta: has to be a numpy.ndarray, a vector of dimension (n + 1) * 1.
    Returns:
        The gradient as a numpy.ndarray, a vector of dimension (n + 1) * 1,
        containing the result of the formula for all j.
        None if x, y, or theta is an empty numpy.ndarray.
        None if x, y and theta do not have compatible dimensions.
    Raises:
        This function should not raise any Exception.
    """
    # Check for None first so the len() calls below cannot raise.
    if x is None or y is None or theta is None:
        return None
    if len(x) < 1 or len(y) < 1 or len(theta) < 1 or x.shape[0] != y.shape[0]:
        return None
    y_hat = simple_predict(x, theta)
    # Vectorized formula: (1 / m) * X'^T . (y_hat - y), with X' = x plus a leading column of ones.
    return np.matmul(add_intercept(x).transpose(), (y_hat - y)) / y.shape[0]
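# gradient() relies on an add_intercept() helper that is not shown in this
# snippet. A minimal sketch of what it is assumed to do (prepend a column of
# ones so that theta[0] acts as the bias term):
import numpy as np


def add_intercept(x):
    """Return x with a leading column of ones; None if x is None or empty."""
    if x is None or x.size == 0:
        return None
    x = x.reshape(-1, 1) if x.ndim == 1 else x
    return np.hstack((np.ones((x.shape[0], 1)), x))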
#!/usr/bin/python3
import numpy as np

from prediction import simple_predict

x = np.arange(1, 6)
print(simple_predict(x, np.array([5, 0])))
print(simple_predict(x, np.array([0, 1])))
print(simple_predict(x, np.array([5, 3])))
print(simple_predict(x, np.array([-3, 1])))
import numpy as np

from prediction import simple_predict

x = np.arange(1, 6)
theta1 = np.array([5, 0])
print(simple_predict(x, theta1))
print(np.array([5., 5., 5., 5., 5.]))  # expected output for theta1 = [5, 0]
import numpy as np

from prediction import simple_predict

x = np.arange(1, 13).reshape((4, 3))

# Example 1:
theta1 = np.array([5, 0, 0, 0])
print(simple_predict(x, theta1))
# Output:
# array([5., 5., 5., 5.])
# Do you understand why y_hat contains only 5's here?

# Example 2:
theta2 = np.array([0, 1, 0, 0])
print(simple_predict(x, theta2))
# Output:
# array([ 1.,  4.,  7., 10.])
# Do you understand why y_hat == x[:,0] here?

# Example 3:
theta3 = np.array([-1.5, 0.6, 2.3, 1.98])
print(simple_predict(x, theta3))
# Output:
# array([ 9.64, 24.28, 38.92, 53.56])

# Example 4:
theta4 = np.array([-3, 1, 2, 3.5])
print(simple_predict(x, theta4))
# Output:
# array([12.5, 32. , 51.5, 71. ])
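# This script uses the multi-feature form of simple_predict: for an m * n
# matrix x and a theta of n + 1 components, y_hat = X' . theta where X' is x
# with a leading column of ones. A minimal sketch consistent with the outputs
# above; the real prediction.py may differ.
import numpy as np


def simple_predict(x, theta):
    """Return X' . theta with X' = [ones | x]; None on invalid input."""
    if x is None or theta is None or x.size == 0 or x.ndim != 2:
        return None
    if theta.size != x.shape[1] + 1:
        return None
    x_prime = np.hstack((np.ones((x.shape[0], 1)), x))
    return np.matmul(x_prime, theta)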
import numpy as np

from prediction import simple_predict


def test_better():
    x = np.arange(1, 6)
    # Example 1:
    theta1 = np.array([5, 1])
    assert np.equal(simple_predict(x, theta1),
                    np.array([6., 7., 8., 9., 10.])).all()
#!/usr/bin/env python3
import numpy as np

from prediction import simple_predict

x = np.arange(1, 6)

# Example 1:
theta1 = np.array([5, 0])
p = simple_predict(x, theta1)
assert np.array_equal(p, [5., 5., 5., 5., 5.])
print(p)

# Example 2:
theta2 = np.array([0, 1])
p = simple_predict(x, theta2)
assert np.array_equal(p, [1., 2., 3., 4., 5.])
print(p)

# Example 3:
theta3 = np.array([5, 3])
p = simple_predict(x, theta3)
assert np.array_equal(p, [8., 11., 14., 17., 20.])
print(p)

# Example 4:
theta4 = np.array([-3, 1])
p = simple_predict(x, theta4)
assert np.array_equal(p, [-2., -1., 0., 1., 2.])
print(p)