def test_linear_activation_forward4(self):
    """ReLU forward pass where both pre-activations are negative: output is all zeros.

    Also checks that the returned cache holds the linear inputs (A_prev, W, b)
    and the pre-activation values Z.
    """
    prev_activations = array([[-0.00172428, -0.00877858],
                              [0.00042214, 0.00582815],
                              [-0.01100619, 0.01144724]])
    weights = array([[0.00901591, 0.00502494, 0.00900856]])
    bias = array([[-0.00683728]])

    activations, cache = linear_activation_forward(
        prev_activations, weights, bias, activation="relu")

    # Both columns of W @ A_prev + b are negative, so ReLU clips them to 0.
    assert_allclose(activations, array([[0, 0]]),
                    rtol=0, atol=0.0001, equal_nan=False)

    # cache[0] is the linear cache: the exact inputs, in (A_prev, W, b) order.
    linear_cache = cache[0]
    assert_allclose(linear_cache[0], prev_activations)
    assert_allclose(linear_cache[1], weights)
    assert_allclose(linear_cache[2], bias)

    # cache[1] is the pre-activation Z, kept for use during backprop.
    assert_allclose(cache[1], array([[-0.00695, -0.006784]]),
                    rtol=0, atol=0.0001, equal_nan=False)
def test_linear_activation_forward6(self):
    """ReLU forward pass with tiny inputs whose linear output stays below zero.

    Confirms the zeroed activations plus the cached (A_prev, W, b) tuple and
    pre-activation Z returned for backpropagation.
    """
    prev_activations = array([[0.00838983, 0.00931102],
                              [0.00285587, 0.00885141],
                              [-0.00754398, 0.01252868]])
    weights = array([[0.0051293, -0.00298093, 0.00488518]])
    bias = array([[-0.00075572]])

    activations, cache = linear_activation_forward(
        prev_activations, weights, bias, activation="relu")

    # The bias dominates the tiny dot products, so Z < 0 and ReLU yields 0.
    assert_allclose(activations, array([[0, 0]]),
                    rtol=0, atol=0.0001, equal_nan=False)

    # The linear cache must echo back each input, in (A_prev, W, b) order.
    for index, expected in enumerate((prev_activations, weights, bias)):
        assert_allclose(cache[0][index], expected)

    # The activation cache holds the raw pre-activation Z.
    assert_allclose(cache[1], array([[-0.00075, -0.00067]]),
                    rtol=0, atol=0.0001, equal_nan=False)
def test_linear_activation_forward2(self):
    """ReLU forward pass with mixed-sign pre-activations.

    The first column of Z is positive (passed through unchanged) and the
    second is negative (clipped to zero); the cache must retain the linear
    inputs and the unclipped Z.
    """
    prev_activations = array([[-0.41675785, -0.05626683],
                              [-2.1361961, 1.64027081],
                              [-1.79343559, -0.84174737]])
    weights = array([[0.50288142, -1.24528809, -1.05795222]])
    bias = array([[-0.90900761]])

    activations, cache = linear_activation_forward(
        prev_activations, weights, bias, activation="relu")

    # Positive entry survives ReLU; negative entry is replaced with 0.
    assert_allclose(activations, array([[3.43896131, 0]]),
                    rtol=0, atol=0.0001, equal_nan=False)

    # Linear cache preserves the exact inputs, in (A_prev, W, b) order.
    linear_cache = cache[0]
    assert_allclose(linear_cache[0], prev_activations)
    assert_allclose(linear_cache[1], weights)
    assert_allclose(linear_cache[2], bias)

    # Activation cache preserves Z before clipping, for the backward pass.
    assert_allclose(cache[1], array([[3.43896131, -2.08938436]]),
                    rtol=0, atol=0.0001, equal_nan=False)