def test_check_gradient_jacobian_vector_arg():
    """Jacobian ('jac') check: 2-vector argument supplied via f_arg_tensor."""
    def f(x):
        return numpy.array([x[0]**2 * x[1], 5 * x[0] + math.sin(x[1])])

    def df(x):
        # Analytic jacobian of f, row per output component.
        return numpy.array([[2 * x[0] * x[1], x[0]**2],
                            [5.0, math.cos(x[1])]])

    helpers.check_gradient(
        f, df, f_arg_tensor=numpy.random.random(2), f_shape='jac')
def test_dsoftmax_matrix():
    """dsoftmax vs. numeric jacobian on a random matrix ('jac-stack')."""
    rows_cols = [random.randint(2, 10) for _ in range(2)]

    def analytic(X):
        return calculate.dsoftmax(calculate.softmax(X))

    helpers.check_gradient(
        calculate.softmax,
        analytic,
        f_arg_tensor=numpy.random.random(rows_cols),
        f_shape='jac-stack')
def test_check_gradient_scalar_3tensor_arg():
    """Scalar check: f(x) = sum(x**2) over a random 3-tensor argument."""
    shape = (random.randint(1, 10),
             random.randint(1, 10),
             random.randint(1, 10))
    helpers.check_gradient(
        lambda x: numpy.sum(x**2),
        lambda x: 2.0 * x,
        f_arg_tensor=numpy.random.random(shape),
        f_shape='scalar')
def test_dlogit_matrix():
    """dlogit vs. numeric elementwise ('lin') gradient on a random matrix."""
    dims = [random.randint(1, 10) for _ in range(2)]
    helpers.check_gradient(
        calculate.logit,
        calculate.dlogit,
        f_arg_tensor=numpy.random.random(dims),
        f_shape='lin')
def test_check_gradient_jacobian():
    """Jacobian ('jac') check using the `inputs` keyword (not f_arg_tensor)."""
    def f(x):
        return numpy.array([x[0]**2 * x[1], 5 * x[0] + math.sin(x[1])])

    def df(x):
        # Analytic jacobian of f.
        return numpy.array([[2 * x[0] * x[1], x[0]**2],
                            [5.0, math.cos(x[1])]])

    helpers.check_gradient(f, df, inputs=numpy.random.rand(2), f_shape='jac')
def test_check_gradient_jacobian_3tensor_arg():
    """Vector-valued f of a random 3-tensor; jacobian rows are 2*x and 3*x**2."""
    shape = (random.randint(1, 10),
             random.randint(1, 10),
             random.randint(1, 10))
    helpers.check_gradient(
        lambda x: numpy.array([numpy.sum(x**2), numpy.sum(x**3)]),
        lambda x: numpy.array([2 * x, 3 * x**2]),
        f_arg_tensor=numpy.random.random(shape),
        f_shape='jac')
def check_error_gradient(error_func, tensor_d=1):
    """Check error_func.derivative against the numeric gradient.

    `tensor_d` is the rank of the random argument/target tensors.

    NOTE(review): a second, single-argument check_error_gradient appears
    later in this source; if both live in one module the later def wins --
    confirm these come from separate test files.
    """
    shape = [random.randint(1, 10) for _ in range(tensor_d)]
    target = numpy.random.random(shape)
    helpers.check_gradient(
        lambda X: error_func(X, target),
        # derivative returns a tuple; index [1] is the gradient tensor.
        lambda X: error_func.derivative(X, target)[1],
        f_arg_tensor=numpy.random.random(shape),
        f_shape='scalar')
def check_error_gradient(error_func):
    """Vector variant of the error-gradient check (passes `inputs=`).

    NOTE(review): shares its name with the tensor variant above --
    presumably from a different test module; confirm.
    """
    length = random.randint(1, 10)
    target = numpy.random.random(length)
    helpers.check_gradient(
        lambda X: error_func(X, target),
        # derivative returns a tuple; index [1] is the gradient vector.
        lambda X: error_func.derivative(X, target)[1],
        inputs=numpy.random.random(length),
        f_shape='scalar')
def test_check_gradient_jacobian_matrix_arg_matrix_out():
    """Jacobian check with matrix argument and matrix-valued output."""
    def f(x):
        return numpy.array([[numpy.sum(x**2), numpy.sum(x**3)],
                            [numpy.sum(x**4), numpy.sum(x**5)]])

    def df(x):
        # Gradient of each output entry w.r.t. the full input tensor.
        return numpy.array([[2 * x, 3 * x**2],
                            [4 * x**3, 5 * x**4]])

    helpers.check_gradient(
        f, df,
        f_arg_tensor=numpy.random.random(
            (random.randint(1, 10), random.randint(1, 10))),
        f_shape='jac')
def test_check_gradient_jac_stack():
    """'jac-stack' check: per-row sums of x**2 and x**3 with row-wise jacobians."""
    def f(x):
        return numpy.hstack([
            numpy.sum(x**2, axis=1, keepdims=True),
            numpy.sum(x**3, axis=1, keepdims=True)
        ])

    def df(x):
        # One stacked jacobian per input row.
        return numpy.array(
            [[2 * x[i], 3 * x[i]**2] for i in range(x.shape[0])])

    helpers.check_gradient(
        f, df,
        f_arg_tensor=numpy.random.random(
            (random.randint(1, 10), random.randint(1, 10))),
        f_shape='jac-stack')
def test_check_gradient_lin_matrix_arg():
    """Elementwise ('lin') checks on random matrices: x**2 and sqrt(x)."""
    first_shape = (random.randint(1, 10), random.randint(1, 10))
    helpers.check_gradient(
        lambda x: x**2,
        lambda x: 2.0 * x,
        f_arg_tensor=numpy.random.random(first_shape),
        f_shape='lin')

    second_shape = (random.randint(1, 10), random.randint(1, 10))
    helpers.check_gradient(
        lambda x: numpy.sqrt(x),
        lambda x: 1.0 / (2 * numpy.sqrt(x)),
        f_arg_tensor=numpy.random.random(second_shape),
        f_shape='lin')
def test_output_correct_jacobian():
    """ill._output_correct_jacobian vs. numeric gradient of its objective."""
    rows = random.randint(1, 10)
    cols = random.randint(1, 10)
    flat_vec = numpy.random.random(rows * cols)
    similarities = numpy.random.random(rows)
    target_vec = numpy.random.random(cols)

    helpers.check_gradient(
        lambda x: ill._output_correct_objective(x, similarities, target_vec),
        lambda x: ill._output_correct_jacobian(x, similarities, target_vec),
        f_arg_tensor=flat_vec,
        f_shape='scalar')
def _check_jacobian(make_model_func):
    """Check mlp._mlp_obj_jac against the numeric gradient of mlp._mlp_obj.

    NOTE(review): two other _check_jacobian defs appear in this source --
    presumably each belongs to a different test module; confirm.
    """
    attrs = random.randint(1, 10)
    outs = random.randint(1, 10)
    model = make_model_func(attrs, random.randint(1, 10), outs)
    inp_matrix, tar_matrix = datasets.get_random_regression(10, attrs, outs)

    # Jacobian of the flattened-weights objective.
    helpers.check_gradient(
        lambda xk: mlp._mlp_obj(model, inp_matrix, tar_matrix, xk),
        lambda xk: mlp._mlp_obj_jac(model, inp_matrix, tar_matrix, xk)[1],
        inputs=mlp._flatten(model._weight_matrices),
        f_shape='scalar')
def test_check_gradient_jacobian_matrix_arg():
    """Jacobian checks with matrix args: explicit 2x2 case, then random shape."""
    def f_fixed(x):
        return numpy.array([x[0][0]**2 + x[1][0]**2,
                            x[0][1]**2 + x[1][1]**2])

    def df_fixed(x):
        # One gradient matrix per output component.
        return numpy.array([[[2 * x[0][0], 0], [2 * x[1][0], 0]],
                            [[0, 2 * x[0][1]], [0, 2 * x[1][1]]]])

    helpers.check_gradient(
        f_fixed, df_fixed,
        f_arg_tensor=numpy.random.random((2, 2)),
        f_shape='jac')

    helpers.check_gradient(
        lambda x: numpy.array([numpy.sum(x**2), numpy.sum(x**3)]),
        lambda x: numpy.array([2 * x, 3 * x**2]),
        f_arg_tensor=numpy.random.random(
            (random.randint(1, 10), random.randint(1, 10))),
        f_shape='jac')
def _check_jacobian(make_model_func):
    """Check model._get_obj_jac against the numeric gradient of model._get_obj.

    Variant for models exposing a single _weight_matrix.
    """
    attrs = random.randint(1, 10)
    outs = random.randint(1, 10)
    model = make_model_func(attrs, outs)
    inp_matrix, tar_matrix = datasets.get_random_regression(10, attrs, outs)

    # Objective/jacobian take the flattened weight matrix as argument.
    helpers.check_gradient(
        lambda xk: model._get_obj(xk, inp_matrix, tar_matrix),
        lambda xk: model._get_obj_jac(xk, inp_matrix, tar_matrix)[1],
        inputs=model._weight_matrix.ravel(),
        f_shape='scalar')
def _check_jacobian(make_model_func):
    """Check model._get_obj_jac against the numeric gradient of model._get_obj.

    Variant for models with a hidden layer: the argument is the flattened
    bias vector plus weight matrices, passed via f_arg_tensor.
    """
    attrs = random.randint(1, 10)
    outs = random.randint(1, 10)
    model = make_model_func(attrs, random.randint(1, 10), outs)
    inp_matrix, tar_matrix = datasets.get_random_regression(
        random.randint(1, 10), attrs, outs)

    helpers.check_gradient(
        lambda xk: model._get_obj(xk, inp_matrix, tar_matrix),
        lambda xk: model._get_obj_jac(xk, inp_matrix, tar_matrix)[1],
        f_arg_tensor=mlp._flatten(model._bias_vec, model._weight_matrices),
        f_shape='scalar')
def test_check_gradient_lin():
    """Elementwise ('lin') checks for x**2 and sqrt(x) on default inputs."""
    helpers.check_gradient(lambda x: x**2, lambda x: 2 * x, f_shape='lin')

    helpers.check_gradient(
        numpy.sqrt,
        lambda x: 1.0 / (2 * numpy.sqrt(x)),
        f_shape='lin')
def test_dgaussian():
    """dgaussian (fed gaussian(x)) vs. numeric elementwise gradient."""
    def analytic(x):
        return calculate.dgaussian(x, calculate.gaussian(x))

    helpers.check_gradient(calculate.gaussian, analytic, f_shape='lin')
def test_L2Penalty_jacobian():
    """L2Penalty derivative vs. numeric gradient, with a random penalty weight.

    NOTE(review): unlike the other checks this passes no f_shape --
    presumably the helper's default applies here; confirm.
    """
    weight = random.uniform(0.0, 2.0)
    penalty = error.L2Penalty(penalty_weight=weight)
    helpers.check_gradient(penalty, penalty.derivative)
def test_check_gradient_scalar():
    """Scalar check: f(x) = sum(x**2), gradient 2*x, on default inputs."""
    def f(x):
        return numpy.sum(x**2)

    helpers.check_gradient(f, lambda x: 2.0 * x, f_shape='scalar')
def test_dsoftmax():
    """dsoftmax full-jacobian ('jac') check on the default vector input."""
    def analytic(x):
        return calculate.dsoftmax(calculate.softmax(x))

    helpers.check_gradient(calculate.softmax, analytic, f_shape='jac')
def test_drelu():
    """drelu vs. numeric elementwise gradient on default inputs."""
    helpers.check_gradient(
        calculate.relu, calculate.drelu, f_shape='lin')
def test_big_drelu():
    """drelu at explicit inputs [0., 1000.].

    NOTE(review): presumably a regression test for large-magnitude inputs;
    x = 0 sits on relu's kink, so the helper's numeric scheme must tolerate
    it -- confirm.
    """
    big_inputs = numpy.array([0., 1000.])
    helpers.check_gradient(
        calculate.relu, calculate.drelu, inputs=big_inputs, f_shape='lin')
def test_dtanh():
    """dtanh (fed tanh(x)) vs. numeric elementwise gradient."""
    def analytic(x):
        return calculate.dtanh(calculate.tanh(x))

    helpers.check_gradient(calculate.tanh, analytic, f_shape='lin')
def test_dlogit():
    """dlogit vs. numeric elementwise gradient on default inputs."""
    helpers.check_gradient(
        calculate.logit, calculate.dlogit, f_shape='lin')