def test_items_values_keys():
    def fun(input_dict):
        A = 0.
        B = 0.
        for i, (k, v) in enumerate(input_dict.items()):
            A = A + np.sum(np.sin(v)) * (i + 1.0)
            B = B + np.sum(np.cos(v))
        for v in input_dict.values():
            A = A + np.sum(np.sin(v))
        for k in input_dict.keys():
            A = A + np.sum(np.cos(input_dict[k]))
        return A + B

    def d_fun(input_dict):
        g = grad(fun)(input_dict)
        A = np.sum(g['item_1'])
        B = np.sum(np.sin(g['item_1']))
        C = np.sum(np.sin(g['item_2']))
        return A + B + C

    input_dict = {
        'item_1': npr.randn(5, 6),
        'item_2': npr.randn(4, 3),
        'item_X': npr.randn(2, 4)
    }

    check_grads(fun, input_dict)
    check_grads(d_fun, input_dict)
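# Note: `check_grads` throughout these examples is autograd's test helper,
# assumed here to compare the autodiff gradient against a numerical estimate.
# A minimal sketch of that idea (`numeric_grad` and `simple_check_grads` are
# illustrative names, not part of autograd's API):
import numpy as onp  # plain NumPy for the numeric estimate
from autograd import grad

def numeric_grad(fun, x, eps=1e-6):
    # Central finite-difference gradient of a scalar-valued fun(x).
    x = onp.asarray(x, dtype=float)
    g = onp.zeros_like(x)
    it = onp.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        xp, xm = x.copy(), x.copy()
        xp[idx] += eps
        xm[idx] -= eps
        g[idx] = (fun(xp) - fun(xm)) / (2 * eps)
        it.iternext()
    return g

def simple_check_grads(fun, x, tol=1e-4):
    # Compare autograd's reverse-mode gradient with the numeric estimate.
    assert onp.allclose(grad(fun)(x), numeric_grad(fun, x), atol=tol)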
def check_fun_and_grads(fun, args, kwargs, argnums):
    wrt_args = [args[i] for i in argnums]
    try:
        if isinstance(fun, primitive):
            check_equivalent(fun(*args, **kwargs),
                             fun.fun(*args, **kwargs))
    except:
        print("Value test failed! Args were", args, kwargs)
        raise

    with warnings.catch_warnings(record=True) as w:
        try:
            def scalar_fun(*new_args):
                full_args = list(args)
                for i, argnum in enumerate(argnums):
                    full_args[argnum] = new_args[i]
                return to_scalar(fun(*full_args, **kwargs))
            check_grads(scalar_fun, *wrt_args)
        except:
            print("First derivative test failed! Args were", args, kwargs)
            raise

        try:
            for i in range(len(argnums)):
                def d_scalar_fun(*args):
                    return to_scalar(grad(scalar_fun, argnum=i)(*args))
                check_grads(d_scalar_fun, *wrt_args)
        except:
            print("Second derivative test failed! Args were", args, kwargs)
            raise
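# Hypothetical invocation of the helper above (argument values assumed;
# np.tanh stands in for any autograd primitive):
# check_fun_and_grads(np.tanh, args=(npr.randn(3),), kwargs={}, argnums=[0])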
def test_meta_gradient_with_langevin():
    num_samples = 4
    num_langevin_steps = 3

    D = 2
    init_mean = npr.randn(D) * 0.01
    init_log_stddevs = np.log(1*np.ones(D)) + npr.randn(D) * 0.01
    init_log_stepsizes = np.log(0.01*np.ones(num_langevin_steps)) + npr.randn(num_langevin_steps) * 0.01
    init_log_noise_sizes = np.log(.001*np.ones(num_langevin_steps)) + npr.randn(num_langevin_steps) * 0.01
    init_log_gradient_scales = np.log(1*np.ones(D))
    init_gradient_power = 0.9

    sample_and_run_langevin, parser = build_langevin_sampler(logprob_two_moons, D, num_langevin_steps, approx=False)

    sampler_params = np.zeros(len(parser))
    parser.put(sampler_params, 'mean', init_mean)
    parser.put(sampler_params, 'log_stddev', init_log_stddevs)
    parser.put(sampler_params, 'log_stepsizes', init_log_stepsizes)
    parser.put(sampler_params, 'log_noise_sizes', init_log_noise_sizes)
    parser.put(sampler_params, 'log_gradient_scales', init_log_gradient_scales)
    parser.put(sampler_params, 'invsig_gradient_power', inv_sigmoid(init_gradient_power))

    def get_batch_marginal_likelihood_estimate(sampler_params):
        rs = npr.RandomState(0)
        samples, loglik_estimates, entropy_estimates = sample_and_run_langevin(sampler_params, rs, num_samples)
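        # The sum of a log-likelihood and an entropy estimate is a stochastic
        # lower-bound (ELBO-style) estimate of the log marginal likelihood.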
        marginal_likelihood_estimates = loglik_estimates + entropy_estimates
        return np.mean(marginal_likelihood_estimates)

    check_grads(get_batch_marginal_likelihood_estimate, sampler_params)
def test_nested_list():
    A = [[1.0], 2.0, 1.5]

    def fun(x):
        return x[1:][0]

    check_grads(fun, A)
def check_fun_and_grads(fun, args, kwargs, argnums):
    wrt_args = [args[i] for i in argnums]
    try:
        if isinstance(fun, primitive):
            check_equivalent(fun(*args, **kwargs),
                             fun.fun(*args, **kwargs))
    except:
        print "Value test failed! Args were", args, kwargs
        raise

    try:
        def scalar_fun(*new_args):
            full_args = list(args)
            for i, argnum in enumerate(argnums):
                full_args[argnum] = new_args[i]
            return to_scalar(fun(*full_args, **kwargs))
        check_grads(scalar_fun, *wrt_args)
    except:
        print "First derivative test failed! Args were", args, kwargs
        raise

    try:
        for i in range(len(argnums)):
            def d_scalar_fun(*args):
                return to_scalar(grad(scalar_fun, argnum=i)(*args))
            check_grads(d_scalar_fun, *wrt_args)
    except:
        print "Second derivative test failed! Args were", args, kwargs
        raise
def check_fun_and_grads(fun, args, kwargs, argnums):
    wrt_args = [args[i] for i in argnums]
    try:
        if isinstance(fun, primitive):
            wrapped   = fun(*args, **kwargs)
            unwrapped = fun.fun(*args, **kwargs)
            try:
                assert wrapped == unwrapped
            except:
                check_equivalent(wrapped, unwrapped)
    except:
        print("Value test failed! Args were", args, kwargs)
        raise

    with warnings.catch_warnings(record=True) as w:
        try:
            def scalar_fun(*new_args):
                full_args = list(args)
                for i, argnum in enumerate(argnums):
                    full_args[argnum] = new_args[i]
                return to_scalar(fun(*full_args, **kwargs))
            check_grads(scalar_fun, *wrt_args)
        except:
            print("First derivative test failed! Args were", args, kwargs)
            raise

        try:
            for i in range(len(argnums)):
                def d_scalar_fun(*args):
                    return to_scalar(grad(scalar_fun, argnum=i)(*args))
                check_grads(d_scalar_fun, *wrt_args)
        except:
            print("Second derivative test failed! Args were", args, kwargs)
            raise
def opt_traj(func, fdict, T, opt_method='SGD', init=None,
             learning_rate=0.1, seed=100, momentum=False, noise_level=0.0):
    # do optimization and return the trajectory
    params = {'x': 0.0, 'y': 0.0}
    domain = fdict['domain']
    optimum = fdict['optimum']
    loss_and_grad = value_and_grad(func)
    #quick_grad_check(func, params)   
    params = init_params(params, domain, init, seed)
    check_grads(func, params)
    opt_server = Parameter_Server(opt_method, momentum)
    opt_server.init_gradient_storage(params)
    
    x_traj = []
    y_traj = []
    f_traj = []
    
    print('optimising function using %s...' % opt_method)
    for t in range(T):
        (func_value, func_grad) = loss_and_grad(params)
        x_traj.append(params['x'])
        y_traj.append(params['y'])
        f_traj.append(func_value)
        func_grad = inject_noise(func_grad, noise_level)
        if opt_method == 'SGD':
            # Clip the gradient to a maximum norm of 2.0.
            norm = np.sqrt(func_grad['x'] ** 2 + func_grad['y'] ** 2)
            if norm >= 2.0:
                func_grad['x'] /= norm / 2
                func_grad['y'] /= norm / 2
        params = opt_server.update(params, func_grad, learning_rate)

    return np.array(x_traj), np.array(y_traj), np.array(f_traj)
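# Hypothetical usage of opt_traj (the objective and the fdict contents are
# illustrative assumptions; 'domain' and 'optimum' follow the lookups above):
# def rosenbrock(params):
#     x, y = params['x'], params['y']
#     return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
#
# fdict = {'domain': [[-2.0, 2.0], [-1.0, 3.0]], 'optimum': [1.0, 1.0]}
# x_traj, y_traj, f_traj = opt_traj(rosenbrock, fdict, T=500)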
def test_jacobian_higher_order():
    fun = lambda x: np.sin(np.outer(x,x)) + np.cos(np.dot(x,x))
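    # fun maps R^3 to a 3x3 matrix, so each application of jacobian appends
    # one size-3 input axis: (3,3) -> (3,3,3) -> (3,3,3,3), and so on.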

    assert jacobian(fun)(npr.randn(3)).shape == (3,3,3)
    assert jacobian(jacobian(fun))(npr.randn(3)).shape == (3,3,3,3)
    # assert jacobian(jacobian(jacobian(fun)))(npr.randn(3)).shape == (3,3,3,3,3)

    check_grads(lambda x: np.sum(np.sin(jacobian(fun)(x))), npr.randn(3))
    check_grads(lambda x: np.sum(np.sin(jacobian(jacobian(fun))(x))), npr.randn(3))
def test_jacobian_higher_order():
    fun = lambda x: np.sin(np.outer(x, x)) + np.cos(np.dot(x, x))

    assert jacobian(fun)(npr.randn(3)).shape == (3, 3, 3)
    assert jacobian(jacobian(fun))(npr.randn(3)).shape == (3, 3, 3, 3)
    assert jacobian(jacobian(jacobian(fun)))(npr.randn(3)).shape == (3, 3, 3, 3, 3)

    check_grads(lambda x: np.sum(jacobian(fun)(x)), npr.randn(3))
    check_grads(lambda x: np.sum(jacobian(jacobian(fun))(x)), npr.randn(3))
def test_gradient_csr_binary_dot_left():
    """
    Checks that the gradient is computed correctly.
    """
    def sum_csr_binary_dot_left(feats):
        result = csr_binary_dot_left(feats, rows, cols)
        return np.sum(result)

    # gradfunc = grad(sum_csr_binary_dot_left)
    check_grads(sum_csr_binary_dot_left, feats)
def test_grads():
    def fun(input_list):
        A = np.sum(np.sin(input_list[0]))
        B = np.sum(np.cos(input_list[1]))
        return A + B

    def d_fun(input_list):
        g = grad(fun)(input_list)
        A = np.sum(g[0])
        B = np.sum(np.sin(g[0]))
        C = np.sum(np.sin(g[1]))
        return A + B + C

    input_list = [npr.randn(5, 6), npr.randn(4, 3), npr.randn(2, 4)]

    check_grads(fun, input_list)
    check_grads(d_fun, input_list)
def test_grads():
    def fun(input_dict):
        A = np.sum(np.sin(input_dict['item_1']))
        B = np.sum(np.cos(input_dict['item_2']))
        return A + B

    def d_fun(input_dict):
        g = grad(fun)(input_dict)
        A = np.sum(g['item_1'])
        B = np.sum(np.sin(g['item_1']))
        C = np.sum(np.sin(g['item_2']))
        return A + B + C

    input_dict = {'item_1' : npr.randn(5, 6),
                  'item_2' : npr.randn(4, 3),
                  'item_X' : npr.randn(2, 4)}

    check_grads(fun, input_dict)
    check_grads(d_fun, input_dict)
def test_iter():
    def fun(input_dict):
        for i, k in enumerate(sorted(input_dict)):
            A = np.sum(np.sin(input_dict[k])) * (i + 1.0)
            B = np.sum(np.cos(input_dict[k]))
        return A + B

    def d_fun(input_dict):
        g = grad(fun)(input_dict)
        A = np.sum(g['item_1'])
        B = np.sum(np.sin(g['item_1']))
        C = np.sum(np.sin(g['item_2']))
        return A + B + C

    input_dict = {'item_1' : npr.randn(5, 6),
                  'item_2' : npr.randn(4, 3),
                  'item_X' : npr.randn(2, 4)}

    check_grads(fun, input_dict)
    check_grads(d_fun, input_dict)
def test_gradient_csr_binary_dot_right():
    def sum_csr_binary_dot_right(feats):
        result = csr_binary_dot_right(feats, rows, cols)
        return np.sum(result)

    check_grads(sum_csr_binary_dot_right, feats)
def test_morg_net_gradient():
    loss, parser = morg_fp_func()
    weights = npr.randn(len(parser))
    check_grads(loss, weights)
def check_symmetric_matrix_grads(fun, *args):
    symmetrize = lambda A: symm(np.tril(A))
    new_fun = lambda *args: fun(symmetrize(args[0]), *args[1:])
    return check_grads(new_fun, *args)
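# Hypothetical usage (the quadratic objective is an illustrative choice):
# check_symmetric_matrix_grads(lambda A: np.sum(A ** 2), npr.randn(4, 4))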
def check_conv_grads(conv_params):
    loss, parser = conv_fp_func(conv_params)
    weights = npr.randn(len(parser)) * 0.1
    check_grads(loss, weights)
def check_fun_and_grads(fun, args, kwargs, argnums, fwd=True):
    cover.load()
    cover.start()
    wrt_args = [args[i] for i in argnums]
    rand_vecs = [npr.randn(flatten(arg)[0].size) for arg in wrt_args]

    try:
        if isinstance(fun, primitive):
            wrapped   = fun(*args, **kwargs)
            unwrapped = fun.fun(*args, **kwargs)
            try:
                assert wrapped == unwrapped
            except:
                check_equivalent(wrapped, unwrapped)
    except:
        print("Value test failed! Args were", args, kwargs)
        raise

    with warnings.catch_warnings(record=True) as w:
        try:
            def scalar_fun(*new_args):
                full_args = list(args)
                for i, argnum in enumerate(argnums):
                    full_args[argnum] = new_args[i]
                return to_scalar(fun(*full_args, **kwargs))
            check_grads(scalar_fun, *wrt_args)
        except:
            print("First derivative test failed! Args were", args, kwargs)
            raise

        try:
            for i in range(len(argnums)):
                def d_scalar_fun(*args):
                    return to_scalar(grad(scalar_fun, argnum=i)(*args))
                check_grads(d_scalar_fun, *wrt_args)
        except:
            print("Second derivative test failed! Args were", args, kwargs)
            raise

        if fwd:
            try:
                def scalar_args_fun(*new_args):
                    full_args = list(args)
                    for i, argnum in enumerate(argnums):
                        wrt_flat, unflatten = flatten(wrt_args[i])
                        full_args[argnum] = unflatten(wrt_flat + new_args[i] * rand_vecs[i])
                    return to_scalar(fun(*full_args, **kwargs))
                check_forward_grads(scalar_args_fun, *anp.zeros(len(wrt_args)))
            except:
                print("First forward derivative test failed! Args were", args, kwargs)
                raise

            try:
                for i, _ in enumerate(argnums):
                    def d_scalar_args_fun(*args):
                        return forward_derivative(scalar_args_fun, argnum=i)(*args)
                    check_grads(d_scalar_args_fun, *anp.zeros(len(wrt_args)))
                    check_forward_grads(d_scalar_args_fun, *anp.zeros(len(wrt_args)))
                    check_forward_grads(grad(scalar_args_fun, argnum=i), *anp.zeros(len(wrt_args)))
            except:
                print("Second forward derivative test failed! Args were", args, kwargs)
                raise
        cover.stop()
        cover.save()