Example #1
def test_2():
    """This function tests that the BLACKBOX results are unaffected by the size of the batch."""
    d, box, n, m, batch, strategy = get_valid_request()

    rslt_base = None
    for batch in range(1, 5):
        rslt = search(rosen, box, n, m, batch, strategy)

        if rslt_base is None:
            rslt_base = rslt
        np.testing.assert_almost_equal(rslt, rslt_base)
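
Every example in this listing draws its inputs from get_valid_request in blackbox.tests.auxiliary, whose body is not shown here. As a rough, purely illustrative sketch, assuming it simply draws a random but valid problem specification for search(), it might look like the following (all ranges and the strategy labels are assumptions, not the package's actual values).

import numpy as np


def get_valid_request():
    """Illustrative stand-in for blackbox.tests.auxiliary.get_valid_request.

    Draws a random but valid request for search(). Every concrete range below
    is an assumption made for demonstration purposes only.
    """
    d = np.random.randint(2, 5)                    # dimension of the search space
    box = [[-10.0, 10.0]] * d                      # one (lower, upper) interval per dimension
    n = np.random.randint(5, 10) * d               # number of initial space-filling points
    m = np.random.randint(5, 10) * d               # number of subsequent, model-based points
    batch = np.random.randint(1, 5)                # evaluations per batch
    strategy = np.random.choice(['mp', 'mpi'])     # hypothetical parallelization labels
    return d, box, n, m, batch, strategy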
Example #2
def test_3():
    """This function tests that the results line up with the original algorithm."""
    # Unfortunately, there are small differences when the FORTRAN implementations of selected
    # functions are used. However, this test always passes with the PYTHON versions. The
    # implementations are tested against each other in other tests and the interface is
    # identical, so I am confident that the discrepancies here are only due to small numerical
    # differences between the PYTHON and FORTRAN calculations that accumulate if all are used
    # at once. Creating the file at PYTHON_FNAME below switches the package to its PYTHON
    # routines (see the sketch after this example).
    open(PYTHON_FNAME, 'a').close()

    d, box, n, m, batch, strategy = get_valid_request()

    np.random.seed(123)
    alg_original = bb_search(rosen, box, n, m, batch, resfile='output.csv')

    alg_revised = search(rosen, box, n, m, batch, strategy, legacy=True)

    np.testing.assert_almost_equal(alg_original, alg_revised)

    os.remove(PYTHON_FNAME)
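
This test (and several below) toggles between the FORTRAN and the pure-PYTHON implementations by creating and removing an empty sentinel file at PYTHON_FNAME. The dispatch itself is not part of this listing; a minimal, self-contained sketch of such a sentinel-file toggle, under that assumption, could look like this (all names below are hypothetical stand-ins).

import os

PYTHON_FNAME = '.python_toggle'   # hypothetical sentinel name, for illustration only


def python_backend(x):
    """Stand-in for a pure-PYTHON implementation."""
    return sum(v ** 2 for v in x)


def fortran_backend(x):
    """Stand-in for the F2PY-wrapped FORTRAN implementation."""
    return sum(v ** 2 for v in x)


def evaluate(x):
    """Route to the PYTHON backend whenever the sentinel file exists."""
    if os.path.exists(PYTHON_FNAME):
        return python_backend(x)
    return fortran_backend(x)


# Usage mirroring the tests: touch the file to force the PYTHON code path.
open(PYTHON_FNAME, 'a').close()
assert evaluate([1.0, 2.0]) == python_backend([1.0, 2.0])
os.remove(PYTHON_FNAME)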
Example #3
def test_5():
    """This test function ensures that the results are unaffected by using either the FORTRAN or
    PYTHON function."""
    d, box, n, m, batch, strategy = get_valid_request()

    rslt_base = None
    for is_python in [True, False]:

        if is_python:
            open(PYTHON_FNAME, 'a').close()

        rslt = search(rosen, box, n, m, batch, strategy)

        if rslt_base is None:
            rslt_base = rslt
        np.testing.assert_almost_equal(rslt, rslt_base)

        if os.path.exists(PYTHON_FNAME):
            os.remove(PYTHON_FNAME)
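
The create/remove pattern around PYTHON_FNAME in tests like this one leaves the sentinel file behind if an assertion fails before the cleanup line runs. One way to make the toggle exception-safe (a sketch, not part of the package) is a small context manager:

import os
from contextlib import contextmanager

PYTHON_FNAME = '.python_toggle'   # hypothetical sentinel name


@contextmanager
def python_implementation():
    """Temporarily force the pure-PYTHON code path via the sentinel file."""
    open(PYTHON_FNAME, 'a').close()
    try:
        yield
    finally:
        if os.path.exists(PYTHON_FNAME):
            os.remove(PYTHON_FNAME)


# Hypothetical usage inside a test:
# with python_implementation():
#     rslt = search(rosen, box, n, m, batch, strategy)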
Example #4
def test_4():
    """This test function compares the output from the F2PY functions to their PYTHON
    counterparts."""
    for _ in range(100):

        d, _, n, _, _, _ = get_valid_request()

        points = np.random.uniform(size=(n, d + 1))
        points[:, 0:-1] = latin(n, d)

        mat, eval_points = np.identity(d), np.random.rand(d)
        lam, b, a = rbf(points, mat)
        k = np.random.choice(range(n))
        r = np.random.uniform()
        x = np.random.uniform(size=d)

        rslts_base = None
        for is_python in [True, False]:

            if is_python:
                open(PYTHON_FNAME, 'a').close()

            rslts = list()
            rslts.append(fit_full(lam, b, a, mat, points[:, 0:-1],
                                  eval_points))
            rslts.append(constraint_full(points, r, k, x))
            rslts.append(get_capital_phi(points[:, 0:-1], mat, n, d))
            rslts.append(spread(points[:, 0:-1], n, d))

            if os.path.exists(PYTHON_FNAME):
                os.remove(PYTHON_FNAME)

            if rslts_base is None:
                rslts_base = rslts

            for i, rslt in enumerate(rslts):
                np.testing.assert_almost_equal(rslt, rslts_base[i])

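
test_4 relies on latin(n, d), which is assumed to return a Latin hypercube sample of n points in d dimensions with coordinates in [0, 1). The package's own routine is not reproduced in this listing; a minimal, self-contained version of the standard construction looks roughly like this (illustrative only, the actual latin() may differ).

import numpy as np


def latin_sketch(n, d):
    """Minimal Latin hypercube sample: one point per row, values in [0, 1).

    Each coordinate is a random permutation of the n strata, jittered within
    its stratum.
    """
    sample = np.empty((n, d))
    for j in range(d):
        strata = np.random.permutation(n)              # one stratum index per point
        sample[:, j] = (strata + np.random.uniform(size=n)) / n
    return sample


points = latin_sketch(20, 3)
assert points.shape == (20, 3)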
Example #5
def test_8():
    """This test function compares the output from the F2PY functions to their PYTHON
    counterparts. These are only used in the FORTRAN codes and only exposed through F2PY for
    testing purposes."""
    for _ in range(100):

        d, _, n, _, _, _ = get_valid_request()

        points = np.random.uniform(size=(n, d + 1))
        points[:, 0:-1] = latin(n, d)

        mat, eval_points = np.identity(d), np.random.rand(d)
        lam, b, a = rbf(points, mat)

        # We test the derivative calculation of the criterion function.
        args = [lam, b, a, mat, points[:, :-1]]
        pyth = approx_fprime(eval_points,
                             partial(pyth_fit_full, *args),
                             epsilon=1e-6)
        fort = replacements_f2py.f2py_derivative_function(
            eval_points, *args + [n, d])
        np.testing.assert_almost_equal(fort, pyth)
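
The derivative check in test_8 follows a standard recipe: compare an analytical gradient against scipy.optimize.approx_fprime with a small finite-difference step. The same recipe applied to a function whose gradient SciPy ships in closed form (the Rosenbrock function) looks like this.

import numpy as np
from scipy.optimize import approx_fprime, rosen, rosen_der

x = np.random.rand(4)

numerical = approx_fprime(x, rosen, 1e-6)   # forward-difference gradient
analytical = rosen_der(x)                   # closed-form gradient

# Loose tolerances, since the forward-difference error grows with the curvature.
np.testing.assert_allclose(numerical, analytical, rtol=1e-3, atol=1e-3)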
Example #6
import os
import pickle as pkl

from scipy.optimize import rosen
import numpy as np

from blackbox.tests.auxiliary import get_valid_request
from blackbox import search
import blackbox

is_creation = False

if is_creation:
    num_tests = 100
    tests = []
    for seed in range(num_tests):

        request = [rosen] + list(get_valid_request())[1:]
        rslt = search(*request)
        tests.append([request, rslt])
    pkl.dump(tests, open('regression_vault.blackbox.pkl', 'wb'))

# TODO: These only work on heracles at the moment; portability is still an issue.
FNAME_VAULT = os.path.dirname(
    blackbox.__file__) + '/tests/material/regression_vault.blackbox.pkl'
for i, test in enumerate(pkl.load(open(FNAME_VAULT, 'rb'))):
    print(" running test " + str(i))
    request, rslt = test
    stat = search(*request)

    np.testing.assert_equal(rslt, stat)
Example #7
#     pyth = approx_fprime(x, fit_partial, epsilon=1e-6)
#     fort = replacements_f2py.f2py_derivative_function(x, lam, b, a, T, points[:, :-1], n, d)
#
# #    replacements_f2py.f2py_derivative_constraints(points[:, :-1], r, x, n, d)
#
#
#     np.testing.assert_almost_equal(fort, pyth)
#
# raise SystemExit('.. exit for now')

from functools import partial

import scipy
import numpy as np

from blackbox.tests.auxiliary import get_valid_request
np.random.seed(123)
for _ in range(0):
    print(_)

    d, _, n, _, _, _ = get_valid_request()
    points = np.random.uniform(size=(n, d + 1))
    points[:, 0:-1] = latin(n, d)

    #d = points.shape[1] - 1
    #n = points.shape[0]

    r = np.random.uniform() + 0.01
    mat, start = np.identity(d), np.random.rand(d)
    lam, b, a = rbf(points, mat)
    T = mat

    cons = list()
    for k in range(n):
        constraint = partial(constraint_full, points, r, k)
        cons.append(constraint)
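
The loop above only collects the constraint callables; a natural next step, which the script stops short of and is therefore only assumed here, is to hand them to scipy.optimize.minimize in its dictionary form. A self-contained sketch with a toy objective and toy spread-style constraints (keep the candidate point at least a given distance away from a set of existing points):

import numpy as np
from functools import partial
from scipy.optimize import minimize


def toy_constraint(center, radius, x):
    """SciPy 'ineq' convention: feasible where the value is >= 0, i.e. x keeps
    at least radius distance from center."""
    return np.linalg.norm(x - center) - radius


np.random.seed(123)
centers = np.random.uniform(size=(5, 2))
cons = [{'type': 'ineq', 'fun': partial(toy_constraint, center, 0.2)}
        for center in centers]

# Start from a feasible point well outside the cloud of centers.
rslt = minimize(lambda x: np.sum(x ** 2), x0=np.full(2, 2.0),
                method='SLSQP', constraints=cons)
print(rslt.x)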