Example #1
import dlib


def dlib_find_max_global(f, bounds, **kwargs):
    # Read f's positional parameter names and look up each one's
    # (lower, upper) bounds in the `bounds` dict, preserving argument order.
    varnames = f.__code__.co_varnames[:f.__code__.co_argcount]
    bound1_, bound2_ = [], []
    for varname in varnames:
        bound1_.append(bounds[varname][0])
        bound2_.append(bounds[varname][1])

    return dlib.find_max_global(f, bound1_, bound2_, **kwargs)
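
A quick usage sketch for this wrapper; the objective, bounds, and call budget below are invented for illustration:

def objective(x, y):
    # Toy concave objective with its single peak at (1, -2).
    return -(x - 1.0) ** 2 - (y + 2.0) ** 2

# Bounds are keyed by the objective's parameter names.
best_xy, best_score = dlib_find_max_global(
    objective,
    bounds={'x': (-5.0, 5.0), 'y': (-5.0, 5.0)},
    num_function_calls=50)
print(best_xy, best_score)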
Example #2
import dlib


def dlib_find_max_global(f, bounds, int_vars=(), **kwargs):
    # Same wrapper as above, but also builds dlib's is_integer_variable
    # flags: 1 for each parameter named in int_vars, 0 otherwise.
    varnames = f.__code__.co_varnames[:f.__code__.co_argcount]
    bound1_, bound2_, int_vars_ = [], [], []
    for varname in varnames:
        bound1_.append(bounds[varname][0])
        bound2_.append(bounds[varname][1])
        int_vars_.append(1 if varname in int_vars else 0)

    return dlib.find_max_global(f, bound1=bound1_, bound2=bound2_,
                                is_integer_variable=int_vars_, **kwargs)
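
A hypothetical call that marks one argument as integer-valued; the objective and bounds here are invented for illustration:

def objective(lr, layers):
    # Toy score peaking near lr = 0.1 and layers = 3.
    return -(lr - 0.1) ** 2 - (layers - 3) ** 2

best, score = dlib_find_max_global(
    objective,
    bounds={'lr': (1e-4, 1.0), 'layers': (1, 8)},
    int_vars=['layers'],
    num_function_calls=60)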
Example #3
import dlib


def optimize():
    """Run optimization for depth multiplier, weight decay and dropout keep probability."""
    lower_bounds = [0.25, 0.00004, 0.5]
    upper_bounds = [1.0, 0.001, 1.0]
    result = dlib.find_max_global(hyperparameter_score, lower_bounds,
                                  upper_bounds, FLAGS.num_train_runs)
    print(
        'Finished optimization. Best hyperparameters found: \n'
        'depth_multiplier = %.3f, weight_decay = %.5f, dropout_keep_prob = %.3f, accuracy = %.2f%%'
        % (result[0][0], result[0][1], result[0][2], result[1] * 100))
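
hyperparameter_score and FLAGS are defined elsewhere in the original script; a hypothetical stub just to show the contract find_max_global() expects (three scalars in, one score out):

def hyperparameter_score(depth_multiplier, weight_decay, dropout_keep_prob):
    # Hypothetical stand-in: the real routine trains a model with these
    # hyperparameters and returns its validation accuracy in [0, 1].
    return 1.0 - (depth_multiplier - 0.5) ** 2 - 1000.0 * weight_decay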
Example #4
from dlib import find_max_global, find_min_global
from pytest import raises


def test_global_optimization_nargs():
    w0 = find_max_global(lambda *args: sum(args), [0, 0, 0], [1, 1, 1], 10)
    w1 = find_min_global(lambda *args: sum(args), [0, 0, 0], [1, 1, 1], 10)
    assert w0 == ([1, 1, 1], 3)
    assert w1 == ([0, 0, 0], 0)

    w2 = find_max_global(lambda a, b, c, *args: a + b + c - sum(args), [0, 0, 0], [1, 1, 1], 10)
    w3 = find_min_global(lambda a, b, c, *args: a + b + c - sum(args), [0, 0, 0], [1, 1, 1], 10)
    assert w2 == ([1, 1, 1], 3)
    assert w3 == ([0, 0, 0], 0)

    with raises(Exception):
        find_max_global(lambda a, b: 0, [0, 0, 0], [1, 1, 1], 10)
    with raises(Exception):
        find_min_global(lambda a, b: 0, [0, 0, 0], [1, 1, 1], 10)
    with raises(Exception):
        find_max_global(lambda a, b, c, d, *args: 0, [0, 0, 0], [1, 1, 1], 10)
    with raises(Exception):
        find_min_global(lambda a, b, c, d, *args: 0, [0, 0, 0], [1, 1, 1], 10)
Example #5
import dlib
import numpy as np
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from math import sin, cos, pi, exp, sqrt


# Let's find the maximizer of this horrible function:
def messy_holder_table(x, y):
    Z = abs(sin(x) * cos(y) * exp(abs(1 - sqrt(x * x + y * y) / pi)))
    R = max(9, abs(x) + abs(y))**5
    return 1e5 * Z / R


xy, z = dlib.find_max_global(
    messy_holder_table,
    [-15, -15],  # Lower bound constraints on x and y respectively
    [15, 15],    # Upper bound constraints on x and y respectively
    100)         # The number of times find_max_global() will call messy_holder_table()

print("xy: ", xy)
print("z: ", z)
opt_z = messy_holder_table(-8.162150706931659, 0)
print("distance from optimal: ", opt_z - z)

# Now plot a 3D view of messy_holder_table() and also draw the point the optimizer located
X = np.arange(-15, 15, 0.1)
Y = np.arange(-15, 15, 0.1)
X, Y = np.meshgrid(X, Y)

# Switch to numpy's vectorized versions of these functions for the meshgrid.
from numpy import sin, cos, pi, exp, sqrt
Z = abs(sin(X) * cos(Y) * exp(abs(1 - sqrt(X * X + Y * Y) / pi)))
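
The snippet stops after computing Z on the grid; a minimal sketch of the surface plot it appears to be building. The colormap and marker styling are assumptions, and Z is rescaled the same way messy_holder_table() rescales it:

import matplotlib.pyplot as plt

R = np.maximum(9, np.abs(X) + np.abs(Y)) ** 5
Z = 1e5 * Z / R  # apply the same rescaling messy_holder_table() uses

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm, linewidth=0, antialiased=False)
ax.scatter([xy[0]], [xy[1]], [z], c='red', s=40)  # the maximizer found above
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()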
Example #6
import dlib 
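
df and model_full come from earlier cells of the source notebook; a hypothetical setup, assuming the UCI concrete compressive strength data with a 'csMPa' target column and a scikit-learn regressor:

import pandas as pd
from sklearn.ensemble import RandomForestRegressor

df = pd.read_csv('concrete_data.csv')  # hypothetical filename
# Hypothetical model: any regressor mapping the eight mix features to strength.
model_full = RandomForestRegressor(random_state=0)
model_full.fit(df.drop(columns=['csMPa']).values, df['csMPa'])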

features = ['cement', 'slag', 'flyash', 'water', 'superplasticizer',
            'coarseaggregate', 'fineaggregate', 'age']
lbounds = [df[c].min() for c in features]
ubounds = [df[c].max() for c in features]
max_fun_calls = 1000

def maxlip_obj_fun(X1, X2, X3, X4, X5, X6, X7, X8):
    X = [[X1, X2, X3, X4, X5, X6, X7, X8]]
    # predict() returns an array; hand dlib back a plain float.
    return float(model_full.predict(X)[0])


sol, obj_val = dlib.find_max_global(maxlip_obj_fun, lbounds, ubounds, max_fun_calls)

print("MAXLIPO Results: ")
print('cement:', sol[0])
print('slag:', sol[1])
print('flyash:', sol[2])
print('water:', sol[3])
print('superplasticizer:', sol[4])
print('coarseaggregate:', sol[5])
print('fineaggregate:', sol[6])
print('age:', sol[7])


print("Max Strength: ", obj_val)

Example #7
#   To compile dlib yourself, go into the dlib root folder and run:
#       python setup.py install
#
#   Compiling dlib should work on any operating system so long as you have
#   CMake and boost-python installed.  On Ubuntu, this can be done easily by
#   running the command:
#       sudo apt-get install libboost-python-dev cmake
#

import dlib
from math import sin, cos, pi, exp, sqrt


# This is a standard test function for these kinds of optimization problems.
# It has a bunch of local maxima, with the global maximum resulting in
# holder_table()==19.2085025679.
def holder_table(x0, x1):
    return abs(sin(x0) * cos(x1) * exp(abs(1 - sqrt(x0 * x0 + x1 * x1) / pi)))


# Find the optimal inputs to holder_table().  The print statements that follow
# show that find_max_global() finds the optimal settings to high precision.
x, y = dlib.find_max_global(
    holder_table,
    [-10, -10],  # Lower bound constraints on x0 and x1 respectively
    [10, 10],  # Upper bound constraints on x0 and x1 respectively
    80)  # The number of times find_max_global() will call holder_table()

print("optimal inputs: {}".format(x))
print("optimal output: {}".format(y))
Example #8
import sys
import dlib


def test_params(C, nuclear_norm):
    # Assumed setup for the training call below: standard simple object
    # detector options, with the training xml path inferred from the test
    # xml used further down.
    options = dlib.simple_object_detector_training_options()
    options.C = C
    options.nuclear_norm_regularization_strength = nuclear_norm
    dlib.train_simple_object_detector(
        "images/small_face_dataset/cluster_001.xml", "detector1_.svm",
        options)

    # You can do a lot here.  Run the detector through
    # dlib.threshold_filter_singular_values() for instance to make sure it
    # learns something that will work once thresholded. We can also add a
    # penalty for having a lot of filters.   Run this program a few times and
    # try out different ways of penalizing the return from test_params() and
    # see what happens.
    result = dlib.test_simple_object_detector(
        "images/small_face_dataset/cluster_001_test.xml", "detector1_.svm")
    print("C = {}, nuclear_norm = {}".format(C, nuclear_norm))
    print("testing accuracy: ", result)
    sys.stdout.flush()
    # For settings with the same average precision, we should prefer smaller C
    # since smaller C has better generalization.
    return result.average_precision - C * 1e-8


lower = [0.01, 0]
upper = [100, 10]

x, y = dlib.find_max_global(test_params,
                            bound1=lower,
                            bound2=upper,
                            num_function_calls=20)

print("optimal inputs: {}".format(x))
print("optimal output: {}".format(y))

test_params(x[0], x[1])