def run_example(verbose=True, **kwargs):
    """Run the example, aksing MOE for optimal hyperparameters given historical data."""
    covariance_info = gp_hyper_opt(
            points_sampled,
            **kwargs
            )

    if verbose:
        print(covariance_info)
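This snippet assumes points_sampled and the MOE import already exist in the calling scope. A minimal setup sketch, assuming MOE's easy_interface exposes gp_hyper_opt as the other examples here use it; the sample values are illustrative only:

from moe.easy_interface.simple_endpoint import gp_hyper_opt

# Each historical sample follows the [point_as_a_list, objective_value, value_variance]
# format appended elsewhere in these examples; these particular points are made up.
points_sampled = [
    [[0.0, 0.0], 1.00, 0.01],
    [[0.5, 1.0], 0.20, 0.01],
    [[1.5, 3.0], -0.40, 0.01],
]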
Example #2
def run_example(num_to_sample=20, verbose=True, testapp=None, **kwargs):
    """Run the combined example."""
    exp = Experiment([[0, 2], [0, 4]])
    # Bootstrap with some known or already sampled point(s)
    exp.historical_data.append_sample_points([
        [[0, 0], function_to_minimize([0, 0]), 0.01],  # sampled points have the form [point_as_a_list, objective_function_value, value_variance]
        ])

    # Sample points
    for i in range(num_to_sample):
        covariance_info = {}
        if i > 0 and i % 5 == 0:
            covariance_info = gp_hyper_opt(exp.historical_data.to_list_of_sample_points(), testapp=testapp, **kwargs)

            if verbose:
                print "Updated covariance_info with {0:s}".format(str(covariance_info))
        # Ask MOE for the point with the highest Expected Improvement to sample next
        next_point_to_sample = gp_next_points(
                exp,
                covariance_info=covariance_info,
                testapp=testapp,
                **kwargs
                )[0]  # By default we only ask for one point
        # Evaluate the objective at the suggested point; any real-valued function could stand in here
        value_of_next_point = function_to_minimize(next_point_to_sample)

        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)

        # Add the information about the point to the experiment historical data to inform the GP
        exp.historical_data.append_sample_points([[next_point_to_sample, value_of_next_point, 0.01]])  # 0.01 is the assumed observation-noise variance

    points_to_evaluate = [[x, x] for x in numpy.arange(0, 1, 0.1)]  # points along the diagonal (0, 0), (0.1, 0.1), ..., (0.9, 0.9)
    mean, var = gp_mean_var(
            exp.historical_data.to_list_of_sample_points(),  # Historical data to inform Gaussian Process
            points_to_evaluate,  # We will calculate the mean and variance of the GP at these points
            testapp=testapp,
            **kwargs
            )

    if verbose:
        print "GP mean at (0, 0), (0.1, 0.1), ...: {0:s}".format(str(mean))
Example #3
def run_example(verbose=True, **kwargs):
    """Run the example, aksing MOE for optimal hyperparameters given historical data."""
    covariance_info = gp_hyper_opt(points_sampled, **kwargs)

    if verbose:
        print(covariance_info)
Example #4
def run_example(num_to_sample=20, verbose=True, testapp=None, gp_next_points_kwargs=None, gp_hyper_opt_kwargs=None, gp_mean_var_kwargs=None, **kwargs):
    """Run the combined example.

    :param num_to_sample: Number of points for MOE to suggest and then sample [20]
    :type num_to_sample: int > 0
    :param verbose: Whether to print information to the screen [True]
    :type verbose: bool
    :param testapp: A test Pyramid application to use in place of a REST server [None]
    :type testapp: Pyramid test application
    :param gp_next_points_kwargs: Optional kwargs to pass to the gp_next_points endpoint
    :type gp_next_points_kwargs: dict
    :param gp_hyper_opt_kwargs: Optional kwargs to pass to the gp_hyper_opt endpoint
    :type gp_hyper_opt_kwargs: dict
    :param gp_mean_var_kwargs: Optional kwargs to pass to the gp_mean_var endpoint
    :type gp_mean_var_kwargs: dict
    :param kwargs: Optional kwargs to pass to all endpoints
    :type kwargs: dict

    """
    # Set and combine all optional kwargs
    # Note that the more specific kwargs take precedence (and will override general kwargs)
    if gp_next_points_kwargs is None:
        gp_next_points_kwargs = {}
    gp_next_points_kwargs = {**kwargs, **gp_next_points_kwargs}

    if gp_hyper_opt_kwargs is None:
        gp_hyper_opt_kwargs = {}
    gp_hyper_opt_kwargs = {**kwargs, **gp_hyper_opt_kwargs}

    if gp_mean_var_kwargs is None:
        gp_mean_var_kwargs = {}
    gp_mean_var_kwargs = {**kwargs, **gp_mean_var_kwargs}

    exp = Experiment([[0, 2], [0, 4]])
    # Bootstrap with some known or already sampled point(s)
    exp.historical_data.append_sample_points([
        [[0, 0], function_to_minimize([0, 0]), 0.01],  # sampled points have the form [point_as_a_list, objective_function_value, value_variance]
        ])

    # Sample points
    for i in range(num_to_sample):
        covariance_info = {}
        if i > 0 and i % 5 == 0:
            covariance_info = gp_hyper_opt(exp.historical_data.to_list_of_sample_points(), testapp=testapp, **gp_hyper_opt_kwargs)

            if verbose:
                print "Updated covariance_info with {0:s}".format(str(covariance_info))
        # Ask MOE for the point with the highest Expected Improvement to sample next
        next_point_to_sample = gp_next_points(
                exp,
                covariance_info=covariance_info,
                testapp=testapp,
                **gp_next_points_kwargs
                )[0]  # By default we only ask for one point
        # Evaluate the objective at the suggested point; any real-valued function could stand in here
        value_of_next_point = function_to_minimize(next_point_to_sample)

        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)

        # Add the information about the point to the experiment historical data to inform the GP
        exp.historical_data.append_sample_points([[next_point_to_sample, value_of_next_point, 0.01]])  # 0.01 is the assumed observation-noise variance

    points_to_evaluate = [[x, x] for x in numpy.arange(0, 1, 0.1)]  # points along the diagonal (0, 0), (0.1, 0.1), ..., (0.9, 0.9)
    mean, var = gp_mean_var(
            exp.historical_data.to_list_of_sample_points(),  # Historical data to inform Gaussian Process
            points_to_evaluate,  # We will calculate the mean and variance of the GP at these points
            testapp=testapp,
            **gp_mean_var_kwargs
            )

    if verbose:
        print "GP mean at (0, 0), (0.1, 0.1), ...: {0:s}".format(str(mean))
Example #5
def objective(q0_val, sigma0_val):
    # Signature inferred from the calls further down; the listing is truncated
    # here, so the setup of q0, sigma0, variables, and the data objects is not shown.
    print(sigma0.value)
    for k, temperature in enumerate(temperatures):
        observed = measurements[k]
        predicted = indexed_data.density.ix[(q0_val, sigma0_val, temperature)]
        tau = (observed * relative_error) ** -2.
        var = pymc.Normal("obs_%d" % k, mu=predicted, tau=tau, observed=True, value=observed)
        print(predicted, observed, tau, var.logp)
        variables.append(var)
    
    model = pymc.MCMC(variables)
    return model.logp

a, b = data[keys].iloc[0].values
logp = objective(a, b)

get_bounds = lambda variable: (variable.parents["lower"], variable.parents["upper"])

experiment_bounds = [get_bounds(q0), get_bounds(sigma0)]
exp = Experiment(experiment_bounds)

for (q0_val, sigma0_val) in data.set_index(keys).index:
    value = objective(q0_val, sigma0_val)
    print(q0_val, sigma0_val, value)
    error = 0.001
    exp.historical_data.append_sample_points([[(q0_val, sigma0_val), value, error]])


covariance_info = gp_hyper_opt(exp.historical_data.to_list_of_sample_points())
next_point_to_sample = gp_next_points(exp, covariance_info=covariance_info)
print(next_point_to_sample)
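The get_bounds helper above reads parents["lower"] and parents["upper"], which pymc Uniform stochastics expose, so the truncated part of the snippet presumably defines priors along these lines (the bounds here are illustrative, not the original values):

import pymc

q0 = pymc.Uniform("q0", lower=0.0, upper=2.0)          # hypothetical bounds
sigma0 = pymc.Uniform("sigma0", lower=0.0, upper=4.0)  # hypothetical bounds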
Example #6
def run_example(
        num_to_sample=20,
        verbose=True,
        testapp=None,
        gp_next_points_kwargs=None,
        gp_hyper_opt_kwargs=None,
        gp_mean_var_kwargs=None,
        **kwargs
):
    """Run the combined example.

    :param num_to_sample: Number of points for MOE to suggest and then sample [20]
    :type num_to_sample: int > 0
    :param verbose: Whether to print information to the screen [True]
    :type verbose: bool
    :param testapp: A test Pyramid application to use in place of a REST server [None]
    :type testapp: Pyramid test application
    :param gp_next_points_kwargs: Optional kwargs to pass to the gp_next_points endpoint
    :type gp_next_points_kwargs: dict
    :param gp_hyper_opt_kwargs: Optional kwargs to pass to the gp_hyper_opt endpoint
    :type gp_hyper_opt_kwargs: dict
    :param gp_mean_var_kwargs: Optional kwargs to pass to the gp_mean_var endpoint
    :type gp_mean_var_kwargs: dict
    :param kwargs: Optional kwargs to pass to all endpoints
    :type kwargs: dict

    """
    # Set and combine all optional kwargs
    # Note that the more specific kwargs take precedence (and will override general kwargs)
    if gp_next_points_kwargs is None:
        gp_next_points_kwargs = {}
    gp_next_points_kwargs = {**kwargs, **gp_next_points_kwargs}

    if gp_hyper_opt_kwargs is None:
        gp_hyper_opt_kwargs = {}
    gp_hyper_opt_kwargs = {**kwargs, **gp_hyper_opt_kwargs}

    if gp_mean_var_kwargs is None:
        gp_mean_var_kwargs = {}
    gp_mean_var_kwargs = {**kwargs, **gp_mean_var_kwargs}

    exp = Experiment([[0, 2], [0, 4]])
    # Bootstrap with some known or already sampled point(s)
    exp.historical_data.append_sample_points([
        [[0, 0], function_to_minimize([0, 0]), 0.01],  # sampled points have the form [point_as_a_list, objective_function_value, value_variance]
        ])

    # Sample points
    for i in range(num_to_sample):
        covariance_info = {}
        if i > 0 and i % 5 == 0:
            covariance_info = gp_hyper_opt(exp.historical_data.to_list_of_sample_points(), testapp=testapp, **gp_hyper_opt_kwargs)

            if verbose:
                print "Updated covariance_info with {0:s}".format(str(covariance_info))
        # Ask MOE for the point with the highest Expected Improvement to sample next
        next_point_to_sample = gp_next_points(
                exp,
                covariance_info=covariance_info,
                testapp=testapp,
                **gp_next_points_kwargs
                )[0]  # By default we only ask for one point
        # Evaluate the objective at the suggested point; any real-valued function could stand in here
        value_of_next_point = function_to_minimize(next_point_to_sample)

        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)

        # Add the information about the point to the experiment historical data to inform the GP
        exp.historical_data.append_sample_points([[next_point_to_sample, value_of_next_point, 0.01]])  # 0.01 is the assumed observation-noise variance

    points_to_evaluate = [[x, x] for x in numpy.arange(0, 1, 0.1)]  # points along the diagonal (0, 0), (0.1, 0.1), ..., (0.9, 0.9)
    mean, var = gp_mean_var(
            exp.historical_data.to_list_of_sample_points(),  # Historical data to inform Gaussian Process
            points_to_evaluate,  # We will calculate the mean and variance of the GP at these points
            testapp=testapp,
            **gp_mean_var_kwargs
            )

    if verbose:
        print "GP mean at (0, 0), (0.1, 0.1), ...: {0:s}".format(str(mean))
Example #7
    for k, temperature in enumerate(temperatures):
        # (listing truncated above; this loop context is restored from the
        # identical code in Example #5)
        observed = measurements[k]
        predicted = indexed_data.density.ix[(q0_val, sigma0_val, temperature)]
        tau = (observed * relative_error) ** -2.
        var = pymc.Normal("obs_%d" % k,
                          mu=predicted,
                          tau=tau,
                          observed=True,
                          value=observed)
        print(predicted, observed, tau, var.logp)
        variables.append(var)

    model = pymc.MCMC(variables)
    return model.logp


a, b = data[keys].iloc[0].values
logp = objective(a, b)

get_bounds = lambda variable: (variable.parents["lower"], variable.parents["upper"])

experiment_bounds = [get_bounds(q0), get_bounds(sigma0)]
exp = Experiment(experiment_bounds)

for (q0_val, sigma0_val) in data.set_index(keys).index:
    value = objective(q0_val, sigma0_val)
    print(q0_val, sigma0_val, value)
    error = 0.001
    exp.historical_data.append_sample_points([[(q0_val, sigma0_val), value,
                                               error]])

covariance_info = gp_hyper_opt(exp.historical_data.to_list_of_sample_points())
next_point_to_sample = gp_next_points(exp, covariance_info=covariance_info)
print(next_point_to_sample)
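Here gp_next_points is called without indexing, so (as in the earlier examples) it returns a list of suggested points. A sketch of closing the loop under that assumption, reusing the 0.001 error from above:

suggested = next_point_to_sample[0]  # first (and by default only) suggestion
value = objective(*suggested)        # evaluate the pymc log-posterior there
exp.historical_data.append_sample_points([[suggested, value, 0.001]])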