def run_example(verbose=True, testapp=None):
    """Run the example, finding the posterior mean and variance for various points from a random GP.

    :param verbose: whether to print the GP mean/variance at each evaluated point [True]
    :type verbose: bool
    :param testapp: a test pyramid application to use instead of a REST server [None]
    :type testapp: Pyramid test application
    """
    # NOTE(review): points_sampled is a free name — assumed to be defined at module level; confirm.
    points_to_evaluate = [[x] for x in numpy.arange(0, 1, 0.05)]  # uniform grid of points
    mean, var = gp_mean_var(
            points_sampled,  # Historical data to inform Gaussian Process
            points_to_evaluate,  # We will calculate the mean and variance of the GP at these points
            testapp=testapp,
            )

    if verbose:
        # Print out the mean and variance of the GP at each point_to_evaluate
        # (var[i][i] is the marginal variance at point i)
        for i, point in enumerate(points_to_evaluate):
            print "GP({0:s}) ~ N({1:.18E}, {2:.18E})".format(str(point), mean[i], var[i][i])
# Example #2
def run_example(verbose=True, testapp=None, **kwargs):
    """Run the example, finding the posterior mean and variance for various poinst from a random GP."""
    points_to_evaluate = [[x] for x in numpy.arange(0, 1, 0.05)
                          ]  # uniform grid of points
    mean, var = gp_mean_var(
        points_sampled,  # Historical data to inform Gaussian Process
        points_to_evaluate,  # We will calculate the mean and variance of the GP at these points
        testapp=testapp,
        **kwargs)

    if verbose:
        # Print out the mean and variance of the GP at each point_to_evaluate
        for i, point in enumerate(points_to_evaluate):
            print "GP({0:s}) ~ N({1:.18E}, {2:.18E})".format(
                str(point), mean[i], var[i][i])
# Example #3
def run_example(num_to_sample=20, verbose=True, testapp=None, **kwargs):
    """Run the combined example."""
    exp = Experiment([[0, 2], [0, 4]])
    # Bootstrap with some known or already sampled point(s)
    exp.historical_data.append_sample_points([
        [[0, 0], function_to_minimize([0, 0]), 0.01],  # sampled points have the form [point_as_a_list, objective_function_value, value_variance]
        ])

    # Sample points
    for i in range(num_to_sample):
        covariance_info = {}
        if i > 0 and i % 5 == 0:
            covariance_info = gp_hyper_opt(exp.historical_data.to_list_of_sample_points(), testapp=testapp, **kwargs)

            if verbose:
                print "Updated covariance_info with {0:s}".format(str(covariance_info))
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = gp_next_points(
                exp,
                covariance_info=covariance_info,
                testapp=testapp,
                **kwargs
                )[0]  # By default we only ask for one point
        # Sample the point from our objective function, we can replace this with any function
        value_of_next_point = function_to_minimize(next_point_to_sample)

        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)

        # Add the information about the point to the experiment historical data to inform the GP
        exp.historical_data.append_sample_points([[next_point_to_sample, value_of_next_point, 0.01]])  # We can add some noise

    points_to_evaluate = [[x, x] for x in numpy.arange(0, 1, 0.1)]  # uniform grid of points
    mean, var = gp_mean_var(
            exp.historical_data.to_list_of_sample_points(),  # Historical data to inform Gaussian Process
            points_to_evaluate,  # We will calculate the mean and variance of the GP at these points
            testapp=testapp,
            **kwargs
            )

    if verbose:
        print "GP mean at (0, 0), (0.1, 0.1), ...: {0:s}".format(str(mean))
# Example #4
def run_example(num_to_sample=20, verbose=True, testapp=None, gp_next_points_kwargs=None, gp_hyper_opt_kwargs=None, gp_mean_var_kwargs=None, **kwargs):
    """Run the combined example.

    :param num_to_sample: Number of points for MOE to suggest and then sample [20]
    :type num_to_sample: int > 0
    :param verbose: Whether to print information to the screen [True]
    :type verbose: bool
    :param testapp: Whether to use a supplied test pyramid application or a rest server [None]
    :type testapp: Pyramid test application
    :param gp_next_points_kwargs: Optional kwargs to pass to gp_next_points endpoint
    :type gp_next_points_kwargs: dict
    :param gp_hyper_opt_kwargs: Optional kwargs to pass to gp_hyper_opt_kwargs endpoint
    :type gp_hyper_opt_kwargs: dict
    :param gp_mean_var_kwargs: Optional kwargs to pass to gp_mean_var_kwargs endpoint
    :type gp_mean_var_kwargs: dict
    :param kwargs: Optional kwargs to pass to all endpoints
    :type kwargs: dict

    """
    # Set and combine all optional kwargs
    # Note that the more specific kwargs take precedence (and will override general kwargs)
    if gp_next_points_kwargs is None:
        gp_next_points_kwargs = {}
    else:
        gp_next_points_kwargs = dict(kwargs.items() + gp_next_points_kwargs.items())

    if gp_hyper_opt_kwargs is None:
        gp_hyper_opt_kwargs = {}
    else:
        gp_hyper_opt_kwargs = dict(kwargs.items() + gp_hyper_opt_kwargs.items())

    if gp_mean_var_kwargs is None:
        gp_mean_var_kwargs = {}
    else:
        gp_mean_var_kwargs = dict(kwargs.items() + gp_mean_var_kwargs.items())

    exp = Experiment([[0, 2], [0, 4]])
    # Bootstrap with some known or already sampled point(s)
    exp.historical_data.append_sample_points([
        [[0, 0], function_to_minimize([0, 0]), 0.01],  # sampled points have the form [point_as_a_list, objective_function_value, value_variance]
        ])

    # Sample points
    for i in range(num_to_sample):
        covariance_info = {}
        if i > 0 and i % 5 == 0:
            covariance_info = gp_hyper_opt(exp.historical_data.to_list_of_sample_points(), testapp=testapp, **gp_hyper_opt_kwargs)

            if verbose:
                print "Updated covariance_info with {0:s}".format(str(covariance_info))
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = gp_next_points(
                exp,
                covariance_info=covariance_info,
                testapp=testapp,
                **gp_next_points_kwargs
                )[0]  # By default we only ask for one point
        # Sample the point from our objective function, we can replace this with any function
        value_of_next_point = function_to_minimize(next_point_to_sample)

        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)

        # Add the information about the point to the experiment historical data to inform the GP
        exp.historical_data.append_sample_points([[next_point_to_sample, value_of_next_point, 0.01]])  # We can add some noise

    points_to_evaluate = [[x, x] for x in numpy.arange(0, 1, 0.1)]  # uniform grid of points
    mean, var = gp_mean_var(
            exp.historical_data.to_list_of_sample_points(),  # Historical data to inform Gaussian Process
            points_to_evaluate,  # We will calculate the mean and variance of the GP at these points
            testapp=testapp,
            **gp_mean_var_kwargs
            )

    if verbose:
        print "GP mean at (0, 0), (0.1, 0.1), ...: {0:s}".format(str(mean))
# Example #5
def run_example(
        num_to_sample=20,
        verbose=True,
        testapp=None,
        gp_next_points_kwargs=None,
        gp_hyper_opt_kwargs=None,
        gp_mean_var_kwargs=None,
        **kwargs
):
    """Run the combined example.

    :param num_to_sample: Number of points for MOE to suggest and then sample [20]
    :type num_to_sample: int > 0
    :param verbose: Whether to print information to the screen [True]
    :type verbose: bool
    :param testapp: Whether to use a supplied test pyramid application or a rest server [None]
    :type testapp: Pyramid test application
    :param gp_next_points_kwargs: Optional kwargs to pass to gp_next_points endpoint
    :type gp_next_points_kwargs: dict
    :param gp_hyper_opt_kwargs: Optional kwargs to pass to gp_hyper_opt_kwargs endpoint
    :type gp_hyper_opt_kwargs: dict
    :param gp_mean_var_kwargs: Optional kwargs to pass to gp_mean_var_kwargs endpoint
    :type gp_mean_var_kwargs: dict
    :param kwargs: Optional kwargs to pass to all endpoints
    :type kwargs: dict

    """
    # Set and combine all optional kwargs
    # Note that the more specific kwargs take precedence (and will override general kwargs)
    if gp_next_points_kwargs is None:
        gp_next_points_kwargs = {}
    gp_next_points_kwargs = dict(kwargs.items() + gp_next_points_kwargs.items())

    if gp_hyper_opt_kwargs is None:
        gp_hyper_opt_kwargs = {}
    gp_hyper_opt_kwargs = dict(kwargs.items() + gp_hyper_opt_kwargs.items())

    if gp_mean_var_kwargs is None:
        gp_mean_var_kwargs = {}
    gp_mean_var_kwargs = dict(kwargs.items() + gp_mean_var_kwargs.items())

    exp = Experiment([[0, 2], [0, 4]])
    # Bootstrap with some known or already sampled point(s)
    exp.historical_data.append_sample_points([
        [[0, 0], function_to_minimize([0, 0]), 0.01],  # sampled points have the form [point_as_a_list, objective_function_value, value_variance]
        ])

    # Sample points
    for i in range(num_to_sample):
        covariance_info = {}
        if i > 0 and i % 5 == 0:
            covariance_info = gp_hyper_opt(exp.historical_data.to_list_of_sample_points(), testapp=testapp, **gp_hyper_opt_kwargs)

            if verbose:
                print "Updated covariance_info with {0:s}".format(str(covariance_info))
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = gp_next_points(
                exp,
                covariance_info=covariance_info,
                testapp=testapp,
                **gp_next_points_kwargs
                )[0]  # By default we only ask for one point
        # Sample the point from our objective function, we can replace this with any function
        value_of_next_point = function_to_minimize(next_point_to_sample)

        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)

        # Add the information about the point to the experiment historical data to inform the GP
        exp.historical_data.append_sample_points([[next_point_to_sample, value_of_next_point, 0.01]])  # We can add some noise

    points_to_evaluate = [[x, x] for x in numpy.arange(0, 1, 0.1)]  # uniform grid of points
    mean, var = gp_mean_var(
            exp.historical_data.to_list_of_sample_points(),  # Historical data to inform Gaussian Process
            points_to_evaluate,  # We will calculate the mean and variance of the GP at these points
            testapp=testapp,
            **gp_mean_var_kwargs
            )

    if verbose:
        print "GP mean at (0, 0), (0.1, 0.1), ...: {0:s}".format(str(mean))
def plot_sample_arms(active_arms, sample_arms, iteration_number):
    """Plot the underlying Gaussian Process, showing samples, GP mean/variance, and the true CTR.

    Plot is written to ``ab_plot_%.2d.pdf`` where ``%.2d`` is the two-digit ``iteration_number``.
    This makes it convenient to sequence the plots later for animation.

    The plot shows the current state of the Gaussian Process. Arms currently being sampled are
    highlighted with red Xs and previous arms are marked with blue Xs. The GP mean line is drawn
    and the variance region is shaded. The true CTR curve (that the GP approximates) is also shown.

    :param active_arms: list of coordinate-tuples corresponding to arms/cohorts currently being sampled
    :type active_arms: list of tuple
    :param sample_arms: all arms from prev and current cohorts, keyed by coordinate-tuples
      Arm refers specifically to a :class:`moe.bandit.data_containers.SampleArm`
    :type sample_arms: dict
    :param iteration_number: the index of the current iteration/round being tested in our experiment
    :type iteration_number: int >= 0

    """
    figure = plt.figure()
    ax = figure.add_subplot(111)

    experiment = moe_experiment_from_sample_arms(sample_arms)

    resolution = 0.01
    x_coords = numpy.arange(0, 1, resolution)  # shared x-axis grid for every curve below
    x_vals = [[x] for x in x_coords]  # uniform grid of points

    # Calculate the mean and variance of the underlying Gaussian Process
    mean, var = gp_mean_var(
            experiment.historical_data.to_list_of_sample_points(),
            x_vals,
            covariance_info=COVARIANCE_INFO,
            )
    # Negate the (minimized) objective for display — consistent with -sample_point.value below.
    mean = -numpy.array(mean)

    # Plot the mean line
    ax.plot(
            x_coords,
            mean,
            'b--',
            )
    # Plot the variance envelope around the mean
    var_diag = numpy.fabs(numpy.diag(var))
    ax.fill_between(
            x_coords,
            mean - var_diag,
            mean + var_diag,
            facecolor='green',
            alpha=0.2,
            )

    # Plot the true, underlying CTR of the system wrt the parameter, relative to status quo
    true_vals = numpy.array([true_click_through_rate(x) for x in x_vals]) / true_click_through_rate(STATUS_QUO_PARAMETER) - 1.0
    ax.plot(
            x_coords,
            true_vals,
            'k--',
            alpha=0.5,
            )

    # Plot the observed CTR of the simulated system at sampled parameter values.
    # BUG FIX: a stray trailing comma previously made this an accidental 1-tuple
    # that had to be unwrapped via sample_points[0].
    sample_points = experiment.historical_data.to_list_of_sample_points()
    for sample_point in sample_points:
        if tuple(sample_point.point) in active_arms:
            # New points are plotted as red x's
            fmt = 'rx'
        else:
            # Previously sampled points are plotted as blue x's
            fmt = 'bx'
        # These are simulated samples, include error bars
        ax.errorbar(
                sample_point.point,
                [-sample_point.value],
                yerr=sample_point.noise_variance,
                fmt=fmt,
                )

    plt.xlabel("Underlying Parameter")
    plt.ylabel("Relative CTR Gain vs Status Quo")
    plt.title("Relative CTR Gain and Gaussian Process")

    # NOTE(review): bbox_inches=0 looks like it was meant to be 'tight' — confirm before changing.
    plt.savefig("ab_plot_%.2d.pdf" % iteration_number, bbox_inches=0)
def plot_sample_arms(active_arms, sample_arms, iteration_number):
    """Plot the underlying Gaussian Process, showing samples, GP mean/variance, and the true CTR.

    Plot is written to ``ab_plot_%.2d.pdf`` where ``%.2d`` is the two-digit ``iteration_number``.
    This makes it convenient to sequence the plots later for animation.

    The plot shows the current state of the Gaussian Process. Arms currently being sampled are
    highlighted with red Xs and previous arms are marked with blue Xs. The GP mean line is drawn
    and the variance region is shaded. The true CTR curve (that the GP approximates) is also shown.

    :param active_arms: list of coordinate-tuples corresponding to arms/cohorts currently being sampled
    :type active_arms: list of tuple
    :param sample_arms: all arms from prev and current cohorts, keyed by coordinate-tuples
      Arm refers specifically to a :class:`moe.bandit.data_containers.SampleArm`
    :type sample_arms: dict
    :param iteration_number: the index of the current iteration/round being tested in our experiment
    :type iteration_number: int >= 0

    """
    figure = plt.figure()
    ax = figure.add_subplot(111)

    experiment = moe_experiment_from_sample_arms(sample_arms)

    resolution = 0.01
    x_coords = numpy.arange(0, 1, resolution)  # shared x-axis grid for every curve below
    x_vals = [[x] for x in x_coords]  # uniform grid of points

    # Calculate the mean and variance of the underlying Gaussian Process
    mean, var = gp_mean_var(
            experiment.historical_data.to_list_of_sample_points(),
            x_vals,
            covariance_info=COVARIANCE_INFO,
            )
    # Negate the (minimized) objective for display — consistent with -sample_point.value below.
    mean = -numpy.array(mean)

    # Plot the mean line
    ax.plot(
            x_coords,
            mean,
            'b--',
            )
    # Plot the variance envelope around the mean
    var_diag = numpy.fabs(numpy.diag(var))
    ax.fill_between(
            x_coords,
            mean - var_diag,
            mean + var_diag,
            facecolor='green',
            alpha=0.2,
            )

    # Plot the true, underlying CTR of the system wrt the parameter, relative to status quo
    true_vals = numpy.array([true_click_through_rate(x) for x in x_vals]) / true_click_through_rate(STATUS_QUO_PARAMETER) - 1.0
    ax.plot(
            x_coords,
            true_vals,
            'k--',
            alpha=0.5,
            )

    # Plot the observed CTR of the simulated system at sampled parameter values.
    # BUG FIX: a stray trailing comma previously made this an accidental 1-tuple
    # that had to be unwrapped via sample_points[0].
    sample_points = experiment.historical_data.to_list_of_sample_points()
    for sample_point in sample_points:
        if tuple(sample_point.point) in active_arms:
            # New points are plotted as red x's
            fmt = 'rx'
        else:
            # Previously sampled points are plotted as blue x's
            fmt = 'bx'
        # These are simulated samples, include error bars
        ax.errorbar(
                sample_point.point,
                [-sample_point.value],
                yerr=sample_point.noise_variance,
                fmt=fmt,
                )

    plt.xlabel("Underlying Parameter")
    plt.ylabel("Relative CTR Gain vs Status Quo")
    plt.title("Relative CTR Gain and Gaussian Process")

    # NOTE(review): bbox_inches=0 looks like it was meant to be 'tight' — confirm before changing.
    plt.savefig("ab_plot_%.2d.pdf" % iteration_number, bbox_inches=0)