Code Example #1
def do_xgb_MOE(num_points_to_sample, X_train, y_train, verbose=True, **kwargs):
    # Finding Best XGB parameters using MOE
    xgb_parameters = {}
    # Range of XGBoost parameters that are optimized
    exp_xgb = Experiment([[0.1, 1], [0.002, 1], [0.01, 1]])  # learning_rate_range = [0.1, 1]; n_estimators_range = [2, 1000] is normalized;
                                                             # max_depth_range = [1, 100] is normalized

    n_folds = 10
    cv_folds = cross_validation.StratifiedKFold(y_train, n_folds=n_folds)

    best_point = []
    best_point_value = 0.
    for _ in range(num_points_to_sample):
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = gp_next_points(exp_xgb, rest_host='localhost', rest_port=6543, **kwargs)[0]  # By default we only ask for one point

        # Sample the point from objective function
        xgb_parameters['learning_rate'] = next_point_to_sample[0]
        xgb_parameters['n_estimators'] = int(round(next_point_to_sample[1]*1000))   
        xgb_parameters['max_depth'] = int(round(next_point_to_sample[2]*100))       
        acc_cv, prec_cv, rec_cv, cm_cv, cm_full_cv = xgboost_cross_validation(X_train, y_train, xgb_parameters, cv_folds)
        value_of_next_point = acc_cv
        if value_of_next_point > best_point_value:
            best_point_value = value_of_next_point
            best_point = next_point_to_sample
        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)
        # Add the information about the point to the experiment historical data to inform the GP
        exp_xgb.historical_data.append_sample_points([SamplePoint(next_point_to_sample, -value_of_next_point, 0.0001)])  # We can add some noise
    best_point[1] = int(round(best_point[1] * 1000))
    best_point[2] = int(round(best_point[2] * 100))
    return best_point, best_point_value
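
`xgboost_cross_validation` is a project helper that is not shown in this snippet. A minimal sketch of what it plausibly looks like (an assumption, not the original implementation; it expects numpy arrays and returns the five values unpacked above):

import numpy as np
from sklearn import metrics
from xgboost import XGBClassifier

def xgboost_cross_validation(X_train, y_train, xgb_parameters, cv_folds):
    accs, precs, recs, cms = [], [], [], []
    for train_idx, test_idx in cv_folds:
        clf = XGBClassifier(**xgb_parameters)
        clf.fit(X_train[train_idx], y_train[train_idx])
        y_pred = clf.predict(X_train[test_idx])
        accs.append(metrics.accuracy_score(y_train[test_idx], y_pred))
        precs.append(metrics.precision_score(y_train[test_idx], y_pred, average='macro'))
        recs.append(metrics.recall_score(y_train[test_idx], y_pred, average='macro'))
        cms.append(metrics.confusion_matrix(y_train[test_idx], y_pred))
    # Mean accuracy/precision/recall across folds, plus the mean and summed confusion matrices
    return (np.mean(accs), np.mean(precs), np.mean(recs),
            np.mean(cms, axis=0), np.sum(cms, axis=0))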
Code Example #2
def do_rfc_MOE(num_points_to_sample, X_train, y_train, verbose=True, **kwargs):
    exp_rfc = Experiment([[0.005, 1], [0.04, 1], [0.1, 1], [0.1, 1]])  # n_estimators_range = [5, 1000] and max_features_range = [2, 50] are normalized;
                                                                       # max_depth_range = [1, 10] and min_samples_leaf_range = [1, 10] are normalized
    best_point = []
    best_point_value = 0.    
    for _ in range(num_points_to_sample):
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = gp_next_points(exp_rfc, rest_host='localhost', rest_port=6543, **kwargs)[0]  # By default we only ask for one point
        # Sample the point from objective function
        n_estimators = int(round(next_point_to_sample[0] * 1000.0))
        max_features = int(round(next_point_to_sample[1] * 50))
        max_depth = int(round(next_point_to_sample[2] * 10))
        min_samples_leaf = int(round(next_point_to_sample[3] * 10))
        rfc = RandomForestClassifier(n_estimators=n_estimators, criterion='gini', 
            max_depth=max_depth, min_samples_split=2, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=0.0,            
            max_features=max_features, max_leaf_nodes=None, bootstrap=True, oob_score=False, n_jobs=-1, 
            random_state=None, verbose=0, warm_start=False, class_weight=None)
        score_cv = cross_validation.cross_val_score(rfc, X_train, y_train, cv=10, scoring='accuracy')
        value_of_next_point = np.mean(score_cv)
        if value_of_next_point > best_point_value:
            best_point_value = value_of_next_point
            best_point = next_point_to_sample          
        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)
        # Add the information about the point to the experiment historical data to inform the GP
        exp_rfc.historical_data.append_sample_points([SamplePoint(next_point_to_sample, -value_of_next_point, 0.0001)])  # We can add some noise
Code Example #3
def find_new_points_to_sample(experiment, num_points=1, verbose=False):
    """Find the optimal next point(s) to sample using expected improvement (via MOE).

    :param experiment: an Experiment object containing the historical data and metadata MOE
      needs to optimize
    :type experiment: :class:`moe.easy_interface.experiment.Experiment`
    :param num_points: number of new points (experiments) that we want MOE to suggest
    :type num_points: int >= 1
    :param verbose: whether to print status messages to stdout
    :type verbose: bool
    :return: the next point(s) to sample
    :rtype: list of length ``num_points`` of coordinates (list of length ``dim``)

    """
    if verbose:
        print("Getting {0} new suggested point(s) to sample from MOE...".format(num_points))

    # Query MOE for the next points to sample
    next_points_to_sample = gp_next_points(
            experiment,
            method_route_name=GP_NEXT_POINTS_KRIGING_ROUTE_NAME,
            covariance_info=COVARIANCE_INFO,
            num_to_sample=num_points,
            optimizer_info={
                'optimizer_type': GRADIENT_DESCENT_OPTIMIZER,
                },
            )

    if verbose:
        print("Optimal points to sample next: {0}".format(next_points_to_sample))

    return next_points_to_sample
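
The helper above assumes some module-level constants that are not shown. A plausible setup (the import paths and hyperparameter values are assumptions and may differ between MOE versions) would be:

from moe.views.constant import GP_NEXT_POINTS_KRIGING_ROUTE_NAME
from moe.optimal_learning.python.constant import GRADIENT_DESCENT_OPTIMIZER

COVARIANCE_INFO = {
    'covariance_type': 'square_exponential',
    'hyperparameters': [1.0, 0.5, 0.5],  # signal variance followed by one length scale per domain dimension (2D assumed here)
}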
Code Example #4
def find_fastest_policy(trials, alpha_0=10, beta_0=10, ex_cutoff=250, perf_thresh=0.93,
                        students=None, test_path=TEST_PATH, make_plot=True):
    """
    Find the best alpha/beta for the teaching policy.
    Args:
        trials: Number of teaching plans to try out (including first plan).
        alpha_0: Starting alpha.
        beta_0: Starting beta.
        ex_cutoff: Max number of examples to show.
        perf_thresh: The threshold of what is considered perfect.
        students: The students to teach; defaults to STUDENTS if none given.
        test_path: The path of the file with test qs/answers.
        make_plot: Whether to make a scatter plot of the history.
    Returns: The best alpha/beta found.
    """
    if students is None:
        students = STUDENTS
    test_qs, test_ans = plan_eval.read_test(test_path)
    history = []
    eval_policy = _create_perf_evaluator(ex_cutoff, perf_thresh, students, test_qs, test_ans, history)

    experiment = Experiment([[0, ALPHA_MAX], [0, BETA_MAX]])
    # Run the start experiment and evaluate.
    experiment.historical_data.append_sample_points([eval_policy(alpha_0, beta_0)])
    for i in xrange(trials-1):
        print '--------TRIAL %d DONE--------' % (i + 1)
        alpha, beta = gp_next_points(experiment)[0]
        experiment.historical_data.append_sample_points([eval_policy(alpha, beta)])
    best = min(history)
    print len(history)

    if make_plot:
        plot_history(min(history), history)
    return best
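
A hypothetical invocation of the tuner above (it assumes a MOE REST server reachable on the easy_interface default of localhost:6543, and that STUDENTS and TEST_PATH are defined at module level):

best = find_fastest_policy(trials=30)  # a 30-plan budget is arbitrary
print best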
Code Example #5
def find_accurate_policy(trials, alpha_0=10, beta_0=10, num_exs=250, students=None,
                         test_path=TEST_PATH, make_plot=False):
    """
    Find the best alpha/beta for the teaching policy.
    Args:
        trials: Number of teaching plans to try out (including first plan).
        alpha_0: Starting alpha.
        beta_0: Starting beta.
        num_exs: The number of examples given to students per teaching plan.
        students: The students to teach; defaults to STUDENTS if none given.
        test_path: The path of the file with test qs/answers.
        make_plot: Whether to make a scatter plot of the history.
    Returns: The best alpha/beta found.
    """
    if students is None:
        students = STUDENTS
    test_qs, test_ans = plan_eval.read_test(test_path)
    history = []
    eval_policy = _create_evaluator(num_exs, students, test_qs, test_ans, history)

    experiment = Experiment([[0, ALPHA_MAX], [0, BETA_MAX]])
    # Run the start experiment and evaluate.
    experiment.historical_data.append_sample_points([eval_policy(alpha_0, beta_0)])
    for i in xrange(trials-1):
        print '--------TRIAL %d DONE--------' % (i + 1)
        alpha, beta = gp_next_points(experiment)[0]
        experiment.historical_data.append_sample_points([eval_policy(alpha, beta)])
    best = max(history)
    if make_plot:
        plot_history(max(history), history)
    return best
Code Example #6
def run_example(num_points_to_sample=20, verbose=True, **kwargs):
    """Run the example, asking MOE for ``num_points_to_sample`` optimal points to sample."""
    exp = Experiment([[0, 2], [0, 4]])  # 2D experiment, we build a tensor product domain
    # Bootstrap with some known or already sampled point(s)
    exp.historical_data.append_sample_points([
        SamplePoint([0, 0], function_to_minimize([0, 0]), 0.05),  # Iterables of the form [point, f_val, f_var] are also allowed
        ])

    # Sample num_points_to_sample points
    for _ in range(num_points_to_sample):
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = gp_next_points(exp, **kwargs)[0]  # By default we only ask for one point
        # Sample the point from our objective function, we can replace this with any function
        value_of_next_point = function_to_minimize(next_point_to_sample)

        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)

        # Add the information about the point to the experiment historical data to inform the GP
        exp.historical_data.append_sample_points([SamplePoint(next_point_to_sample, value_of_next_point, 0.01)])  # We can add some noise
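
The run_example snippet above assumes a `function_to_minimize`; any black-box objective over the 2D domain works. A toy stand-in (an assumption, not the objective from the original project) could be:

import math
import random

def function_to_minimize(point):
    """Toy objective over [0, 2] x [0, 4] with a little observation noise."""
    x, y = point
    return math.sin(x) * math.cos(y) + (x - 1.0) ** 2 + random.gauss(0.0, 0.02)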
Code Example #7
def find_new_points_to_sample(experiment, num_points=1, verbose=False):
    """Find the optimal next point(s) to sample using expected improvement (via MOE).

    :param experiment: an Experiment object containing the historical data and metadata MOE
      needs to optimize
    :type experiment: :class:`moe.easy_interface.experiment.Experiment`
    :param num_points: number of new points (experiments) that we want MOE to suggest
    :type num_points: int >= 1
    :param verbose: whether to print status messages to stdout
    :type verbose: bool
    :return: the next point(s) to sample
    :rtype: list of length ``num_points`` of coordinates (list of length ``dim``)

    """
    if verbose:
        print "Getting {0} new suggested point(s) to sample from MOE...".format(num_points)

    # Query MOE for the next points to sample
    next_points_to_sample = gp_next_points(
            experiment,
            method_route_name=GP_NEXT_POINTS_KRIGING_ROUTE_NAME,
            covariance_info=COVARIANCE_INFO,
            num_to_sample=num_points,
            optimizer_info={
                'optimizer_type': GRADIENT_DESCENT_OPTIMIZER,
                },
            )

    if verbose:
        print "Optimal points to sample next: {0}".format(next_points_to_sample)

    return next_points_to_sample
Code Example #8
def UnifiedBOExp(num_iters):
    #bounds = [ [-3,.5], [-3,.5], [0,1], [0,1], [0,1] ]

    #reduce dof for BO. Let's set hard threshold 0.95 and hard p(i) as 0.05
    bounds = [ [-3,.5], [-3,.5], [0,1] ]
    exp = Experiment(bounds)
    objs = []

    for c in range(num_iters):
        #get list of next params
        x = gp_next_points(exp, num_to_sample=1, covariance_info=moe_sq_exp)[0]
        #put into dict
        params = {'pg':x[0], 'ps':x[1], 'pi':0.05, 'pt':x[2], 'threshold':0.95}

        #setup trial
        student = Simulator(True)
        trial = UnifiedTrial(student, params)

        y = trial.run()
        exp.historical_data.append_sample_points([SamplePoint(x, y, NOISE_VAL)])
        objs.append(y)

        print "x is: "
        print x
        print "objective: "
        print y
        print "predicted best point: "
        #print moe_compute_best_pt_info(exp, moe_covariance_info)[0]
        print ["%0.2f" % i for i in moe_compute_best_pt_info(exp, moe_covariance_info)[0]]
Code Example #9
 def BO_sample(self):
     x = gp_next_points(self.exp)[0]
     y = self.model.evaluate_params_from_BO(x)
     np.set_printoptions(3)
     print np.array(x)
     print y
     self.exp.historical_data.append_sample_points([SamplePoint(x,y,0.001)])
     #print gp_hyper_opt(self.exp.historical_data.points_sampled)
     """pts = self.exp.historical_data.points_sampled
Code Example #10
    def _getNextParamFromMOE(self):
        while True:
            try:
                next_points_to_sample = simple_endpoint.gp_next_points(self.moe)[0]
                self._updateSettings(next_points_to_sample)
                break
            except Exception:
                print "Error connecting to MOE.  Retrying..."
                time.sleep(2.0)
Code Example #11
    def _getNextParamFromMOE(self):
        while True:
            try:
                next_points_to_sample = simple_endpoint.gp_next_points(self.moe)[0]
                self._updateSettings(next_points_to_sample)
                break
            except Exception:
                print "Error connecting to MOE.  Retrying..."
                time.sleep(2.0)
Code Example #12
def run_example(num_points_to_sample=200, verbose=False, **kwargs):
    b = Branin()
    bounds = b.get_meta_information()['bounds']
    dimensions = len(bounds)
    lower = np.array([i[0] for i in bounds])
    upper = np.array([i[1] for i in bounds])
    start_point = (upper - lower) / 2
    exp = Experiment(bounds)  # Experiment expects one [lower, upper] pair per dimension
    exp.historical_data.append_sample_points([
        SamplePoint(start_point, wrapper(start_point, b), 0.6)])
    for _ in range(num_points_to_sample):
        next_point_to_sample = gp_next_points(exp, **kwargs)[0]
        value_of_next_point = wrapper(next_point_to_sample, b)
        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)
        exp.historical_data.append_sample_points([SamplePoint(next_point_to_sample, value_of_next_point, 0.6)])
Code Example #13
def do_xgb_train_MOE(num_points_to_sample, X_train, y_train, verbose=True, **kwargs):
    # Finding Best XGB parameters using MOE
    xgb_parameters = {}
    xgb_parameters['objective'] = 'multi:softmax'
    xgb_parameters['silent'] = 1
    xgb_parameters['nthread'] = 4
    xgb_parameters['num_class'] = 6
    # Range of XGBoost parameters that are optimized
    exp_xgb = Experiment([
        [0.1, 1], [0.02, 1]
    ])  # eta_range = [0.1, 1]; max_depth_range = [2, 100] but it is normalized

    num_round = 5
    n_folds = 10
    cv_folds = cross_validation.StratifiedKFold(y_train, n_folds=n_folds)

    best_point = []
    best_point_value = 0.
    for _ in range(num_points_to_sample):
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = gp_next_points(
            exp_xgb, rest_host='localhost', rest_port=6543,
            **kwargs)[0]  # By default we only ask for one point

        # Sample the point from objective function
        xgb_parameters['eta'] = next_point_to_sample[0]
        xgb_parameters['max_depth'] = int(round(next_point_to_sample[1] * 100))
        acc_cv, prec_cv, rec_cv, cm_cv, cm_full_cv = xgboost_train_cross_validation(
            X_train, y_train, xgb_parameters, num_round, cv_folds)
        value_of_next_point = acc_cv
        if value_of_next_point > best_point_value:
            best_point_value = value_of_next_point
            best_point = next_point_to_sample
        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(
                str(next_point_to_sample), value_of_next_point)
        # Add the information about the point to the experiment historical data to inform the GP
        exp_xgb.historical_data.append_sample_points(
            [SamplePoint(next_point_to_sample, -value_of_next_point,
                         0.0001)])  # We can add some noise
    best_point[1] = int(round(best_point[1] * 100))
    return best_point, best_point_value
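
These do_*_MOE tuners all talk to a MOE REST server on localhost:6543. One documented way to get such a server is Yelp's Docker image (command as described in the MOE README; adjust to your environment), after which the tuner can be driven directly:

# In a shell first:  docker run -p 6543:6543 yelpmoe/latest
best_point, best_cv_accuracy = do_xgb_train_MOE(30, X_train, y_train)  # a 30-evaluation budget is arbitrary
print best_point, best_cv_accuracy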
Code Example #14
 def search(self, num_hyperparameter_trials):
     for trial in range(num_hyperparameter_trials):
         # Select next hyperparameters with MOE, rounding hyperparameters that are integers
         raw_hyperparameters = gp_next_points(self.experiment)[0]
         hyperparameters = {name: int(round(value)) if isinstance(bounds[0], int) else value
                            for (name, bounds), value in
                            zip(self.grid.items(), raw_hyperparameters)}
         # Try these hyperparameters
         model = self.model_class(**ChainMap(hyperparameters, self.fixed_hyperparameters))
         model.train(self.X_train, self.y_train, validation_data=self.validation_data)
         score = model.score(self.metric, *self.validation_data)
         # Record hyperparameters and validation loss
         self.experiment.historical_data.append_sample_points(
             [SamplePoint(point=list(hyperparameters.values()), value=score)])
         # If these hyperparameters were the best so far, store this model
         if self.maximize == (score > self.best_score):
             self.best_score = score
             self.best_model = model
             self.best_hyperparameters = hyperparameters
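
The search method above relies on attributes set up elsewhere. A minimal sketch of the constructor it assumes (the class name and defaults are assumptions that simply mirror how the attributes are used):

from collections import OrderedDict
from moe.easy_interface.experiment import Experiment

class MOESearch(object):
    def __init__(self, model_class, grid, fixed_hyperparameters, X_train, y_train,
                 validation_data, metric, maximize=True):
        self.model_class = model_class
        self.grid = OrderedDict(grid)  # hyperparameter name -> (lower_bound, upper_bound)
        self.fixed_hyperparameters = fixed_hyperparameters
        self.X_train, self.y_train = X_train, y_train
        self.validation_data = validation_data
        self.metric = metric
        self.maximize = maximize
        self.best_score = float('-inf') if maximize else float('inf')
        self.best_model = None
        self.best_hyperparameters = None
        # One [lower, upper] pair per hyperparameter, in the same order `search` iterates over
        self.experiment = Experiment([list(bounds) for bounds in self.grid.values()])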
Code Example #15
File: TreeNode.py  Project: Recmo/Codecup-2015-Ayu
def itterate():
	print str(datetime.datetime.now())
	
	# Use MOE to determine what is the point with highest Expected Improvement to use next
	next_point_to_sample = gp_next_points(exp)[0]
	print next_point_to_sample
	# By default we only ask for one point
	
	# Sample the point from our objective function, we can replace this with any function
	value_of_next_point = function_to_minimize(next_point_to_sample)
	
	print next_point_to_sample, value_of_next_point
	
	# Store the sample
	with open('TreeNode.csv', 'a') as csvfile:
		csvfile.write(", ".join([str(k)for k in next_point_to_sample + [value_of_next_point]] ) +"\n")
	
	# Add the information about the point to the experiment historical data to inform the GP
	exp.historical_data.append_sample_points([SamplePoint(next_point_to_sample, offset + value_of_next_point * scale, variance)])
Code Example #16
File: combined_example.py  Project: yonromai/MOE
def run_example(num_to_sample=20, verbose=True, testapp=None, **kwargs):
    """Run the combined example."""
    exp = Experiment([[0, 2], [0, 4]])
    # Bootstrap with some known or already sampled point(s)
    exp.historical_data.append_sample_points([
        [[0, 0], function_to_minimize([0, 0]), 0.01],  # sampled points have the form [point_as_a_list, objective_function_value, value_variance]
        ])

    # Sample points
    for i in range(num_to_sample):
        covariance_info = {}
        if i > 0 and i % 5 == 0:
            covariance_info = gp_hyper_opt(exp.historical_data.to_list_of_sample_points(), testapp=testapp, **kwargs)

            if verbose:
                print "Updated covariance_info with {0:s}".format(str(covariance_info))
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = gp_next_points(
                exp,
                covariance_info=covariance_info,
                testapp=testapp,
                **kwargs
                )[0]  # By default we only ask for one point
        # Sample the point from our objective function, we can replace this with any function
        value_of_next_point = function_to_minimize(next_point_to_sample)

        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)

        # Add the information about the point to the experiment historical data to inform the GP
        exp.historical_data.append_sample_points([[next_point_to_sample, value_of_next_point, 0.01]])  # We can add some noise

    points_to_evaluate = [[x, x] for x in numpy.arange(0, 1, 0.1)]  # uniform grid of points
    mean, var = gp_mean_var(
            exp.historical_data.to_list_of_sample_points(),  # Historical data to inform Gaussian Process
            points_to_evaluate,  # We will calculate the mean and variance of the GP at these points
            testapp=testapp,
            **kwargs
            )

    if verbose:
        print "GP mean at (0, 0), (0.1, 0.1), ...: {0:s}".format(str(mean))
Code Example #17
def run_example(num_points_to_sample=20, verbose=True, **kwargs):
    """Run the example, aksing MOE for ``num_points_to_sample`` optimal points to sample."""
    exp = Experiment([[0, 2], [0, 4]])  # 2D experiment, we build a tensor product domain
    # Bootstrap with some known or already sampled point(s)
    exp.historical_data.append_sample_points([
        SamplePoint([0, 0], function_to_minimize([0, 0]), 0.05),  # Iterables of the form [point, f_val, f_var] are also allowed
        ])

    # Sample num_points_to_sample points
    for _ in range(num_points_to_sample):
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = gp_next_points(exp, **kwargs)[0]  # By default we only ask for one point
        # Sample the point from our objective function, we can replace this with any function
        value_of_next_point = function_to_minimize(next_point_to_sample)

        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)

        # Add the information about the point to the experiment historical data to inform the GP
        exp.historical_data.append_sample_points([SamplePoint(next_point_to_sample, value_of_next_point, 0.01)])  # We can add some noise
Code Example #18
def do_svc_linear_MOE(num_points_to_sample, X_train, y_train, verbose=True, **kwargs):
    exp_svc_linear = Experiment([[1.0000e-05, 1.0]])  # C_range = [0.1, 10000] is normalized to [1e-05, 1]
    best_point = []
    best_point_value = 0.
    for _ in range(num_points_to_sample):
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = gp_next_points(exp_svc_linear, rest_host='localhost', rest_port=6543, **kwargs)[0]  # By default we only ask for one point
        # Sample the point from objective function
        C = next_point_to_sample[0] * 10000.0
        svc_linear = svm.LinearSVC(penalty='l2', loss='squared_hinge', dual=True, tol=0.0001, C=C, multi_class='ovr',
            fit_intercept=True, intercept_scaling=1, class_weight=None, verbose=0, random_state=None, max_iter=1000)
        score_cv = cross_validation.cross_val_score(svc_linear, X_train, y_train, cv=10, scoring='accuracy')
        value_of_next_point = np.mean(score_cv) 
        if value_of_next_point > best_point_value:
            best_point_value = value_of_next_point
            best_point = next_point_to_sample
        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)
        # Add the information about the point to the experiment historical data to inform the GP;
        # the minus sign in front of value_of_next_point is there because MOE minimizes, while we want to maximize the HAR classification accuracy
        exp_svc_linear.historical_data.append_sample_points([SamplePoint(next_point_to_sample, -value_of_next_point, .000001)])  # We can add some noise
Code Example #19
def do_svc_rbf_MOE(num_points_to_sample, X_train, y_train, verbose=True, **kwargs):
    exp_svc_rbf = Experiment([[1.0000e-05, 1], [1.0000e-08, 1]])  # C_range = [0.1, 10000] is normalized to [1e-05, 1]; gamma_range = [1e-08, 1]
    best_point = []
    best_point_value = 0.
    for _ in range(num_points_to_sample):
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = gp_next_points(exp_svc_rbf, rest_host='localhost', rest_port=6543, **kwargs)[0]  # By default we only ask for one point
        # Sample the point from objective function
        C = next_point_to_sample[0] * 10000.0
        gamma = next_point_to_sample[1]
        svc_rbf = svm.SVC(C=C, kernel='rbf', degree=3, gamma=gamma, coef0=0.0, shrinking=True, probability=False, tol=0.001, cache_size=200,
                 class_weight=None, verbose=False, max_iter=-1, random_state=None)
        score_cv = cross_validation.cross_val_score(svc_rbf, X_train, y_train, cv=10, scoring='accuracy')
        value_of_next_point = np.mean(score_cv)
        if value_of_next_point > best_point_value:
            best_point_value = value_of_next_point
            best_point = next_point_to_sample
        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)
        # Add the information about the point to the experiment historical data to inform the GP
        exp_svc_rbf.historical_data.append_sample_points([SamplePoint(next_point_to_sample, -value_of_next_point, 0.0001)])  # We can add some noise
Code Example #20
def do_abc_MOE(num_points_to_sample, X_train, y_train, verbose=True, **kwargs):
    exp_abc = Experiment([[0.005, 1], [0.1, 1]])  # n_estimators_range = [5, 1000] is normalized; learning_rate_range = [0.1, 1]
    best_point = []
    best_point_value = 0.    
    for _ in range(num_points_to_sample):
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = gp_next_points(exp_abc, rest_host='localhost', rest_port=6543, **kwargs)[0]  # By default we only ask for one point
        # Sample the point from objective function
        n_estimators = int(round(next_point_to_sample[0] * 1000.0))
        learning_rate = next_point_to_sample[1]
        abc = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2), n_estimators=n_estimators, learning_rate=learning_rate)
        score_cv = cross_validation.cross_val_score(abc, X_train, y_train, cv=10, scoring='accuracy')
        value_of_next_point = np.mean(score_cv)
        if value_of_next_point > best_point_value:
            best_point_value = value_of_next_point
            best_point = next_point_to_sample          
        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)
        # Add the information about the point to the experiment historical data to inform the GP
        exp_abc.historical_data.append_sample_points([SamplePoint(next_point_to_sample, -value_of_next_point, 0.0001)])  # We can add some noise
    best_point[0] = int(round(best_point[0] * 1000))        
    return best_point, best_point_value
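
As in the other tuners, the value handed to MOE is negated because MOE minimizes, while the cross-validation accuracy we want to maximize is tracked separately in best_point_value. A hypothetical call, again assuming a MOE server on localhost:6543:

best_abc_params, best_abc_accuracy = do_abc_MOE(40, X_train, y_train)  # a 40-evaluation budget is arbitrary
print best_abc_params, best_abc_accuracy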
Code Example #21
def itterate():
    print str(datetime.datetime.now())

    # Use MOE to determine what is the point with highest Expected Improvement to use next
    next_point_to_sample = gp_next_points(exp)[0]
    print next_point_to_sample
    # By default we only ask for one point
    x = [(n - argoffset) / argscale for n in next_point_to_sample]

    # Sample the point from our objective function, we can replace this with any function
    value_of_next_point = function_to_minimize(next_point_to_sample)

    print x, value_of_next_point

    # Store the sample
    with open('moveheuristic.csv', 'a') as csvfile:
        csvfile.write(", ".join([str(k)
                                 for k in x + [value_of_next_point]]) + "\n")

    # Add the information about the point to the experiment historical data to inform the GP
    exp.historical_data.append_sample_points([
        SamplePoint(next_point_to_sample, offset + value_of_next_point * scale,
                    variance)
    ])
Code Example #22
File: perov_moe.py  Project: HGeerlings/eratosthenes
def run_example(num_points_to_sample=1000, verbose=True, **kwargs):
    """Run the example, asking MOE for ``num_points_to_sample`` optimal points to sample."""

    exp = Experiment([[1, 52], [0, 6], [1, 52]])  # 3D experiment, we build a tensor product domain
    # Bootstrap with some known or already sampled point(s)
    exp.historical_data.append_sample_points([
        SamplePoint([26, 2, 46], get_fitness([26, 2, 46]), 0.5),  # Iterables of the form [point, f_val, f_var] are also allowed
        ])
    # Sample num_points_to_sample points
    for i in range(num_points_to_sample):
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = map(round, gp_next_points(exp, **kwargs)[0])  # in [A, X, B] form, rounded integers
        value_of_next_point = get_fitness(next_point_to_sample)

        if verbose:
            if in_results(next_point_to_sample):
                print '***', "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point), '***'
            else:
                print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)

        bank[i,0:3] = next_point_to_sample
        bank[i,3]  = value_of_next_point
        # Add the information about the point to the experiment historical data to inform the GP
        exp.historical_data.append_sample_points([SamplePoint(next_point_to_sample, value_of_next_point, 0.01)])  # We can add some noise
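
`get_fitness`, `in_results`, and `bank` are defined elsewhere in that project; `bank` is evidently a pre-allocated results array, for example (a hypothetical setup):

import numpy as np
bank = np.zeros((1000, 4))  # one row per sample: A, X, B, fitness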
Code Example #23
 def get_next_hyperparameters(self):
     try:
         return gp_next_points(self.experiment)[0]
     except BadStatusLine:
         raise RuntimeError('MOE server is not running!')
Code Example #24
File: combined_example.py  Project: thurday/MOE
def run_example(num_to_sample=20, verbose=True, testapp=None, gp_next_points_kwargs=None, gp_hyper_opt_kwargs=None, gp_mean_var_kwargs=None, **kwargs):
    """Run the combined example.

    :param num_to_sample: Number of points for MOE to suggest and then sample [20]
    :type num_to_sample: int > 0
    :param verbose: Whether to print information to the screen [True]
    :type verbose: bool
    :param testapp: Whether to use a supplied test pyramid application or a rest server [None]
    :type testapp: Pyramid test application
    :param gp_next_points_kwargs: Optional kwargs to pass to gp_next_points endpoint
    :type gp_next_points_kwargs: dict
    :param gp_hyper_opt_kwargs: Optional kwargs to pass to the gp_hyper_opt endpoint
    :type gp_hyper_opt_kwargs: dict
    :param gp_mean_var_kwargs: Optional kwargs to pass to the gp_mean_var endpoint
    :type gp_mean_var_kwargs: dict
    :param kwargs: Optional kwargs to pass to all endpoints
    :type kwargs: dict

    """
    # Set and combine all optional kwargs
    # Note that the more specific kwargs take precedence (and will override general kwargs)
    if gp_next_points_kwargs is None:
        gp_next_points_kwargs = {}
    else:
        gp_next_points_kwargs = dict(kwargs.items() + gp_next_points_kwargs.items())

    if gp_hyper_opt_kwargs is None:
        gp_hyper_opt_kwargs = {}
    else:
        gp_hyper_opt_kwargs = dict(kwargs.items() + gp_hyper_opt_kwargs.items())

    if gp_mean_var_kwargs is None:
        gp_mean_var_kwargs = {}
    else:
        gp_mean_var_kwargs = dict(kwargs.items() + gp_mean_var_kwargs.items())

    exp = Experiment([[0, 2], [0, 4]])
    # Bootstrap with some known or already sampled point(s)
    exp.historical_data.append_sample_points([
        [[0, 0], function_to_minimize([0, 0]), 0.01],  # sampled points have the form [point_as_a_list, objective_function_value, value_variance]
        ])

    # Sample points
    for i in range(num_to_sample):
        covariance_info = {}
        if i > 0 and i % 5 == 0:
            covariance_info = gp_hyper_opt(exp.historical_data.to_list_of_sample_points(), testapp=testapp, **gp_hyper_opt_kwargs)

            if verbose:
                print "Updated covariance_info with {0:s}".format(str(covariance_info))
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = gp_next_points(
                exp,
                covariance_info=covariance_info,
                testapp=testapp,
                **gp_next_points_kwargs
                )[0]  # By default we only ask for one point
        # Sample the point from our objective function, we can replace this with any function
        value_of_next_point = function_to_minimize(next_point_to_sample)

        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)

        # Add the information about the point to the experiment historical data to inform the GP
        exp.historical_data.append_sample_points([[next_point_to_sample, value_of_next_point, 0.01]])  # We can add some noise

    points_to_evaluate = [[x, x] for x in numpy.arange(0, 1, 0.1)]  # uniform grid of points
    mean, var = gp_mean_var(
            exp.historical_data.to_list_of_sample_points(),  # Historical data to inform Gaussian Process
            points_to_evaluate,  # We will calculate the mean and variance of the GP at these points
            testapp=testapp,
            **gp_mean_var_kwargs
            )

    if verbose:
        print "GP mean at (0, 0), (0.1, 0.1), ...: {0:s}".format(str(mean))
Code Example #25
File: test_moe.py  Project: kyleabeauchamp/DBayes
    print(sigma0.value)
    for k, temperature in enumerate(temperatures):
        observed = measurements[k]
        predicted = indexed_data.density.ix[(q0_val, sigma0_val, temperature)]
        tau = (observed * relative_error) ** -2.
        var = pymc.Normal("obs_%d" % k, mu=predicted, tau=tau, observed=True, value=observed)
        print(predicted, observed, tau, var.logp)
        variables.append(var)
    
    model = pymc.MCMC(variables)
    return model.logp

a, b = data[keys].iloc[0].values
logp = objective(a, b)

get_bounds = lambda variable: (variable.parents["lower"], variable.parents["upper"])

experiment_bounds = [get_bounds(q0), get_bounds(sigma0)]
exp = Experiment(experiment_bounds)

for (q0_val, sigma0_val) in data.set_index(keys).index:
    value = objective(q0_val, sigma0_val)
    print(q0_val, sigma0_val, value)
    error = 0.001
    exp.historical_data.append_sample_points([[(q0_val, sigma0_val), value, error]])


covariance_info = gp_hyper_opt(exp.historical_data.to_list_of_sample_points())
next_point_to_sample = gp_next_points(exp, covariance_info=covariance_info)
print next_point_to_sample
Code Example #26
File: combined_example.py  Project: jdc08161063/qKG
def run_example(
        num_to_sample=20,
        verbose=True,
        testapp=None,
        gp_next_points_kwargs=None,
        gp_hyper_opt_kwargs=None,
        gp_mean_var_kwargs=None,
        **kwargs
):
    """Run the combined example.

    :param num_to_sample: Number of points for MOE to suggest and then sample [20]
    :type num_to_sample: int > 0
    :param verbose: Whether to print information to the screen [True]
    :type verbose: bool
    :param testapp: Whether to use a supplied test pyramid application or a rest server [None]
    :type testapp: Pyramid test application
    :param gp_next_points_kwargs: Optional kwargs to pass to gp_next_points endpoint
    :type gp_next_points_kwargs: dict
    :param gp_hyper_opt_kwargs: Optional kwargs to pass to the gp_hyper_opt endpoint
    :type gp_hyper_opt_kwargs: dict
    :param gp_mean_var_kwargs: Optional kwargs to pass to the gp_mean_var endpoint
    :type gp_mean_var_kwargs: dict
    :param kwargs: Optional kwargs to pass to all endpoints
    :type kwargs: dict

    """
    # Set and combine all optional kwargs
    # Note that the more specific kwargs take precedence (and will override general kwargs)
    if gp_next_points_kwargs is None:
        gp_next_points_kwargs = {}
    gp_next_points_kwargs = dict(kwargs.items() + gp_next_points_kwargs.items())

    if gp_hyper_opt_kwargs is None:
        gp_hyper_opt_kwargs = {}
    gp_hyper_opt_kwargs = dict(kwargs.items() + gp_hyper_opt_kwargs.items())

    if gp_mean_var_kwargs is None:
        gp_mean_var_kwargs = {}
    gp_mean_var_kwargs = dict(kwargs.items() + gp_mean_var_kwargs.items())

    exp = Experiment([[0, 2], [0, 4]])
    # Bootstrap with some known or already sampled point(s)
    exp.historical_data.append_sample_points([
        [[0, 0], function_to_minimize([0, 0]), 0.01],  # sampled points have the form [point_as_a_list, objective_function_value, value_variance]
        ])

    # Sample points
    for i in range(num_to_sample):
        covariance_info = {}
        if i > 0 and i % 5 == 0:
            covariance_info = gp_hyper_opt(exp.historical_data.to_list_of_sample_points(), testapp=testapp, **gp_hyper_opt_kwargs)

            if verbose:
                print "Updated covariance_info with {0:s}".format(str(covariance_info))
        # Use MOE to determine what is the point with highest Expected Improvement to use next
        next_point_to_sample = gp_next_points(
                exp,
                covariance_info=covariance_info,
                testapp=testapp,
                **gp_next_points_kwargs
                )[0]  # By default we only ask for one point
        # Sample the point from our objective function, we can replace this with any function
        value_of_next_point = function_to_minimize(next_point_to_sample)

        if verbose:
            print "Sampled f({0:s}) = {1:.18E}".format(str(next_point_to_sample), value_of_next_point)

        # Add the information about the point to the experiment historical data to inform the GP
        exp.historical_data.append_sample_points([[next_point_to_sample, value_of_next_point, 0.01]])  # We can add some noise

    points_to_evaluate = [[x, x] for x in numpy.arange(0, 1, 0.1)]  # uniform grid of points
    mean, var = gp_mean_var(
            exp.historical_data.to_list_of_sample_points(),  # Historical data to inform Gaussian Process
            points_to_evaluate,  # We will calculate the mean and variance of the GP at these points
            testapp=testapp,
            **gp_mean_var_kwargs
            )

    if verbose:
        print "GP mean at (0, 0), (0.1, 0.1), ...: {0:s}".format(str(mean))
Code Example #27
                          mu=predicted,
                          tau=tau,
                          observed=True,
                          value=observed)
        print(predicted, observed, tau, var.logp)
        variables.append(var)

    model = pymc.MCMC(variables)
    return model.logp


a, b = data[keys].iloc[0].values
logp = objective(a, b)

get_bounds = lambda variable: (variable.parents["lower"], variable.parents[
    "upper"])

experiment_bounds = [get_bounds(q0), get_bounds(sigma0)]
exp = Experiment(experiment_bounds)

for (q0_val, sigma0_val) in data.set_index(keys).index:
    value = objective(q0_val, sigma0_val)
    print(q0_val, sigma0_val, value)
    error = 0.001
    exp.historical_data.append_sample_points([[(q0_val, sigma0_val), value,
                                               error]])

covariance_info = gp_hyper_opt(exp.historical_data.to_list_of_sample_points())
next_point_to_sample = gp_next_points(exp, covariance_info=covariance_info)
print next_point_to_sample
Code Example #28
File: moetest.py  Project: RWArunde/ideal-guacamole
from moe.easy_interface.experiment import Experiment
from moe.easy_interface.simple_endpoint import gp_next_points
from moe.optimal_learning.python.data_containers import SamplePoint
import math

def f(v, w, x, y, z):
    return (x + y)**2 + 4*x - 2*y + math.sin(v) + 0.3 * math.cos(z) + (w - 3)**2

exp = Experiment([[-10,10],[-10,10],[-10,10],[-10,10],[-10,10]])

for c in range(100):
    try:
        x = gp_next_points(exp)
    except:
        print "500"
        continue
    y = f(x[0][0], x[0][1], x[0][2], x[0][3], x[0][4])
    print x, y
    exp.historical_data.append_sample_points([SamplePoint(x[0], y, 0.001)])  # x[0] is the single suggested point inside the returned list
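
After a loop like this, the best observed sample can be read back out of the experiment's historical data, for example (a sketch; the attribute names follow moe.optimal_learning.python.data_containers.HistoricalData and are worth checking against your MOE version):

import numpy as np

best_idx = np.argmin(exp.historical_data.points_sampled_value)
print exp.historical_data.points_sampled[best_idx], exp.historical_data.points_sampled_value[best_idx]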