def test_kmeans_grid_search_over_validation_datasets(self):
    """
    test_kmeans_grid_search_over_validation_datasets performs the following:
    a. build H2O kmeans models using grid search.
    b. For each model built using grid search, print out the total_sum_squares errors.
    c. If an exception was thrown, mark the test as failed.
    """
    print("*******************************************************************************************")
    print("test_kmeans_grid_search_over_validation_datasets for kmeans ")
    h2o.cluster_info()

    print("Hyper-parameters used here are {0}".format(self.hyper_params))

    # start grid search
    grid_model = H2OGridSearch(H2OKMeansEstimator(), hyper_params=self.hyper_params)
    grid_model.train(x=self.x_indices, training_frame=self.training1_data)

    for each_model in grid_model:
        summary_list = each_model._model_json["output"]["validation_metrics"]

        if (summary_list is not None) and (summary_list._metric_json is not None):
            grid_model_metrics = summary_list._metric_json['totss']
            print("total sum of squares of a model is: {0}".format(grid_model_metrics))
        else:
            print('model._model_json["output"]["validation_metrics"] of a model is None for some reason....')
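# NOTE (assumption): these snippets omit their import header.  A minimal sketch of what they appear to
# rely on is given here; the module paths follow the h2o-3 Python package layout and "from tests import
# pyunit_utils" is the h2o-3 test-harness convention -- both are assumptions that may differ between
# versions, so adjust to your installation.
import sys
import time
import random
import numpy as np

import h2o
from h2o.grid.grid_search import H2OGridSearch
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.kmeans import H2OKMeansEstimator
from h2o.estimators.naive_bayes import H2ONaiveBayesEstimator
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
from tests import pyunit_utils  # assumption: h2o-3 test utility module providing the helpers used below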
def test_kmeans_fields(self):
    """
    test_kmeans_fields performs the following:
    a. build one H2O kmeans model with good parameters (good_params_list/good_model_params) and one
       with bad parameters (bad_params_list/bad_model_params);
    b. print out the types of the two models' _model_json['output']['model_summary'] fields and note
       that they are not equal.
    """
    print("*******************************************************************************************")
    h2o.cluster_info()

    good_params_list = {'init': 'Furthest', 'seed': 1464887902, 'max_iterations': 10, 'k': 10}
    good_model_params = {'max_runtime_secs': 0.04326415543999999}
    good_model = H2OKMeansEstimator(**good_params_list)
    good_model.train(x=self.x_indices, training_frame=self.training1_data, **good_model_params)

    bad_params_list = {'init': 'Random', 'seed': 1464888628, 'k': 6, 'max_iterations': 0}
    bad_model_params = {'max_runtime_secs': 0.007948218600000001}
    bad_model = H2OKMeansEstimator(**bad_params_list)
    bad_model.train(x=self.x_indices, training_frame=self.training1_data, **bad_model_params)

    print("good_model._model_json['output']['model_summary'] type is {0}. "
          "bad_model._model_json['output']['model_summary'] type is "
          "{1}".format(type(good_model._model_json['output']['model_summary']),
                       type(bad_model._model_json['output']['model_summary'])))
    print("They are not equal for some reason....")
def test_deeplearning_fieldnames(self):
    """
    test_deeplearning_fieldnames performs the following:
    a. build a deeplearning model with good parameters in good_params_list and good_model_params;
    b. build a deeplearning model with bad parameters in bad_params_list and bad_model_params;
    c. compare the lengths of the two models' _model_json['output']['scoring_history'].cell_values fields;
    d. print out the two fields.
    """
    print("*******************************************************************************************")
    h2o.cluster_info()

    good_params_list = {'epochs': 10.0, 'seed': 1464835583, 'nfolds': 5, 'hidden_dropout_ratios': -0.07120188,
                        'fold_assignment': 'AUTO', 'hidden': 6, 'distribution': 'gaussian'}
    good_model_params = {'max_runtime_secs': 108.65307012692}
    good_model = H2ODeepLearningEstimator(**good_params_list)
    good_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data, **good_model_params)

    bad_params_list = {'hidden': 6, 'epochs': -2.0, 'seed': 1464825861, 'fold_assignment': 'AUTO',
                       'hidden_dropout_ratios': -0.07120188, 'nfolds': 5, 'distribution': 'gaussian'}
    bad_model_params = {'max_runtime_secs': 98.58063693984}
    bad_model = H2ODeepLearningEstimator(**bad_params_list)
    bad_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data, **bad_model_params)

    print("good_model._model_json['output']['scoring_history'].cell_values length is {0}. "
          "bad_model._model_json['output']['scoring_history'].cell_values length is "
          "{1}".format(len(good_model._model_json['output']['scoring_history'].cell_values),
                       len(bad_model._model_json['output']['scoring_history'].cell_values)))
    print("They are not equal for some reason....")
    print("Good model cell values are:\n {0}\n Bad model cell values are:\n "
          "{1}\n".format(good_model._model_json['output']['scoring_history'].cell_values,
                         bad_model._model_json['output']['scoring_history'].cell_values))
def test2_illegal_name_value(self):
    """
    This function makes sure that only valid parameter names and values are included in the
    hyper-parameter dict for grid search.  It randomly goes into the hyper_parameters that we have
    specified and either changes a hyper-parameter name or inserts an illegal value into a
    hyper-parameter list, then checks to make sure the grid search fails with error messages.

    The following error conditions will be created depending on the error_number generated:

    error_number = 0: randomly alter the name of a hyper-parameter;
    error_number = 1: randomly choose a hyper-parameter and remove all elements in its list;
    error_number = 2: add randomly generated new hyper-parameter names with a random list;
    error_number other: randomly choose a hyper-parameter and insert an illegal type into it.

    :return: None
    """
    print("*******************************************************************************************")
    print("test2_illegal_name_value for GLM " + self.family)
    h2o.cluster_info()

    error_number = np.random.random_integers(0, 3, 1)   # randomly choose an error
    error_hyper_params = \
        pyunit_utils.insert_error_grid_search(self.hyper_params, self.gridable_parameters, self.gridable_types,
                                              error_number[0])
    print("test2_illegal_name_value: the bad hyper-parameters are: ")
    print(error_hyper_params)

    # copied from Eric to catch execution run errors and not quit
    try:
        grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                                   hyper_params=error_hyper_params)
        grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        if error_number[0] > 2:
            # grid search should not fail in this case and should build the same number of models as test1.
            if not (len(grid_model) == self.correct_model_number):
                self.test_failed += 1
                self.test_failed_array[self.test_num] = 1
                print("test2_illegal_name_value failed. Number of models generated is incorrect.")
            else:
                print("test2_illegal_name_value passed.")
        else:
            # other errors should cause exceptions to be thrown; if not, something is wrong.
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print("test2_illegal_name_value failed: exception should have been thrown for illegal "
                  "parameter name or empty hyper-parameter list but was not!")
    except:
        print("test2_illegal_name_value passed: exception is thrown for illegal parameter name or empty "
              "hyper-parameter list.")

    self.test_num += 1
def test_rf_gridsearch_sorting_metrics(self):
    """
    test_rf_gridsearch_sorting_metrics performs the following:
    a. build H2O random forest models using grid search.  No model is built for bad hyper-parameter
       values; we should instead get a warning/error message printed out.
    b. check and make sure that the models are returned sorted by the correct cross-validation metrics.
    """
    print("*******************************************************************************************")
    print("test_rf_gridsearch_sorting_metrics for random forest ")
    h2o.cluster_info()

    try:
        print("Hyper-parameters used here are {0}".format(self.final_hyper_params))

        # start grid search
        grid_model = H2OGridSearch(H2ORandomForestEstimator(nfolds=self.nfolds, seed=self.seed,
                                                            score_tree_interval=0),
                                   hyper_params=self.final_hyper_params)
        grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        result_table = grid_model._grid_json["summary_table"]
        stopping_metric_index = result_table.col_header.index(self.training_metric)
        model_index = 0
        grid_model_metrics = []

        diff = 0        # difference between gridsearch model metrics and manually extracted model metrics
        diff_train = 0  # difference between training and cross-validation metrics

        # grab the performance metric for each model of grid_model and collect the correct sorting metrics by hand
        for each_model in grid_model:
            grid_model_metric = result_table.cell_values[model_index][stopping_metric_index]
            grid_model_metrics.append(grid_model_metric)

            manual_metric = each_model._model_json["output"]["cross_validation_metrics"]._metric_json["logloss"]
            diff += abs(grid_model_metric - manual_metric)

            manual_training_metric = each_model._model_json["output"]["training_metrics"]._metric_json["logloss"]
            diff_train += abs(grid_model_metric - manual_training_metric)

            print("grid model logloss: {0}, grid model training logloss: "
                  "{1}".format(grid_model_metric, manual_training_metric))

            model_index += 1

        if (diff > self.diff) or not (grid_model_metrics == sorted(grid_model_metrics)) or (diff_train < self.diff):
            self.test_failed = 1
            print("test_rf_gridsearch_sorting_metrics for random forest has failed!")

        if self.test_failed == 0:
            print("test_rf_gridsearch_sorting_metrics for random forest has passed!")
    except:
        if self.possible_number_models > 0:
            print("test_rf_gridsearch_sorting_metrics for random forest failed: exception was thrown for "
                  "no reason.")
            self.test_failed += 1
def test1_glm_grid_search_over_params(self):
    """
    This test exercises the gridsearch over all of its griddable parameters.  Furthermore, for each
    model built by gridsearch, we build an equivalent model manually with the same parameters and
    compare the gridsearch model with our manually built model to make sure their performances are
    close.
    """
    print("*******************************************************************************************")
    print("test1_glm_grid_search_over_params for GLM " + self.family)
    h2o.cluster_info()

    # start grid search
    grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                               hyper_params=self.hyper_params)
    grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

    self.correct_model_number = len(grid_model)     # store number of models built
    if not (self.correct_model_number == self.possible_number_models):
        self.test_failed += 1
        self.test_failed_array[self.test_num] = 1
        print("test1_glm_grid_search_over_params for GLM failed: number of models built by gridsearch "
              "does not equal all possible combinations of hyper-parameters")

    if self.test_failed_array[self.test_num] == 0:      # only proceed if previous test passed
        # add parameters into params_dict; use this to build parameters for the manual model
        params_dict = {}
        params_dict["family"] = self.family
        params_dict["nfolds"] = self.nfolds

        # compare performance of model built by gridsearch with manually built model
        for each_model in grid_model:
            # grab parameters used by grid search and build a dict out of them
            params_list = pyunit_utils.extract_used_params(self.hyper_params.keys(), each_model.params,
                                                           params_dict)
            manual_model = H2OGeneralizedLinearEstimator(**params_list)
            manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

            # compute and compare test metrics between the two models
            test_grid_model_metrics = each_model.model_performance(test_data=self.training2_data)
            test_manual_model_metrics = manual_model.model_performance(test_data=self.training2_data)

            # just compare the MSE in this case within tolerance:
            if abs(test_grid_model_metrics.mse() - test_manual_model_metrics.mse()) > self.allowed_diff:
                self.test_failed += 1                   # count total number of tests that have failed
                self.test_failed_array[self.test_num] += 1
                print("test1_glm_grid_search_over_params for GLM failed: grid search model and manually "
                      "built H2O model differ too much in test MSE!")
                break

    self.test_num += 1
    if self.test_failed == 0:
        print("test1_glm_grid_search_over_params for GLM has passed!")
def test_naivebayes_grid_search_over_params(self):
    """
    test_naivebayes_grid_search_over_params performs the following: run a gridsearch model and then
    build each model manually and see if we receive the same error messages.
    """
    print("*******************************************************************************************")
    print("test_naivebayes_grid_search_over_params for naivebayes ")
    h2o.cluster_info()

    print("Hyper-parameters used here are {0}".format(self.final_hyper_params))

    # start grid search
    grid_model = H2OGridSearch(H2ONaiveBayesEstimator(nfolds=self.nfolds),
                               hyper_params=self.final_hyper_params)
    grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

    # add parameters into params_dict; use this to manually build models, one at a time
    params_dict = dict()
    params_dict["nfolds"] = self.nfolds

    manual_model = [None] * len(grid_model)
    model_index = 0

    for each_model in grid_model:
        params_list = grid_model.get_hyperparams_dict(each_model._id)
        params_list.update(params_dict)

        model_params = dict()

        # need to take max_runtime_secs out of the model parameters; it is now set in .train()
        if "max_runtime_secs" in params_list:
            model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
            max_runtime = params_list["max_runtime_secs"]
            del params_list["max_runtime_secs"]
        else:
            max_runtime = 0

        if "validation_frame" in params_list:
            model_params["validation_frame"] = params_list["validation_frame"]
            del params_list["validation_frame"]

        if "eps_prob" in params_list:
            model_params["eps_prob"] = params_list["eps_prob"]
            del params_list["eps_prob"]

        if "min_prob" in params_list:
            model_params["min_prob"] = params_list["min_prob"]
            del params_list["min_prob"]

        manual_model[model_index] = H2ONaiveBayesEstimator(**params_list)
        manual_model[model_index].train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data,
                                        **model_params)
        model_index += 1
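# The "pull max_runtime_secs / validation_frame / ... out of the constructor kwargs and pass them to
# .train() instead" pattern above reappears almost verbatim in the GBM, kmeans, and deeplearning tests
# below.  A small helper along these lines could factor it out; split_train_params() is a hypothetical
# name and is not part of pyunit_utils or the h2o package.
def split_train_params(params_list, train_only_keys=("max_runtime_secs", "validation_frame")):
    """Split a flat hyper-parameter dict into (constructor kwargs, .train() kwargs)."""
    ctor_params = dict(params_list)     # copy so the caller's dict is left untouched
    train_params = dict()
    for key in train_only_keys:
        if key in ctor_params:
            train_params[key] = ctor_params.pop(key)
    return ctor_params, train_params

# Hypothetical usage inside the loop above:
#   ctor_params, model_params = split_train_params(params_list,
#                                                  ("max_runtime_secs", "validation_frame", "eps_prob", "min_prob"))
#   manual_model[model_index] = H2ONaiveBayesEstimator(**ctor_params)
#   manual_model[model_index].train(x=self.x_indices, y=self.y_index,
#                                   training_frame=self.training1_data, **model_params)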
def test1_glm_random_grid_search_model_number(self, metric_name):
    """
    This test makes sure the randomized gridsearch will generate all models specified in the
    hyper-parameters if no stopping condition is given in the search criteria.  We compare the
    randomized gridsearch against the normal gridsearch to make sure they generate the same number of
    models and that their performances are similar.

    :param metric_name: string denoting which metric the grid search models should be sorted by
    :return: None
    """
    print("*******************************************************************************************")
    print("test1_glm_random_grid_search_model_number for GLM " + self.family)
    h2o.cluster_info()

    # set up our stopping condition here: random discrete and find all models
    search_criteria = {'strategy': 'RandomDiscrete', "stopping_rounds": 0, "seed": round(time.time())}
    print("GLM Binomial grid search_criteria: {0}".format(search_criteria))

    # fire off random grid-search
    random_grid_model = \
        H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                      hyper_params=self.hyper_params, search_criteria=search_criteria)
    random_grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

    # compare number of models built from both gridsearches
    if not (len(random_grid_model) == self.possible_number_models):
        self.test_failed += 1
        self.test_failed_array[self.test_num] = 1
        print("test1_glm_random_grid_search_model_number for GLM: failed, number of models generated is "
              "incorrect; possible model number {0} and randomized gridsearch model number {1} are not "
              "equal.".format(self.possible_number_models, len(random_grid_model)))

    if self.test_failed_array[self.test_num] == 0:
        print("test1_glm_random_grid_search_model_number for GLM: passed!")

    self.test_num += 1
    sys.stdout.flush()

    # set max_allowed_runtime as the total run time needed to build all models * (1 + fraction)
    self.max_grid_runtime = pyunit_utils.find_grid_runtime(random_grid_model.models)
def test_kmeans_hangup(self):
    """
    train a kmeans model with some parameters that will make the system hang.
    """
    print("*******************************************************************************************")
    h2o.cluster_info()

    good_params_list = {'seed': 1464837706, 'max_iterations': 50, 'init': 'Furthest', 'k': 5}
    good_model_params = {'max_runtime_secs': 0.001}
    good_model = H2OKMeansEstimator(**good_params_list)
    good_model.train(x=self.x_indices, training_frame=self.training1_data, **good_model_params)
    print("Finished.")
def test2_glm_random_grid_search_max_model(self):
    """
    This test checks the stopping condition max_models in the randomized gridsearch.  The max_models
    parameter is randomly generated.  If it is higher than the actual number of models that can be
    generated with the current hyper-parameter space, randomized grid search should generate all the
    models.  Otherwise, grid search should return a number of models equal to the max_models setting.
    """
    print("*******************************************************************************************")
    print("test2_glm_random_grid_search_max_model for GLM " + self.family)
    h2o.cluster_info()

    # set up our stopping condition here
    self.max_model_number = random.randint(1, int(self.allowed_scaled_model_number * self.possible_number_models))
    search_criteria = {'strategy': 'RandomDiscrete', 'max_models': self.max_model_number,
                       "seed": round(time.time())}
    print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))
    print("Possible number of models built is {0}".format(self.possible_number_models))

    # fire off random grid-search
    grid_model = \
        H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                      hyper_params=self.hyper_params, search_criteria=search_criteria)
    grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

    number_model_built = len(grid_model)    # count actual number of models built
    print("Maximum model limit is {0}. Number of models built is {1}".format(search_criteria["max_models"],
                                                                             number_model_built))

    if self.possible_number_models >= self.max_model_number:    # stopping condition restricts model number
        if not (number_model_built == self.max_model_number):
            print("test2_glm_random_grid_search_max_model: failed. Number of models built {0} "
                  "does not match stopping condition number {1}.".format(number_model_built,
                                                                         self.max_model_number))
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
        else:
            print("test2_glm_random_grid_search_max_model for GLM: passed.")
    else:   # stopping condition is too loose
        if not (number_model_built == self.possible_number_models):
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print("test2_glm_random_grid_search_max_model: failed. Number of models built {0} does not "
                  "equal the possible model number {1}.".format(number_model_built,
                                                                self.possible_number_models))
        else:
            print("test2_glm_random_grid_search_max_model for GLM: passed.")

    self.test_num += 1
    sys.stdout.flush()
def test_naivebayes_grid_search_over_params(self):
    """
    test_naivebayes_grid_search_over_params performs the following: run a gridsearch model and then
    build each model manually and see if we receive the same error messages.
    """
    print("*******************************************************************************************")
    print("test_naivebayes_grid_search_over_params for naivebayes ")
    h2o.cluster_info()

    print("Hyper-parameters used here are {0}".format(self.final_hyper_params))

    # # start grid search
    # grid_model = H2OGridSearch(H2ONaiveBayesEstimator(nfolds=self.nfolds),
    #                            hyper_params=self.final_hyper_params)
    # grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

    # add parameters into params_dict; use this to manually build models, one at a time
    params_dict = dict()
    params_dict["nfolds"] = self.nfolds

    manual_model = [None] * self.possible_number_models
    model_index = 0

    for fold_v in self.final_hyper_params["fold_assignment"]:
        for max_t in self.final_hyper_params["max_runtime_secs"]:
            for laplace_v in self.final_hyper_params["laplace"]:
                params_list = dict()
                params_list["fold_assignment"] = fold_v
                params_list["max_runtime_secs"] = max_t
                params_list["laplace"] = laplace_v
                print("Hyper-parameters used here are {0}\n".format(params_list))

                params_list.update(params_dict)

                model_params = dict()

                # need to take max_runtime_secs out of the model parameters; it is now set in .train()
                if "max_runtime_secs" in params_list:
                    model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                    max_runtime = params_list["max_runtime_secs"]
                    del params_list["max_runtime_secs"]
                else:
                    max_runtime = 0

                manual_model[model_index] = H2ONaiveBayesEstimator(**params_list)
                manual_model[model_index].train(x=self.x_indices, y=self.y_index,
                                                training_frame=self.training1_data, **model_params)
                model_index += 1
def test_naivebayes_grid_search_over_params(self):
    """
    test_naivebayes_grid_search_over_params performs the following: run a gridsearch model and then
    build each model manually and see if we receive the same error messages.
    """
    print("*******************************************************************************************")
    print("test_naivebayes_grid_search_over_params for naivebayes ")
    h2o.cluster_info()

    print("Hyper-parameters used here are {0}".format(self.final_hyper_params))

    # # start grid search
    # grid_model = H2OGridSearch(H2ONaiveBayesEstimator(nfolds=self.nfolds),
    #                            hyper_params=self.final_hyper_params)
    # grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

    # add parameters into params_dict; use this to manually build the model
    params_dict = dict()
    params_dict["nfolds"] = self.nfolds

    params_list = dict()
    params_list["fold_assignment"] = self.final_hyper_params["fold_assignment"][0]
    # params_list["max_runtime_secs"] = self.final_hyper_params["max_runtime_secs"][1]
    params_list["max_runtime_secs"] = 10        # this will return a full NB model
    # the field manual_model._model_json['output']['cross_validation_metrics_summary'].cell_values will be empty
    params_list["max_runtime_secs"] = 0.001     # this will not return a full NB model
    params_list["laplace"] = self.final_hyper_params["laplace"][0]
    print("Hyper-parameters used here are {0}\n".format(params_list))

    params_list.update(params_dict)

    model_params = dict()

    # need to take max_runtime_secs out of the model parameters; it is now set in .train()
    if "max_runtime_secs" in params_list:
        model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
        max_runtime = params_list["max_runtime_secs"]
        del params_list["max_runtime_secs"]
    else:
        max_runtime = 0

    manual_model = H2ONaiveBayesEstimator(**params_list)
    manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data, **model_params)
    print("Done!")
def test4_glm_random_grid_search_metric(self, metric_name, bigger_is_better):
    """
    This function tests the last stopping condition, which uses metrics.

    :param metric_name: metric we want to use to test the last stopping condition
    :param bigger_is_better: True if a higher metric value indicates better model performance
    :return: None
    """
    print("*******************************************************************************************")
    print("test4_glm_random_grid_search_metric using " + metric_name + " for family " + self.family)
    h2o.cluster_info()

    search_criteria = {
        "strategy": "RandomDiscrete",
        "stopping_metric": metric_name,
        "stopping_tolerance": random.uniform(1e-8, self.max_tolerance),
        "stopping_rounds": random.randint(1, self.max_stopping_rounds),
        "seed": int(round(time.time())),
    }
    print("GLM Binomial grid search_criteria: {0}".format(search_criteria))

    # add max_runtime_secs back into the hyper-parameters to limit model runtime.
    self.hyper_params["max_runtime_secs"] = [0.3]   # arbitrarily set

    # fire off random grid-search
    grid_model = H2OGridSearch(
        H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
        hyper_params=self.hyper_params,
        search_criteria=search_criteria,
    )
    grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

    # bool indicating whether the randomized grid search applied the early stopping condition correctly
    stopped_correctly = pyunit_utils.evaluate_metrics_stopping(
        grid_model.models, metric_name, bigger_is_better, search_criteria, self.possible_number_models
    )

    if stopped_correctly:
        print("test4_glm_random_grid_search_metric " + metric_name + ": passed. ")
    else:
        self.test_failed += 1
        self.test_failed_array[self.test_num] = 1
        print("test4_glm_random_grid_search_metric " + metric_name + ": failed. ")

    self.test_num += 1
def test3_glm_random_grid_search_max_runtime_secs(self):
    """
    This function tests the stopping criterion max_runtime_secs.  For each model built, the field
    run_time denotes the time in ms used to build the model.  We add up the run_time from all models
    and check it against the stopping criterion max_runtime_secs.  Since each algorithm checks its run
    time differently, there is some inaccuracy in the actual run time.  For example, if we give a
    model 10 ms to build, GLM may only check whether it has used up its time once every 10 epochs,
    while deeplearning may check the time spent after every epoch of training.  If we are able to
    restrict the runtime so that it does not exceed the specified max_runtime_secs by more than a
    certain percentage, we consider the test a success.

    :return: None
    """
    print("*******************************************************************************************")
    print("test3_glm_random_grid_search_max_runtime_secs for GLM " + self.family)
    h2o.cluster_info()

    # set up our stopping condition here
    max_run_time_secs = random.uniform(0, self.max_grid_runtime * self.allowed_scaled_overtime)
    search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': max_run_time_secs,
                       "seed": round(time.time())}
    # search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': 1/1e8}
    print("GLM Binomial grid search_criteria: {0}".format(search_criteria))

    # fire off random grid-search
    grid_model = \
        H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                      hyper_params=self.hyper_params, search_criteria=search_criteria)
    grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

    actual_run_time_secs = pyunit_utils.find_grid_runtime(grid_model)

    if actual_run_time_secs <= search_criteria["max_runtime_secs"] * (1 + self.allowed_diff):
        print("test3_glm_random_grid_search_max_runtime_secs: passed!")
    elif len(grid_model) == 1:  # will always generate at least 1 model
        print("test3_glm_random_grid_search_max_runtime_secs: passed!")
    else:
        self.test_failed += 1
        self.test_failed_array[self.test_num] = 1
        print("test3_glm_random_grid_search_max_runtime_secs: failed. Models take {0} seconds to build,"
              " which exceeds the allowed time of {1} seconds.".format(actual_run_time_secs,
                                                                       max_run_time_secs * (1 + self.allowed_diff)))

    self.test_num += 1
    sys.stdout.flush()
def test1_glm_random_grid_search_model_number(self, metric_name):
    """
    This test makes sure the randomized gridsearch will generate all models specified in the
    hyper-parameters if no stopping condition is given in the search criteria.

    :param metric_name: string denoting which metric the grid search models should be sorted by
    :return: None
    """
    print("*******************************************************************************************")
    print("test1_glm_random_grid_search_model_number for GLM " + self.family)
    h2o.cluster_info()

    # set up our stopping condition here: random discrete and find all models
    search_criteria = {"strategy": "RandomDiscrete", "stopping_rounds": 0, "seed": int(round(time.time()))}
    print("GLM Binomial grid search_criteria: {0}".format(search_criteria))

    # fire off random grid-search
    random_grid_model = H2OGridSearch(
        H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
        hyper_params=self.hyper_params,
        search_criteria=search_criteria,
    )
    random_grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

    # compare number of models built from both gridsearches
    if not (len(random_grid_model) == self.possible_number_models):
        self.test_failed += 1
        self.test_failed_array[self.test_num] = 1
        print(
            "test1_glm_random_grid_search_model_number for GLM: failed, number of models generated is "
            "incorrect; possible model number {0} and randomized gridsearch model number {1} are not "
            "equal.".format(self.possible_number_models, len(random_grid_model))
        )
    else:
        self.max_grid_runtime = pyunit_utils.find_grid_runtime(random_grid_model)  # time taken to build all models

    if self.test_failed_array[self.test_num] == 0:
        print("test1_glm_random_grid_search_model_number for GLM: passed!")

    self.test_num += 1
    sys.stdout.flush()
def test_kmeans_hangup(self):
    """
    train a kmeans model with some parameters that will make the system hang.
    """
    print("*******************************************************************************************")
    h2o.cluster_info()

    good_params_list = {'seed': 1464837706, 'max_iterations': 50, 'init': 'Furthest', 'k': 5}
    good_model_params = {'max_runtime_secs': 0.005857068399999999}
    good_model = H2OKMeansEstimator(**good_params_list)
    good_model.train(x=self.x_indices, training_frame=self.training1_data, **good_model_params)

    bad_params_list = {'seed': 1464837574, 'max_iterations': 10, 'k': 10, 'init': 'Furthest'}
    bad_model_params = {'max_runtime_secs': 0.00519726792}
    bad_model = H2OKMeansEstimator(**bad_params_list)
    bad_model.train(x=self.x_indices, training_frame=self.training1_data, **bad_model_params)

    print("good_model._model_json['output']['model_summary'] type is {0}. "
          "bad_model._model_json['output']['model_summary'] type is "
          "{1}".format(type(good_model._model_json['output']['model_summary']),
                       type(bad_model._model_json['output']['model_summary'])))
    print("They are not equal for some reason....")
def test_gbm_grid_search_over_params(self):
    """
    test_gbm_grid_search_over_params performs the following:
    a. build H2O GBM models using grid search.  Count and make sure models are only built for
       hyper-parameters set to legal values.  No model is built for bad hyper-parameter values; we
       should instead get a warning/error message printed out.
    b. For each model built using grid search, extract the parameters used to build that model and
       manually build an H2O GBM model.  Training metrics are calculated for the gridsearch model and
       the manually built model.  If their metrics differ by too much, print a warning message but do
       not fail the test.
    c. Check and make sure the models are built within the max_runtime_secs time limit that was set
       for them.  If max_runtime_secs was exceeded, declare test failure.
    """
    print("*******************************************************************************************")
    print("test_gbm_grid_search_over_params for GBM " + self.family)
    h2o.cluster_info()

    try:
        print("Hyper-parameters used here are {0}".format(self.final_hyper_params))

        # start grid search
        grid_model = H2OGridSearch(H2OGradientBoostingEstimator(distribution=self.family, nfolds=self.nfolds),
                                   hyper_params=self.final_hyper_params)
        grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        self.correct_model_number = len(grid_model)     # store number of models built

        # make sure the correct number of models are built by gridsearch
        if not (self.correct_model_number == self.possible_number_models):  # wrong grid model number
            self.test_failed += 1
            print("test_gbm_grid_search_over_params for GBM failed: number of models built by gridsearch "
                  "does not equal all possible combinations of hyper-parameters")
        else:
            # add parameters into params_dict; use this to manually build the model
            params_dict = dict()
            params_dict["distribution"] = self.family
            params_dict["nfolds"] = self.nfolds
            total_run_time_limits = 0.0     # calculate upper bound of max_runtime_secs
            true_run_time_limits = 0.0
            manual_run_runtime = 0.0

            # compare MSE performance of model built by gridsearch with manually built model
            for each_model in grid_model:
                params_list = grid_model.get_hyperparams_dict(each_model._id)
                params_list.update(params_dict)

                model_params = dict()

                # need to take max_runtime_secs out of the model parameters; it is now set in .train()
                if "max_runtime_secs" in params_list:
                    model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                    max_runtime = params_list["max_runtime_secs"]
                    del params_list["max_runtime_secs"]
                else:
                    max_runtime = 0

                if "r2_stopping" in params_list:
                    model_params["r2_stopping"] = params_list["r2_stopping"]
                    del params_list["r2_stopping"]

                if "validation_frame" in params_list:
                    model_params["validation_frame"] = params_list["validation_frame"]
                    del params_list["validation_frame"]

                if "learn_rate_annealing" in params_list:
                    model_params["learn_rate_annealing"] = params_list["learn_rate_annealing"]
                    del params_list["learn_rate_annealing"]

                # make sure the manual model was given the same max_runtime_secs as the grid model
                each_model_runtime = pyunit_utils.find_grid_runtime([each_model])

                manual_model = H2OGradientBoostingEstimator(**params_list)
                manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data,
                                   **model_params)

                # collect the time taken to manually build all models
                model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                manual_run_runtime += model_runtime

                summary_list = manual_model._model_json['output']['model_summary']
                tree_num = summary_list.cell_values[0][summary_list.col_header.index('number_of_trees')]

                if max_runtime > 0:
                    # shortest possible time it takes to build this model
                    if (max_runtime < self.min_runtime_per_tree) or (tree_num <= 1):
                        total_run_time_limits += model_runtime
                    else:
                        total_run_time_limits += max_runtime

                true_run_time_limits += max_runtime

                # compute and compare test metrics between the two models
                grid_model_metrics = each_model.model_performance()._metric_json[self.training_metric]
                manual_model_metrics = manual_model.model_performance()._metric_json[self.training_metric]

                # just compare the mse in this case within tolerance:
                if not ((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
                    if (abs(grid_model_metrics) > 0) and \
                            (abs(grid_model_metrics - manual_model_metrics) / grid_model_metrics >
                             self.allowed_diff):
                        print("test_gbm_grid_search_over_params for GBM warning: grid search model metric ({0}) "
                              "and manually built H2O model metric ({1}) differ too much"
                              "!".format(grid_model_metrics, manual_model_metrics))

            total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1 + self.extra_time_fraction)

            # make sure max_runtime_secs is working to restrict model build time
            if not (manual_run_runtime <= total_run_time_limits):
                self.test_failed += 1
                print("test_gbm_grid_search_over_params for GBM failed: time taken to manually build models is {0}."
                      " Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits))
            else:
                print("time taken to manually build all models is {0}. Maximum allowed time is "
                      "{1}".format(manual_run_runtime, total_run_time_limits))

        if self.test_failed == 0:
            print("test_gbm_grid_search_over_params for GBM has passed!")
    except:
        if self.possible_number_models > 0:
            print("test_gbm_grid_search_over_params for GBM failed: exception was thrown for no reason.")
            self.test_failed += 1
def test2_illegal_name_value(self):
    """
    test2_illegal_name_value: test for conditions 1 and 2.  Randomly go into the hyper_parameters that
    we have specified and either
    a. randomly alter the name of a hyper-parameter (fatal, exception will be thrown);
    b. randomly choose a hyper-parameter and remove all elements in its list (fatal);
    c. add randomly generated new hyper-parameter names with a random list (fatal);
    d. randomly choose a hyper-parameter and insert an illegal type into it (non-fatal, models are
       built with legal hyper-parameter settings only and error messages are printed out for the
       illegal hyper-parameter settings).

    The following error conditions will be created depending on the error_number generated:

    error_number = 0: randomly alter the name of a hyper-parameter;
    error_number = 1: randomly choose a hyper-parameter and remove all elements in its list;
    error_number = 2: add randomly generated new hyper-parameter names with a random list;
    error_number = 3: randomly choose a hyper-parameter and insert an illegal type into it.

    :return: None
    """
    print("*******************************************************************************************")
    print("test2_illegal_name_value for GLM " + self.family)
    h2o.cluster_info()

    error_number = np.random.random_integers(0, 3, 1)   # randomly choose an error
    print("Test 2 error_number is {0}".format(error_number[0]))

    error_hyper_params = \
        pyunit_utils.insert_error_grid_search(self.hyper_params, self.gridable_parameters, self.gridable_types,
                                              error_number[0])
    print("test2_illegal_name_value: the bad hyper-parameters are: ")
    print(error_hyper_params)

    # copied from Eric to catch execution run errors and not quit
    try:
        grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                                   hyper_params=error_hyper_params)
        grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        if error_number[0] > 2:
            # grid search should not fail in this case; check the number of models built.
            if not (len(grid_model) == self.true_correct_model_number):
                self.test_failed += 1
                self.test_failed_array[self.test_num] = 1
                print("test2_illegal_name_value failed. Number of models generated is incorrect.")
            else:
                print("test2_illegal_name_value passed.")
        else:
            # other errors should cause exceptions to be thrown; if not, something is wrong.
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print("test2_illegal_name_value failed: exception should have been thrown for illegal "
                  "parameter name or empty hyper-parameter list but was not!")
    except:
        if (error_number[0] <= 2) and (error_number[0] >= 0):
            print("test2_illegal_name_value passed: exception is thrown for illegal parameter name or empty "
                  "hyper-parameter list.")
        else:
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print("test2_illegal_name_value failed: exception should not have been thrown but was!")

    self.test_num += 1
def test_kmeans_grid_search_over_params(self):
    """
    test_kmeans_grid_search_over_params performs the following:
    a. build H2O kmeans models using grid search.  Count and make sure models are only built for
       hyper-parameters set to legal values.  No model is built for bad hyper-parameter values; we
       should instead get a warning/error message printed out.
    b. For each model built using grid search, extract the parameters used to build that model and
       manually build an H2O kmeans model.  Training metrics are calculated for the gridsearch model
       and the manually built model.  If their metrics differ by too much, print a warning message but
       do not fail the test.
    c. Check and make sure the models are built within the max_runtime_secs time limit that was set
       for them.  If max_runtime_secs was exceeded, declare test failure.
    """
    print("*******************************************************************************************")
    print("test_kmeans_grid_search_over_params for kmeans ")
    h2o.cluster_info()

    try:
        print("Hyper-parameters used here are {0}".format(self.final_hyper_params))

        # start grid search
        grid_model = H2OGridSearch(H2OKMeansEstimator(), hyper_params=self.final_hyper_params)
        grid_model.train(x=self.x_indices, training_frame=self.training1_data)

        self.correct_model_number = len(grid_model)     # store number of models built

        # make sure the correct number of models are built by gridsearch
        if (self.correct_model_number - self.possible_number_models) > 0.9:     # wrong grid model number
            self.test_failed += 1
            print("test_kmeans_grid_search_over_params for kmeans failed: number of models built by "
                  "gridsearch: {0} does not equal all possible combinations of hyper-parameters: "
                  "{1}".format(self.correct_model_number, self.possible_number_models))
        else:
            # add parameters into params_dict; use this to manually build the model
            params_dict = dict()
            total_run_time_limits = 0.0     # calculate upper bound of max_runtime_secs
            true_run_time_limits = 0.0
            manual_run_runtime = 0.0

            # compare training metric performance of model built by gridsearch with manually built model
            for each_model in grid_model:
                params_list = grid_model.get_hyperparams_dict(each_model._id)
                params_list.update(params_dict)

                model_params = dict()
                num_iter = 0

                # need to take max_runtime_secs out of the model parameters; it is now set in .train()
                if "max_runtime_secs" in params_list:
                    model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                    max_runtime = params_list["max_runtime_secs"]
                    del params_list["max_runtime_secs"]
                else:
                    max_runtime = 0

                # make sure the manual model was given the same max_runtime_secs as the grid model
                each_model_runtime = pyunit_utils.find_grid_runtime([each_model])

                manual_model = H2OKMeansEstimator(**params_list)
                manual_model.train(x=self.x_indices, training_frame=self.training1_data, **model_params)

                # collect the time taken to manually build all models
                model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                manual_run_runtime += model_runtime

                summary_list = manual_model._model_json['output']['model_summary']
                if summary_list is not None:
                    num_iter = summary_list["number_of_iterations"][0]

                # compute and compare test metrics between the two models
                if not (each_model._model_json["output"]["model_summary"] is None):
                    grid_model_metrics = \
                        each_model._model_json["output"]["model_summary"]["total_sum_of_squares"][0]
                    manual_model_metrics = \
                        manual_model._model_json["output"]["model_summary"]["total_sum_of_squares"][0]

                    # just compare the training metrics in this case within tolerance:
                    if not ((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
                        if (abs(grid_model_metrics) > 0) and \
                                (abs(grid_model_metrics - manual_model_metrics) / grid_model_metrics >
                                 self.allowed_diff):
                            print("test_kmeans_grid_search_over_params for kmeans warning: grid search model "
                                  "metric ({0}) and manually built H2O model metric ({1}) differ too much"
                                  "!".format(grid_model_metrics, manual_model_metrics))

                if max_runtime > 0:
                    # collect allowed max_runtime_secs info
                    if (max_runtime < self.min_runtime_per_iter) or (num_iter <= 1):
                        total_run_time_limits += model_runtime
                    else:
                        total_run_time_limits += max_runtime

                true_run_time_limits += max_runtime

            total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1 + self.extra_time_fraction)

            # make sure max_runtime_secs is working to restrict model build time
            if not (manual_run_runtime <= total_run_time_limits):
                self.test_failed += 1
                print("test_kmeans_grid_search_over_params for kmeans failed: time taken to manually build models"
                      " is {0}. Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits))
            else:
                print("time taken to manually build all models is {0}. Maximum allowed time is "
                      "{1}".format(manual_run_runtime, total_run_time_limits))

        if self.test_failed == 0:
            print("test_kmeans_grid_search_over_params for kmeans has passed!")
    except Exception as e:
        if self.possible_number_models > 0:
            print("test_kmeans_grid_search_over_params for kmeans failed: exception ({0}) was thrown for "
                  "no reason.".format(e))
            self.test_failed += 1
def test_kmeans_grid_search_over_params(self):
    """
    test_kmeans_grid_search_over_params performs the following:
    a. build H2O kmeans models using grid search.  Count and make sure models are only built for
       hyper-parameters set to legal values.  No model is built for bad hyper-parameter values; we
       should instead get a warning/error message printed out.
    b. For each model built using grid search, extract the parameters used to build that model and
       manually build an H2O kmeans model.  Training metrics are calculated for the gridsearch model
       and the manually built model.  If their metrics differ by too much, print a warning message but
       do not fail the test.
    c. Check and make sure the models are built within the max_runtime_secs time limit that was set
       for them.  If max_runtime_secs was exceeded, declare test failure.
    """
    print("*******************************************************************************************")
    print("test_kmeans_grid_search_over_params for kmeans ")
    h2o.cluster_info()

    try:
        print("Hyper-parameters used here are {0}".format(self.final_hyper_params))

        # start grid search
        grid_model = H2OGridSearch(H2OKMeansEstimator(), hyper_params=self.final_hyper_params)
        grid_model.train(x=self.x_indices, training_frame=self.training1_data)

        self.correct_model_number = len(grid_model)     # store number of models built

        # make sure the correct number of models are built by gridsearch
        if not (self.correct_model_number == self.possible_number_models):  # wrong grid model number
            self.test_failed += 1
            print("test_kmeans_grid_search_over_params for kmeans failed: number of models built by gridsearch "
                  "does not equal all possible combinations of hyper-parameters")
        else:
            # add parameters into params_dict; use this to manually build the model
            params_dict = dict()
            total_run_time_limits = 0.0     # calculate upper bound of max_runtime_secs
            true_run_time_limits = 0.0
            manual_run_runtime = 0.0

            # compare training metric performance of model built by gridsearch with manually built model
            for each_model in grid_model:
                params_list = grid_model.get_hyperparams_dict(each_model._id)
                params_list.update(params_dict)

                model_params = dict()
                num_iter = 0

                # need to take max_runtime_secs out of the model parameters; it is now set in .train()
                if "max_runtime_secs" in params_list:
                    model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                    max_runtime = params_list["max_runtime_secs"]
                    del params_list["max_runtime_secs"]
                else:
                    max_runtime = 0

                # make sure the manual model was given the same max_runtime_secs as the grid model
                each_model_runtime = pyunit_utils.find_grid_runtime([each_model])

                manual_model = H2OKMeansEstimator(**params_list)
                manual_model.train(x=self.x_indices, training_frame=self.training1_data, **model_params)

                # collect the time taken to manually build all models
                model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                manual_run_runtime += model_runtime

                summary_list = manual_model._model_json['output']['model_summary']
                if summary_list is not None:
                    num_iter = summary_list.cell_values[0][summary_list.col_header.index('number_of_iterations')]

                # compute and compare test metrics between the two models
                if each_model._model_json["output"]["model_summary"] is not None:
                    grid_model_metrics = \
                        each_model._model_json["output"]["model_summary"].cell_values[0][
                            summary_list.col_header.index('total_sum_of_squares')]
                    manual_model_metrics = \
                        manual_model._model_json["output"]["model_summary"].cell_values[0][
                            summary_list.col_header.index('total_sum_of_squares')]

                    # just compare the training metrics in this case within tolerance:
                    if not ((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
                        if (abs(grid_model_metrics) > 0) and \
                                (abs(grid_model_metrics - manual_model_metrics) / grid_model_metrics >
                                 self.allowed_diff):
                            print("test_kmeans_grid_search_over_params for kmeans warning: grid search model "
                                  "metric ({0}) and manually built H2O model metric ({1}) differ too much"
                                  "!".format(grid_model_metrics, manual_model_metrics))

                if max_runtime > 0:
                    # collect allowed max_runtime_secs info
                    if (max_runtime < self.min_runtime_per_iter) or (num_iter <= 1):
                        total_run_time_limits += model_runtime
                    else:
                        total_run_time_limits += max_runtime

                true_run_time_limits += max_runtime

            total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1 + self.extra_time_fraction)

            # make sure max_runtime_secs is working to restrict model build time
            if not (manual_run_runtime <= total_run_time_limits):
                self.test_failed += 1
                print("test_kmeans_grid_search_over_params for kmeans failed: time taken to manually build models"
                      " is {0}. Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits))
            else:
                print("time taken to manually build all models is {0}. Maximum allowed time is "
                      "{1}".format(manual_run_runtime, total_run_time_limits))

        if self.test_failed == 0:
            print("test_kmeans_grid_search_over_params for kmeans has passed!")
    except:
        if self.possible_number_models > 0:
            print("test_kmeans_grid_search_over_params for kmeans failed: exception was thrown for no reason.")
            self.test_failed += 1
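# The two kmeans variants above read the same 'model_summary' table in two different ways: by column
# name ( summary_list["number_of_iterations"][0] ) and by positional lookup through .col_header /
# .cell_values.  A small helper such as the hypothetical first_cell() below captures the positional
# form used throughout these tests; it only assumes the table object exposes the col_header and
# cell_values attributes already used above, and is not part of the h2o package or pyunit_utils.
def first_cell(table, column_name):
    """Return the value of column_name in the first row of a grid/model summary table."""
    return table.cell_values[0][table.col_header.index(column_name)]

# Hypothetical usage:
#   num_iter = first_cell(manual_model._model_json['output']['model_summary'], 'number_of_iterations')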
def test_deeplearning_grid_search_over_params(self):
    """
    test_deeplearning_grid_search_over_params performs the following:
    a. build H2O deeplearning models using grid search.  Count and make sure models are only built for
       hyper-parameters set to legal values.  No model is built for bad hyper-parameter values; we
       should instead get a warning/error message printed out.
    b. For each model built using grid search, extract the parameters used to build that model and
       manually build an H2O deeplearning model.  Training metrics are calculated for the gridsearch
       model and the manually built model.  If their metrics differ by too much, print a warning
       message but do not fail the test.
    c. Check and make sure the models are built within the max_runtime_secs time limit that was set
       for them.  If max_runtime_secs was exceeded, declare test failure.
    """
    print("*******************************************************************************************")
    print("test_deeplearning_grid_search_over_params for deeplearning " + self.family)
    h2o.cluster_info()

    # start grid search
    # grid_model = H2OGridSearch(H2ODeepLearningEstimator(nfolds=self.nfolds, seed=self.seed),
    #                            hyper_params=self.final_hyper_params)
    # grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
    #
    # self.correct_model_number = len(grid_model)  # store number of models built

    try:
        print("Hyper-parameters used here are {0}".format(self.final_hyper_params))

        # start grid search
        grid_model = H2OGridSearch(H2ODeepLearningEstimator(nfolds=self.nfolds),
                                   hyper_params=self.final_hyper_params)
        grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        # add parameters into params_dict; use this to manually build the model
        params_dict = dict()
        params_dict["distribution"] = self.family
        params_dict["nfolds"] = self.nfolds
        total_run_time_limits = 0.0     # calculate upper bound of max_runtime_secs
        true_run_time_limits = 0.0
        manual_run_runtime = 0.0

        # compare MSE performance of model built by gridsearch with manually built model
        for each_model in grid_model:
            params_list = grid_model.get_hyperparams_dict(each_model._id)
            params_list.update(params_dict)

            model_params = dict()

            # need to take max_runtime_secs out of the model parameters; it is now set in .train()
            if "max_runtime_secs" in params_list:
                model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                max_runtime = params_list["max_runtime_secs"]
                del params_list["max_runtime_secs"]
            else:
                max_runtime = 0

            if "elastic_averaging_moving_rate" in params_list:
                model_params["elastic_averaging_moving_rate"] = params_list["elastic_averaging_moving_rate"]
                del params_list["elastic_averaging_moving_rate"]

            if "validation_frame" in params_list:
                model_params["validation_frame"] = params_list["validation_frame"]
                del params_list["validation_frame"]

            if "elastic_averaging_regularization" in params_list:
                model_params["elastic_averaging_regularization"] = params_list["elastic_averaging_regularization"]
                del params_list["elastic_averaging_regularization"]

            manual_model = H2ODeepLearningEstimator(**params_list)
            manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data,
                               **model_params)

            # collect the time taken to manually build all models
            model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
            manual_run_runtime += model_runtime

            summary_list = manual_model._model_json["output"]["scoring_history"]
            if len(summary_list.cell_values) < 3:
                num_iterations = 1
            else:
                num_iterations = summary_list.cell_values[2][summary_list.col_header.index('iterations')]

            if max_runtime > 0:
                # shortest possible time it takes to build this model
                if (max_runtime < self.min_runtime_per_iteration) or (num_iterations <= 1):
                    total_run_time_limits += model_runtime
                else:
                    total_run_time_limits += max_runtime

            true_run_time_limits += max_runtime

            # compute and compare test metrics between the two models
            grid_model_metrics = each_model.model_performance()._metric_json[self.training_metric]
            manual_model_metrics = manual_model.model_performance()._metric_json[self.training_metric]

            # just compare the mse in this case within tolerance:
            if not ((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
                if (abs(grid_model_metrics) > 0) \
                        and abs(grid_model_metrics - manual_model_metrics) / grid_model_metrics > self.allowed_diff:
                    print("test_deeplearning_grid_search_over_params for deeplearning warning: grid search "
                          "model metric ({0}) and manually built H2O model metric ({1}) differ too much"
                          "!".format(grid_model_metrics, manual_model_metrics))

        total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1 + self.extra_time_fraction)

        # make sure max_runtime_secs is working to restrict model build time
        if not (manual_run_runtime <= total_run_time_limits):
            self.test_failed += 1
            print("test_deeplearning_grid_search_over_params for deeplearning failed: time taken to manually build"
                  " models is {0}. Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits))
        else:
            print("time taken to manually build all models is {0}. Maximum allowed time is "
                  "{1}".format(manual_run_runtime, total_run_time_limits))

        if self.test_failed == 0:
            print("test_deeplearning_grid_search_over_params for deeplearning has passed!")
    except:
        if len(grid_model) > 0:
            print("test_deeplearning_grid_search_over_params for deeplearning failed: exception was thrown for "
                  "no reason.")
            self.test_failed += 1
def test1_glm_grid_search_over_params(self): """ test1_glm_grid_search_over_params: test for condition 1 and performs the following: a. grab all truely griddable parameters and randomly or manually set the parameter values. b. Next, build H2O GLM models using grid search. Count and make sure models are only built for hyper-parameters set to legal values. No model is built for bad hyper-parameters values. We should instead get a warning/error message printed out. c. For each model built using grid search, we will extract the parameters used in building that model and manually build a H2O GLM model. Training metrics are calculated from the gridsearch model and the manually built model. If their metrics differ by too much, print a warning message but don't fail the test. d. we will check and make sure the models are built within the max_runtime_secs time limit that was set for it as well. If max_runtime_secs was exceeded, declare test failure. """ print("*******************************************************************************************") print("test1_glm_grid_search_over_params for GLM " + self.family) h2o.cluster_info() try: print("Hyper-parameters used here is {0}".format(self.final_hyper_params_bad)) # start grid search grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds), hyper_params=self.final_hyper_params_bad) grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) self.correct_model_number = len(grid_model) # store number of models built # make sure the correct number of models are built by gridsearch if not (self.correct_model_number == self.possible_number_models): # wrong grid model number self.test_failed += 1 self.test_failed_array[self.test_num] = 1 print("test_glm_search_over_params for GLM failed: number of models built by gridsearch " "does not equal to all possible combinations of hyper-parameters") else: # add parameters into params_dict. 
params_dict = dict() params_dict["family"] = self.family params_dict["nfolds"] = self.nfolds total_run_time_limits = 0.0 # calculate upper bound of max_runtime_secs true_run_time_limits = 0.0 manual_run_runtime = 0.0 # compare MSE performance of model built by gridsearch with manually built model for each_model in grid_model: # grab parameters used by grid search and build a dict out of it params_list = grid_model.get_hyperparams_dict(each_model._id) params_list.update(params_dict) model_params = dict() # some parameters are to be added in .train() if "lambda" in list(params_list): params_list["Lambda"] = params_list["lambda"] del params_list["lambda"] # need to take out max_runtime_secs, stopping_rounds and stopping_tolerance from the model parameters; they are now set in .train() if "max_runtime_secs" in params_list: model_params["max_runtime_secs"] = params_list["max_runtime_secs"] del params_list["max_runtime_secs"] if "stopping_rounds" in params_list: model_params["stopping_rounds"] = params_list["stopping_rounds"] del params_list["stopping_rounds"] if "stopping_tolerance" in params_list: model_params["stopping_tolerance"] = params_list["stopping_tolerance"] del params_list["stopping_tolerance"] manual_model = H2OGeneralizedLinearEstimator(**params_list) manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data, **model_params) # collect the time taken to manually build all models model_runtime = pyunit_utils.find_grid_runtime([manual_model]) # time taken to build this model manual_run_runtime += model_runtime summary_list = manual_model._model_json['output']['model_summary'] iteration_num = summary_list.cell_values[0][summary_list.col_header.index('number_of_iterations')] if model_params["max_runtime_secs"] > 0: # shortest possible time it takes to build this model if (model_params["max_runtime_secs"] < self.min_runtime_per_epoch) or (iteration_num <= 1): total_run_time_limits += model_runtime else: total_run_time_limits += model_params["max_runtime_secs"] true_run_time_limits += model_params["max_runtime_secs"] # compute and compare test metrics between the two models grid_model_metrics = each_model.model_performance(test_data=self.training2_data) manual_model_metrics = manual_model.model_performance(test_data=self.training2_data) # just compare the mse in this case within tolerance: if not((type(grid_model_metrics.mse()) == str) or (type(manual_model_metrics.mse()) == str)): if (abs(grid_model_metrics.mse()) > 0) \ and abs(grid_model_metrics.mse() - manual_model_metrics.mse())/grid_model_metrics.mse() > self.allowed_diff: print("test1_glm_grid_search_over_params for GLM warning: grid search model metric ({0}) " "and manually built H2O model metric ({1}) differ too much" "!".format(grid_model_metrics.mse(), manual_model_metrics.mse())) total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction) # make sure the correct number of models are built by gridsearch if not (self.correct_model_number == self.possible_number_models): # wrong grid model number self.test_failed += 1 self.test_failed_array[self.test_num] = 1 print("test1_glm_grid_search_over_params for GLM failed: number of models built by gridsearch " "does not equal the number of all possible hyper-parameter combinations") # make sure max_runtime_secs restricts model build time; GLM does not always respect it.
if not(manual_run_runtime <= total_run_time_limits): # self.test_failed += 1 # self.test_failed_array[self.test_num] = 1 print("test1_glm_grid_search_over_params for GLM warning: allowed time to build models: {0}, actual " "time taken: {1}".format(total_run_time_limits, manual_run_runtime)) self.test_num += 1 if self.test_failed == 0: print("test1_glm_grid_search_over_params for GLM has passed!") except: if self.possible_number_models > 0: print("test1_glm_grid_search_over_params for GLM failed: exception was thrown for no reason.")
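A condensed, self-contained version of the grid-versus-manual pattern exercised by the test above. The synthetic frame, the alpha values, and the comparison printout are illustrative stand-ins for self.training1_data and the test-class settings, not values taken from the test.

import h2o
import numpy as np
import pandas as pd
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.grid.grid_search import H2OGridSearch

h2o.init()

# small synthetic regression frame standing in for self.training1_data
rng = np.random.default_rng(1234)
frame = h2o.H2OFrame(pd.DataFrame(rng.normal(size=(200, 4)),
                                  columns=["x1", "x2", "x3", "y"]))
predictors, response = ["x1", "x2", "x3"], "y"

# grid over alpha only, to keep the sketch short
grid = H2OGridSearch(H2OGeneralizedLinearEstimator(family="gaussian", nfolds=3),
                     hyper_params={"alpha": [0.0, 0.5, 1.0]})
grid.train(x=predictors, y=response, training_frame=frame)

for grid_glm in grid:
    # rebuild the same model manually from the hyper-parameters the grid actually used
    used = grid.get_hyperparams_dict(grid_glm._id)
    manual = H2OGeneralizedLinearEstimator(family="gaussian", nfolds=3, **used)
    manual.train(x=predictors, y=response, training_frame=frame)
    gap = abs(grid_glm.mse() - manual.mse()) / max(abs(grid_glm.mse()), 1e-12)
    print("{0}: relative MSE gap = {1:.4f}".format(grid_glm._id, gap))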
def test_naivebayes_grid_search_over_params(self): """ test_naivebayes_grid_search_over_params performs the following: a. build H2O naivebayes models using grid search. Count and make sure models are only built for hyper-parameters set to legal values. No model is built for bad hyper-parameter values. We should instead get a warning/error message printed out. b. For each model built using grid search, we will extract the parameters used in building that model and manually build a H2O naivebayes model. Logloss is calculated from a test set to compare the performance of the grid search model and our manually built model. If their metrics are close, declare test success. Otherwise, declare test failure. c. we will check and make sure the models are built within the max_runtime_secs time limit that was set for it as well. If max_runtime_secs was exceeded, declare test failure as well. """ print("*******************************************************************************************") print("test_naivebayes_grid_search_over_params for naivebayes ") h2o.cluster_info() try: print("Hyper-parameters used here are {0}".format(self.final_hyper_params)) # start grid search grid_model = H2OGridSearch(H2ONaiveBayesEstimator(nfolds=self.nfolds), hyper_params=self.final_hyper_params) grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) self.correct_model_number = len(grid_model) # store number of models built # make sure the correct number of models are built by gridsearch if not (self.correct_model_number == self.possible_number_models): # wrong grid model number self.test_failed += 1 print("test_naivebayes_grid_search_over_params for naivebayes failed: number of models built by " "gridsearch does not equal the number of all possible hyper-parameter combinations") else: # add parameters into params_dict; use it to manually build each model
params_dict = dict() params_dict["nfolds"] = self.nfolds params_dict["score_tree_interval"] = 0 total_run_time_limits = 0.0 # calculate upper bound of max_runtime_secs true_run_time_limits = 0.0 manual_run_runtime = 0.0 # compare performance metric of model built by gridsearch with manually built model for each_model in grid_model: params_list = grid_model.get_hyperparams_dict(each_model._id) params_list.update(params_dict) model_params = dict() # need to take out max_runtime_secs from the model parameters; it is now set in .train() if "max_runtime_secs" in params_list: model_params["max_runtime_secs"] = params_list["max_runtime_secs"] max_runtime = params_list["max_runtime_secs"] del params_list["max_runtime_secs"] else: max_runtime = 0 if "validation_frame" in params_list: model_params["validation_frame"] = params_list["validation_frame"] del params_list["validation_frame"] if "score_tree_interval" in params_list: model_params["score_tree_interval"] = params_list["score_tree_interval"] del params_list["score_tree_interval"] if "eps_prob" in params_list: model_params["eps_prob"] = params_list["eps_prob"] del params_list["eps_prob"] if "min_prob" in params_list: model_params["min_prob"] = params_list["min_prob"] del params_list["min_prob"] # make sure manual model was provided the same max_runtime_secs as the grid model each_model_runtime = pyunit_utils.find_grid_runtime([each_model]) manual_model = H2ONaiveBayesEstimator(**params_list) manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data, **model_params) # collect the time taken to manually build all models model_runtime = pyunit_utils.find_grid_runtime([manual_model]) # time taken to build this model manual_run_runtime += model_runtime if max_runtime > 0: # shortest possible time it takes to build this model if (max_runtime < self.model_run_time): total_run_time_limits += model_runtime else: total_run_time_limits += max_runtime true_run_time_limits += max_runtime # compute and compare test metrics between the two models test_grid_model_metrics = \ each_model.model_performance(test_data=self.training1_data)._metric_json[self.training_metric] test_manual_model_metrics = \ manual_model.model_performance(test_data=self.training1_data)._metric_json[self.training_metric] # just compare the test metric in this case within tolerance: if (each_model_runtime > 0) and \ (abs(model_runtime - each_model_runtime)/each_model_runtime < self.allowed_runtime_diff) \ and (abs(test_grid_model_metrics - test_manual_model_metrics) > self.allowed_diff): self.test_failed += 1 # count total number of tests that have failed print("test_naivebayes_grid_search_over_params for naivebayes failed: grid search model and manually " "built H2O model differ too much in the test metric!") break total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction) # make sure the max_runtime_secs is working to restrict model build time if not(manual_run_runtime <= total_run_time_limits): self.test_failed += 1 print("test_naivebayes_grid_search_over_params for naivebayes failed: time taken to manually build models is {0}." " Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits)) if self.test_failed == 0: print("test_naivebayes_grid_search_over_params for naivebayes has passed!") except: if self.possible_number_models > 0: print("test_naivebayes_grid_search_over_params for naivebayes failed: exception was thrown for no reason.") self.test_failed += 1
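find_grid_runtime comes from the test framework's pyunit_utils module. A hedged sketch of an equivalent lookup is shown below; it assumes the model JSON exposes a run_time entry in milliseconds under _model_json["output"], which is an assumption about the JSON layout rather than a description of the real helper.

def total_build_time_secs(models):
    """Sum the reported build times of a list of trained H2O models, in seconds.

    Assumes each model's _model_json["output"] dict carries a "run_time" field
    in milliseconds; models missing the field contribute 0.
    """
    total_ms = 0
    for model in models:
        total_ms += model._model_json["output"].get("run_time", 0)
    return total_ms / 1000.0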
def test1_glm_grid_search_over_params(self): """ This test exercises the gridsearch over all of its griddable parameters. Furthermore, for each model built by gridsearch, we will build an equivalent model manually with the same parameters and compare the gridsearch model with our manually built model to make sure their performances are close. """ print("*******************************************************************************************") print("test1_glm_grid_search_over_params for GLM " + self.family) h2o.cluster_info() # start grid search grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds), hyper_params=self.hyper_params) grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) self.correct_model_number = len(grid_model) # store number of models built if not (self.correct_model_number == self.possible_number_models): self.test_failed += 1 self.test_failed_array[self.test_num] = 1 print("test1_glm_grid_search_over_params for GLM failed: number of models built by gridsearch " "does not equal the number of all possible hyper-parameter combinations") if (self.test_failed_array[self.test_num] == 0): # only proceed if previous test passed # add parameters into params_dict; use it to build the model parameters params_dict = {} params_dict["family"] = self.family params_dict["nfolds"] = self.nfolds # compare performance of model built by gridsearch with manually built model for each_model in grid_model: # grab parameters used by grid search and build a dict out of it params_list = pyunit_utils.extract_used_params(self.hyper_params.keys(), each_model.params, params_dict) manual_model = H2OGeneralizedLinearEstimator(**params_list) manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) # compute and compare test metrics between the two models test_grid_model_metrics = each_model.model_performance(test_data=self.training2_data) test_manual_model_metrics = manual_model.model_performance(test_data=self.training2_data) # just compare the mse in this case within tolerance: if abs(test_grid_model_metrics.mse() - test_manual_model_metrics.mse()) > self.allowed_diff: self.test_failed += 1 # count total number of tests that have failed self.test_failed_array[self.test_num] += 1 print("test1_glm_grid_search_over_params for GLM failed: grid search model and manually " "built H2O model differ too much in test MSE!") break self.test_num += 1 if self.test_failed == 0: print("test1_glm_grid_search_over_params for GLM has passed!")
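extract_used_params is another pyunit_utils helper used above. The sketch below shows the general idea under the assumption that each_model.params maps each parameter name to a dict with an "actual" entry; the helper name and exact behaviour here are illustrative, not the real implementation.

def used_hyper_params(hyper_param_names, model_params, fixed_params):
    """Collect the hyper-parameter values a grid model actually used, merged with fixed params."""
    used = dict(fixed_params)
    for name in hyper_param_names:
        if name in model_params and model_params[name] is not None:
            used[name] = model_params[name]["actual"]
    return used

The resulting dict can then be splatted into the estimator constructor, as the test does with H2OGeneralizedLinearEstimator(**params_list).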
############################################## XG Boost ########################################################################### from h2o.estimators.xgboost import H2OXGBoostEstimator model_xg = H2OXGBoostEstimator(ntrees=100, max_depth=6, min_rows=1) # Other parameters reference: http://docs.h2o.ai/h2o/latest-stable/h2o-docs/data-science/xgboost.html model_xg.train(x=features_h2o, y="noOfLanes", training_frame=train_h2o, validation_frame=valid_h2o) model_xg.params print(model_xg) pred = model_xg.predict(test_h2o) # check this - predictions not working correctly yet pred.head() ######## Save models in H2O model_path = h2o.save_model(model=model, path="/tmp/mymodel", force=True) # model can be any trained estimator, e.g. model_dl or model_rf print(model_path) # OUTPUT: /tmp/mymodel/DeepLearning_model_python_1441838096933 ######### Load models in H2O saved_model_loaded = h2o.load_model(model_path) ########## Check H2O cluster info and version h2o.cluster_info() ######### Close the H2O session h2o.shutdown() h2o.cluster().shutdown()
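For the version check at the end of the snippet, h2o.cluster_info() prints the whole cluster status; a narrower check is sketched below (h2o.__version__ reports the installed client package version). The save/load round trip mirrors the calls above and assumes a trained model such as model_xg is in scope; the path is illustrative.

import h2o

print("h2o client version:", h2o.__version__)

# round-trip an arbitrary trained model through disk
saved_path = h2o.save_model(model=model_xg, path="/tmp/mymodel", force=True)
reloaded = h2o.load_model(saved_path)
print("reloaded model id:", reloaded.model_id)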
def test_PCA_grid_search_over_params(self): """ test_pca_grid_search_over_params: test for condition 1 and performs the following: a. build H2O PCA models using grid search. Count and make sure models are only built for hyper-parameters set to legal values. No model is built for bad hyper-parameter values. We should instead get a warning/error message printed out. b. For each model built using grid search, we will extract the parameters used in building that model and manually build a H2O PCA model. Training metrics are calculated from the gridsearch model and the manually built model. If their metrics differ by too much, print a warning message but don't fail the test. c. we will check and make sure the models are built within the max_runtime_secs time limit that was set for it as well. If max_runtime_secs was exceeded, declare test failure. """ print("*******************************************************************************************") print("test_PCA_grid_search_over_params for PCA ") h2o.cluster_info() try: print("Hyper-parameters used here are {0}".format(self.final_hyper_params)) # start grid search grid_model = H2OGridSearch(H2OPCA(pca_method=self.pca_method), hyper_params=self.final_hyper_params) grid_model.train(x=self.x_indices, training_frame=self.training1_data) self.correct_model_number = len(grid_model) # store number of models built # make sure the correct number of models are built by gridsearch if not (self.correct_model_number == self.possible_number_models): # wrong grid model number self.test_failed += 1 print("test_PCA_grid_search_over_params for PCA failed: number of models built by gridsearch " "does not equal the number of all possible hyper-parameter combinations") else: # add parameters into params_dict; use it to manually build each model params_dict = dict() params_dict["pca_method"] = self.pca_method total_run_time_limits = 0.0 # calculate upper bound of max_runtime_secs true_run_time_limits = 0.0 manual_run_runtime = 0.0 # compare performance metric of model built by gridsearch with manually built model for each_model in grid_model: params_list = grid_model.get_hyperparams_dict(each_model._id) params_list.update(params_dict) model_params = dict() # need to take out max_runtime_secs from the model parameters; it is now set in .train() if "max_runtime_secs" in params_list: model_params["max_runtime_secs"] = params_list["max_runtime_secs"] max_runtime = params_list["max_runtime_secs"] del params_list["max_runtime_secs"] else: max_runtime = 0 # make sure manual model was provided the same max_runtime_secs as the grid model each_model_runtime = pyunit_utils.find_grid_runtime([each_model]) manual_model = H2OPCA(**params_list) manual_model.train(x=self.x_indices, training_frame=self.training1_data, **model_params) # collect the time taken to manually build all models model_runtime = pyunit_utils.find_grid_runtime([manual_model]) # time taken to build this model manual_run_runtime += model_runtime if max_runtime > 0: # shortest possible time it takes to build this model if max_runtime < self.model_run_time: total_run_time_limits += model_runtime else: total_run_time_limits += max_runtime true_run_time_limits += max_runtime # compute and compare test metrics between the two models grid_model_metrics = \ sum(each_model._model_json["output"]["model_summary"].cell_values[0][1:params_list["k"]]) manual_model_metrics = \ sum(manual_model._model_json["output"]["model_summary"].cell_values[0][1:params_list["k"]]) # just compare the metrics in this case within tolerance:
if not((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)): if (abs(grid_model_metrics) > 0) and \ (abs(grid_model_metrics - manual_model_metrics)/grid_model_metrics > self.allowed_diff): print("test_PCA_grid_search_over_params for PCA warning: grid search model metric ({0}) " "and manually built H2O model metric ({1}) differ too much" "!".format(grid_model_metrics, manual_model_metrics)) total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction) # make sure the max_runtime_secs is working to restrict model build time if not(manual_run_runtime <= total_run_time_limits): self.test_failed += 1 print("test_PCA_grid_search_over_params for PCA failed: time taken to manually build models is {0}." " Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits)) else: print("time taken to manually build all models is {0}. Maximum allowed time is " "{1}".format(manual_run_runtime, total_run_time_limits)) if self.test_failed == 0: print("test_PCA_grid_search_over_params for PCA has passed!") except: if self.possible_number_models > 0: print("test_PCA_grid_search_over_params for PCA failed: exception was thrown for no reason.") self.test_failed += 1
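The PCA comparison above sums the first row of the model_summary table over the first k components. A hedged sketch of that metric as a helper is below; which summary row is being summed (standard deviation versus proportion of variance) depends on the PCA summary layout, and the slice deliberately mirrors the test's cell_values[0][1:k] indexing rather than asserting it covers all k components.

def pca_summary_row_sum(model, k, row=0):
    """Sum columns 1..k-1 of one row of an H2O PCA model's model_summary table,
    mirroring the cell_values[row][1:k] slice used in the test above."""
    table = model._model_json["output"]["model_summary"]
    return sum(table.cell_values[row][1:k])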
def test_naivebayes_grid_search_over_params(self): """ test_naivebayes_grid_search_over_params performs the following: a. grab all truly griddable parameters and randomly or manually set the parameter values. b. Next, build H2O naivebayes models using grid search. Count and make sure models are only built for hyper-parameters set to legal values. No model is built for bad hyper-parameter values. We should instead get a warning/error message printed out. c. For each model built using grid search, we will extract the parameters used in building that model and manually build a H2O naivebayes model. Training metrics are calculated from the gridsearch model and the manually built model. If their metrics differ by too much, print a warning message but don't fail the test. d. we will check and make sure the models are built within the max_runtime_secs time limit that was set for it as well. If max_runtime_secs was exceeded, declare test failure. """ print("*******************************************************************************************") print("test_naivebayes_grid_search_over_params for naivebayes ") h2o.cluster_info() try: print("Hyper-parameters used here are {0}".format(self.final_hyper_params)) # start grid search grid_model = H2OGridSearch(H2ONaiveBayesEstimator(nfolds=self.nfolds), hyper_params=self.final_hyper_params) grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) self.correct_model_number = len(grid_model) # store number of models built # make sure the correct number of models are built by gridsearch if not (self.correct_model_number == self.possible_number_models): # wrong grid model number self.test_failed += 1 print("test_naivebayes_grid_search_over_params for naivebayes failed: number of models built by " "gridsearch ({0}) does not equal the number of all possible hyper-parameter combinations " "({1})".format(self.correct_model_number, self.possible_number_models)) else: # add parameters into params_dict; use it to manually build each model
params_dict = dict() params_dict["nfolds"] = self.nfolds total_run_time_limits = 0.0 # calculate upper bound of max_runtime_secs true_run_time_limits = 0.0 manual_run_runtime = 0.0 gridsearch_runtime = 0.0 # compare performance metric of model built by gridsearch with manually built model for each_model in grid_model: params_list = grid_model.get_hyperparams_dict(each_model._id) params_list.update(params_dict) model_params = dict() # need to take out max_runtime_secs from the model parameters; it is now set in .train() if "max_runtime_secs" in params_list: model_params["max_runtime_secs"] = params_list["max_runtime_secs"] max_runtime = params_list["max_runtime_secs"] del params_list["max_runtime_secs"] else: max_runtime = 0 if "validation_frame" in params_list: model_params["validation_frame"] = params_list["validation_frame"] del params_list["validation_frame"] if "eps_prob" in params_list: model_params["eps_prob"] = params_list["eps_prob"] del params_list["eps_prob"] if "min_prob" in params_list: model_params["min_prob"] = params_list["min_prob"] del params_list["min_prob"] # make sure manual model was provided the same max_runtime_secs as the grid model each_model_runtime = pyunit_utils.find_grid_runtime([each_model]) gridsearch_runtime += each_model_runtime manual_model = H2ONaiveBayesEstimator(**params_list) manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data, **model_params) # collect the time taken to manually build all models model_runtime = pyunit_utils.find_grid_runtime([manual_model]) # time taken to build this model manual_run_runtime += model_runtime if max_runtime > 0: # shortest possible time it takes to build this model if (max_runtime < self.model_run_time): total_run_time_limits += model_runtime else: total_run_time_limits += max_runtime true_run_time_limits += max_runtime # compute and compare test metrics between the two models grid_model_metrics = \ each_model.model_performance(test_data=self.training1_data)._metric_json[self.training_metric] manual_model_metrics = \ manual_model.model_performance(test_data=self.training1_data)._metric_json[self.training_metric] # just compare the test metric in this case within tolerance: if not((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)): if (abs(grid_model_metrics) > 0) \ and (abs(grid_model_metrics - manual_model_metrics)/grid_model_metrics > self.allowed_diff): print("test_naivebayes_grid_search_over_params for naivebayes WARNING\ngrid search model " "{0}: {1}, time taken to build (secs): {2}\n and manually built H2O model {3}: {4}, " "time taken to build (secs): {5}\ndiffer too much!" "".format(self.training_metric, grid_model_metrics, each_model_runtime, self.training_metric, manual_model_metrics, model_runtime)) print("Time taken for gridsearch to build all models (sec): {0}\n Time taken to manually build all " "models (sec): {1}, total run time limits (sec): " "{2}".format(gridsearch_runtime, manual_run_runtime, total_run_time_limits)) total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction) # make sure the max_runtime_secs is working to restrict model build time if not(manual_run_runtime <= total_run_time_limits): self.test_failed += 1 print("test_naivebayes_grid_search_over_params for naivebayes failed: time taken to manually build models is {0}."
" Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits)) if self.test_failed == 0: print("test_naivebayes_grid_search_over_params for naivebayes has passed!") except: if self.possible_number_models > 0: print("test_naivebayes_grid_search_over_params for naivebayes failed: exception was thrown for no reason.") self.test_failed += 1
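Several of the tests above repeat the same bookkeeping: parameters such as max_runtime_secs and validation_frame are passed to .train() rather than to the estimator constructor, so they are popped out of the hyper-parameter dict first. A hedged sketch of that pattern as a reusable helper is below; split_train_kwargs is a hypothetical name, and the default key list simply follows the parameters these tests move (including eps_prob and min_prob, which the tests above hand to .train()).

TRAIN_ONLY_KEYS = ("max_runtime_secs", "validation_frame", "stopping_rounds",
                   "stopping_tolerance", "eps_prob", "min_prob")

def split_train_kwargs(params, train_only_keys=TRAIN_ONLY_KEYS):
    """Split a parameter dict into (constructor kwargs, .train() kwargs)."""
    ctor_kwargs = dict(params)
    train_kwargs = {}
    for key in train_only_keys:
        if key in ctor_kwargs:
            train_kwargs[key] = ctor_kwargs.pop(key)
    return ctor_kwargs, train_kwargs

Usage would follow the same shape as the tests: build the estimator from ctor_kwargs and pass train_kwargs on to .train() alongside x, y and training_frame.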
def test_gbm_grid_search_over_params(self): """ test_gbm_grid_search_over_params: test for condition 1 and performs the following: a. grab all truly griddable parameters and randomly or manually set the parameter values. b. Next, build H2O GBM models using grid search. Count and make sure models are only built for hyper-parameters set to legal values. No model is built for bad hyper-parameter values. We should instead get a warning/error message printed out. c. For each model built using grid search, we will extract the parameters used in building that model and manually build a H2O GBM model. MSEs are calculated from a test set to compare the performance of the grid search model and our manually built model. If their MSEs are close, declare test success. Otherwise, declare test failure. d. we will check and make sure the models are built within the max_runtime_secs time limit that was set for it as well. If max_runtime_secs was exceeded, declare test failure as well. """ print("*******************************************************************************************") print("test_gbm_grid_search_over_params for GBM " + self.family) h2o.cluster_info() try: print("Hyper-parameters used here are {0}".format(self.final_hyper_params)) # start grid search grid_model = H2OGridSearch(H2OGradientBoostingEstimator(nfolds=self.nfolds), hyper_params=self.final_hyper_params) grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data) self.correct_model_number = len(grid_model) # store number of models built # check the total time taken to build grid search models total_gridsearch_runtime = pyunit_utils.find_grid_runtime(grid_model) # add parameters into params_dict; use it to manually build each model params_dict = dict() params_dict["distribution"] = self.family params_dict["nfolds"] = self.nfolds total_run_time_limits = 0.0 # calculate upper bound of max_runtime_secs true_run_time_limits = 0.0 manual_run_runtime = 0.0 # compare MSE performance of model built by gridsearch with manually built model for each_model in grid_model: # grab parameters used by grid search and build a dict out of it params_list = pyunit_utils.extract_used_params(self.final_hyper_params.keys(), each_model.params, params_dict) # need to take out max_runtime_secs from the model parameters; it is now set in .train() if "max_runtime_secs" in params_list: max_runtime = params_list["max_runtime_secs"] del params_list["max_runtime_secs"] else: max_runtime = 0 manual_model = H2OGradientBoostingEstimator(**params_list) manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data, max_runtime_secs=max_runtime) # collect the time taken to manually build all models model_runtime = pyunit_utils.find_grid_runtime([manual_model]) # time taken to build this model manual_run_runtime += model_runtime summary_list = manual_model._model_json['output']['model_summary'] iteration_num = summary_list.cell_values[0][summary_list.col_header.index('number_of_trees')] if max_runtime > 0: # shortest possible time it takes to build this model if (max_runtime < self.min_runtime_per_tree) or (iteration_num <= 1): total_run_time_limits += model_runtime else: total_run_time_limits += max_runtime true_run_time_limits += max_runtime # compute and compare test metrics between the two models test_grid_model_metrics = each_model.model_performance(test_data=self.training2_data) test_manual_model_metrics = manual_model.model_performance(test_data=self.training2_data) # just compare the mse in this case within tolerance:
if abs(test_grid_model_metrics.mse() - test_manual_model_metrics.mse()) > self.allowed_diff: self.test_failed += 1 # count total number of tests that have failed self.test_failed_array[self.test_num] += 1 print("test_gbm_grid_search_over_params for GBM failed: grid search model and manually " "built H2O model differ too much in test MSE!") break total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction) # # # make sure the correct number of models are built by gridsearch # if not (self.correct_model_number == self.possible_number_models): # wrong grid model number # self.test_failed += 1 # self.test_failed_array[self.test_num] = 1 # print("test_gbm_grid_search_over_params for GBM failed: number of models built by gridsearch " # "does not equal the number of all possible hyper-parameter combinations") # make sure the max_runtime_secs is working to restrict model build time # if not((total_gridsearch_runtime <= total_run_time_limits) and # (manual_run_runtime <= total_run_time_limits)): # self.test_failed += 1 # self.test_failed_array[self.test_num] = 1 # print("test_gbm_grid_search_over_params for GBM failed: models were not built within the " # "max_runtime_secs limit") # # if self.test_failed == 0: # print("test_gbm_grid_search_over_params for GBM has passed!") except: if self.possible_number_models > 0: print("test_gbm_grid_search_over_params for GBM failed: exception was thrown for no reason.")
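The max_runtime_secs accounting repeated across these tests can be summarised as a small pure-Python sketch: when a model's budget is below the minimum time needed for one tree or iteration, or the model stopped after a single iteration, its actual build time is counted instead, and the final ceiling is inflated by extra_time_fraction. The helper names below are hypothetical.

def per_model_budget(max_runtime, actual_runtime, min_unit_time, iterations):
    """Return (loose, strict) runtime-budget contributions for one model."""
    if max_runtime <= 0:
        return 0.0, 0.0
    if max_runtime < min_unit_time or iterations <= 1:
        # budget too small to honour: fall back to the observed build time
        return actual_runtime, max_runtime
    return max_runtime, max_runtime


def total_budget(contributions, extra_time_fraction):
    """Combine per-model (loose, strict) contributions the way the tests above do."""
    loose = sum(c[0] for c in contributions)
    strict = sum(c[1] for c in contributions)
    return max(loose, strict) * (1 + extra_time_fraction)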