Code example #1
    def test3_glm_random_grid_search_max_runtime_secs(self):
        """
        This function will test the stopping criterion max_runtime_secs.  For each model built, the field
        run_time actually denotes the time in ms used to build the model.  We will add up the run_time from all
        models and check it against the stopping criterion max_runtime_secs.  Since each model checks its run time
        differently, there are some inaccuracies in the actual run time.  For example, if we give a model 10 ms to
        build, GLM may only check whether it has used up all its time every 10 epochs that it has run, while
        deeplearning may check the time it has spent after every epoch of training.

        If the actual runtime does not exceed the specified max_runtime_secs by more than a certain percentage,
        we will consider the test a success.

        :return: None
        """
        print("*******************************************************************************************")
        print("test3_glm_random_grid_search_max_runtime_secs for GLM " + self.family)
        h2o.cluster_info()

        # set up our stopping condition here
        max_run_time_secs = random.uniform(
            0, self.max_grid_runtime * self.allowed_scaled_overtime)
        search_criteria = {
            'strategy': 'RandomDiscrete',
            'max_runtime_secs': max_run_time_secs,
            "seed": round(time.time())
        }
        # search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': 1/1e8}

        print("GLM Binomial grid search_criteria: {0}".format(search_criteria))

        # fire off random grid-search
        grid_model = \
            H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                          hyper_params=self.hyper_params, search_criteria=search_criteria)
        grid_model.train(x=self.x_indices,
                         y=self.y_index,
                         training_frame=self.training1_data)

        actual_run_time_secs = pyunit_utils.find_grid_runtime(grid_model)

        if actual_run_time_secs <= search_criteria["max_runtime_secs"] * (
                1 + self.allowed_diff):
            print("test3_glm_random_grid_search_max_runtime_secs: passed!")
        elif len(grid_model) == 1:  # will always generate 1 model
            print("test3_glm_random_grid_search_max_runtime_secs: passed!")
        else:
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print(
                "test3_glm_random_grid_search_max_runtime_secs: failed.  Model takes time {0}"
                " seconds which exceeds allowed time {1}".format(
                    actual_run_time_secs,
                    max_run_time_secs * (1 + self.allowed_diff)))
        self.test_num += 1
        sys.stdout.flush()
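The docstring above describes adding up each model's run_time field, reported in milliseconds, before comparing against max_runtime_secs. Below is a minimal sketch of that accumulation, assuming every trained model exposes a run_time entry in its JSON output; the helper name sum_grid_runtime_secs and the exact field path are assumptions, not the actual pyunit_utils.find_grid_runtime implementation.

def sum_grid_runtime_secs(models):
    """Sum per-model build times (reported in ms) and return the total in seconds."""
    total_ms = 0
    for model in models:
        # assumed field layout: each model reports its build time in milliseconds
        total_ms += model._model_json["output"]["run_time"]
    return total_ms / 1000.0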
Code example #2
    def test1_glm_random_grid_search_model_number(self, metric_name):
        """
        This test is used to make sure the randomized gridsearch will generate all models specified in the
        hyperparameters if no stopping condition is given in the search criterion.  We will compare the
        performance between the randomized gridsearch and normal gridsearch to make sure they generate the same
        number of models and their performances are similar.

        :param metric_name: string to denote the metric that the grid search models should be sorted by

        :return: None
        """
        print("*******************************************************************************************")
        print("test1_glm_random_grid_search_model_number for GLM " + self.family)
        h2o.cluster_info()

        # set up our stopping condition here: random discrete and find all models
        search_criteria = {
            'strategy': 'RandomDiscrete',
            "stopping_rounds": 0,
            "seed": round(time.time())
        }
        print("GLM Binomial grid search_criteria: {0}".format(search_criteria))

        # fire off random grid-search
        random_grid_model = \
            H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                          hyper_params=self.hyper_params, search_criteria=search_criteria)
        random_grid_model.train(x=self.x_indices,
                                y=self.y_index,
                                training_frame=self.training1_data)

        # compare number of models built from both gridsearch
        if not (len(random_grid_model) == self.possible_number_models):
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print(
                "test1_glm_random_grid_search_model_number for GLM: failed, the possible model number {0} "
                "and the randomized gridsearch model number {1} are not "
                "equal.".format(self.possible_number_models,
                                len(random_grid_model)))

        if self.test_failed_array[self.test_num] == 0:
            print("test1_glm_random_grid_search_model_number for GLM: passed!")

        self.test_num += 1
        sys.stdout.flush()

        # set max_allowed_runtime as total run time to build all models * (1+fraction)
        self.max_grid_runtime = pyunit_utils.find_grid_runtime(
            random_grid_model.models)
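Because no stopping condition is supplied, the randomized grid is expected to enumerate every hyper-parameter combination. Below is a hedged sketch of how that expected count can be computed; pyunit_utils.count_models presumably does something equivalent, and the helper name here is an assumption.

from functools import reduce

def count_grid_models(hyper_params):
    """Full Cartesian grid size: the product of the number of values per hyper-parameter."""
    return reduce(lambda acc, values: acc * len(values), hyper_params.values(), 1)

# e.g. count_grid_models({"alpha": [0.1, 0.5], "lambda": [0.0, 1e-3, 1e-1]}) == 6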
Code example #3
    def setup_grid_params(self):
        """
        This function sets up the randomized gridsearch parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and the parameters used by GLM.
        2. It will find the intersection of parameters that are both griddable and used by GLM.
        3. There are several extra parameters used by GLM that are denoted as griddable but actually are not.
        These parameters have to be discovered manually and they are captured in self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameters.  For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build bare bone model to get all parameters
        model = H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds)
        model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        self.one_model_time = pyunit_utils.find_grid_runtime([model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(self.one_model_time))

        # grab all gridable parameters and their types
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # give the user opportunity to pre-assign hyper parameters for fixed values
        self.hyper_params = {}
        self.hyper_params["fold_assignment"] = ['AUTO', 'Random', 'Modulo']
        self.hyper_params["missing_values_handling"] = ['MeanImputation', 'Skip']

        # randomly generate griddable parameters
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params, self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number), self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number), self.max_real_val, self.min_real_val)


        # change the value of lambda parameters to be from 0 to self.lambda_scale instead of 0 to 1.
        if "lambda" in list(self.hyper_params):
            self.hyper_params["lambda"] = [self.lambda_scale * x for x in self.hyper_params["lambda"]]

        time_scale = self.max_runtime_scale * self.one_model_time
        # change the value of max_runtime_secs parameters to be from 0 to time_scale instead of 0 to 1.
        if "max_runtime_secs" in list(self.hyper_params):
            self.hyper_params["max_runtime_secs"] = [time_scale * x for x in
                                                     self.hyper_params["max_runtime_secs"]]

        # number of possible models being built:
        self.possible_number_models = pyunit_utils.count_models(self.hyper_params)

        # save hyper-parameters in sandbox and current test directories.
        pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
                                                 self.hyper_params)
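To illustrate step 4 of the docstring above, here is a hedged sketch of what the generated hyper-parameter dictionary might look like: numeric parameters receive a few random draws while enum parameters keep every level. The parameter names and ranges are illustrative only and do not reflect the gen_grid_search signature.

import random

def sample_hyper_params(num_numeric_values=3):
    """Illustrative hyper-parameter grid: random numeric values, all enum levels."""
    return {
        "alpha": [random.uniform(0, 1) for _ in range(num_numeric_values)],
        "lambda": [random.uniform(0, 1) for _ in range(num_numeric_values)],  # scaled later by lambda_scale
        "fold_assignment": ["AUTO", "Random", "Modulo"],                      # enums: include all levels
        "missing_values_handling": ["MeanImputation", "Skip"],
    }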
Code example #4
    def test3_glm_random_grid_search_max_runtime_secs(self):
        """
        This function will test the stopping criterion max_runtime_secs.  For each model built, the field
        run_time actually denotes the time in ms used to build the model.  We will add up the run_time from all
        models and check it against the stopping criterion max_runtime_secs.  Since each model checks its run time
        differently, there are some inaccuracies in the actual run time.  For example, if we give a model 10 ms to
        build, GLM may only check whether it has used up all its time every 10 epochs that it has run, while
        deeplearning may check the time it has spent after every epoch of training.

        If the actual runtime does not exceed the specified max_runtime_secs by more than a certain percentage,
        we will consider the test a success.

        :return: None
        """
        print("*******************************************************************************************")
        print("test3_glm_random_grid_search_max_runtime_secs for GLM " + self.family)
        h2o.cluster_info()

        # set up our stopping condition here
        max_run_time_secs = random.uniform(0, self.max_grid_runtime * self.allowed_scaled_overtime)
        search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': max_run_time_secs,
                           "seed": round(time.time())}
        # search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': 1/1e8}

        print("GLM Binomial grid search_criteria: {0}".format(search_criteria))

        # fire off random grid-search
        grid_model = \
            H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                          hyper_params=self.hyper_params, search_criteria=search_criteria)
        grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        actual_run_time_secs = pyunit_utils.find_grid_runtime(grid_model)

        if actual_run_time_secs <= search_criteria["max_runtime_secs"]*(1+self.allowed_diff):
            print("test3_glm_random_grid_search_max_runtime_secs: passed!")
        elif len(grid_model) == 1:  # will always generate 1 model
            print("test3_glm_random_grid_search_max_runtime_secs: passed!")
        else:
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print("test3_glm_random_grid_search_max_runtime_secs: failed.  Model takes time {0}"
                  " seconds which exceeds allowed time {1}".format(actual_run_time_secs,
                                                                   max_run_time_secs*(1+self.allowed_diff)))
        self.test_num += 1
        sys.stdout.flush()
    def test1_glm_random_grid_search_model_number(self, metric_name):
        """
        This test is used to make sure the randomized gridsearch will generate all models specified in the
        hyperparameters if no stopping condition is given in the search criterion.

        :param metric_name: string to denote the metric that the grid search models should be sorted by

        :return: None
        """
        print("*******************************************************************************************")
        print("test1_glm_random_grid_search_model_number for GLM " + self.family)
        h2o.cluster_info()

        # set up our stopping condition here: random discrete and find all models
        search_criteria = {"strategy": "RandomDiscrete", "stopping_rounds": 0, "seed": int(round(time.time()))}
        print("GLM Binomial grid search_criteria: {0}".format(search_criteria))

        # fire off random grid-search
        random_grid_model = H2OGridSearch(
            H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
            hyper_params=self.hyper_params,
            search_criteria=search_criteria,
        )
        random_grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        # compare number of models built from both gridsearch
        if not (len(random_grid_model) == self.possible_number_models):
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print(
                "test1_glm_random_grid_search_model_number for GLM: failed, the possible model number {0} "
                "and the randomized gridsearch model number {1} are not "
                "equal.".format(self.possible_number_models, len(random_grid_model))
            )
        else:
            self.max_grid_runtime = pyunit_utils.find_grid_runtime(random_grid_model)  # time taken to build all models

        if self.test_failed_array[self.test_num] == 0:
            print("test1_glm_random_grid_search_model_number for GLM: passed!")

        self.test_num += 1
        sys.stdout.flush()
    def setup_model(self):
        """
        This function sets up the gridsearch hyper-parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and the parameters used by GLM.
        2. It will find the intersection of parameters that are both griddable and used by GLM.
        3. There are several extra parameters used by GLM that are denoted as griddable but actually are not.
        These parameters have to be discovered manually and they are captured in self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameters.  For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build bare bone model to get all parameters
        model = H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds)
        model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        run_time = pyunit_utils.find_grid_runtime([model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(run_time))

        summary_list = model._model_json["output"]["model_summary"]
        num_iteration = summary_list.cell_values[0][summary_list.col_header.index('number_of_iterations')]

        if num_iteration == 0:
            self.min_runtime_per_epoch = run_time
        else:
            self.min_runtime_per_epoch = run_time/num_iteration

        # grab all gridable parameters and their types
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # randomly generate griddable parameters, including values outside the legal range, e.g. setting alpha
        # values outside the legal range of 0 and 1
        (self.hyper_params_bad, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params_bad,
                                         self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val*self.alpha_scale, self.min_real_val*self.alpha_scale)

        # scale the value of lambda parameters
        if "lambda" in list(self.hyper_params_bad):
            self.hyper_params_bad["lambda"] = [self.lambda_scale * x for x in self.hyper_params_bad["lambda"]]

        # scale the max_runtime_secs parameters
        time_scale = self.time_scale * run_time
        if "max_runtime_secs" in list(self.hyper_params_bad):
            self.hyper_params_bad["max_runtime_secs"] = [time_scale * x for x
                                                         in self.hyper_params_bad["max_runtime_secs"]]

        [self.possible_number_models, self.final_hyper_params_bad] = \
            pyunit_utils.check_and_count_models(self.hyper_params_bad, self.params_zero_one, self.params_more_than_zero,
                                                self.params_more_than_one, self.params_zero_positive,
                                                self.max_grid_model)

        if ("max_runtime_secs" not in list(self.final_hyper_params_bad)) and \
                ("max_runtime_secs" in list(self.hyper_params_bad)):
            self.final_hyper_params_bad["max_runtime_secs"] = self.hyper_params_bad["max_runtime_secs"]
            len_good_time = len([x for x in self.hyper_params_bad["max_runtime_secs"] if (x >= 0)])
            self.possible_number_models = self.possible_number_models * len_good_time

        # Stratified is illegal for Gaussian GLM
        self.possible_number_models = self.possible_number_models * self.scale_model

        # randomly generate griddable parameters with only good values
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params, self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, 0,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val, 0)

        # scale the value of lambda parameters
        if "lambda" in list(self.hyper_params):
            self.hyper_params["lambda"] = [self.lambda_scale * x for x in self.hyper_params["lambda"]]

        # scale the max_runtime_secs parameters
        if "max_runtime_secs" in list(self.hyper_params):
            self.hyper_params["max_runtime_secs"] = [time_scale * x for x
                                                     in self.hyper_params["max_runtime_secs"]]

        [self.true_correct_model_number, self.final_hyper_params] = \
            pyunit_utils.check_and_count_models(self.hyper_params, self.params_zero_one, self.params_more_than_zero,
                                                self.params_more_than_one, self.params_zero_positive,
                                                self.max_grid_model)

        if ("max_runtime_secs" not in list(self.final_hyper_params)) and \
                ("max_runtime_secs" in list(self.hyper_params)):
            self.final_hyper_params["max_runtime_secs"] = self.hyper_params["max_runtime_secs"]
            self.true_correct_model_number = self.true_correct_model_number * \
                                             len(self.final_hyper_params["max_runtime_secs"])

        # write out the hyper-parameters used into json files.
        pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename_bad,
                                                 self.final_hyper_params_bad)

        pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
                                                 self.final_hyper_params)
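A rough sketch, under assumptions, of the counting that check_and_count_models performs for the "bad" hyper-parameters above: values outside a parameter's legal range (for example alpha outside [0, 1]) produce no model, so only legal values contribute to the expected count. The real helper presumably also trims the grid to max_grid_model and returns the final hyper-parameter dict; the function below only illustrates the counting idea.

def count_legal_models(hyper_params, params_zero_one=(), params_zero_positive=()):
    """Count grid combinations, ignoring hyper-parameter values outside their legal range."""
    count = 1
    for name, values in hyper_params.items():
        if name in params_zero_one:
            values = [v for v in values if 0 <= v <= 1]      # e.g. alpha must lie in [0, 1]
        elif name in params_zero_positive:
            values = [v for v in values if v >= 0]           # e.g. max_runtime_secs must be non-negative
        count *= len(values)
    return count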
Code example #7
    def setup_model(self):
        """
        This function sets up the gridsearch hyper-parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and the parameters used by GBM.
        2. It will find the intersection of parameters that are both griddable and used by GBM.
        3. There are several extra parameters used by GBM that are denoted as griddable but actually are not.
        These parameters have to be discovered manually and they are captured in self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameters.  For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build bare bone model to get all parameters
        model = H2OGradientBoostingEstimator(distribution=self.family)
        model.train(x=self.x_indices,
                    y=self.y_index,
                    training_frame=self.training1_data)

        run_time = pyunit_utils.find_grid_runtime([model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(run_time))

        summary_list = model._model_json["output"]["model_summary"]
        num_trees = summary_list.cell_values[0][summary_list.col_header.index(
            'number_of_trees')]

        if num_trees == 0:
            self.min_runtime_per_tree = run_time
        else:
            self.min_runtime_per_tree = run_time / num_trees

        # grab all gridable parameters and their types
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # randomly generate griddable parameters, including values outside the legal range, e.g. setting alpha
        # values outside the legal range of 0 and 1
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params,
                                         self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val, self.min_real_val)

        # scale the max_runtime_secs parameters
        time_scale = self.time_scale * run_time
        if "max_runtime_secs" in list(self.hyper_params):
            self.hyper_params["max_runtime_secs"] = [
                time_scale * x for x in self.hyper_params["max_runtime_secs"]
            ]

        self.possible_number_models = self.check_and_count_models()

        self.final_hyper_params["max_runtime_secs"] = self.hyper_params[
            "max_runtime_secs"]

        # calculate true possible_number_models and exclude the bad parameters since they will not
        # result in any models being built
        # alpha_len = len(self.hyper_params["alpha"])
        # lambda_len = len(self.hyper_params["lambda"])
        time_len = len(self.hyper_params["max_runtime_secs"])
        # len_good_alpha = len([x for x in self.hyper_params["alpha"] if (x >= 0) and (x <= 1)])
        # len_good_lambda = len([x for x in self.hyper_params["lambda"] if (x >= 0)])
        len_good_time = len(
            [x for x in self.hyper_params["max_runtime_secs"] if (x >= 0)])

        self.possible_number_models = int(self.possible_number_models *
                                          len_good_time / time_len)

        # write out the hyper-parameters used into json files.
        pyunit_utils.write_hyper_parameters_json(self.current_dir,
                                                 self.sandbox_dir,
                                                 self.json_filename,
                                                 self.final_hyper_params)
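A small worked example of the adjustment above: if some max_runtime_secs values are negative (illegal), only the non-negative fraction of the grid is actually built, so the expected model count is scaled down proportionally. The numbers below are hypothetical.

runtimes = [-1.0, 0.5, 2.0, 10.0]                               # hypothetical max_runtime_secs values
time_len = len(runtimes)                                        # 4 values in total
len_good_time = len([t for t in runtimes if t >= 0])            # 3 legal (non-negative) values
possible_number_models = int(12 * len_good_time / time_len)     # 12 combinations scaled down to 9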
Code example #8
    def test_gbm_grid_search_over_params(self):
        """
        test_gbm_grid_search_over_params: tests condition 1 and performs the following:
        a. grab all truly griddable parameters and randomly or manually set the parameter values.
        b. Next, build H2O GLM models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameter
           values.  We should instead get a warning/error message printed out.
        c. For each model built using grid search, we will extract the parameters used in building
           that model and manually build an H2O GLM model.  MSEs are calculated from a test set
           to compare the performance of the grid search model and our manually built model.  If their MSEs
           are close, declare test success.  Otherwise, declare test failure.
        d. We will check and make sure the models are built within the max_runtime_secs time limit that was set
           for them as well.  If max_runtime_secs was exceeded, declare test failure as well.
        """

        print("*******************************************************************************************")
        print("test_gbm_grid_search_over_params for GLM " + self.family)
        h2o.cluster_info()

        try:
            print("Hyper-parameters used here is {0}".format(
                self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(
                H2OGradientBoostingEstimator(nfolds=self.nfolds),
                hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices,
                             y=self.y_index,
                             training_frame=self.training1_data)

            self.correct_model_number = len(
                grid_model)  # store number of models built

            # check the total time taken to build grid search models
            total_gridsearch_runtime = pyunit_utils.find_grid_runtime(
                grid_model)

            # add parameters into params_dict.  Use this to manually build model
            params_dict = dict()
            params_dict["family"] = self.family
            params_dict["nfolds"] = self.nfolds
            total_run_time_limits = 0.0  # calculate upper bound of max_runtime_secs
            true_run_time_limits = 0.0
            manual_run_runtime = 0.0

            # compare MSE performance of model built by gridsearch with manually built model
            for each_model in grid_model:

                # grab parameters used by grid search and build a dict out of it
                params_list = pyunit_utils.extract_used_params(
                    self.final_hyper_params.keys(), each_model.params,
                    params_dict)

                # need to take max_runtime_secs out of the model parameters; it is now set in .train()
                if "max_runtime_secs" in params_list:
                    max_runtime = params_list["max_runtime_secs"]
                    del params_list["max_runtime_secs"]
                else:
                    max_runtime = 0

                manual_model = H2OGeneralizedLinearEstimator(**params_list)
                manual_model.train(x=self.x_indices,
                                   y=self.y_index,
                                   training_frame=self.training1_data,
                                   max_runtime_secs=max_runtime)

                # collect the time taken to manually build all models
                model_runtime = pyunit_utils.find_grid_runtime(
                    [manual_model])  # time taken to build this model
                manual_run_runtime += model_runtime

                summary_list = manual_model._model_json['output'][
                    'model_summary']
                iteration_num = summary_list.cell_values[0][
                    summary_list.col_header.index('number_of_iterations')]

                if max_runtime > 0:
                    # shortest possible time it takes to build this model
                    if (max_runtime <
                            self.min_runtime_per_tree) or (iteration_num <= 1):
                        total_run_time_limits += model_runtime
                    else:
                        total_run_time_limits += max_runtime

                true_run_time_limits += max_runtime

                # compute and compare test metrics between the two models
                test_grid_model_metrics = each_model.model_performance(
                    test_data=self.training2_data)
                test_manual_model_metrics = manual_model.model_performance(
                    test_data=self.training2_data)

                # just compare the mse in this case within tolerance:
                if abs(test_grid_model_metrics.mse() -
                       test_manual_model_metrics.mse()) > self.allowed_diff:
                    self.test_failed += 1  # count total number of tests that have failed
                    self.test_failed_array[self.test_num] += 1
                    print(
                        "test_gbm_grid_search_over_params for GLM failed: grid search model and manually "
                        "built H2O model differ too much in test MSE!")
                    break

            total_run_time_limits = max(
                total_run_time_limits,
                true_run_time_limits) * (1 + self.extra_time_fraction)

            # # # make sure the correct number of models are built by gridsearch
            # if not (self.correct_model_number == self.possible_number_models):  # wrong grid model number
            #     self.test_failed += 1
            #     self.test_failed_array[self.test_num] = 1
            #     print("test_gbm_grid_search_over_params for GLM failed: number of models built by gridsearch "
            #           "does not equal to all possible combinations of hyper-parameters")

            # make sure the max_runtime_secs is working to restrict model built time
            # if not((total_gridsearch_runtime <= total_run_time_limits) and
            #            (manual_run_runtime <= total_run_time_limits)):
            #     self.test_failed += 1
            #     self.test_failed_array[self.test_num] = 1
            #     print("test_gbm_grid_search_over_params for GLM failed: number of models built by gridsearch "
            #           "does not equal to all possible combinations of hyper-parameters")
            #
            # if self.test_failed == 0:
            #     print("test_gbm_grid_search_over_params for GLM has passed!")
        except:
            if self.possible_number_models > 0:
                print(
                    "test_gbm_grid_search_over_params for GLM failed: exception was thrown for no reason."
                )
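A worked illustration of the runtime-bound logic inside the loop above: when a model's max_runtime_secs is shorter than the time a single tree or iteration needs, the model still has to finish at least one unit of work, so its observed build time, rather than max_runtime_secs, is the honest contribution to the upper bound. The numbers are hypothetical.

min_runtime_per_tree = 0.4   # seconds needed for one tree, measured from the barebone model
max_runtime = 0.1            # requested per-model limit, shorter than one tree
model_runtime = 0.45         # observed build time of this grid model
# contribution of this model to total_run_time_limits
bound = model_runtime if max_runtime < min_runtime_per_tree else max_runtime  # -> 0.45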
    def setup_model(self):
        """
        This function sets up the gridsearch hyper-parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and the parameters used by GBM.
        2. It will find the intersection of parameters that are both griddable and used by GBM.
        3. There are several extra parameters used by GBM that are denoted as griddable but actually are not.
        These parameters have to be discovered manually and they are captured in self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameters.  For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build bare bone model to get all parameters
        model = H2OGradientBoostingEstimator(distribution=self.family, seed=self.seed, nfolds=self.nfolds)
        model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        self.model_run_time = pyunit_utils.find_grid_runtime([model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(self.model_run_time))

        summary_list = model._model_json["output"]["model_summary"]
        num_trees = summary_list.cell_values[0][summary_list.col_header.index('number_of_trees')]

        if num_trees == 0:
            self.min_runtime_per_tree = self.model_run_time
        else:
            self.min_runtime_per_tree = self.model_run_time / num_trees

        # grab all gridable parameters and their types
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # randomly generate griddable parameters, including values outside the legal range, e.g. setting alpha
        # values outside the legal range of 0 and 1
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params,
                                         self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val, self.min_real_val)

        # scale the max_runtime_secs parameters
        time_scale = self.time_scale * self.model_run_time
        if "max_runtime_secs" in list(self.hyper_params):
            self.hyper_params["max_runtime_secs"] = [time_scale * x for x
                                                     in self.hyper_params["max_runtime_secs"]]

        # generate a new final_hyper_params which only takes a subset of all griddable parameters while
        # hyper_params take all griddable parameters and generate the grid search hyper-parameters
        [self.possible_number_models, self.final_hyper_params] = \
            pyunit_utils.check_and_count_models(self.hyper_params, self.params_zero_one, self.params_more_than_zero,
                                                self.params_more_than_one, self.params_zero_positive,
                                                self.max_grid_model)

        # must add max_runtime_secs to restrict unit test run time and as a promise to Arno to test for this
        if ("max_runtime_secs" not in list(self.final_hyper_params)) and \
                ("max_runtime_secs" in list(self.hyper_params)):
            self.final_hyper_params["max_runtime_secs"] = self.hyper_params["max_runtime_secs"]
            len_good_time = len([x for x in self.hyper_params["max_runtime_secs"] if (x >= 0)])
            self.possible_number_models = self.possible_number_models*len_good_time

        if "fold_assignment" in list(self.final_hyper_params):
            self.possible_number_models = self.possible_number_models * self.scale_model

        self.final_hyper_params["seed"] = [self.seed]     # added see to make test more repeatable

        # write out the hyper-parameters used into json files.
        pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
                                                 self.final_hyper_params)
    def setup_model(self):
        """
        This function sets up the gridsearch hyper-parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and parameters used by naivebayes.
        2. It will find the intersection of parameters that are both griddable and used by naivebayes.
        3. There are several extra parameters that are used by naivebayes that are denoted as griddable but actually
        are not.  These parameters have to be discovered manually and they are captured in
        self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameter.  For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build bare bone model to get all parameters
        model = H2ONaiveBayesEstimator(nfolds=self.nfolds, compute_metrics=True)
        model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        self.model_run_time = pyunit_utils.find_grid_runtime([model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(self.model_run_time))

        # grab all gridable parameters and their types
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # randomly generate griddable parameters, including values outside the legal range, e.g. setting alpha
        # values outside the legal range of 0 and 1
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params,
                                         self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val, self.min_real_val)

        # scale the max_runtime_secs parameter and others as well to make sure they make sense
        time_scale = self.time_scale * self.model_run_time
        if "max_runtime_secs" in list(self.hyper_params):
            self.hyper_params["max_runtime_secs"] = [time_scale * x for x
                                                     in self.hyper_params["max_runtime_secs"]]

        # generate a new final_hyper_params which only takes a subset of all griddable parameters while
        # hyper_params take all griddable parameters and generate the grid search hyper-parameters
        [self.possible_number_models, self.final_hyper_params] = \
            pyunit_utils.check_and_count_models(self.hyper_params, self.params_zero_one, self.params_more_than_zero,
                                                self.params_more_than_one, self.params_zero_positive,
                                                self.max_grid_model)

        final_hyper_params_keys = list(self.final_hyper_params)
        # must add max_runtime_secs to restrict unit test run time and as a promise to Arno to test for this
        if ("max_runtime_secs" not in final_hyper_params_keys) and \
                ("max_runtime_secs" in list(self.hyper_params)):
            self.final_hyper_params["max_runtime_secs"] = self.hyper_params["max_runtime_secs"]
            len_good_time = len([x for x in self.hyper_params["max_runtime_secs"] if (x >= 0)])
            self.possible_number_models = self.possible_number_models*len_good_time

        # need to check that min_prob >= 1e-10
        if "min_prob" in final_hyper_params_keys:
            old_len_prob = len([x for x in self.final_hyper_params["min_prob"] if (x >= 0)])
            good_len_prob = len([x for x in self.final_hyper_params["min_prob"] if (x >= 1e-10)])
            if (old_len_prob > 0):
                self.possible_number_models = self.possible_number_models*good_len_prob/old_len_prob
            else:
                self.possible_number_models = 0

        if "laplace" in final_hyper_params_keys:
            self.final_hyper_params["laplace"] = [self.laplace_scale * x for x
                                                  in self.hyper_params["laplace"]]

        # write out the hyper-parameters used into json files.
        pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
                                                 self.final_hyper_params)
    def test_naivebayes_grid_search_over_params(self):
        """
        test_naivebayes_grid_search_over_params performs the following:
        a. build H2O naivebayes models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameter
           values.  We should instead get a warning/error message printed out.
        b. For each model built using grid search, we will extract the parameters used in building
           that model and manually build an H2O naivebayes model.  Logloss is calculated from a test set
           to compare the performance of the grid search model and our manually built model.  If their metrics
           are close, declare test success.  Otherwise, declare test failure.
        c. We will check and make sure the models are built within the max_runtime_secs time limit that was set
           for them as well.  If max_runtime_secs was exceeded, declare test failure as well.
        """
        print("*******************************************************************************************")
        print("test_naivebayes_grid_search_over_params for naivebayes ")
        h2o.cluster_info()

        try:
            print("Hyper-parameters used here is {0}".format(self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(H2ONaiveBayesEstimator(nfolds=self.nfolds),
                                   hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

            self.correct_model_number = len(grid_model)     # store number of models built

            # make sure the correct number of models are built by gridsearch
            if not (self.correct_model_number == self.possible_number_models):  # wrong grid model number
                self.test_failed += 1
                print("test_naivebayes_grid_search_over_params for naivebayes failed: number of models built by "
                      "gridsearch does not equal to all possible combinations of hyper-parameters")
            else:
                # add parameters into params_dict.  Use this to manually build model
                params_dict = dict()
                params_dict["nfolds"] = self.nfolds
                params_dict["score_tree_interval"] = 0
                total_run_time_limits = 0.0   # calculate upper bound of max_runtime_secs
                true_run_time_limits = 0.0
                manual_run_runtime = 0.0

                # compare performance metric of model built by gridsearch with manually built model
                for each_model in grid_model:

                    params_list = grid_model.get_hyperparams_dict(each_model._id)
                    params_list.update(params_dict)

                    model_params = dict()

                    # need to take max_runtime_secs out of the model parameters; it is now set in .train()
                    if "max_runtime_secs" in params_list:
                        model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                        max_runtime = params_list["max_runtime_secs"]
                        del params_list["max_runtime_secs"]
                    else:
                        max_runtime = 0

                    if "validation_frame" in params_list:
                        model_params["validation_frame"] = params_list["validation_frame"]
                        del params_list["validation_frame"]

                    if "score_tree_interval" in params_list:
                        model_params["score_tree_interval"] = params_list["score_tree_interval"]
                        del params_list["score_tree_interval"]

                    if "eps_prob" in params_list:
                        model_params["eps_prob"] = params_list["eps_prob"]
                        del params_list["eps_prob"]

                    if "min_prob" in params_list:
                        model_params["min_prob"] = params_list["min_prob"]
                        del params_list["min_prob"]

                    # make sure manual model was provided the same max_runtime_secs as the grid model
                    each_model_runtime = pyunit_utils.find_grid_runtime([each_model])

                    manual_model = H2ONaiveBayesEstimator(**params_list)
                    manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data,
                                       **model_params)

                    # collect the time taken to manually build all models
                    model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                    manual_run_runtime += model_runtime

                    if max_runtime > 0:
                        # shortest possible time it takes to build this model
                        if (max_runtime < self.model_run_time):
                            total_run_time_limits += model_runtime
                        else:
                            total_run_time_limits += max_runtime

                    true_run_time_limits += max_runtime

                    # compute and compare test metrics between the two models
                    test_grid_model_metrics = \
                        each_model.model_performance(test_data=self.training1_data)._metric_json[self.training_metric]
                    test_manual_model_metrics = \
                        manual_model.model_performance(test_data=self.training1_data)._metric_json[self.training_metric]

                    # just compare the metric in this case within tolerance:
                    if (each_model_runtime > 0) and \
                            (abs(model_runtime - each_model_runtime)/each_model_runtime < self.allowed_runtime_diff) \
                            and (abs(test_grid_model_metrics - test_manual_model_metrics) > self.allowed_diff):
                        self.test_failed += 1             # count total number of tests that have failed
                        print("test_naivebayes_grid_search_over_params for naivebayes failed: grid search model and manually "
                              "built H2O model differ too much in test MSE!")
                        break

                total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)

                # make sure the max_runtime_secs is working to restrict model built time
                if not(manual_run_runtime <= total_run_time_limits):
                    self.test_failed += 1
                    print("test_naivebayes_grid_search_over_params for naivebayes failed: time taken to manually build models is {0}."
                          "  Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits))

                if self.test_failed == 0:
                    print("test_naivebayes_grid_search_over_params for naivebayes has passed!")
        except:
            if self.possible_number_models > 0:
                print("test_naivebayes_grid_search_over_params for naivebayes failed: exception was thrown for no reason.")
                self.test_failed += 1
    def setup_model(self):
        """
        This function sets up the gridsearch hyper-parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and parameters used by naivebayes.
        2. It will find the intersection of parameters that are both griddable and used by naivebayes.
        3. There are several extra parameters that are used by naivebayes that are denoted as griddable but actually
        are not.  These parameters have to be discovered manually and they are captured in
        self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameter.  For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build bare bone model to get all parameters
        model = H2ONaiveBayesEstimator(nfolds=self.nfolds, compute_metrics=True)
        model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        self.model_run_time = pyunit_utils.find_grid_runtime([model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(self.model_run_time))

        # grab all gridable parameters and their types
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # randomly generate griddable parameters, including values outside the legal range, e.g. setting alpha
        # values outside the legal range of 0 and 1
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params,
                                         self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val, self.min_real_val)

        # scale the max_runtime_secs parameter and others as well to make sure they make sense
        time_scale = self.time_scale * self.model_run_time
        if "max_runtime_secs" in list(self.hyper_params):
            self.hyper_params["max_runtime_secs"] = [time_scale * x for x
                                                     in self.hyper_params["max_runtime_secs"]]

        # generate a new final_hyper_params which only takes a subset of all griddable parameters while
        # hyper_params take all griddable parameters and generate the grid search hyper-parameters
        [self.possible_number_models, self.final_hyper_params] = \
            pyunit_utils.check_and_count_models(self.hyper_params, self.params_zero_one, self.params_more_than_zero,
                                                self.params_more_than_one, self.params_zero_positive,
                                                self.max_grid_model)

        final_hyper_params_keys = list(self.final_hyper_params)
        # must add max_runtime_secs to restrict unit test run time and as a promise to Arno to test for this
        if ("max_runtime_secs" not in final_hyper_params_keys) and \
                ("max_runtime_secs" in list(self.hyper_params)):
            self.final_hyper_params["max_runtime_secs"] = self.hyper_params["max_runtime_secs"]
            len_good_time = len([x for x in self.hyper_params["max_runtime_secs"] if (x >= 0)])
            self.possible_number_models = self.possible_number_models*len_good_time

        # need to check that min_prob >= 1e-10
        if "min_prob" in final_hyper_params_keys:
            old_len_prob = len([x for x in self.final_hyper_params["min_prob"] if (x >= 0)])
            good_len_prob = len([x for x in self.final_hyper_params["min_prob"] if (x >= 1e-10)])
            if (old_len_prob > 0):
                self.possible_number_models = self.possible_number_models*good_len_prob/old_len_prob
            else:
                self.possible_number_models = 0

        if "laplace" in final_hyper_params_keys:
            self.final_hyper_params["laplace"] = [self.laplace_scale * x for x
                                                  in self.hyper_params["laplace"]]

        # write out the hyper-parameters used into json files.
        pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
                                                 self.final_hyper_params)
    def test_naivebayes_grid_search_over_params(self):
        """
        test_naivebayes_grid_search_over_params performs the following:
        a. grab all truly griddable parameters and randomly or manually set the parameter values.
        b. Next, build H2O naivebayes models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameter
           values.  We should instead get a warning/error message printed out.
        c. For each model built using grid search, we will extract the parameters used in building
           that model and manually build an H2O naivebayes model.  Training metrics are calculated from the
           gridsearch model and the manually built model.  If their metrics
           differ by too much, print a warning message but don't fail the test.
        d. We will check and make sure the models are built within the max_runtime_secs time limit that was set
           for them as well.  If max_runtime_secs was exceeded, declare test failure.
        """
        print("*******************************************************************************************")
        print("test_naivebayes_grid_search_over_params for naivebayes ")
        h2o.cluster_info()

        try:
            print("Hyper-parameters used here is {0}".format(self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(H2ONaiveBayesEstimator(nfolds=self.nfolds),
                                   hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

            self.correct_model_number = len(grid_model)     # store number of models built

            # make sure the correct number of models are built by gridsearch
            if not (self.correct_model_number == self.possible_number_models):  # wrong grid model number
                self.test_failed += 1
                print("test_naivebayes_grid_search_over_params for naivebayes failed: number of models built by "
                      "gridsearch {0} does not equal to all possible combinations of hyper-parameters "
                      "{1}".format(self.correct_model_number, self.possible_number_models))
            else:
                # add parameters into params_dict.  Use this to manually build model
                params_dict = dict()
                params_dict["nfolds"] = self.nfolds
                total_run_time_limits = 0.0   # calculate upper bound of max_runtime_secs
                true_run_time_limits = 0.0
                manual_run_runtime = 0.0
                gridsearch_runtime = 0.0

                # compare performance metric of model built by gridsearch with manually built model
                for each_model in grid_model:

                    params_list = grid_model.get_hyperparams_dict(each_model._id)
                    params_list.update(params_dict)

                    model_params = dict()

                    # need to take max_runtime_secs out of the model parameters; it is now set in .train()
                    if "max_runtime_secs" in params_list:
                        model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                        max_runtime = params_list["max_runtime_secs"]
                        del params_list["max_runtime_secs"]
                    else:
                        max_runtime = 0

                    if "validation_frame" in params_list:
                        model_params["validation_frame"] = params_list["validation_frame"]
                        del params_list["validation_frame"]

                    if "eps_prob" in params_list:
                        model_params["eps_prob"] = params_list["eps_prob"]
                        del params_list["eps_prob"]

                    if "min_prob" in params_list:
                        model_params["min_prob"] = params_list["min_prob"]
                        del params_list["min_prob"]

                    # make sure manual model was provided the same max_runtime_secs as the grid model
                    each_model_runtime = pyunit_utils.find_grid_runtime([each_model])
                    gridsearch_runtime += each_model_runtime

                    manual_model = H2ONaiveBayesEstimator(**params_list)
                    manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data,
                                       **model_params)

                    # accumulate the time taken to manually build all models
                    model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                    manual_run_runtime += model_runtime

                    if max_runtime > 0:
                        # shortest possible time it takes to build this model
                        if (max_runtime < self.model_run_time):
                            total_run_time_limits += model_runtime
                        else:
                            total_run_time_limits += max_runtime

                    true_run_time_limits += max_runtime

                    # compute and compare test metrics between the two models
                    grid_model_metrics = \
                        each_model.model_performance(test_data=self.training1_data)._metric_json[self.training_metric]
                    manual_model_metrics = \
                        manual_model.model_performance(test_data=self.training1_data)._metric_json[self.training_metric]

                    # compare the training metric within tolerance:
                    if not((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
                        if (abs(grid_model_metrics) > 0) \
                            and (abs(grid_model_metrics - manual_model_metrics)/grid_model_metrics > self.allowed_diff):
                            print("test_naivebayes_grid_search_over_params for naivebayes WARNING\ngrid search model "
                                  "{0}: {1}, time taken to build (secs): {2}\n and manually built H2O model {3}: {4}, "
                                  "time taken to build (secs): {5}\ndiffer too much!"
                                  "".format(self.training_metric, grid_model_metrics, each_model_runtime,
                                            self.training_metric, manual_model_metrics, model_runtime))

                print("Time taken for gridsearch to build all models (sec): {0}\n Time taken to manually build all "
                      "models (sec): {1}, total run time limits (sec): "
                      "{2}".format(gridsearch_runtime, manual_run_runtime, total_run_time_limits))
                total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)


                # make sure the max_runtime_secs is working to restrict model build time
                if not(manual_run_runtime <= total_run_time_limits):
                    self.test_failed += 1
                    print("test_naivebayes_grid_search_over_params for naivebayes failed: time taken to manually build "
                          "models is {0}.  Maximum allowed time "
                          "is {1}".format(manual_run_runtime, total_run_time_limits))

                if self.test_failed == 0:
                    print("test_naivebayes_grid_search_over_params for naivebayes has passed!")
        except Exception as e:
            if self.possible_number_models > 0:
                print("test_naivebayes_grid_search_over_params for naivebayes failed: exception ({0}) was thrown for "
                      "no reason.".format(e))
                self.test_failed += 1
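The loop above flags a grid-search model only when its training metric and the manually rebuilt model's metric differ by more than a relative tolerance. A minimal standalone sketch of that comparison, using a hypothetical helper name (not part of pyunit_utils) and an absolute-value denominator for robustness, whereas the tests divide by the raw grid metric:

def metrics_differ_too_much(grid_metric, manual_metric, allowed_diff=0.05):
    # Hypothetical helper: return True when the relative difference between
    # two metric values exceeds allowed_diff.
    if isinstance(grid_metric, str) or isinstance(manual_metric, str):
        return False        # non-numeric metrics cannot be compared
    if grid_metric == 0:
        return False        # avoid division by zero
    return abs(grid_metric - manual_metric) / abs(grid_metric) > allowed_diff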
Code Example #14
0
    def setup_model(self):
        """
        This function sets up the gridsearch hyper-parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and the parameters used by PCA.
        2. It will find the intersection of parameters that are both griddable and used by PCA.
        3. There are several extra parameters used by PCA that are denoted as griddable but actually are not.
        These parameters have to be discovered manually and are captured in self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameters.  For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build a barebone model to get all parameters
        model = H2OPCA(k=10, transform="NONE", pca_method=self.pca_method)
        model.train(x=self.x_indices, training_frame=self.training1_data)

        self.model_run_time = pyunit_utils.find_grid_runtime([model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(self.model_run_time))

        # grab all gridable parameters and their types
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # randomly generate griddable parameter values, including values outside the legal range, e.g. setting
        # alpha values outside its legal range of 0 to 1
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params,
                                         self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val, self.min_real_val)

        # scale the max_runtime_secs parameters
        time_scale = self.time_scale * self.model_run_time
        if "max_runtime_secs" in list(self.hyper_params):
            self.hyper_params["max_runtime_secs"] = [time_scale * x for x
                                                     in self.hyper_params["max_runtime_secs"]]

        if 'max_iterations' in list(self.hyper_params):
            self.hyper_params['max_iterations'] = [self.max_iter_scale * x for x in self.hyper_params['max_iterations']]

        # generate a new final_hyper_params that takes only a subset of all griddable parameters, while
        # hyper_params keeps all griddable parameters; use it to generate the grid search hyper-parameters
        [self.possible_number_models, self.final_hyper_params] = \
            pyunit_utils.check_and_count_models(self.hyper_params, self.params_zero_one, self.params_more_than_zero,
                                                self.params_more_than_one, self.params_zero_positive,
                                                self.max_grid_model)

        # must add max_runtime_secs to restrict unit test run time and as a promise to Arno to test for this
        if ("max_runtime_secs" not in list(self.final_hyper_params)) and \
                ("max_runtime_secs" in list(self.hyper_params)):
            self.final_hyper_params["max_runtime_secs"] = self.hyper_params["max_runtime_secs"]
            len_good_time = len([x for x in self.hyper_params["max_runtime_secs"] if (x >= 0)])
            self.possible_number_models = self.possible_number_models*len_good_time

        # must include k in hyper-parameters
        if ('k' not in list(self.final_hyper_params)) and ('k' in list(self.hyper_params)):
            self.final_hyper_params["k"] = self.hyper_params["k"]
            len_good_k = len([x for x in self.hyper_params["k"] if (x > 0)])
            self.possible_number_models = self.possible_number_models*len_good_k

        # write out the hyper-parameters used into json files.
        pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
                                                 self.final_hyper_params)
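check_and_count_models returns both the pruned hyper-parameter dictionary and the number of models the grid is expected to build. Conceptually that count is the product of the number of legal values per hyper-parameter, capped by max_grid_model; a rough sketch of the counting step under that assumption (this is not the actual pyunit_utils implementation):

from functools import reduce

def count_possible_models(final_hyper_params, max_grid_model):
    # One model per point in the Cartesian product of the hyper-parameter values;
    # illegal values are assumed to have been pruned already.
    counts = [len(values) for values in final_hyper_params.values()]
    total = reduce(lambda a, b: a * b, counts, 1)
    return min(total, max_grid_model)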
Code Example #15
0
    def test_deeplearning_grid_search_over_params(self):
        """
        test_deeplearning_grid_search_over_params performs the following:
        a. build H2O deeplearning models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameter
           values.  We should instead get a warning/error message printed out.
        b. For each model built using grid search, we will extract the parameters used in building
           that model and manually build an H2O deeplearning model.  Training metrics are calculated from the
           gridsearch model and the manually built model.  If their metrics differ too much, print a warning
           message but don't fail the test.
        c. We check and make sure the models are built within the max_runtime_secs time limit that was set
           for them.  If max_runtime_secs was exceeded, declare test failure.
        """

        print(
            "*******************************************************************************************"
        )
        print("test_deeplearning_fieldnames for deeplearning " + self.family)
        h2o.cluster_info()

        # start grid search
        # grid_model = H2OGridSearch(H2ODeepLearningEstimator(nfolds=self.nfolds, seed=self.seed),
        #                             hyper_params=self.final_hyper_params)
        # grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
        #
        # self.correct_model_number = len(grid_model)     # store number of models built

        try:
            print("Hyper-parameters used here is {0}".format(
                self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(
                H2ODeepLearningEstimator(nfolds=self.nfolds),
                hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices,
                             y=self.y_index,
                             training_frame=self.training1_data)

            # add parameters into params_dict.  Use this to manually build model
            params_dict = dict()
            params_dict["distribution"] = self.family
            params_dict["nfolds"] = self.nfolds
            total_run_time_limits = 0.0  # calculate upper bound of max_runtime_secs
            true_run_time_limits = 0.0
            manual_run_runtime = 0.0

            # compare performance metric of model built by gridsearch with manually built model
            for each_model in grid_model:

                params_list = grid_model.get_hyperparams_dict(each_model._id)
                params_list.update(params_dict)

                model_params = dict()

                # need to take max_runtime_secs out of the model parameters; it is now set in .train()
                if "max_runtime_secs" in params_list:
                    model_params["max_runtime_secs"] = params_list[
                        "max_runtime_secs"]
                    max_runtime = params_list["max_runtime_secs"]
                    del params_list["max_runtime_secs"]
                else:
                    max_runtime = 0

                if "elastic_averaging_moving_rate" in params_list:
                    model_params[
                        "elastic_averaging_moving_rate"] = params_list[
                            "elastic_averaging_moving_rate"]
                    del params_list["elastic_averaging_moving_rate"]

                if "validation_frame" in params_list:
                    model_params["validation_frame"] = params_list[
                        "validation_frame"]
                    del params_list["validation_frame"]

                if "elastic_averaging_regularization" in params_list:
                    model_params[
                        "elastic_averaging_regularization"] = params_list[
                            "elastic_averaging_regularization"]
                    del params_list["elastic_averaging_regularization"]

                manual_model = H2ODeepLearningEstimator(**params_list)
                manual_model.train(x=self.x_indices,
                                   y=self.y_index,
                                   training_frame=self.training1_data,
                                   **model_params)

                # accumulate the time taken to manually build all models
                model_runtime = pyunit_utils.find_grid_runtime(
                    [manual_model])  # time taken to build this model
                manual_run_runtime += model_runtime

                summary_list = manual_model._model_json["output"][
                    "scoring_history"]
                if len(summary_list.cell_values) < 3:
                    num_iterations = 1
                else:
                    num_iterations = summary_list.cell_values[2][
                        summary_list.col_header.index('iterations')]

                if max_runtime > 0:
                    # shortest possible time it takes to build this model
                    if (max_runtime < self.min_runtime_per_iteration) or (
                            num_iterations <= 1):
                        total_run_time_limits += model_runtime
                    else:
                        total_run_time_limits += max_runtime

                true_run_time_limits += max_runtime

                # compute and compare test metrics between the two models
                grid_model_metrics = each_model.model_performance(
                )._metric_json[self.training_metric]
                manual_model_metrics = manual_model.model_performance(
                )._metric_json[self.training_metric]

                # compare the training metric within tolerance:
                if not ((type(grid_model_metrics) == str) or
                        (type(manual_model_metrics) == str)):
                    if (abs(grid_model_metrics) > 0) \
                            and abs(grid_model_metrics - manual_model_metrics)/grid_model_metrics > self.allowed_diff:
                        print(
                            "test_deeplearning_fieldnames for deeplearning warning: grid search "
                            "model metric ({0}) and manually built H2O model metric ({1}) differ too much"
                            "!".format(grid_model_metrics,
                                       manual_model_metrics))

            total_run_time_limits = max(
                total_run_time_limits,
                true_run_time_limits) * (1 + self.extra_time_fraction)

            # make sure the max_runtime_secs is working to restrict model build time
            if not (manual_run_runtime <= total_run_time_limits):
                self.test_failed += 1
                print(
                    "test_deeplearning_fieldnames for deeplearning failed: time taken to manually build"
                    " models is {0}.  Maximum allowed time is {1}".format(
                        manual_run_runtime, total_run_time_limits))
            else:
                print(
                    "time taken to manually build all models is {0}. Maximum allowed time is "
                    "{1}".format(manual_run_runtime, total_run_time_limits))

            if self.test_failed == 0:
                print(
                    "test_deeplearning_fieldnames for deeplearning has passed!"
                )
        except Exception as e:
            if len(grid_model) > 0:
                print(
                    "test_deeplearning_fieldnames for deeplearning failed: exception ({0}) was thrown "
                    "unexpectedly.".format(e))
                self.test_failed += 1
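Both loops above repeat the same bookkeeping: a few keys (max_runtime_secs, validation_frame, the elastic-averaging settings, ...) are pulled out of the grid's hyper-parameter dict because they must be passed to .train() rather than to the estimator constructor. A generic sketch of that split, with an illustrative (not exhaustive) key list:

TRAIN_ONLY_KEYS = ("max_runtime_secs", "validation_frame",
                   "elastic_averaging_moving_rate", "elastic_averaging_regularization")

def split_params(params_list, train_only_keys=TRAIN_ONLY_KEYS):
    # Split a flat parameter dict into constructor kwargs and .train() kwargs.
    ctor_params = dict(params_list)
    train_params = {key: ctor_params.pop(key) for key in train_only_keys if key in ctor_params}
    return ctor_params, train_params

# usage mirroring the manual rebuild above:
#   ctor_params, train_params = split_params(params_list)
#   manual_model = H2ODeepLearningEstimator(**ctor_params)
#   manual_model.train(x=x_indices, y=y_index, training_frame=frame, **train_params)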
    def setup_model(self):
        """
        This function sets up the gridsearch hyper-parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and the parameters used by GLRM.
        2. It will find the intersection of parameters that are both griddable and used by GLRM.
        3. There are several extra parameters used by GLRM that are denoted as griddable but actually are not.
        These parameters have to be discovered manually and are captured in self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameters.  For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build a barebone model to get all parameters
        model = H2OGeneralizedLowRankEstimator(k=10,
                                               loss="Quadratic",
                                               gamma_x=random.uniform(0, 1),
                                               gamma_y=random.uniform(0, 1),
                                               transform="DEMEAN")
        model.train(x=self.training1_data.names,
                    training_frame=self.training1_data)

        self.model_run_time = pyunit_utils.find_grid_runtime(
            [model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(
            self.model_run_time))

        summary_list = model._model_json["output"]["model_summary"]
        num_iter = summary_list.cell_values[0][summary_list.col_header.index(
            'number_of_iterations')]

        self.min_runtime_per_iter = self.model_run_time / num_iter

        # grab all gridable parameters and their types
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # randomly generate griddable parameter values, including values outside the legal range, e.g. setting
        # alpha values outside its legal range of 0 to 1
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params,
                                         self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val, self.min_real_val)

        hyper_params_list = list(self.hyper_params)

        # scale the max_runtime_secs parameters
        time_scale = self.time_scale * self.model_run_time
        if "max_runtime_secs" in hyper_params_list:
            self.hyper_params["max_runtime_secs"] = [
                time_scale * x for x in self.hyper_params["max_runtime_secs"]
            ]

        # scale up the max_iterations values by iter_scale
        if "max_iterations" in hyper_params_list:
            self.hyper_params["max_iterations"] = [
                self.iter_scale * x
                for x in self.hyper_params["max_iterations"]
            ]

        # generate a new final_hyper_params that takes only a subset of all griddable parameters, while
        # hyper_params keeps all griddable parameters; use it to generate the grid search hyper-parameters
        [self.possible_number_models, self.final_hyper_params] = \
            pyunit_utils.check_and_count_models(self.hyper_params, self.params_zero_one, self.params_more_than_zero,
                                                self.params_more_than_one, self.params_zero_positive,
                                                self.max_grid_model)

        # must add max_runtime_secs to restrict unit test run time and as a promise to Arno to test for this
        if ("max_runtime_secs" not in list(self.final_hyper_params)) and \
                ("max_runtime_secs" in list(self.hyper_params)):
            self.final_hyper_params["max_runtime_secs"] = self.hyper_params[
                "max_runtime_secs"]
            len_good_time = len(
                [x for x in self.hyper_params["max_runtime_secs"] if (x >= 0)])
            self.possible_number_models = self.possible_number_models * len_good_time

        if "k" not in list(self.final_hyper_params):  # must add this one
            self.final_hyper_params["k"] = self.hyper_params["k"]
            len_good_k = len(
                [x for x in self.final_hyper_params["k"] if (x >= 1)])
            self.possible_number_models = self.possible_number_models * len_good_k

        self.final_hyper_params["seed"] = [
            self.seed
        ]  # add seed to make the test more repeatable

        # write out the hyper-parameters used into json files.
        pyunit_utils.write_hyper_parameters_json(self.current_dir,
                                                 self.sandbox_dir,
                                                 self.json_filename,
                                                 self.final_hyper_params)
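The last call records the chosen hyper-parameters for later inspection. A hedged sketch of what such a dump could look like, assuming write_hyper_parameters_json simply serializes the dict to a JSON file (the real pyunit_utils helper may behave differently):

import json
import os

def dump_hyper_parameters(current_dir, sandbox_dir, json_filename, hyper_params):
    # Assumed analogue of pyunit_utils.write_hyper_parameters_json: write the
    # final hyper-parameter dict to a JSON file in the sandbox (or test) directory.
    target_dir = sandbox_dir if os.path.isdir(sandbox_dir) else current_dir
    with open(os.path.join(target_dir, json_filename), "w") as fp:
        json.dump(hyper_params, fp, indent=2, sort_keys=True)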
Code Example #17
0
    def setup_model(self):
        """
        This function sets up the gridsearch hyper-parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and the parameters used by deeplearning.
        2. It will find the intersection of parameters that are both griddable and used by deeplearning.
        3. There are several extra parameters used by deeplearning that are denoted as griddable but actually
        are not.  These parameters have to be discovered manually and are captured in
        self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameters.  For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build a barebone model to get all parameters
        model = H2ODeepLearningEstimator(distribution=self.family,
                                         seed=self.seed,
                                         nfolds=self.nfolds,
                                         hidden=[10, 10, 10])
        model.train(x=self.x_indices,
                    y=self.y_index,
                    training_frame=self.training1_data)

        self.model_run_time = pyunit_utils.find_grid_runtime(
            [model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(
            self.model_run_time))

        summary_list = model._model_json["output"]["scoring_history"]
        num_iterations = summary_list.cell_values[2][
            summary_list.col_header.index('iterations')]

        if num_iterations == 0:
            self.min_runtime_per_iteration = self.model_run_time
        else:
            self.min_runtime_per_iteration = self.model_run_time / num_iterations

        # grab all gridable parameters and their types
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # randomly generate griddable parameter values, including values outside the legal range, e.g. setting
        # alpha values outside its legal range of 0 to 1
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params,
                                         self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val, self.min_real_val)

        # scale the max_runtime_secs parameter and others as well to make sure they make sense
        time_scale = self.time_scale * self.model_run_time
        if "max_runtime_secs" in list(self.hyper_params):
            self.hyper_params["max_runtime_secs"] = [
                time_scale * x for x in self.hyper_params["max_runtime_secs"]
            ]

        if "epsilon" in list(self.hyper_params):
            self.hyper_params["epsilon"] = [
                1e-4 * x for x in self.hyper_params["epsilon"]
            ]

        if "input_dropout_ratio" in list(self.hyper_params):
            self.hyper_params["input_dropout_ratio"] = [
                0.5 * x for x in self.hyper_params["input_dropout_ratio"]
            ]

        if "hidden_dropout_ratio" in list(self.hyper_params):
            self.hyper_params["hidden_dropout_ratio"] = [
                0.5 * x for x in self.hyper_params["hidden_dropout_ratio"]
            ]

        if "hidden" in list(self.hyper_params):  # need to change this up
            # randomly generate the number of layers in the network
            num_layer = random.randint(1, 3)

            # for each layer, randomly generate the number of nodes in it
            self.hyper_params["hidden"] = [
                random.randint(1, self.max_int_val)
                for p in range(0, num_layer)
            ]

        if "epochs" in self.hyper_params:
            self.hyper_params["epochs"] = [
                random.randint(self.min_int_val, self.max_int_val)
                for p in range(0, self.max_int_number)
            ]

        # generate a new final_hyper_params which only takes a subset of all griddable parameters
        [self.possible_number_models, self.final_hyper_params] = \
            pyunit_utils.check_and_count_models(self.hyper_params, [], [],
                                                [], [],
                                                self.max_grid_model)

        # must add max_runtime_secs to restrict unit test run time and as a promise to Arno to test for this
        if ("max_runtime_secs" not in list(self.final_hyper_params)) and \
                ("max_runtime_secs" in list(self.hyper_params)):
            self.final_hyper_params["max_runtime_secs"] = self.hyper_params[
                "max_runtime_secs"]

        self.final_hyper_params["seed"] = [
            self.seed
        ]  # add seed to make the test more repeatable

        # write out the hyper-parameters used into json files.
        pyunit_utils.write_hyper_parameters_json(self.current_dir,
                                                 self.sandbox_dir,
                                                 self.json_filename,
                                                 self.final_hyper_params)
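The setup_model functions above all follow the same recipe: time a barebone model, derive a per-iteration (or per-tree) runtime floor from its scoring history or model summary, and rescale the randomly drawn max_runtime_secs candidates relative to that build time. A condensed sketch of that recipe, with illustrative names:

def scale_runtime_limits(raw_limits, model_run_time, num_iterations, time_scale=1.2):
    # Rescale candidate max_runtime_secs values by the barebone model's build time
    # and return the shortest time one iteration is expected to take; the tests use
    # that floor to decide whether a tiny max_runtime_secs is actually enforceable.
    if num_iterations <= 0:
        min_runtime_per_iteration = model_run_time
    else:
        min_runtime_per_iteration = model_run_time / num_iterations
    scaled_limits = [time_scale * model_run_time * x for x in raw_limits]
    return scaled_limits, min_runtime_per_iteration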
Code Example #18
0
    def test1_glm_grid_search_over_params(self):
        """
        test1_glm_grid_search_over_params: test for condition 1 and performs the following:
        a. grab all truly griddable parameters and randomly or manually set the parameter values.
        b. Next, build H2O GLM models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameter
           values.  We should instead get a warning/error message printed out.
        c. For each model built using grid search, we will extract the parameters used in building
           that model and manually build an H2O GLM model.  Training metrics are calculated from the
           gridsearch model and the manually built model.  If their metrics differ too much, print a warning
           message but don't fail the test.
        d. We check and make sure the models are built within the max_runtime_secs time limit that was set
           for them.  If max_runtime_secs was exceeded, declare test failure.
        """

        print(
            "*******************************************************************************************"
        )
        print("test1_glm_grid_search_over_params for GLM " + self.family)
        h2o.cluster_info()

        try:
            print("Hyper-parameters used here is {0}".format(
                self.final_hyper_params_bad))

            # start grid search
            grid_model = H2OGridSearch(
                H2OGeneralizedLinearEstimator(family=self.family,
                                              nfolds=self.nfolds),
                hyper_params=self.final_hyper_params_bad)
            grid_model.train(x=self.x_indices,
                             y=self.y_index,
                             training_frame=self.training1_data)

            self.correct_model_number = len(
                grid_model)  # store number of models built

            # make sure the correct number of models are built by gridsearch
            if not (self.correct_model_number
                    == self.possible_number_models):  # wrong grid model number
                self.test_failed += 1
                self.test_failed_array[self.test_num] = 1
                print(
                    "test1_glm_grid_search_over_params for GLM failed: number of models built by gridsearch "
                    "does not equal the number of all possible combinations of hyper-parameters"
                )
            else:
                # add parameters into params_dict.  Use this to manually build model
                params_dict = dict()
                params_dict["family"] = self.family
                params_dict["nfolds"] = self.nfolds
                total_run_time_limits = 0.0  # calculate upper bound of max_runtime_secs
                true_run_time_limits = 0.0
                manual_run_runtime = 0.0

                # compare MSE performance of model built by gridsearch with manually built model
                for each_model in grid_model:

                    # grab parameters used by grid search and build a dict out of it
                    params_list = grid_model.get_hyperparams_dict(
                        each_model._id)
                    params_list.update(params_dict)

                    model_params = dict(
                    )  # some parameters are to be added in .train()

                    if "lambda" in list(params_list):
                        params_list["Lambda"] = params_list["lambda"]
                        del params_list["lambda"]

                    # need to take max_runtime_secs, stopping_rounds, stopping_tolerance
                    # out of the model parameters; they are now set in .train()
                    if "max_runtime_secs" in params_list:
                        model_params["max_runtime_secs"] = params_list[
                            "max_runtime_secs"]
                        del params_list["max_runtime_secs"]

                    if "stopping_rounds" in params_list:
                        model_params["stopping_rounds"] = params_list[
                            "stopping_rounds"]
                        del params_list["stopping_rounds"]

                    if "stopping_tolerance" in params_list:
                        model_params["stopping_tolerance"] = params_list[
                            "stopping_tolerance"]
                        del params_list["stopping_tolerance"]

                    manual_model = H2OGeneralizedLinearEstimator(**params_list)
                    manual_model.train(x=self.x_indices,
                                       y=self.y_index,
                                       training_frame=self.training1_data,
                                       **model_params)

                    # accumulate the time taken to manually build all models
                    model_runtime = pyunit_utils.find_grid_runtime(
                        [manual_model])  # time taken to build this model
                    manual_run_runtime += model_runtime

                    summary_list = manual_model._model_json['output'][
                        'model_summary']
                    iteration_num = summary_list.cell_values[0][
                        summary_list.col_header.index('number_of_iterations')]

                    if model_params["max_runtime_secs"] > 0:
                        # shortest possible time it takes to build this model
                        if (model_params["max_runtime_secs"] <
                                self.min_runtime_per_epoch) or (iteration_num
                                                                <= 1):
                            total_run_time_limits += model_runtime
                        else:
                            total_run_time_limits += model_params[
                                "max_runtime_secs"]

                    true_run_time_limits += model_params["max_runtime_secs"]

                    # compute and compare test metrics between the two models
                    grid_model_metrics = each_model.model_performance(
                        test_data=self.training2_data)
                    manual_model_metrics = manual_model.model_performance(
                        test_data=self.training2_data)

                    # just compare the mse in this case within tolerance:
                    if not ((type(grid_model_metrics.mse()) == str) or
                            (type(manual_model_metrics.mse()) == str)):
                        mse = grid_model_metrics.mse()
                        if abs(mse) > 0 and abs(mse - manual_model_metrics.mse(
                        )) / mse > self.allowed_diff:
                            print(
                                "test1_glm_grid_search_over_params for GLM warning: grid search model metric ({0}) "
                                "and manually built H2O model metric ({1}) differ too much"
                                "!".format(grid_model_metrics.mse(),
                                           manual_model_metrics.mse()))

                total_run_time_limits = max(
                    total_run_time_limits,
                    true_run_time_limits) * (1 + self.extra_time_fraction)

            # make sure the correct number of models are built by gridsearch
            if not (self.correct_model_number
                    == self.possible_number_models):  # wrong grid model number
                self.test_failed += 1
                self.test_failed_array[self.test_num] = 1
                print(
                    "test1_glm_grid_search_over_params for GLM failed: number of models built by gridsearch "
                    "does not equal the number of all possible combinations of hyper-parameters"
                )

            # make sure the max_runtime_secs is working to restrict model build time; GLM does not respect that.
            if not (manual_run_runtime <= total_run_time_limits):
                # self.test_failed += 1
                # self.test_failed_array[self.test_num] = 1
                print(
                    "test1_glm_grid_search_over_params for GLM warning: allow time to build models: {0}, actual "
                    "time taken: {1}".format(total_run_time_limits,
                                             manual_run_runtime))

            self.test_num += 1

            if self.test_failed == 0:
                print("test1_glm_grid_search_over_params for GLM has passed!")
        except Exception as e:
            if self.possible_number_models > 0:
                print(
                    "test1_glm_grid_search_over_params for GLM failed: exception ({0}) was thrown "
                    "unexpectedly.".format(e)
                )
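Step d of the test accumulates two budgets while rebuilding the models: true_run_time_limits sums the requested max_runtime_secs values, while total_run_time_limits substitutes the observed build time whenever a limit is too small to be enforceable (shorter than one iteration). A hedged sketch of that bookkeeping, factored into a standalone function with illustrative names:

def runtime_budget(per_model, extra_time_fraction=0.1):
    # per_model: iterable of (max_runtime_secs, actual_runtime_secs, enforceable) tuples,
    # one per grid model; 'enforceable' is False when the limit is below the per-iteration floor.
    total_limit = 0.0   # upper bound the manual rebuild must stay under
    true_limit = 0.0    # sum of the limits that were actually requested
    for max_runtime, actual_runtime, enforceable in per_model:
        if max_runtime > 0:
            total_limit += max_runtime if enforceable else actual_runtime
        true_limit += max_runtime
    return max(total_limit, true_limit) * (1 + extra_time_fraction)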
Code Example #19
0
    def setup_model(self):
        """
        This function sets up the gridsearch hyper-parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and the parameters used by GLM.
        2. It will find the intersection of parameters that are both griddable and used by GLM.
        3. There are several extra parameters used by GLM that are denoted as griddable but actually are not.
        These parameters have to be discovered manually and are captured in self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameters.  For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build a barebone model to get all parameters
        model = H2OGeneralizedLinearEstimator(family=self.family,
                                              nfolds=self.nfolds)
        model.train(x=self.x_indices,
                    y=self.y_index,
                    training_frame=self.training1_data)

        run_time = pyunit_utils.find_grid_runtime([model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(
            run_time))

        summary_list = model._model_json["output"]["model_summary"]
        num_iteration = summary_list.cell_values[0][
            summary_list.col_header.index("number_of_iterations")]

        if num_iteration == 0:
            self.min_runtime_per_epoch = run_time
        else:
            self.min_runtime_per_epoch = run_time / num_iteration

        # grab all gridable parameters and their types
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # randomly generate griddable parameter values, including values outside the legal range, e.g. setting
        # alpha values outside its legal range of 0 to 1
        (self.hyper_params_bad, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params_bad,
                                         self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val*self.alpha_scale, self.min_real_val*self.alpha_scale)

        # scale the value of lambda parameters
        if "lambda" in list(self.hyper_params_bad):
            self.hyper_params_bad["lambda"] = [
                self.lambda_scale * x for x in self.hyper_params_bad["lambda"]
            ]

        # scale the max_runtime_secs parameters
        time_scale = self.time_scale * run_time
        if "max_runtime_secs" in list(self.hyper_params_bad):
            self.hyper_params_bad["max_runtime_secs"] = [
                time_scale * x
                for x in self.hyper_params_bad["max_runtime_secs"]
            ]

        [self.possible_number_models, self.final_hyper_params_bad] = \
            pyunit_utils.check_and_count_models(self.hyper_params_bad, self.params_zero_one, self.params_more_than_zero,
                                                self.params_more_than_one, self.params_zero_positive,
                                                self.max_grid_model)

        if ("max_runtime_secs" not in list(self.final_hyper_params_bad)) and \
                ("max_runtime_secs" in list(self.hyper_params_bad)):
            self.final_hyper_params_bad[
                "max_runtime_secs"] = self.hyper_params_bad["max_runtime_secs"]
            len_good_time = len([
                x for x in self.hyper_params_bad["max_runtime_secs"]
                if (x >= 0)
            ])
            self.possible_number_models = self.possible_number_models * len_good_time

        # Stratified is illegal for Gaussian GLM
        self.possible_number_models = self.possible_number_models * self.scale_model

        # randomly generate griddable parameters with only good values
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params, self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, 0,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val, 0)

        # scale the value of lambda parameters
        if "lambda" in list(self.hyper_params):
            self.hyper_params["lambda"] = [
                self.lambda_scale * x for x in self.hyper_params["lambda"]
            ]

        # scale the max_runtime_secs parameters
        if "max_runtime_secs" in list(self.hyper_params):
            self.hyper_params["max_runtime_secs"] = [
                time_scale * x for x in self.hyper_params["max_runtime_secs"]
            ]

        [self.true_correct_model_number, self.final_hyper_params] = \
            pyunit_utils.check_and_count_models(self.hyper_params, self.params_zero_one, self.params_more_than_zero,
                                                self.params_more_than_one, self.params_zero_positive,
                                                self.max_grid_model)

        if ("max_runtime_secs" not in list(self.final_hyper_params)) and \
                ("max_runtime_secs" in list(self.hyper_params)):
            self.final_hyper_params["max_runtime_secs"] = self.hyper_params[
                "max_runtime_secs"]
            self.true_correct_model_number = \
                self.true_correct_model_number * len(self.final_hyper_params["max_runtime_secs"])

        # write out the hyper-parameters used into json files.
        pyunit_utils.write_hyper_parameters_json(self.current_dir,
                                                 self.sandbox_dir,
                                                 self.json_filename_bad,
                                                 self.final_hyper_params_bad)

        pyunit_utils.write_hyper_parameters_json(self.current_dir,
                                                 self.sandbox_dir,
                                                 self.json_filename,
                                                 self.final_hyper_params)
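check_and_count_models prunes illegal candidate values before counting, guided by the constraint lists passed in (params_zero_one, params_more_than_zero, and so on). A rough sketch of the pruning those argument names imply, under the assumption that each rule simply filters the value list (this is not the actual pyunit_utils code):

def prune_legal_values(name, values,
                       params_zero_one=(), params_more_than_zero=(),
                       params_more_than_one=(), params_zero_positive=()):
    # Keep only values that satisfy the constraint registered for this hyper-parameter.
    if name in params_zero_one:
        return [v for v in values if 0 <= v <= 1]
    if name in params_more_than_zero:
        return [v for v in values if v > 0]
    if name in params_more_than_one:
        return [v for v in values if v > 1]
    if name in params_zero_positive:
        return [v for v in values if v >= 0]
    return list(values)     # unconstrained parameter: keep everything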
Code Example #20
0
    def test1_glm_grid_search_over_params(self):
        """
        test1_glm_grid_search_over_params: test for condition 1 and performs the following:
        a. grab all truly griddable parameters and randomly or manually set the parameter values.
        b. Next, build H2O GLM models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameter
           values.  We should instead get a warning/error message printed out.
        c. For each model built using grid search, we will extract the parameters used in building
           that model and manually build an H2O GLM model.  Training metrics are calculated from the
           gridsearch model and the manually built model.  If their metrics differ too much, print a warning
           message but don't fail the test.
        d. We check and make sure the models are built within the max_runtime_secs time limit that was set
           for them.  If max_runtime_secs was exceeded, declare test failure.
        """

        print("*******************************************************************************************")
        print("test1_glm_grid_search_over_params for GLM " + self.family)
        h2o.cluster_info()

        try:
            print("Hyper-parameters used here is {0}".format(self.final_hyper_params_bad))

            # start grid search
            grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                                       hyper_params=self.final_hyper_params_bad)
            grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

            self.correct_model_number = len(grid_model)     # store number of models built

            # make sure the correct number of models are built by gridsearch
            if not (self.correct_model_number == self.possible_number_models):  # wrong grid model number
                self.test_failed += 1
                self.test_failed_array[self.test_num] = 1
                print("test_glm_search_over_params for GLM failed: number of models built by gridsearch "
                      "does not equal to all possible combinations of hyper-parameters")
            else:
                # add parameters into params_dict.  Use this to manually build model
                params_dict = dict()
                params_dict["family"] = self.family
                params_dict["nfolds"] = self.nfolds
                total_run_time_limits = 0.0   # calculate upper bound of max_runtime_secs
                true_run_time_limits = 0.0
                manual_run_runtime = 0.0

                # compare MSE performance of model built by gridsearch with manually built model
                for each_model in grid_model:

                    # grab parameters used by grid search and build a dict out of it
                    params_list = grid_model.get_hyperparams_dict(each_model._id)
                    params_list.update(params_dict)

                    model_params = dict()   # some parameters are to be added in .train()

                    if "lambda" in list(params_list):
                        params_list["Lambda"] = params_list["lambda"]
                        del params_list["lambda"]

                    # need to take max_runtime_secs, stopping_rounds, stopping_tolerance
                    # out of the model parameters; they are now set in .train()
                    if "max_runtime_secs" in params_list:
                        model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                        del params_list["max_runtime_secs"]

                    if "stopping_rounds" in params_list:
                        model_params["stopping_rounds"] = params_list["stopping_rounds"]
                        del params_list["stopping_rounds"]

                    if "stopping_tolerance" in params_list:
                        model_params["stopping_tolerance"] = params_list["stopping_tolerance"]
                        del params_list["stopping_tolerance"]

                    manual_model = H2OGeneralizedLinearEstimator(**params_list)
                    manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data,
                                       **model_params)

                    # accumulate the time taken to manually build all models
                    model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                    manual_run_runtime += model_runtime

                    summary_list = manual_model._model_json['output']['model_summary']
                    iteration_num = summary_list.cell_values[0][summary_list.col_header.index('number_of_iterations')]

                    if model_params["max_runtime_secs"] > 0:
                        # shortest possible time it takes to build this model
                        if (model_params["max_runtime_secs"] < self.min_runtime_per_epoch) or (iteration_num <= 1):
                            total_run_time_limits += model_runtime
                        else:
                            total_run_time_limits += model_params["max_runtime_secs"]

                    true_run_time_limits += model_params["max_runtime_secs"]

                    # compute and compare test metrics between the two models
                    grid_model_metrics = each_model.model_performance(test_data=self.training2_data)
                    manual_model_metrics = manual_model.model_performance(test_data=self.training2_data)

                    # just compare the mse in this case within tolerance:
                    if not((type(grid_model_metrics.mse()) == str) or (type(manual_model_metrics.mse()) == str)):
                        if (abs(grid_model_metrics.mse()) > 0) and \
                                abs(grid_model_metrics.mse() - manual_model_metrics.mse())/grid_model_metrics.mse() > self.allowed_diff:
                            print("test1_glm_grid_search_over_params for GLM warning: grid search model metric ({0}) "
                                  "and manually built H2O model metric ({1}) differ too much"
                                  "!".format(grid_model_metrics.mse(), manual_model_metrics.mse()))

                total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)

            # make sure the correct number of models are built by gridsearch
            if not (self.correct_model_number == self.possible_number_models):  # wrong grid model number
                self.test_failed += 1
                self.test_failed_array[self.test_num] = 1
                print("test1_glm_grid_search_over_params for GLM failed: number of models built by gridsearch "
                    "does not equal to all possible combinations of hyper-parameters")

            # make sure the max_runtime_secs is working to restrict model build time, GLM does not respect that.
            if not(manual_run_runtime <= total_run_time_limits):
                # self.test_failed += 1
                # self.test_failed_array[self.test_num] = 1
                print("test1_glm_grid_search_over_params for GLM warning: allow time to build models: {0}, actual "
                      "time taken: {1}".format(total_run_time_limits, manual_run_runtime))

            self.test_num += 1

            if self.test_failed == 0:
                print("test1_glm_grid_search_over_params for GLM has passed!")
        except Exception as e:
            if self.possible_number_models > 0:
                print("test1_glm_grid_search_over_params for GLM failed: exception ({0}) was thrown "
                      "unexpectedly.".format(e))
Code Example #21
0
    def setup_model(self):
        """
        This function sets up the gridsearch hyper-parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and the parameters used by GBM.
        2. It will find the intersection of parameters that are both griddable and used by GBM.
        3. There are several extra parameters used by GBM that are denoted as griddable but actually are not.
        These parameters have to be discovered manually and are captured in self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameters.  For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build a barebone model to get all parameters
        model = H2OGradientBoostingEstimator(distribution=self.family, seed=self.seed, nfolds=self.nfolds)
        model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        self.model_run_time = pyunit_utils.find_grid_runtime([model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(self.model_run_time))

        summary_list = model._model_json["output"]["model_summary"]
        num_trees = summary_list["number_of_trees"][0]

        if num_trees == 0:
            self.min_runtime_per_tree = self.model_run_time
        else:
            self.min_runtime_per_tree = self.model_run_time / num_trees

        # grab all gridable parameters and their types
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # randomly generate griddable parameter values, including values outside the legal range, e.g. setting
        # alpha values outside its legal range of 0 to 1
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params,
                                         self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val, self.min_real_val)

        # scale the max_runtime_secs parameters
        time_scale = self.time_scale * self.model_run_time
        if "max_runtime_secs" in list(self.hyper_params):
            self.hyper_params["max_runtime_secs"] = [time_scale * x for x
                                                     in self.hyper_params["max_runtime_secs"]]

        # generate a new final_hyper_params that takes only a subset of all griddable parameters, while
        # hyper_params keeps all griddable parameters; use it to generate the grid search hyper-parameters
        [self.possible_number_models, self.final_hyper_params] = \
            pyunit_utils.check_and_count_models(self.hyper_params, self.params_zero_one, self.params_more_than_zero,
                                                self.params_more_than_one, self.params_zero_positive,
                                                self.max_grid_model)

        # must add max_runtime_secs to restrict unit test run time and as a promise to Arno to test for this
        if ("max_runtime_secs" not in list(self.final_hyper_params)) and \
                ("max_runtime_secs" in list(self.hyper_params)):
            self.final_hyper_params["max_runtime_secs"] = self.hyper_params["max_runtime_secs"]
            len_good_time = len([x for x in self.hyper_params["max_runtime_secs"] if (x >= 0)])
            self.possible_number_models = self.possible_number_models*len_good_time

        if "fold_assignment" in list(self.final_hyper_params):
            self.possible_number_models = self.possible_number_models * self.scale_model

        self.final_hyper_params["seed"] = [self.seed]     # added see to make test more repeatable

        # write out the hyper-parameters used into json files.
        pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
                                                 self.final_hyper_params)
Code Example #22
0
    def test_glrm_grid_search_over_params(self):
        """
        test_glrm_grid_search_over_params performs the following:
        a. build H2O GLRM models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameter
           values.  We should instead get a warning/error message printed out.
        b. For each model built using grid search, we will extract the parameters used in building
           that model and manually build an H2O GLRM model.  Training metrics are calculated from the
           gridsearch model and the manually built model.  If their metrics differ too much, print a warning
           message but don't fail the test.
        c. We check and make sure the models are built within the max_runtime_secs time limit that was set
           for them.  If max_runtime_secs was exceeded, declare test failure.
        """

        print("*******************************************************************************************")
        print("test_glrm_grid_search_over_params for GLRM ")
        h2o.cluster_info()

        if self.possible_number_models > 0:
            print("Hyper-parameters used here is {0}".format(self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(H2OGeneralizedLowRankEstimator(), hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices, training_frame=self.training1_data)

            self.correct_model_number = len(grid_model)     # store number of models built

            # make sure the correct number of models are built by gridsearch
            if not (self.correct_model_number == self.possible_number_models):  # wrong grid model number
                self.test_failed += 1
                print("test_glrm_grid_search_over_params for GLRM failed: number of models built by gridsearch: {1} "
                      "does not equal to all possible combinations of hyper-parameters: "
                      "{1}".format(self.correct_model_number, self.possible_number_models))
            else:
                # add parameters into params_dict.  Use this to manually build model
                params_dict = dict()
                # params_dict["nfolds"] = self.nfolds
                total_run_time_limits = 0.0   # calculate upper bound of max_runtime_secs
                true_run_time_limits = 0.0
                manual_run_runtime = 0.0

                # compare objective performance of model built by gridsearch with manually built model
                for each_model in grid_model:

                    params_list = grid_model.get_hyperparams_dict(each_model._id)
                    params_list.update(params_dict)

                    model_params = dict()

                    # need to take out max_runtime_secs from the model parameters; it is now set in .train()
                    if "max_runtime_secs" in params_list:
                        model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                        max_runtime = params_list["max_runtime_secs"]
                        del params_list["max_runtime_secs"]
                    else:
                        max_runtime = 0

                    manual_model = H2OGeneralizedLowRankEstimator(**params_list)
                    manual_model.train(x=self.training1_data.names, training_frame=self.training1_data,
                                       **model_params)

                    # collect the time taken to manually build all models
                    model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                    manual_run_runtime += model_runtime

                    summary_list = manual_model._model_json['output']['model_summary']
                    num_iter = summary_list["number_of_iterations"][0]

                    if max_runtime > 0:
                        # shortest possible time it takes to build this model
                        if (max_runtime < self.min_runtime_per_iter) or (num_iter <= 1):
                            total_run_time_limits += model_runtime
                        else:
                            total_run_time_limits += max_runtime

                    true_run_time_limits += max_runtime

                    # compute and compare test metrics between the two models
                    grid_model_metrics = each_model._model_json['output']['objective']
                    manual_model_metrics = manual_model._model_json['output']['objective']

                    # just compare the mse in this case within tolerance:
                    if not((type(grid_model_metrics) == unicode) or (type(manual_model_metrics) == unicode)):
                        if (abs(grid_model_metrics) > 0) and \
                                abs(grid_model_metrics - manual_model_metrics) / grid_model_metrics > self.allowed_diff:
                            print("test_glrm_grid_search_over_params for GLRM warning: grid search model mdetric ({0}) "
                                  "and manually built H2O model metric ({1}) differ too much"
                                  "!".format(grid_model_metrics, manual_model_metrics))

                total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)

                # make sure the max_runtime_secs is working to restrict model built time
                if not(manual_run_runtime <= total_run_time_limits):
                    self.test_failed += 1
                    print("test_glrm_grid_search_over_params for GLRM failed: time taken to manually build models "
                          "is {0}.  Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits))
                else:
                    print("time taken to manually build all models is {0}. Maximum allowed time is "
                          "{1}".format(manual_run_runtime, total_run_time_limits))

                if self.test_failed == 0:
                    print("test_glrm_grid_search_over_params for GLRM has passed!")
コード例 #23
0
    def test_kmeans_grid_search_over_params(self):
        """
        test_kmeans_grid_search_over_params performs the following:
        a. build H2O kmeans models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameters
           values.  We should instead get a warning/error message printed out.
        b. For each model built using grid search, we will extract the parameters used in building
           that model and manually build a H2O kmeans model.  Training metrics are calculated from the
           gridsearch model and the manually built model.  If their metrics
           differ by too much, print a warning message but don't fail the test.
        c. we will check and make sure the models are built within the max_runtime_secs time limit that was set
           for it as well.  If max_runtime_secs was exceeded, declare test failure.
        """

        print("*******************************************************************************************")
        print("test_kmeans_grid_search_over_params for kmeans ")
        h2o.cluster_info()

        try:
            print("Hyper-parameters used here is {0}".format(self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(H2OKMeansEstimator(), hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices, training_frame=self.training1_data)

            self.correct_model_number = len(grid_model)     # store number of models built

            # make sure the correct number of models are built by gridsearch
            if (self.correct_model_number - self.possible_number_models)>0.9:  # wrong grid model number
                self.test_failed += 1
                print("test_kmeans_grid_search_over_params for kmeans failed: number of models built by gridsearch: {0}"
                      " does not equal to all possible combinations of hyper-parameters: "
                      "{1}".format(self.correct_model_number, self.possible_number_models))
            else:
                # add parameters into params_dict.  Use this to manually build model
                params_dict = dict()
                total_run_time_limits = 0.0   # calculate upper bound of max_runtime_secs
                true_run_time_limits = 0.0
                manual_run_runtime = 0.0

                # compare training metric performance of model built by gridsearch with manually built model
                for each_model in grid_model:

                    params_list = grid_model.get_hyperparams_dict(each_model._id)
                    params_list.update(params_dict)

                    model_params = dict()
                    num_iter = 0

                    # need to take out max_runtime_secs from the model parameters; it is now set in .train()
                    if "max_runtime_secs" in params_list:
                        model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                        max_runtime = params_list["max_runtime_secs"]
                        del params_list["max_runtime_secs"]
                    else:
                        max_runtime = 0

                    # make sure manual model was provided the same max_runtime_secs as the grid model
                    each_model_runtime = pyunit_utils.find_grid_runtime([each_model])

                    manual_model = H2OKMeansEstimator(**params_list)
                    manual_model.train(x=self.x_indices, training_frame=self.training1_data,
                                       **model_params)

                    # collect the time taken to manually build all models
                    model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                    manual_run_runtime += model_runtime

                    summary_list = manual_model._model_json['output']['model_summary']
                    if summary_list is not None:
                        num_iter = summary_list["number_of_iterations"][0]

                        # compute and compare test metrics between the two models
                        if not(each_model._model_json["output"]["model_summary"] is None):
                            grid_model_metrics = \
                                each_model._model_json["output"]["model_summary"]["total_sum_of_squares"][0]
                            manual_model_metrics = \
                                manual_model._model_json["output"]["model_summary"]["total_sum_of_squares"][0]

                            # just compare the training metrics in this case within tolerance:
                            if not((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
                                if (abs(grid_model_metrics) > 0) and \
                                        (abs(grid_model_metrics - manual_model_metrics) / grid_model_metrics >
                                             self.allowed_diff):
                                    print("test_kmeans_grid_search_over_params for kmeans warning: grid search model "
                                          "metric ({0}) and manually built H2O model metric ({1}) differ too much"
                                          "!".format(grid_model_metrics, manual_model_metrics))

                    if max_runtime > 0:
                        # collect allowed max_runtime_secs info
                        if (max_runtime < self.min_runtime_per_iter) or (num_iter <= 1):
                            total_run_time_limits += model_runtime
                        else:
                            total_run_time_limits += max_runtime

                    true_run_time_limits += max_runtime

                total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)

                # make sure the max_runtime_secs is working to restrict model built time
                if not(manual_run_runtime <= total_run_time_limits):
                    self.test_failed += 1
                    print("test_kmeans_grid_search_over_params for kmeans failed: time taken to manually build models"
                          " is {0}.  Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits))
                else:
                    print("time taken to manually build all models is {0}. Maximum allowed time is "
                          "{1}".format(manual_run_runtime, total_run_time_limits))

                if self.test_failed == 0:
                    print("test_kmeans_grid_search_over_params for kmeans has passed!")
        except Exception as e:
            if self.possible_number_models > 0:
                print("test_kmeans_grid_search_over_params for kmeans failed: exception ({0}) was thrown for no reason.".format(e))
                self.test_failed += 1
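
# A sketch of the parameter-splitting pattern used in every loop above:
# max_runtime_secs (and other train-time arguments) are popped out of the
# grid's hyper-parameter dict and passed to .train() instead of the estimator
# constructor.  The names split_train_kwargs and TRAIN_ONLY_KEYS are
# hypothetical and only illustrate the idea.
TRAIN_ONLY_KEYS = ("max_runtime_secs", "validation_frame")

def split_train_kwargs(params_list, train_only_keys=TRAIN_ONLY_KEYS):
    """Split a hyper-parameter dict into (constructor kwargs, train kwargs)."""
    constructor_params = dict(params_list)
    train_params = {}
    for key in train_only_keys:
        if key in constructor_params:
            train_params[key] = constructor_params.pop(key)
    return constructor_params, train_params

# usage sketch mirroring the loops above:
#   ctor, train = split_train_kwargs(grid_model.get_hyperparams_dict(model_id))
#   manual_model = H2OKMeansEstimator(**ctor)
#   manual_model.train(x=x_indices, training_frame=training_frame, **train)
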
    def setup_model(self):
        """
        This function sets up the gridsearch hyper-parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and parameters used by GBM.
        2. It will find the intersection of parameters that are both griddable and used by GBM.
        3. There are several extra parameters used by GBM that are denoted as griddable but actually are not.
        These parameters have to be discovered manually, and they are captured in self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameters.  For numerical parameters, we will generate them randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build bare bone model to get all parameters
        model = H2OGradientBoostingEstimator(distribution=self.family)
        model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        run_time = pyunit_utils.find_grid_runtime([model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(run_time))

        summary_list = model._model_json["output"]["model_summary"]
        num_trees = summary_list.cell_values[0][summary_list.col_header.index('number_of_trees')]

        if num_trees == 0:
            self.min_runtime_per_tree = run_time
        else:
            self.min_runtime_per_tree = run_time / num_trees

        # grab all gridable parameters and their types
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # randomly generate griddable parameters, including values outside the legal range, e.g. setting alpha
        # values outside the legal range of 0 to 1
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params,
                                         self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val, self.min_real_val)

        # scale the max_runtime_secs parameters
        time_scale = self.time_scale * run_time
        if "max_runtime_secs" in list(self.hyper_params):
            self.hyper_params["max_runtime_secs"] = [time_scale * x for x
                                                     in self.hyper_params["max_runtime_secs"]]

        self.possible_number_models = self.check_and_count_models()

        self.final_hyper_params["max_runtime_secs"] = self.hyper_params["max_runtime_secs"]

        # calculate true possible_number_models and exclude the bad parameters since they will not
        # result in any models being built
        # alpha_len = len(self.hyper_params["alpha"])
        # lambda_len = len(self.hyper_params["lambda"])
        time_len = len(self.hyper_params["max_runtime_secs"])
        # len_good_alpha = len([x for x in self.hyper_params["alpha"] if (x >= 0) and (x <= 1)])
        # len_good_lambda = len([x for x in self.hyper_params["lambda"] if (x >= 0)])
        len_good_time = len([x for x in self.hyper_params["max_runtime_secs"] if (x >= 0)])

        self.possible_number_models = int(self.possible_number_models*len_good_time/time_len)

        # write out the hyper-parameters used into json files.
        pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
                                                 self.final_hyper_params)
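
# A sketch of the runtime-scaling step in setup_model above: the randomly
# generated max_runtime_secs candidates are rescaled by the measured build
# time of a barebone model so the limits stay in a realistic range.  The
# helper name scale_runtimes is hypothetical.
def scale_runtimes(raw_values, base_run_time, time_scale):
    """Rescale raw max_runtime_secs candidates by a measured base run time."""
    return [time_scale * base_run_time * v for v in raw_values]

# e.g. with a 2-second barebone build and a time_scale of 1.2, a raw value of
# 0.5 becomes a 1.2-second budget; negative (illegal) values stay negative and
# are filtered out later by the len_good_time count.
print(scale_runtimes([0.5, 1.0, -1.0], 2.0, 1.2))  # [1.2, 2.4, -2.4]
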
    def test_kmeans_grid_search_over_params(self):
        """
        test_kmeans_grid_search_over_params performs the following:
        a. build H2O kmeans models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameters
           values.  We should instead get a warning/error message printed out.
        b. For each model built using grid search, we will extract the parameters used in building
           that model and manually build a H2O kmeans model.  Training metrics are calculated from the
           gridsearch model and the manually built model.  If their metrics
           differ by too much, print a warning message but don't fail the test.
        c. we will check and make sure the models are built within the max_runtime_secs time limit that was set
           for it as well.  If max_runtime_secs was exceeded, declare test failure.
        """

        print("*******************************************************************************************")
        print("test_kmeans_grid_search_over_params for kmeans ")
        h2o.cluster_info()

        try:
            print("Hyper-parameters used here is {0}".format(self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(H2OKMeansEstimator(), hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices, training_frame=self.training1_data)

            self.correct_model_number = len(grid_model)     # store number of models built

            # make sure the correct number of models are built by gridsearch
            if not (self.correct_model_number == self.possible_number_models):  # wrong grid model number
                self.test_failed += 1
                print("test_kmeans_grid_search_over_params for kmeans failed: number of models built by gridsearch "
                      "does not equal to all possible combinations of hyper-parameters")
            else:
                # add parameters into params_dict.  Use this to manually build model
                params_dict = dict()
                total_run_time_limits = 0.0   # calculate upper bound of max_runtime_secs
                true_run_time_limits = 0.0
                manual_run_runtime = 0.0

                # compare training metric performance of model built by gridsearch with manually built model
                for each_model in grid_model:

                    params_list = grid_model.get_hyperparams_dict(each_model._id)
                    params_list.update(params_dict)

                    model_params = dict()
                    num_iter = 0

                    # need to take out max_runtime_secs from the model parameters; it is now set in .train()
                    if "max_runtime_secs" in params_list:
                        model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                        max_runtime = params_list["max_runtime_secs"]
                        del params_list["max_runtime_secs"]
                    else:
                        max_runtime = 0

                    # make sure manual model was provided the same max_runtime_secs as the grid model
                    each_model_runtime = pyunit_utils.find_grid_runtime([each_model])

                    manual_model = H2OKMeansEstimator(**params_list)
                    manual_model.train(x=self.x_indices, training_frame=self.training1_data,
                                       **model_params)

                    # collect the time taken to manually build all models
                    model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                    manual_run_runtime += model_runtime

                    summary_list = manual_model._model_json['output']['model_summary']
                    if summary_list is not None:
                        num_iter = summary_list.cell_values[0][summary_list.col_header.index('number_of_iterations')]

                        # compute and compare test metrics between the two models
                        if each_model._model_json["output"]["model_summary"] is not None:
                            grid_model_metrics = each_model._model_json["output"]["model_summary"].cell_values[0][
                                summary_list.col_header.index('total_sum_of_squares')]
                            manual_model_metrics = manual_model._model_json["output"]["model_summary"].cell_values[0][
                                summary_list.col_header.index('total_sum_of_squares')]

                            # just compare the training metrics in this case within tolerance:
                            if not((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
                                if (abs(grid_model_metrics) > 0) and \
                                        (abs(grid_model_metrics - manual_model_metrics)/grid_model_metrics >
                                             self.allowed_diff):
                                    print("test_kmeans_grid_search_over_params for kmeans warning: grid search model "
                                          "metric ({0}) and manually built H2O model metric ({1}) differ too much"
                                          "!".format(grid_model_metrics, manual_model_metrics))

                    if max_runtime > 0:
                        # collect allowed max_runtime_secs info
                        if (max_runtime < self.min_runtime_per_iter) or (num_iter <= 1):
                            total_run_time_limits += model_runtime
                        else:
                            total_run_time_limits += max_runtime

                    true_run_time_limits += max_runtime

                total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)

                # make sure the max_runtime_secs is working to restrict model built time
                if not(manual_run_runtime <= total_run_time_limits):
                    self.test_failed += 1
                    print("test_kmeans_grid_search_over_params for kmeans failed: time taken to manually build models"
                          " is {0}.  Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits))
                else:
                    print("time taken to manually build all models is {0}. Maximum allowed time is "
                          "{1}".format(manual_run_runtime, total_run_time_limits))

                if self.test_failed == 0:
                    print("test_kmeans_grid_search_over_params for kmeans has passed!")
        except Exception as e:
            if self.possible_number_models > 0:
                print("test_kmeans_grid_search_over_params for kmeans failed: exception ({0}) was thrown "
                      "for no reason.".format(e))
                self.test_failed += 1
    def test_deeplearning_grid_search_over_params(self):
        """
        test_deeplearning_grid_search_over_params performs the following:
        a. build H2O deeplearning models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameters
           values.  We should instead get a warning/error message printed out.
        b. For each model built using grid search, we will extract the parameters used in building
           that model and manually build a H2O deeplearning model.  Training metrics are calculated from the
           gridsearch model and the manually built model.  If their metrics
           differ by too much, print a warning message but don't fail the test.
        c. we will check and make sure the models are built within the max_runtime_secs time limit that was set
           for it as well.  If max_runtime_secs was exceeded, declare test failure.
        """

        print("*******************************************************************************************")
        print("test_deeplearning_fieldnames for deeplearning " + self.family)
        h2o.cluster_info()

        # start grid search
        # grid_model = H2OGridSearch(H2ODeepLearningEstimator(nfolds=self.nfolds, seed=self.seed),
        #                             hyper_params=self.final_hyper_params)
        # grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
        #
        # self.correct_model_number = len(grid_model)     # store number of models built

        try:
            print("Hyper-parameters used here is {0}".format(self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(H2ODeepLearningEstimator(nfolds=self.nfolds),
                                       hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

            # add parameters into params_dict.  Use this to manually build model
            params_dict = dict()
            params_dict["distribution"] = self.family
            params_dict["nfolds"] = self.nfolds
            total_run_time_limits = 0.0   # calculate upper bound of max_runtime_secs
            true_run_time_limits = 0.0
            manual_run_runtime = 0.0

            # compare MSE performance of model built by gridsearch with manually built model
            for each_model in grid_model:

                params_list = grid_model.get_hyperparams_dict(each_model._id)
                params_list.update(params_dict)

                model_params = dict()

                # need to take out max_runtime_secs from the model parameters; it is now set in .train()
                if "max_runtime_secs" in params_list:
                    model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                    max_runtime = params_list["max_runtime_secs"]
                    del params_list["max_runtime_secs"]
                else:
                    max_runtime = 0

                if "elastic_averaging_moving_rate" in params_list:
                    model_params["elastic_averaging_moving_rate"] = params_list["elastic_averaging_moving_rate"]
                    del params_list["elastic_averaging_moving_rate"]

                if "validation_frame" in params_list:
                    model_params["validation_frame"] = params_list["validation_frame"]
                    del params_list["validation_frame"]

                if "elastic_averaging_regularization" in params_list:
                    model_params["elastic_averaging_regularization"] = params_list["elastic_averaging_regularization"]
                    del params_list["elastic_averaging_regularization"]

                manual_model = H2ODeepLearningEstimator(**params_list)
                manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data,
                                   **model_params)

                # collect the time taken to manually build all models
                model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                manual_run_runtime += model_runtime

                summary_list = manual_model._model_json["output"]["scoring_history"]
                if len(summary_list.cell_values) < 3:
                    num_iterations = 1
                else:
                    num_iterations = summary_list.cell_values[2][summary_list.col_header.index('iterations')]

                if max_runtime > 0:
                    # shortest possible time it takes to build this model
                    if (max_runtime < self.min_runtime_per_iteration) or (num_iterations <= 1):
                        total_run_time_limits += model_runtime
                    else:
                        total_run_time_limits += max_runtime

                true_run_time_limits += max_runtime

                # compute and compare test metrics between the two models
                grid_model_metrics = each_model.model_performance()._metric_json[self.training_metric]
                manual_model_metrics = manual_model.model_performance()._metric_json[self.training_metric]

                # just compare the mse in this case within tolerance:
                if not((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
                    if (abs(grid_model_metrics) > 0) \
                            and abs(grid_model_metrics - manual_model_metrics)/grid_model_metrics > self.allowed_diff:
                        print("test_deeplearning_fieldnames for deeplearning warning: grid search "
                              "model metric ({0}) and manually built H2O model metric ({1}) differ too much"
                              "!".format(grid_model_metrics, manual_model_metrics))

            total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)

            # make sure the max_runtime_secs is working to restrict model built time
            if not(manual_run_runtime <= total_run_time_limits):
                self.test_failed += 1
                print("test_deeplearning_fieldnames for deeplearning failed: time taken to manually build"
                      " models is {0}.  Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits))
            else:
                print("time taken to manually build all models is {0}. Maximum allowed time is "
                      "{1}".format(manual_run_runtime, total_run_time_limits))

            if self.test_failed == 0:
                print("test_deeplearning_fieldnames for deeplearning has passed!")
        except Exception as e:
            if len(grid_model) > 0:
                print("test_deeplearning_grid_search_over_params for deeplearning failed: exception ({0}) was "
                      "thrown for no reason.".format(e))
                self.test_failed += 1
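
# A sketch of the runtime-budget bookkeeping shared by the tests above: every
# model contributes either its requested max_runtime_secs or, when that limit
# could not be honoured (shorter than one iteration, or a one-iteration model),
# its actual build time.  The total is padded by extra_time_fraction before
# being compared with the summed manual build times.  All names here are
# hypothetical.
def runtime_budget(per_model, min_runtime_per_iteration, extra_time_fraction):
    """per_model: iterable of (max_runtime, actual_runtime, num_iterations)."""
    total_limit = 0.0   # upper bound, falling back to actual time when needed
    true_limit = 0.0    # sum of the raw requested limits
    for max_runtime, actual_runtime, num_iterations in per_model:
        if max_runtime > 0:
            if max_runtime < min_runtime_per_iteration or num_iterations <= 1:
                total_limit += actual_runtime
            else:
                total_limit += max_runtime
        true_limit += max_runtime
    return max(total_limit, true_limit) * (1 + extra_time_fraction)

# usage: fail only when the manual models together exceeded the padded budget
allowed = runtime_budget([(5.0, 1.2, 10), (0, 0.8, 3)],
                         min_runtime_per_iteration=0.1, extra_time_fraction=0.5)
print("passed" if (1.2 + 0.8) <= allowed else "failed")
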
    def test_gbm_grid_search_over_params(self):
        """
        test_gbm_grid_search_over_params: test for condition 1 and performs the following:
        a. grab all truly griddable parameters and randomly or manually set the parameter values.
        b. Next, build H2O GBM models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameters
           values.  We should instead get a warning/error message printed out.
        c. For each model built using grid search, we will extract the parameters used in building
           that model and manually build a H2O GLM model.  MSEs are calculated from a test set
           to compare the performance of grid search model and our manually built model.  If their MSEs
           are close, declare test success.  Otherwise, declare test failure.
        d. we will check and make sure the models are built within the max_runtime_secs time limit that was set
           for it as well.  If max_runtime_secs was exceeded, declare test failure as well.
        """

        print("*******************************************************************************************")
        print("test_gbm_grid_search_over_params for GBM " + self.family)
        h2o.cluster_info()

        try:
            print("Hyper-parameters used here is {0}".format(self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(H2OGradientBoostingEstimator(nfolds=self.nfolds),
                                       hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

            self.correct_model_number = len(grid_model)     # store number of models built

            # check the total time taken to build grid search models
            total_gridsearch_runtime = pyunit_utils.find_grid_runtime(grid_model)

            # add parameters into params_dict.  Use this to manually build model
            params_dict = dict()
            params_dict["family"] = self.family
            params_dict["nfolds"] = self.nfolds
            total_run_time_limits = 0.0   # calculate upper bound of max_runtime_secs
            true_run_time_limits = 0.0
            manual_run_runtime = 0.0

            # compare MSE performance of model built by gridsearch with manually built model
            for each_model in grid_model:

                # grab parameters used by grid search and build a dict out of it
                params_list = pyunit_utils.extract_used_params(self.final_hyper_params.keys(), each_model.params,
                                                               params_dict)

                # need to take out max_runtime_secs from the model parameters; it is now set in .train()
                if "max_runtime_secs" in params_list:
                    max_runtime = params_list["max_runtime_secs"]
                    del params_list["max_runtime_secs"]
                else:
                    max_runtime = 0

                manual_model = H2OGeneralizedLinearEstimator(**params_list)
                manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data,
                                   max_runtime_secs=max_runtime)

                # collect the time taken to manually build all models
                model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                manual_run_runtime += model_runtime

                summary_list = manual_model._model_json['output']['model_summary']
                iteration_num = summary_list.cell_values[0][summary_list.col_header.index('number_of_iterations')]

                if max_runtime > 0:
                    # shortest possible time it takes to build this model
                    if (max_runtime < self.min_runtime_per_tree) or (iteration_num <= 1):
                        total_run_time_limits += model_runtime
                    else:
                        total_run_time_limits += max_runtime

                true_run_time_limits += max_runtime

                # compute and compare test metrics between the two models
                test_grid_model_metrics = each_model.model_performance(test_data=self.training2_data)
                test_manual_model_metrics = manual_model.model_performance(test_data=self.training2_data)

                # just compare the mse in this case within tolerance:
                if abs(test_grid_model_metrics.mse() - test_manual_model_metrics.mse()) > self.allowed_diff:
                    self.test_failed += 1             # count total number of tests that have failed
                    self.test_failed_array[self.test_num] += 1
                    print("test_gbm_grid_search_over_params for GLM failed: grid search model and manually "
                          "built H2O model differ too much in test MSE!")
                    break

            total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)

            # # # make sure the correct number of models are built by gridsearch
            # if not (self.correct_model_number == self.possible_number_models):  # wrong grid model number
            #     self.test_failed += 1
            #     self.test_failed_array[self.test_num] = 1
            #     print("test_gbm_grid_search_over_params for GLM failed: number of models built by gridsearch "
            #           "does not equal to all possible combinations of hyper-parameters")

            # make sure the max_runtime_secs is working to restrict model built time
            # if not((total_gridsearch_runtime <= total_run_time_limits) and
            #            (manual_run_runtime <= total_run_time_limits)):
            #     self.test_failed += 1
            #     self.test_failed_array[self.test_num] = 1
            #     print("test_gbm_grid_search_over_params for GLM failed: number of models built by gridsearch "
            #           "does not equal to all possible combinations of hyper-parameters")
            #
            # if self.test_failed == 0:
            #     print("test_gbm_grid_search_over_params for GLM has passed!")
        except Exception as e:
            if self.possible_number_models > 0:
                print("test_gbm_grid_search_over_params for GLM failed: exception ({0}) was thrown "
                      "for no reason.".format(e))
コード例 #28
0
    def test_PCA_grid_search_over_params(self):
        """
        test_pca_grid_search_over_params: test for condition 1 and performs the following:
        a. build H2O PCA models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameters
           values.  We should instead get a warning/error message printed out.
        c. For each model built using grid search, we will extract the parameters used in building
           that model and manually build a H2O PCA model.  Training metrics are calculated from the
           gridsearch model and the manually built model.  If their metrics
           differ by too much, print a warning message but don't fail the test.
        d. we will check and make sure the models are built within the max_runtime_secs time limit that was set
           for it as well.  If max_runtime_secs was exceeded, declare test failure.
        """
        print("*******************************************************************************************")
        print("test_PCA_grid_search_over_params for PCA ")
        h2o.cluster_info()

        try:
            print("Hyper-parameters used here is {0}".format(self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(H2OPCA(pca_method=self.pca_method),
                                       hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices, training_frame=self.training1_data)

            self.correct_model_number = len(grid_model)     # store number of models built

            # make sure the correct number of models are built by gridsearch
            if (self.correct_model_number - self.possible_number_models)>0.9:  # wrong grid model number
                self.test_failed += 1
                print("test_PCA_grid_search_over_params for PCA failed: number of models built by gridsearch: {0} "
                      "does not equal to all possible combinations of hyper-parameters: "
                      "{1}".format(self.correct_model_number, self.possible_number_models))
            else:
                # add parameters into params_dict.  Use this to manually build model
                params_dict = dict()
                params_dict["pca_method"] = self.pca_method
                total_run_time_limits = 0.0   # calculate upper bound of max_runtime_secs
                true_run_time_limits = 0.0
                manual_run_runtime = 0.0

                # compare performance metric of model built by gridsearch with manually built model
                for each_model in grid_model:

                    params_list = grid_model.get_hyperparams_dict(each_model._id)
                    params_list.update(params_dict)

                    model_params = dict()

                    # need to take out max_runtime_secs from the model parameters; it is now set in .train()
                    if "max_runtime_secs" in params_list:
                        model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                        max_runtime = params_list["max_runtime_secs"]
                        del params_list["max_runtime_secs"]
                    else:
                        max_runtime = 0

                    # make sure manual model was provided the same max_runtime_secs as the grid model
                    each_model_runtime = pyunit_utils.find_grid_runtime([each_model])

                    manual_model = H2OPCA(**params_list)
                    manual_model.train(x=self.x_indices, training_frame=self.training1_data,
                                       **model_params)

                    # collect the time taken to manually build all models
                    model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                    manual_run_runtime += model_runtime

                    if max_runtime > 0:
                        # shortest possible time it takes to build this model
                        if max_runtime < self.model_run_time:
                            total_run_time_limits += model_runtime
                        else:
                            total_run_time_limits += max_runtime

                    true_run_time_limits += max_runtime

                    # compute and compare test metrics between the two models
                    grid_model_metrics = \
                        sum(each_model._model_json["output"]["model_summary"].cell_values[0][1:params_list["k"]])
                    manual_model_metrics = \
                        sum(manual_model._model_json["output"]["model_summary"].cell_values[0][1:params_list["k"]])

                    # just compare the mse in this case within tolerance:
                    if not((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
                        if (abs(grid_model_metrics) > 0) and \
                                (abs(grid_model_metrics - manual_model_metrics)/grid_model_metrics > self.allowed_diff):
                            print("test_PCA_grid_search_over_params for PCA warning: grid search model mdetric ({0}) "
                                  "and manually built H2O model metric ({1}) differ too much"
                                  "!".format(grid_model_metrics, manual_model_metrics))

                total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)

                # make sure the max_runtime_secs is working to restrict model built time
                if not(manual_run_runtime <= total_run_time_limits):
                    self.test_failed += 1
                    print("test_PCA_grid_search_over_params for PCA failed: time taken to manually build models is {0}."
                          "  Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits))
                else:
                    print("time taken to manually build all models is {0}. Maximum allowed time is "
                          "{1}".format(manual_run_runtime, total_run_time_limits))

                if self.test_failed == 0:
                    print("test_PCA_grid_search_over_params for PCA has passed!")
        except Exception as e:
            if self.possible_number_models > 0:
                print("test_PCA_grid_search_over_params for PCA failed: exception ({0}) was thrown for no reason.".format(e))
                self.test_failed += 1
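
# The PCA comparison above reduces each model-summary row to a single number by
# summing a leading slice of it (the [1:k] slice skips the label cell) before
# applying the usual relative tolerance.  A tiny sketch with plain lists
# standing in for the summary rows and a hypothetical helper name:
def summed_components(summary_row, k):
    """Sum the slice [1:k] of a summary row, mirroring the test above."""
    return sum(summary_row[1:k])

grid_row = ["Standard deviation", 2.1, 1.3, 0.4]
manual_row = ["Standard deviation", 2.0, 1.35, 0.4]
print(summed_components(grid_row, 3), summed_components(manual_row, 3))
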
コード例 #29
0
    def test_naivebayes_grid_search_over_params(self):
        """
        test_naivebayes_grid_search_over_params performs the following:
        a. build H2O naivebayes models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameters
           values.  We should instead get a warning/error message printed out.
        b. For each model built using grid search, we will extract the parameters used in building
           that model and manually build a H2O naivebayes model.  Logloss are calculated from a test set
           to compare the performance of grid search model and our manually built model.  If their metrics
           are close, declare test success.  Otherwise, declare test failure.
        c. we will check and make sure the models are built within the max_runtime_secs time limit that was set
           for it as well.  If max_runtime_secs was exceeded, declare test failure as well.
        """
        print(
            "*******************************************************************************************"
        )
        print("test_naivebayes_grid_search_over_params for naivebayes ")
        h2o.cluster_info()

        try:
            print("Hyper-parameters used here is {0}".format(
                self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(
                H2ONaiveBayesEstimator(nfolds=self.nfolds),
                hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices,
                             y=self.y_index,
                             training_frame=self.training1_data)

            self.correct_model_number = len(
                grid_model)  # store number of models built

            # make sure the correct number of models are built by gridsearch
            if not (self.correct_model_number
                    == self.possible_number_models):  # wrong grid model number
                self.test_failed += 1
                print(
                    "test_naivebayes_grid_search_over_params for naivebayes failed: number of models built by "
                    "gridsearch does not equal to all possible combinations of hyper-parameters"
                )
            else:
                # add parameters into params_dict.  Use this to manually build model
                params_dict = dict()
                params_dict["nfolds"] = self.nfolds
                params_dict["score_tree_interval"] = 0
                total_run_time_limits = 0.0  # calculate upper bound of max_runtime_secs
                true_run_time_limits = 0.0
                manual_run_runtime = 0.0

                # compare performance metric of model built by gridsearch with manually built model
                for each_model in grid_model:

                    params_list = grid_model.get_hyperparams_dict(
                        each_model._id)
                    params_list.update(params_dict)

                    model_params = dict()

                    # need to take out max_runtime_secs from the model parameters; it is now set in .train()
                    if "max_runtime_secs" in params_list:
                        model_params["max_runtime_secs"] = params_list[
                            "max_runtime_secs"]
                        max_runtime = params_list["max_runtime_secs"]
                        del params_list["max_runtime_secs"]
                    else:
                        max_runtime = 0

                    if "validation_frame" in params_list:
                        model_params["validation_frame"] = params_list[
                            "validation_frame"]
                        del params_list["validation_frame"]

                    if "score_tree_interval" in params_list:
                        model_params["score_tree_interval"] = params_list[
                            "score_tree_interval"]
                        del params_list["score_tree_interval"]

                    if "eps_prob" in params_list:
                        model_params["eps_prob"] = params_list["eps_prob"]
                        del params_list["eps_prob"]

                    if "min_prob" in params_list:
                        model_params["min_prob"] = params_list["min_prob"]
                        del params_list["min_prob"]

                    # make sure manual model was provided the same max_runtime_secs as the grid model
                    each_model_runtime = pyunit_utils.find_grid_runtime(
                        [each_model])

                    manual_model = H2ONaiveBayesEstimator(**params_list)
                    manual_model.train(x=self.x_indices,
                                       y=self.y_index,
                                       training_frame=self.training1_data,
                                       **model_params)

                    # collect the time taken to manually build all models
                    model_runtime = pyunit_utils.find_grid_runtime(
                        [manual_model])  # time taken to build this model
                    manual_run_runtime += model_runtime

                    if max_runtime > 0:
                        # shortest possible time it takes to build this model
                        if (max_runtime < self.model_run_time):
                            total_run_time_limits += model_runtime
                        else:
                            total_run_time_limits += max_runtime

                    true_run_time_limits += max_runtime

                    # compute and compare test metrics between the two models
                    test_grid_model_metrics = \
                        each_model.model_performance(test_data=self.training1_data)._metric_json[self.training_metric]
                    test_manual_model_metrics = \
                        manual_model.model_performance(test_data=self.training1_data)._metric_json[self.training_metric]

                    # just compare the mse in this case within tolerance:
                    if (each_model_runtime > 0) and \
                            (abs(model_runtime - each_model_runtime)/each_model_runtime < self.allowed_runtime_diff) \
                            and (abs(test_grid_model_metrics - test_manual_model_metrics) > self.allowed_diff):
                        self.test_failed += 1  # count total number of tests that have failed
                        print(
                            "test_naivebayes_grid_search_over_params for naivebayes failed: grid search model and manually "
                            "built H2O model differ too much in test MSE!")
                        break

                total_run_time_limits = max(
                    total_run_time_limits,
                    true_run_time_limits) * (1 + self.extra_time_fraction)

                # make sure the max_runtime_secs is working to restrict model built time
                if not (manual_run_runtime <= total_run_time_limits):
                    self.test_failed += 1
                    print(
                        "test_naivebayes_grid_search_over_params for naivebayes failed: time taken to manually build models is {0}."
                        "  Maximum allowed time is {1}".format(
                            manual_run_runtime, total_run_time_limits))

                if self.test_failed == 0:
                    print(
                        "test_naivebayes_grid_search_over_params for naivebayes has passed!"
                    )
        except Exception as e:
            if self.possible_number_models > 0:
                print(
                    "test_naivebayes_grid_search_over_params for naivebayes failed: exception ({0}) was thrown "
                    "for no reason.".format(e))
                self.test_failed += 1
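
# The naive Bayes check above only flags a metric mismatch when the manually
# built model received roughly the same runtime budget as the grid model;
# otherwise a difference is expected and ignored.  A sketch of that guard with
# hypothetical names:
def comparable_and_divergent(grid_runtime, manual_runtime, grid_metric,
                             manual_metric, allowed_runtime_diff, allowed_diff):
    """True when runtimes are comparable but the metrics still disagree."""
    if grid_runtime <= 0:
        return False  # no usable runtime recorded for the grid model
    runtimes_match = abs(manual_runtime - grid_runtime) / grid_runtime < allowed_runtime_diff
    return runtimes_match and abs(grid_metric - manual_metric) > allowed_diff

# usage: count a test failure only when this returns True
if comparable_and_divergent(2.0, 2.1, 0.31, 0.52, 0.2, 0.1):
    print("grid search model and manually built model differ too much")
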
コード例 #30
0
    def test_gbm_grid_search_over_params(self):
        """
        test_gbm_grid_search_over_params performs the following:
        a. build H2O GBM models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameters
           values.  We should instead get a warning/error message printed out.
        b. For each model built using grid search, we will extract the parameters used in building
           that model and manually build a H2O GBM model.  Training metrics are calculated from the
           gridsearch model and the manually built model.  If their metrics
           differ by too much, print a warning message but don't fail the test.
        c. we will check and make sure the models are built within the max_runtime_secs time limit that was set
           for it as well.  If max_runtime_secs was exceeded, declare test failure.
        """

        print("*******************************************************************************************")
        print("test_gbm_grid_search_over_params for GBM " + self.family)
        h2o.cluster_info()

        try:
            print("Hyper-parameters used here is {0}".format(self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(H2OGradientBoostingEstimator(distribution=self.family, nfolds=self.nfolds),
                                       hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

            self.correct_model_number = len(grid_model)     # store number of models built

            # make sure the correct number of models are built by gridsearch
            if not (self.correct_model_number == self.possible_number_models):  # wrong grid model number
                self.test_failed += 1
                print("test_gbm_grid_search_over_params for GBM failed: number of models built by gridsearch "
                      "does not equal to all possible combinations of hyper-parameters")
            else:
                # add parameters into params_dict.  Use this to manually build model
                params_dict = dict()
                params_dict["distribution"] = self.family
                params_dict["nfolds"] = self.nfolds
                total_run_time_limits = 0.0   # calculate upper bound of max_runtime_secs
                true_run_time_limits = 0.0
                manual_run_runtime = 0.0

                # compare MSE performance of model built by gridsearch with manually built model
                for each_model in grid_model:

                    params_list = grid_model.get_hyperparams_dict(each_model._id)
                    params_list.update(params_dict)

                    model_params = dict()

                    # need to take out max_runtime_secs from the model parameters; it is now set in .train()
                    if "max_runtime_secs" in params_list:
                        model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                        max_runtime = params_list["max_runtime_secs"]
                        del params_list["max_runtime_secs"]
                    else:
                        max_runtime = 0

                    if "r2_stopping" in params_list:
                        model_params["r2_stopping"] = params_list["r2_stopping"]
                        del params_list["r2_stopping"]

                    if "validation_frame" in params_list:
                        model_params["validation_frame"] = params_list["validation_frame"]
                        del params_list["validation_frame"]

                    if "learn_rate_annealing" in params_list:
                        model_params["learn_rate_annealing"] = params_list["learn_rate_annealing"]
                        del params_list["learn_rate_annealing"]

                    # make sure manual model was provided the same max_runtime_secs as the grid model
                    each_model_runtime = pyunit_utils.find_grid_runtime([each_model])

                    manual_model = H2OGradientBoostingEstimator(**params_list)
                    manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data,
                                       **model_params)

                    # collect the time taken to manually build all models
                    model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                    manual_run_runtime += model_runtime

                    summary_list = manual_model._model_json['output']['model_summary']
                    tree_num = summary_list.cell_values[0][summary_list.col_header.index('number_of_trees')]

                    if max_runtime > 0:
                        # shortest possible time it takes to build this model
                        if (max_runtime < self.min_runtime_per_tree) or (tree_num <= 1):
                            total_run_time_limits += model_runtime
                        else:
                            total_run_time_limits += max_runtime

                    true_run_time_limits += max_runtime

                    # compute and compare test metrics between the two models
                    grid_model_metrics = each_model.model_performance()._metric_json[self.training_metric]
                    manual_model_metrics = manual_model.model_performance()._metric_json[self.training_metric]

                    # just compare the mse in this case within tolerance:
                    if not((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
                        if (abs(grid_model_metrics) > 0) and \
                                (abs(grid_model_metrics - manual_model_metrics)/grid_model_metrics > self.allowed_diff):

                            print("test_gbm_grid_search_over_params for GBM warning: grid search model mdetric ({0}) "
                                  "and manually built H2O model metric ({1}) differ too much"
                                  "!".format(grid_model_metrics, manual_model_metrics))

                total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)

                # make sure the max_runtime_secs is working to restrict model built time
                if not(manual_run_runtime <= total_run_time_limits):
                    self.test_failed += 1
                    print("test_gbm_grid_search_over_params for GBM failed: time taken to manually build models is {0}."
                          "  Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits))
                else:
                    print("time taken to manually build all models is {0}. Maximum allowed time is "
                          "{1}".format(manual_run_runtime, total_run_time_limits))

                if self.test_failed == 0:
                    print("test_gbm_grid_search_over_params for GBM has passed!")
        except Exception as e:
            if self.possible_number_models > 0:
                print("test_gbm_grid_search_over_params for GBM failed: exception ({0}) was thrown for no "
                      "reason.".format(e))
                self.test_failed += 1
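
# One detail worth noting in the loops above: params_list.update(params_dict)
# lets the fixed settings (nfolds, distribution, ...) overwrite whatever the
# grid reported under the same keys, so the manual model is rebuilt with the
# intended constants.  A two-line illustration with plain dicts:
grid_reported = {"ntrees": 50, "nfolds": 0}
fixed_settings = {"nfolds": 5, "distribution": "gaussian"}
grid_reported.update(fixed_settings)  # fixed settings win on key conflicts
print(grid_reported)  # {'ntrees': 50, 'nfolds': 5, 'distribution': 'gaussian'}
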
    def test_naivebayes_grid_search_over_params(self):
        """
        test_naivebayes_grid_search_over_params performs the following:
        a. grab all truly griddable parameters and randomly or manually set the parameter values.
        b. Next, build H2O naivebayes models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad hyper-parameters
           values.  We should instead get a warning/error message printed out.
        c. For each model built using grid search, we will extract the parameters used in building
           that model and manually build a H2O naivebayes model.  Training metrics are calculated from the
           gridsearch model and the manually built model.  If their metrics
           differ by too much, print a warning message but don't fail the test.
        d. we will check and make sure the models are built within the max_runtime_secs time limit that was set
           for it as well.  If max_runtime_secs was exceeded, declare test failure.
        """
        print("*******************************************************************************************")
        print("test_naivebayes_grid_search_over_params for naivebayes ")
        h2o.cluster_info()

        try:
            print("Hyper-parameters used here is {0}".format(self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(H2ONaiveBayesEstimator(nfolds=self.nfolds),
                                   hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

            self.correct_model_number = len(grid_model)     # store number of models built

            # make sure the correct number of models are built by gridsearch
            if not (self.correct_model_number == self.possible_number_models):  # wrong grid model number
                self.test_failed += 1
                print("test_naivebayes_grid_search_over_params for naivebayes failed: number of models built by "
                      "gridsearch {0} does not equal to all possible combinations of hyper-parameters "
                      "{1}".format(self.correct_model_number, self.possible_number_models))
            else:
                # add parameters into params_dict.  Use this to manually build model
                params_dict = dict()
                params_dict["nfolds"] = self.nfolds
                total_run_time_limits = 0.0   # calculate upper bound of max_runtime_secs
                true_run_time_limits = 0.0
                manual_run_runtime = 0.0
                gridsearch_runtime = 0.0

                # compare performance metric of model built by gridsearch with manually built model
                for each_model in grid_model:

                    params_list = grid_model.get_hyperparams_dict(each_model._id)
                    params_list.update(params_dict)

                    model_params = dict()

                    # need to take out max_runtime_secs from the model parameters; it is now set in .train()
                    if "max_runtime_secs" in params_list:
                        model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                        max_runtime = params_list["max_runtime_secs"]
                        del params_list["max_runtime_secs"]
                    else:
                        max_runtime = 0

                    if "validation_frame" in params_list:
                        model_params["validation_frame"] = params_list["validation_frame"]
                        del params_list["validation_frame"]

                    if "eps_prob" in params_list:
                        model_params["eps_prob"] = params_list["eps_prob"]
                        del params_list["eps_prob"]

                    if "min_prob" in params_list:
                        model_params["min_prob"] = params_list["min_prob"]
                        del params_list["min_prob"]

                    # record how long the gridsearch spent on this model, then rebuild it manually
                    # with the same max_runtime_secs as the grid model
                    each_model_runtime = pyunit_utils.find_grid_runtime([each_model])
                    gridsearch_runtime += each_model_runtime

                    manual_model = H2ONaiveBayesEstimator(**params_list)
                    manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data,
                                       **model_params)

                    # collect the time taken to manually build all models
                    model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                    manual_run_runtime += model_runtime

                    if max_runtime > 0:
                        # shortest possible time it takes to build this model
                        if (max_runtime < self.model_run_time):
                            total_run_time_limits += model_runtime
                        else:
                            total_run_time_limits += max_runtime

                    true_run_time_limits += max_runtime

                    # compute and compare test metrics between the two models
                    grid_model_metrics = \
                        each_model.model_performance(test_data=self.training1_data)._metric_json[self.training_metric]
                    manual_model_metrics = \
                        manual_model.model_performance(test_data=self.training1_data)._metric_json[self.training_metric]

                    # compare the chosen training metric within tolerance:
                    if not((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
                        if (abs(grid_model_metrics) > 0) \
                            and (abs(grid_model_metrics - manual_model_metrics)/grid_model_metrics > self.allowed_diff):
                            print("test_naivebayes_grid_search_over_params for naivebayes WARNING\ngrid search model "
                                  "{0}: {1}, time taken to build (secs): {2}\n and manually built H2O model {3}: {4}, "
                                  "time taken to build (secs): {5}\ndiffer too much!"
                                  "".format(self.training_metric, grid_model_metrics, each_model_runtime,
                                            self.training_metric, manual_model_metrics, model_runtime))

                print("Time taken for gridsearch to build all models (sec): {0}\n Time taken to manually build all "
                      "models (sec): {1}, total run time limits (sec): "
                      "{2}".format(gridsearch_runtime, manual_run_runtime, total_run_time_limits))
                total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)


                # make sure the max_runtime_secs is working to restrict model built time
                if not(manual_run_runtime <= total_run_time_limits):
                    self.test_failed += 1
                    print("test_naivebayes_grid_search_over_params for naivebayes failed: time taken to manually build "
                          "models is {0}.  Maximum allowed time "
                          "is {1}".format(manual_run_runtime, total_run_time_limits))

                if self.test_failed == 0:
                    print("test_naivebayes_grid_search_over_params for naivebayes has passed!")
        except:
            if self.possible_number_models > 0:
                print("test_naivebayes_grid_search_over_params for naivebayes failed: exception was thrown for "
                      "no reason.")
                self.test_failed += 1
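
    # A minimal sketch (added for illustration; the name and its placement as a static method are
    # assumptions, not part of the original test class) of the relative tolerance check used above
    # when comparing grid-search metrics against manually built models.
    @staticmethod
    def _metrics_differ_too_much(grid_metric, manual_metric, allowed_diff):
        """Return True when two numeric metrics differ by more than allowed_diff (relative)."""
        if isinstance(grid_metric, str) or isinstance(manual_metric, str):
            return False          # non-numeric metrics are skipped, as in the loop above
        if abs(grid_metric) == 0:
            return False          # avoid dividing by a zero baseline metric
        return abs(grid_metric - manual_metric) / abs(grid_metric) > allowed_diff
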
    def setup_model(self):
        """
        This function sets up the gridsearch hyper-parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and parameters used by deeplearning.
        2. It will find the intersection of parameters that are both griddable and used by deeplearning.
        3. There are several extra parameters used by deeplearning that are denoted as griddable but actually
        are not.  These parameters have to be discovered manually and are captured in
        self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameters.  For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build bare bone model to get all parameters
        model = H2ODeepLearningEstimator(distribution=self.family, seed=self.seed, nfolds=self.nfolds,
                                         hidden=[10, 10, 10])
        model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        self.model_run_time = pyunit_utils.find_grid_runtime([model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(self.model_run_time))

        summary_list = model._model_json["output"]["scoring_history"]
        num_iterations = summary_list.cell_values[2][summary_list.col_header.index('iterations')]

        if num_iterations == 0:
            self.min_runtime_per_iteration = self.model_run_time
        else:
            self.min_runtime_per_iteration = self.model_run_time / num_iterations

        # grab all gridable parameters and their types
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # randomly generate griddable parameter values, including values outside the legal range
        # (e.g. alpha values outside the legal range of 0 to 1)
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params,
                                         self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val, self.min_real_val)

        # scale max_runtime_secs (and a few other parameters below) so the randomly generated values make
        # sense relative to the baseline model runtime; a standalone sketch of this rescaling follows this function
        time_scale = self.time_scale * self.model_run_time
        if "max_runtime_secs" in list(self.hyper_params):
            self.hyper_params["max_runtime_secs"] = [time_scale * x for x
                                                     in self.hyper_params["max_runtime_secs"]]

        if "epsilon" in list(self.hyper_params):
            self.hyper_params["epsilon"] = [1e-4 * x for x in self.hyper_params["epsilon"]]

        if "input_dropout_ratio" in list(self.hyper_params):
            self.hyper_params["input_dropout_ratio"] = [0.5 * x for x in self.hyper_params["input_dropout_ratio"]]

        if "hidden_dropout_ratio" in list(self.hyper_params):
            self.hyper_params["hidden_dropout_ratio"] = [0.5 * x for x in self.hyper_params["hidden_dropout_ratio"]]

        if "hidden" in list(self.hyper_params):     # need to change this up
            # randomly generate the number of layers in the network
            num_layer = random.randint(1,3)

            # for each layer, randomly generate the number of nodes in it
            self.hyper_params["hidden"] = [random.randint(1, self.max_int_val) for p in range(0, num_layer)]

        if "epochs" in self.hyper_params:
            self.hyper_params["epochs"] = [random.randint(self.min_int_val, self.max_int_val) for p in
                                           range(0, self.max_int_number)]

        # generate a new final_hyper_params which only takes a subset of all griddable parameters while
        # hyper_params takes all griddable parameters and generates the grid search hyper-parameters
        [self.possible_number_models, self.final_hyper_params] = \
            pyunit_utils.check_and_count_models(self.hyper_params, self.params_zero_one, self.params_more_than_zero,
                                                self.params_more_than_one, self.params_zero_positive,
                                                self.max_grid_model)
        # must add max_runtime_secs to restrict unit test run time and as a promise to Arno to test for this
        if ("max_runtime_secs" not in list(self.final_hyper_params)) and \
                ("max_runtime_secs" in list(self.hyper_params)):
            self.final_hyper_params["max_runtime_secs"] = self.hyper_params["max_runtime_secs"]
            len_good_time = len([x for x in self.hyper_params["max_runtime_secs"] if (x >= 0)])
            self.possible_number_models = self.possible_number_models*len_good_time

        # make correction for stratified not being a legal argument
        if "fold_assignment" in list(self.final_hyper_params):
            self.possible_number_models = self.possible_number_models * 3/4

        # write out the hyper-parameters used into json files.
        pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
                                                 self.final_hyper_params)
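
# A standalone sketch (hypothetical helper, not part of the test class above) of how the
# max_runtime_secs hyper-parameter candidates are rescaled: raw random values are multiplied
# by time_scale * model_run_time so every candidate stays in a range that makes sense for the
# baseline model.  The concrete numbers below are made up purely for illustration.
import random

def scale_runtime_candidates(raw_candidates, model_run_time, time_scale=1.2):
    """Scale raw max_runtime_secs candidates relative to a measured baseline runtime."""
    scale = time_scale * model_run_time
    return [scale * x for x in raw_candidates]

if __name__ == "__main__":
    baseline_secs = 2.5                                  # pretend the barebone model took 2.5 seconds
    raw = [random.uniform(0, 1) for _ in range(4)]       # raw candidates drawn from [0, 1)
    print(scale_runtime_candidates(raw, baseline_secs))  # every candidate now lies in [0, 3)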
Code Example #33
0
    def setup_model(self):
        """
        This function sets up the gridsearch hyper-parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and parameters used by GLRM.
        2. It will find the intersection of parameters that are both griddable and used by GLRM.
        3. There are several extra parameters used by GLRM that are denoted as griddable but actually are not.
        These parameters have to be discovered manually and are captured in self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameters.  For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build bare bone model to get all parameters
        model = H2OGeneralizedLowRankEstimator(k=10, loss="Quadratic", gamma_x=random.uniform(0, 1),
                                               gamma_y=random.uniform(0, 1), transform="DEMEAN")
        model.train(x=self.training1_data.names, training_frame=self.training1_data)

        self.model_run_time = pyunit_utils.find_grid_runtime([model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(self.model_run_time))

        summary_list = model._model_json["output"]["model_summary"]
        num_iter = summary_list["number_of_iterations"][0]

        if num_iter == 0:
            self.min_runtime_per_iter = self.model_run_time
        else:
            self.min_runtime_per_iter = self.model_run_time / num_iter

        # grab all gridable parameters and their types
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # randomly generate griddable parameter values, including values outside the legal range
        # (e.g. alpha values outside the legal range of 0 to 1)
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params,
                                         self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val, self.min_real_val)

        hyper_params_list = list(self.hyper_params)

        # scale the max_runtime_secs parameters
        time_scale = self.time_scale * self.model_run_time
        if "max_runtime_secs" in hyper_params_list:
            self.hyper_params["max_runtime_secs"] = [time_scale * x for x
                                                     in self.hyper_params["max_runtime_secs"]]

        # scale up max_iterations by iter_scale
        if "max_iterations" in hyper_params_list:
            self.hyper_params["max_iterations"] = [self.iter_scale * x for x in self.hyper_params["max_iterations"]]

        # generate a new final_hyper_params which only takes a subset of all griddable parameters while
        # hyper_params takes all griddable parameters and generates the grid search hyper-parameters
        # (a standalone sketch of the model-count bookkeeping follows this function)
        [self.possible_number_models, self.final_hyper_params] = \
            pyunit_utils.check_and_count_models(self.hyper_params, self.params_zero_one, self.params_more_than_zero,
                                                self.params_more_than_one, self.params_zero_positive,
                                                self.max_grid_model)

        # must add max_runtime_secs to restrict unit test run time and as a promise to Arno to test for this
        if ("max_runtime_secs" not in list(self.final_hyper_params)) and \
                ("max_runtime_secs" in list(self.hyper_params)):
            self.final_hyper_params["max_runtime_secs"] = self.hyper_params["max_runtime_secs"]
            len_good_time = len([x for x in self.hyper_params["max_runtime_secs"] if (x >= 0)])
            self.possible_number_models = self.possible_number_models*len_good_time

        if "k" not in list(self.final_hyper_params):    # must add this one
            self.final_hyper_params["k"] = self.hyper_params["k"]
            len_good_k = len([x for x in self.final_hyper_params["k"] if (x >= 1)])
            self.possible_number_models = self.possible_number_models*len_good_k

        self.final_hyper_params["seed"] = [self.seed]     # added see to make test more repeatable

        # write out the hyper-parameters used into json files.
        pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
                                                 self.final_hyper_params)
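
# A standalone sketch (hypothetical helper, not pyunit_utils.check_and_count_models) of the
# model-count bookkeeping above: the expected grid size is the product, over all
# hyper-parameters, of how many candidate values are legal (e.g. k >= 1, max_runtime_secs >= 0).
def count_possible_models(hyper_params, legal_checks):
    """Multiply the number of legal candidate values of every hyper-parameter."""
    total = 1
    for name, values in hyper_params.items():
        is_legal = legal_checks.get(name, lambda v: True)    # unconstrained params keep all values
        total *= len([v for v in values if is_legal(v)])
    return total

if __name__ == "__main__":
    params = {"k": [0, 2, 5], "max_runtime_secs": [-1.0, 10.0, 30.0]}
    checks = {"k": lambda v: v >= 1, "max_runtime_secs": lambda v: v >= 0}
    print(count_possible_models(params, checks))             # 2 legal k values * 2 legal times = 4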
Code Example #34
0
    def test_gbm_grid_search_over_params(self):
        """
        test_gbm_grid_search_over_params performs the following:
        a. build H2O GBM models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad
           hyper-parameter values.  We should instead get a warning/error message printed out.
        b. For each model built using grid search, we will extract the parameters used in building
           that model and manually build a H2O GBM model.  Training metrics are calculated from the
           gridsearch model and the manually built model.  If their metrics
           differ by too much, print a warning message but don't fail the test.
        c. we will check and make sure the models are built within the max_runtime_secs time limit that was set
           for it as well.  If max_runtime_secs was exceeded, declare test failure (a standalone sketch of
           this runtime-budget check follows this function).
        """

        print("*******************************************************************************************")
        print("test_gbm_grid_search_over_params for GBM " + self.family)
        h2o.cluster_info()

        try:
            print("Hyper-parameters used here is {0}".format(self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(H2OGradientBoostingEstimator(distribution=self.family, nfolds=self.nfolds),
                                       hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

            self.correct_model_number = len(grid_model)     # store number of models built

            # make sure the correct number of models are built by gridsearch
            if not (self.correct_model_number == self.possible_number_models):  # wrong grid model number
                self.test_failed += 1
                print("test_gbm_grid_search_over_params for GBM failed: number of models built by gridsearch "
                      "does not equal to all possible combinations of hyper-parameters")
            else:
                # add parameters into params_dict.  Use this to manually build model
                params_dict = dict()
                params_dict["distribution"] = self.family
                params_dict["nfolds"] = self.nfolds
                total_run_time_limits = 0.0   # calculate upper bound of max_runtime_secs
                true_run_time_limits = 0.0
                manual_run_runtime = 0.0

                # compare MSE performance of model built by gridsearch with manually built model
                for each_model in grid_model:

                    params_list = grid_model.get_hyperparams_dict(each_model._id)
                    params_list.update(params_dict)

                    model_params = dict()

                    # need to take out max_runtime_secs from the model parameters; it is now set in .train()
                    if "max_runtime_secs" in params_list:
                        model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                        max_runtime = params_list["max_runtime_secs"]
                        del params_list["max_runtime_secs"]
                    else:
                        max_runtime = 0

                    if "r2_stopping" in params_list:
                        model_params["r2_stopping"] = params_list["r2_stopping"]
                        del params_list["r2_stopping"]

                    if "validation_frame" in params_list:
                        model_params["validation_frame"] = params_list["validation_frame"]
                        del params_list["validation_frame"]

                    if "learn_rate_annealing" in params_list:
                        model_params["learn_rate_annealing"] = params_list["learn_rate_annealing"]
                        del params_list["learn_rate_annealing"]

                    # record how long the gridsearch spent on this model, then rebuild it manually
                    # with the same max_runtime_secs as the grid model
                    each_model_runtime = pyunit_utils.find_grid_runtime([each_model])

                    manual_model = H2OGradientBoostingEstimator(**params_list)
                    manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data,
                                       **model_params)

                    # collect the time taken to manually build all models
                    model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                    manual_run_runtime += model_runtime

                    summary_list = manual_model._model_json['output']['model_summary']
                    tree_num = summary_list.cell_values[0][summary_list.col_header.index('number_of_trees')]

                    if max_runtime > 0:
                        # shortest possible time it takes to build this model
                        if (max_runtime < self.min_runtime_per_tree) or (tree_num <= 1):
                            total_run_time_limits += model_runtime
                        else:
                            total_run_time_limits += max_runtime

                    true_run_time_limits += max_runtime

                    # compute and compare test metrics between the two models
                    grid_model_metrics = each_model.model_performance()._metric_json[self.training_metric]
                    manual_model_metrics = manual_model.model_performance()._metric_json[self.training_metric]

                    # compare the chosen training metric within tolerance:
                    if not((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
                        if (abs(grid_model_metrics) > 0) and \
                                (abs(grid_model_metrics - manual_model_metrics)/grid_model_metrics > self.allowed_diff):

                            print("test_gbm_grid_search_over_params for GBM warning: grid search model mdetric ({0}) "
                                  "and manually built H2O model metric ({1}) differ too much"
                                  "!".format(grid_model_metrics, manual_model_metrics))

                total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)

                # make sure the max_runtime_secs is working to restrict model built time
                if not(manual_run_runtime <= total_run_time_limits):
                    self.test_failed += 1
                    print("test_gbm_grid_search_over_params for GBM failed: time taken to manually build models is {0}."
                          "  Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits))
                else:
                    print("time taken to manually build all models is {0}. Maximum allowed time is "
                          "{1}".format(manual_run_runtime, total_run_time_limits))

                if self.test_failed == 0:
                    print("test_gbm_grid_search_over_params for GBM has passed!")
        except:
            if self.possible_number_models > 0:
                print("test_gbm_grid_search_over_params for GBM failed: exception was thrown for no reason.")
                self.test_failed += 1
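
# A standalone sketch (hypothetical names, not the original pyunit helpers) of the runtime
# budget enforced above: each model contributes either its measured runtime (when the requested
# max_runtime_secs is too small to build even one tree) or max_runtime_secs itself, and the sum
# of the manually measured runtimes must stay under that budget plus a slack fraction.
def runtime_budget_ok(per_model, min_runtime_per_tree, extra_time_fraction=0.5):
    """per_model: list of (measured_runtime_secs, max_runtime_secs, tree_num) tuples."""
    total_limit = 0.0
    true_limit = 0.0
    total_measured = 0.0
    for measured, max_runtime, tree_num in per_model:
        total_measured += measured
        if max_runtime > 0:
            if max_runtime < min_runtime_per_tree or tree_num <= 1:
                total_limit += measured          # budget too small to matter: charge the actual runtime
            else:
                total_limit += max_runtime
        true_limit += max_runtime
    budget = max(total_limit, true_limit) * (1 + extra_time_fraction)
    return total_measured <= budget

if __name__ == "__main__":
    models = [(1.2, 5.0, 40), (0.8, 0.01, 1)]    # (runtime secs, max_runtime_secs, trees built)
    print(runtime_budget_ok(models, min_runtime_per_tree=0.05))   # expected: True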
Code Example #35
0
    def test_PCA_grid_search_over_params(self):
        """
        test_PCA_grid_search_over_params: test for condition 1 and performs the following:
        a. build H2O PCA models using grid search.  Count and make sure models
           are only built for hyper-parameters set to legal values.  No model is built for bad
           hyper-parameter values.  We should instead get a warning/error message printed out.
        b. For each model built using grid search, we will extract the parameters used in building
           that model and manually build a H2O PCA model.  Training metrics are calculated from the
           gridsearch model and the manually built model.  If their metrics differ by too much,
           print a warning message but don't fail the test (a minimal sketch of this comparison
           follows this method).
        c. we will check and make sure the models are built within the max_runtime_secs time limit that was set
           for it as well.  If max_runtime_secs was exceeded, declare test failure.
        """
        print("*******************************************************************************************")
        print("test_PCA_grid_search_over_params for PCA ")
        h2o.cluster_info()

        try:
            print("Hyper-parameters used here is {0}".format(self.final_hyper_params))

            # start grid search
            grid_model = H2OGridSearch(H2OPCA(pca_method=self.pca_method),
                                       hyper_params=self.final_hyper_params)
            grid_model.train(x=self.x_indices, training_frame=self.training1_data)

            self.correct_model_number = len(grid_model)     # store number of models built

            # make sure the correct number of models are built by gridsearch
            if not (self.correct_model_number == self.possible_number_models):  # wrong grid model number
                self.test_failed += 1
                print("test_PCA_grid_search_over_params for PCA failed: number of models built by gridsearch "
                      "does not equal to all possible combinations of hyper-parameters")
            else:
                # add parameters into params_dict.  Use this to manually build model
                params_dict = dict()
                params_dict["pca_method"] = self.pca_method
                total_run_time_limits = 0.0   # calculate upper bound of max_runtime_secs
                true_run_time_limits = 0.0
                manual_run_runtime = 0.0

                # compare performance metric of model built by gridsearch with manually built model
                for each_model in grid_model:

                    params_list = grid_model.get_hyperparams_dict(each_model._id)
                    params_list.update(params_dict)

                    model_params = dict()

                    # need to take out max_runtime_secs from the model parameters; it is now set in .train()
                    if "max_runtime_secs" in params_list:
                        model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                        max_runtime = params_list["max_runtime_secs"]
                        del params_list["max_runtime_secs"]
                    else:
                        max_runtime = 0

                    # record how long the gridsearch spent on this model, then rebuild it manually
                    # with the same max_runtime_secs as the grid model
                    each_model_runtime = pyunit_utils.find_grid_runtime([each_model])

                    manual_model = H2OPCA(**params_list)
                    manual_model.train(x=self.x_indices, training_frame=self.training1_data,
                                       **model_params)

                    # collect the time taken to manually build all models
                    model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time taken to build this model
                    manual_run_runtime += model_runtime

                    if max_runtime > 0:
                        # shortest possible time it takes to build this model
                        if max_runtime < self.model_run_time:
                            total_run_time_limits += model_runtime
                        else:
                            total_run_time_limits += max_runtime

                    true_run_time_limits += max_runtime

                    # compute and compare test metrics between the two models
                    grid_model_metrics = \
                        sum(each_model._model_json["output"]["model_summary"].cell_values[0][1:params_list["k"]])
                    manual_model_metrics = \
                        sum(manual_model._model_json["output"]["model_summary"].cell_values[0][1:params_list["k"]])

                    # compare the summed component metrics within tolerance:
                    if not((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
                        if (abs(grid_model_metrics) > 0) and \
                                (abs(grid_model_metrics - manual_model_metrics)/grid_model_metrics > self.allowed_diff):
                            print("test_PCA_grid_search_over_params for PCA warning: grid search model mdetric ({0}) "
                                  "and manually built H2O model metric ({1}) differ too much"
                                  "!".format(grid_model_metrics, manual_model_metrics))

                total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)

                # make sure the max_runtime_secs is working to restrict model built time
                if not(manual_run_runtime <= total_run_time_limits):
                    self.test_failed += 1
                    print("test_PCA_grid_search_over_params for PCA failed: time taken to manually build models is {0}."
                          "  Maximum allowed time is {1}".format(manual_run_runtime, total_run_time_limits))
                else:
                    print("time taken to manually build all models is {0}. Maximum allowed time is "
                          "{1}".format(manual_run_runtime, total_run_time_limits))

                if self.test_failed == 0:
                    print("test_PCA_grid_search_over_params for PCA has passed!")
        except:
            if self.possible_number_models > 0:
                print("test_PCA_grid_search_over_params for PCA failed: exception was thrown for no reason.")
                self.test_failed += 1
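
# A minimal standalone sketch (hypothetical data, not the real H2O model-summary object) of the
# PCA comparison above: the metric is the sum of the component entries taken from the first
# model-summary row, skipping the row label and stopping before index k, and the two models are
# then compared with the same relative tolerance used for the supervised algorithms.
def pca_summary_metric(summary_row, k):
    """Sum the component entries of a model-summary row from index 1 up to (not including) k."""
    return sum(summary_row[1:k])

def within_tolerance(grid_metric, manual_metric, allowed_diff):
    """Return True when the relative difference between the two metrics is within allowed_diff."""
    if grid_metric == 0:
        return True
    return abs(grid_metric - manual_metric) / abs(grid_metric) <= allowed_diff

if __name__ == "__main__":
    grid_row = ["Standard deviation", 2.1, 1.4, 0.7, 0.2]     # pretend cell_values[0] of the grid model
    manual_row = ["Standard deviation", 2.0, 1.5, 0.7, 0.2]   # pretend cell_values[0] of the manual model
    g, m = pca_summary_metric(grid_row, 4), pca_summary_metric(manual_row, 4)
    print(g, m, within_tolerance(g, m, allowed_diff=0.1))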