Code example #1
File: grid_search.py  Project: Kendralabs/h2o-4
class H2OGridSearch(h2o_meta()):
    """
    Grid Search of a Hyper-Parameter Space for a Model

    :param model: The type of model to be explored initialized with optional parameters that will be
        unchanged across explored models.
    :param hyper_params: A dictionary of string parameters (keys) and a list of values to be explored by grid
        search (values).
    :param str grid_id: The unique id assigned to the resulting grid object. If none is given, an id will
        automatically be generated.
    :param search_criteria:  The optional dictionary of directives which control the search of the hyperparameter space.
        The dictionary can include values for: ``strategy``, ``max_models``, ``max_runtime_secs``, ``stopping_metric``, 
        ``stopping_tolerance``, ``stopping_rounds`` and ``seed``. The default strategy, "Cartesian", covers the entire space of 
        hyperparameter combinations. If you want to use Cartesian grid search, you can leave the ``search_criteria``
        argument unspecified. Specify the "RandomDiscrete" strategy to perform a random search over all combinations of
        your hyperparameters, with three ways of specifying when to stop the search: maximum number of models, maximum
        time, and metric-based early stopping (e.g., stop if MSE hasn't improved by 0.0001 over the 5 best models).
        Examples below::

            >>> criteria = {"strategy": "RandomDiscrete", "max_runtime_secs": 600,
            ...             "max_models": 100, "stopping_metric": "AUTO",
            ...             "stopping_tolerance": 0.00001, "stopping_rounds": 5,
            ...             "seed": 123456}
            >>> criteria = {"strategy": "RandomDiscrete", "max_models": 42,
            ...             "max_runtime_secs": 28800, "seed": 1234}
            >>> criteria = {"strategy": "RandomDiscrete", "stopping_metric": "AUTO",
            ...             "stopping_tolerance": 0.001, "stopping_rounds": 10}
            >>> criteria = {"strategy": "RandomDiscrete", "stopping_rounds": 5,
            ...             "stopping_metric": "misclassification",
            ...             "stopping_tolerance": 0.00001}
    :param parallelism: Level of parallelism during grid model building. 1 = sequential building (default).
        Use 0 for adaptive parallelism, decided by H2O. Any value > 1 sets the exact number of models
        built in parallel.
    :returns: a new H2OGridSearch instance

    Examples
    --------
        >>> import h2o
        >>> from h2o.grid.grid_search import H2OGridSearch
        >>> from h2o.estimators.glm import H2OGeneralizedLinearEstimator
        >>> hyper_parameters = {'alpha': [0.01, 0.5], 'lambda': [1e-5, 1e-6]}
        >>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'), hyper_parameters)
        >>> training_data = h2o.import_file("smalldata/logreg/benign.csv")
        >>> gs.train(x=list(range(3)) + list(range(4, 11)), y=3, training_frame=training_data)
        >>> gs.show()
    """
    def __init__(self,
                 model,
                 hyper_params,
                 grid_id=None,
                 search_criteria=None,
                 export_checkpoints_dir=None,
                 parallelism=1):
        assert_is_type(model, None, H2OEstimator,
                       lambda mdl: issubclass(mdl, H2OEstimator))
        assert_is_type(hyper_params, dict)
        assert_is_type(grid_id, None, str)
        assert_is_type(search_criteria, None, dict)
        if not (model is None or is_type(model, H2OEstimator)): model = model()
        self._id = grid_id
        self.model = model
        self.hyper_params = dict(hyper_params)
        self.search_criteria = None if search_criteria is None else dict(
            search_criteria)
        self.export_checkpoints_dir = export_checkpoints_dir
        self._parallelism = parallelism  # Degree of parallelism during model building
        self._grid_json = None
        self.models = None  # list of H2O Estimator instances
        self._parms = {}  # internal, for object recycle #
        self.parms = {}  # external#
        self._future = False  # used by __repr__/show to query job state#
        self._job = None  # used when _future is True#

    @property
    def grid_id(self):
        """A key that identifies this grid search object in H2O."""
        return self._id

    @grid_id.setter
    def grid_id(self, value):
        oldname = self.grid_id
        self._id = value
        h2o.rapids('(rename "{}" "{}")'.format(oldname, value))

    @property
    def model_ids(self):
        """List of the model ids of all models built by this grid search."""
        return [i['name'] for i in self._grid_json["model_ids"]]

    @property
    def hyper_names(self):
        """List of the names of the hyperparameters explored by this grid search."""
        return self._grid_json["hyper_names"]

    @property
    def failed_params(self):
        """Hyperparameter combinations that failed to produce a model (if any)."""
        return self._grid_json.get("failed_params", None)

    @property
    def failure_details(self):
        """Error messages for the failed hyperparameter combinations (if any)."""
        return self._grid_json.get("failure_details", None)

    @property
    def failure_stack_traces(self):
        """Stack traces for the failed hyperparameter combinations (if any)."""
        return self._grid_json.get("failure_stack_traces", None)

    @property
    def failed_raw_params(self):
        """Raw parameter values for the failed hyperparameter combinations (if any)."""
        return self._grid_json.get("failed_raw_params", None)

    def start(self,
              x,
              y=None,
              training_frame=None,
              offset_column=None,
              fold_column=None,
              weights_column=None,
              validation_frame=None,
              **params):
        """
        Asynchronous model build by specifying the predictor columns, response column, and any
        additional frame-specific values.

        To block for results, call :meth:`join`.

        :param x: A list of column names or indices indicating the predictor columns.
        :param y: An index or a column name indicating the response column.
        :param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
            additional columns specified by fold, offset, and weights).
        :param offset_column: The name or index of the column in training_frame that holds the offsets.
        :param fold_column: The name or index of the column in training_frame that holds the per-row fold
            assignments.
        :param weights_column: The name or index of the column in training_frame that holds the per-row weights.
        :param validation_frame: H2OFrame with validation data to be scored on while training.
        """
        self._future = True
        self.train(x=x,
                   y=y,
                   training_frame=training_frame,
                   offset_column=offset_column,
                   fold_column=fold_column,
                   weights_column=weights_column,
                   validation_frame=validation_frame,
                   **params)

    def join(self):
        """Wait until grid finishes computing."""
        self._future = False
        self._job.poll()
        self._job = None

    def train(self,
              x=None,
              y=None,
              training_frame=None,
              offset_column=None,
              fold_column=None,
              weights_column=None,
              validation_frame=None,
              **params):
        """
        Train the model synchronously (i.e. do not return until the model finishes training).

        To train asynchronously call :meth:`start`.

        :param x: A list of column names or indices indicating the predictor columns.
        :param y: An index or a column name indicating the response column.
        :param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
            additional columns specified by fold, offset, and weights).
        :param offset_column: The name or index of the column in training_frame that holds the offsets.
        :param fold_column: The name or index of the column in training_frame that holds the per-row fold
            assignments.
        :param weights_column: The name or index of the column in training_frame that holds the per-row weights.
        :param validation_frame: H2OFrame with validation data to be scored on while training.
        """
        algo_params = locals()
        parms = self._parms.copy()
        parms.update({
            k: v
            for k, v in algo_params.items()
            if k not in ["self", "params", "algo_params", "parms"]
        })
        # dictionaries have special handling in grid search, avoid the implicit conversion
        parms[
            "search_criteria"] = None if self.search_criteria is None else str(
                self.search_criteria)
        parms["export_checkpoints_dir"] = self.export_checkpoints_dir
        parms["parallelism"] = self._parallelism
        parms["hyper_parameters"] = None if self.hyper_params is None else str(
            self.hyper_params)  # unique to grid search
        parms.update({
            k: v
            for k, v in list(self.model._parms.items()) if v is not None
        })  # unique to grid search
        parms.update(params)
        if '__class__' in parms:  # FIXME: hack for PY3
            del parms['__class__']
        y = algo_params["y"]
        tframe = algo_params["training_frame"]
        if tframe is None: raise ValueError("Missing training_frame")
        if y is not None:
            if is_type(y, list, tuple):
                if len(y) == 1:
                    parms["y"] = y[0]
                else:
                    raise ValueError('y must be a single column reference')
        if x is None:
            if (isinstance(y, int)):
                xset = set(range(training_frame.ncols)) - {y}
            else:
                xset = set(training_frame.names) - {y}
        else:
            xset = set()
            if is_type(x, int, str): x = [x]
            for xi in x:
                if is_type(xi, int):
                    if not (-training_frame.ncols <= xi <
                            training_frame.ncols):
                        raise H2OValueError(
                            "Column %d does not exist in the training frame" %
                            xi)
                    xset.add(training_frame.names[xi])
                else:
                    if xi not in training_frame.names:
                        raise H2OValueError(
                            "Column %s not in the training frame" % xi)
                    xset.add(xi)
        x = list(xset)
        parms["x"] = x
        self.build_model(parms)

    def build_model(self, algo_params):
        """(internal)"""
        if algo_params["training_frame"] is None:
            raise ValueError("Missing training_frame")
        x = algo_params.pop("x")
        y = algo_params.pop("y", None)
        training_frame = algo_params.pop("training_frame")
        validation_frame = algo_params.pop("validation_frame", None)
        is_auto_encoder = (algo_params
                           is not None) and ("autoencoder" in algo_params
                                             and algo_params["autoencoder"])
        algo = self.model._compute_algo()  # unique to grid search
        is_unsupervised = is_auto_encoder or algo == "pca" or algo == "svd" or algo == "kmeans" or algo == "glrm"
        if is_auto_encoder and y is not None:
            raise ValueError("y should not be specified for autoencoder.")
        if not is_unsupervised and y is None:
            raise ValueError("Missing response")
        if not is_unsupervised:
            y = y if y in training_frame.names else training_frame.names[y]
            self.model._estimator_type = "classifier" if training_frame.types[
                y] == "enum" else "regressor"
        self._model_build(x, y, training_frame, validation_frame, algo_params)

    def _model_build(self, x, y, tframe, vframe, kwargs):
        kwargs['training_frame'] = tframe
        if vframe is not None: kwargs["validation_frame"] = vframe
        if is_type(y, int): y = tframe.names[y]
        if y is not None: kwargs['response_column'] = y
        if not is_type(x, list, tuple): x = [x]
        if is_type(x[0], int):
            x = [tframe.names[i] for i in x]
        offset = kwargs["offset_column"]
        folds = kwargs["fold_column"]
        weights = kwargs["weights_column"]
        ignored_columns = list(
            set(tframe.names) - set(x + [y, offset, folds, weights]))
        kwargs["ignored_columns"] = None if not ignored_columns else [
            quoted(col) for col in ignored_columns
        ]
        kwargs = dict([(k, kwargs[k].frame_id if isinstance(
            kwargs[k], H2OFrame) else kwargs[k]) for k in kwargs
                       if kwargs[k] is not None])  # gruesome one-liner
        algo = self.model._compute_algo()  # unique to grid search
        if self.grid_id is not None: kwargs["grid_id"] = self.grid_id
        rest_ver = kwargs.pop(
            "_rest_version") if "_rest_version" in kwargs else None

        grid = H2OJob(h2o.api("POST /99/Grid/%s" % algo, data=kwargs),
                      job_type=(algo + " Grid Build"))

        if self._future:
            self._job = grid
            return

        grid.poll()

        grid_json = h2o.api("GET /99/Grids/%s" % (grid.dest_key))
        failure_messages_stacks = ""
        error_index = 0
        if len(grid_json["failure_details"]) > 0:
            print("Errors/Warnings building gridsearch model\n")
            # will raise error if no grid model is returned, store error messages here

            for error_message in grid_json["failure_details"]:
                if isinstance(grid_json["failed_params"][error_index], dict):
                    for h_name in grid_json['hyper_names']:
                        print("Hyper-parameter: {0}, {1}".format(
                            h_name,
                            grid_json['failed_params'][error_index][h_name]))

                if len(grid_json["failure_stack_traces"]) > error_index:
                    print("failure_details: {0}\nfailure_stack_traces: "
                          "{1}\n".format(
                              error_message,
                              grid_json['failure_stack_traces'][error_index]))
                    failure_messages_stacks += error_message + '\n'
                error_index += 1

        self.models = [
            h2o.get_model(key['name']) for key in grid_json['model_ids']
        ]
        for model in self.models:
            model._estimator_type = self.model._estimator_type

        # get first model returned in list of models from grid search to get model class (binomial, multinomial, etc)
        # sometimes no model is returned due to bad parameter values provided by the user.
        if len(grid_json['model_ids']) > 0:
            first_model_json = h2o.api(
                "GET /%d/Models/%s" %
                (rest_ver
                 or 3, grid_json['model_ids'][0]['name']))['models'][0]
            self._resolve_grid(grid.dest_key, grid_json, first_model_json)
        else:
            if len(failure_messages_stacks) > 0:
                raise ValueError(failure_messages_stacks)
            else:
                raise ValueError(
                    "Grid search returned no model, possibly due to bad parameter values."
                )

    def _resolve_grid(self, grid_id, grid_json, first_model_json):
        model_class = H2OGridSearch._metrics_class(first_model_json)
        m = model_class()
        m._id = grid_id
        m._grid_json = grid_json
        # m._metrics_class = metrics_class
        m._parms = self._parms
        self.export_checkpoints_dir = m._grid_json["export_checkpoints_dir"]
        H2OEstimator.mixin(self, model_class)
        self.__dict__.update(m.__dict__.copy())

    def __getitem__(self, item):
        return self.models[item]

    def __iter__(self):
        nmodels = len(self.models)
        return (self[i] for i in range(nmodels))

    def __len__(self):
        return len(self.models)

    def __repr__(self):
        self.show()
        return ""

    def predict(self, test_data):
        """
        Predict on a dataset.

        :param H2OFrame test_data: Data to be predicted on.
        :returns: A dictionary keyed by model id, mapping each grid model to an H2OFrame of its predictions.
        """
        return {
            model.model_id: model.predict(test_data)
            for model in self.models
        }

    def is_cross_validated(self):
        """Return True if the model was cross-validated."""
        return {
            model.model_id: model.is_cross_validated()
            for model in self.models
        }

    def xval_keys(self):
        """Model keys for the cross-validated model."""
        return {model.model_id: model.xval_keys() for model in self.models}

    def get_xval_models(self, key=None):
        """
        Return the cross-validated models.

        :param str key: If None, return all cross-validated models; otherwise return the model
            specified by the key.
        :returns: A dictionary keyed by model id, containing a model or a list of models for each grid model.
        """
        return {
            model.model_id: model.get_xval_models(key)
            for model in self.models
        }

    def xvals(self):
        """Return the list of cross-validated models."""
        return {model.model_id: model.xvals for model in self.models}

    def deepfeatures(self, test_data, layer):
        """
        Obtain a hidden layer's details on a dataset.

        :param test_data: Data to create a feature space on.
        :param int layer: Index of the hidden layer.
        :returns: A dictionary of hidden layer details for each model.
        """
        return {
            model.model_id: model.deepfeatures(test_data, layer)
            for model in self.models
        }

    def weights(self, matrix_id=0):
        """
        Return the frame for the respective weight matrix.

        :param matrix_id: an integer, ranging from 0 to number of layers, that specifies the weight matrix to return.
        :returns: a dictionary keyed by model id, mapping each model to the H2OFrame holding the weight matrix identified by matrix_id
        """
        return {
            model.model_id: model.weights(matrix_id)
            for model in self.models
        }

    def biases(self, vector_id=0):
        """
        Return the frame for the respective bias vector.

        :param vector_id: an integer, ranging from 0 to number of layers, that specifies the bias vector to return.
        :returns: a dictionary keyed by model id, mapping each model to the H2OFrame holding the bias vector identified by vector_id
        """
        return {
            model.model_id: model.biases(vector_id)
            for model in self.models
        }

    def normmul(self):
        """Normalization/Standardization multipliers for numeric predictors."""
        return {model.model_id: model.normmul() for model in self.models}

    def normsub(self):
        """Normalization/Standardization offsets for numeric predictors."""
        return {model.model_id: model.normsub() for model in self.models}

    def respmul(self):
        """Normalization/Standardization multipliers for numeric response."""
        return {model.model_id: model.respmul() for model in self.models}

    def respsub(self):
        """Normalization/Standardization offsets for numeric response."""
        return {model.model_id: model.respsub() for model in self.models}

    def catoffsets(self):
        """
        Categorical offsets for one-hot encoding
        """
        return {model.model_id: model.catoffsets() for model in self.models}

    def model_performance(self,
                          test_data=None,
                          train=False,
                          valid=False,
                          xval=False):
        """
        Generate model metrics for this model on test_data.

        :param test_data: Data set against which the model metrics shall be computed. All three of the train, valid
            and xval arguments are ignored if test_data is not None.
        :param train: Report the training metrics for the model.
        :param valid: Report the validation metrics for the model.
        :param xval: Report the cross-validation metrics for the model.
        :return: A dictionary of H2OModelMetrics objects keyed by model id.
        """
        return {
            model.model_id: model.model_performance(test_data, train, valid,
                                                    xval)
            for model in self.models
        }

    def scoring_history(self):
        """
        Retrieve model scoring history.

        :returns: A dictionary of score histories (H2OTwoDimTable) keyed by model id.
        """
        return {
            model.model_id: model.scoring_history()
            for model in self.models
        }

    def summary(self, header=True):
        """Print a detailed summary of the explored models."""
        table = []
        for model in self.models:
            model_summary = model._model_json["output"]["model_summary"]
            r_values = list(model_summary.cell_values[0])
            r_values[0] = model.model_id
            table.append(r_values)

        # if h2o.can_use_pandas():
        #  import pandas
        #  pandas.options.display.max_rows = 20
        #  print pandas.DataFrame(table,columns=self.col_header)
        #  return
        print()
        if header:
            print('Grid Summary:')
        print()
        H2ODisplay(table,
                   header=['Model Id'] + model_summary.col_header[1:],
                   numalign="left",
                   stralign="left")

    def show(self):
        """Print models sorted by metric."""
        hyper_combos = itertools.product(*list(self.hyper_params.values()))
        if not self.models:
            c_values = [[idx + 1, list(val)]
                        for idx, val in enumerate(hyper_combos)]
            print(
                H2OTwoDimTable(col_header=[
                    'Model', 'Hyperparameters: [' +
                    ', '.join(list(self.hyper_params.keys())) + ']'
                ],
                               table_header='Grid Search of Model ' +
                               self.model.__class__.__name__,
                               cell_values=c_values))
        else:
            print(self.sorted_metric_table())

    def varimp(self, use_pandas=False):
        """
        Pretty print the variable importances, or return them in a list/pandas DataFrame.

        :param bool use_pandas: If True, then the variable importances will be returned as a pandas data frame.

        :returns: A dictionary of lists or Pandas DataFrame instances.
        """
        return {
            model.model_id: model.varimp(use_pandas)
            for model in self.models
        }

    def residual_deviance(self, train=False, valid=False, xval=False):
        """
        Retrieve the residual deviance if this model has the attribute, or None otherwise.

        :param bool train: Get the residual deviance for the training set. If both train and valid are False,
            then train is selected by default.
        :param bool valid: Get the residual deviance for the validation set. If both train and valid are True,
            then train is selected by default.
        :param bool xval: Get the residual deviance for the cross-validated models.

        :returns: the residual deviance, or None if it is not present.
        """
        return {
            model.model_id: model.residual_deviance(train, valid, xval)
            for model in self.models
        }

    def residual_degrees_of_freedom(self,
                                    train=False,
                                    valid=False,
                                    xval=False):
        """
        Retrieve the residual degrees of freedom if this model has the attribute, or None otherwise.

        :param bool train: Get the residual dof for the training set. If both train and valid are False, then
            train is selected by default.
        :param bool valid: Get the residual dof for the validation set. If both train and valid are True, then
            train is selected by default.
        :param bool xval: Get the residual dof for the cross-validated models.

        :returns: the residual degrees of freedom, or None if they are not present.
        """
        return {
            model.model_id:
            model.residual_degrees_of_freedom(train, valid, xval)
            for model in self.models
        }

    def null_deviance(self, train=False, valid=False, xval=False):
        """
        Retrieve the null deviance if this model has the attribute, or None otherwise.

        :param bool train: Get the null deviance for the training set. If both train and valid are False, then
            train is selected by default.
        :param bool valid: Get the null deviance for the validation set. If both train and valid are True, then
            train is selected by default.
        :param bool xval: Get the null deviance for the cross-validated models.

        :returns: the null deviance, or None if it is not present.
        """
        return {
            model.model_id: model.null_deviance(train, valid, xval)
            for model in self.models
        }

    def null_degrees_of_freedom(self, train=False, valid=False, xval=False):
        """
        Retrieve the null degrees of freedom if this model has the attribute, or None otherwise.

        :param bool train: Get the null dof for the training set. If both train and valid are False, then train is
            selected by default.
        :param bool valid: Get the null dof for the validation set. If both train and valid are True, then train is
            selected by default.
        :param bool xval: Get the null dof for the cross-validated models.

        :returns: the null dof, or None if it is not present.
        """
        return {
            model.model_id: model.null_degrees_of_freedom(train, valid, xval)
            for model in self.models
        }

    def pprint_coef(self):
        """Pretty print the coefficents table (includes normalized coefficients)."""
        for i, model in enumerate(self.models):
            print('Model', i)
            model.pprint_coef()
            print()

    def coef(self):
        """Return the coefficients that can be applied to the non-standardized data.

        Note: standardize = True by default. If set to False, then coef() returns the coefficients that are fit directly.

        """
        return {model.model_id: model.coef() for model in self.models}

    def coef_norm(self):
        """Return coefficients fitted on the standardized data (requires standardize = True, which is on by default). These coefficients can be used to evaluate variable importance.

        """
        return {model.model_id: model.coef_norm() for model in self.models}

    def r2(self, train=False, valid=False, xval=False):
        """
        Return the R^2 for this regression model.

        The R^2 value is defined to be ``1 - MSE/var``, where ``var`` is computed as ``sigma^2``.

        If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
        "valid", and "xval".

        :param bool train: If train is True, then return the R^2 value for the training data.
        :param bool valid: If valid is True, then return the R^2 value for the validation data.
        :param bool xval:  If xval is True, then return the R^2 value for the cross validation data.

        :returns: The R^2 for this regression model.
        """
        return {
            model.model_id: model.r2(train, valid, xval)
            for model in self.models
        }

    def mse(self, train=False, valid=False, xval=False):
        """
        Get the MSE(s).

        If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
        "valid", and "xval".

        :param bool train: If train is True, then return the MSE value for the training data.
        :param bool valid: If valid is True, then return the MSE value for the validation data.
        :param bool xval:  If xval is True, then return the MSE value for the cross validation data.
        :returns: The MSE for this regression model.
        """
        return {
            model.model_id: model.mse(train, valid, xval)
            for model in self.models
        }

    def rmse(self, train=False, valid=False, xval=False):
        return {
            model.model_id: model.rmse(train, valid, xval)
            for model in self.models
        }

    def mae(self, train=False, valid=False, xval=False):
        return {
            model.model_id: model.mae(train, valid, xval)
            for model in self.models
        }

    def rmsle(self, train=False, valid=False, xval=False):
        return {
            model.model_id: model.rmsle(train, valid, xval)
            for model in self.models
        }

    def logloss(self, train=False, valid=False, xval=False):
        """
        Get the Log Loss(s).

        If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
        "valid", and "xval".

        :param bool train: If train is True, then return the Log Loss value for the training data.
        :param bool valid: If valid is True, then return the Log Loss value for the validation data.
        :param bool xval:  If xval is True, then return the Log Loss value for the cross validation data.

        :returns: The Log Loss for this binomial model.
        """
        return {
            model.model_id: model.logloss(train, valid, xval)
            for model in self.models
        }

    def mean_residual_deviance(self, train=False, valid=False, xval=False):
        """
        Get the Mean Residual Deviances(s).

        If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
        "valid", and "xval".

        :param bool train: If train is True, then return the Mean Residual Deviance value for the training data.
        :param bool valid: If valid is True, then return the Mean Residual Deviance value for the validation data.
        :param bool xval:  If xval is True, then return the Mean Residual Deviance value for the cross validation data.
        :returns: The Mean Residual Deviance for this regression model.
        """
        return {
            model.model_id: model.mean_residual_deviance(train, valid, xval)
            for model in self.models
        }

    def auc(self, train=False, valid=False, xval=False):
        """
        Get the AUC(s).

        If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
        "valid", and "xval".

        :param bool train: If train is True, then return the AUC value for the training data.
        :param bool valid: If valid is True, then return the AUC value for the validation data.
        :param bool xval:  If xval is True, then return the AUC value for the cross-validation data.

        :returns: The AUC.
        """
        return {
            model.model_id: model.auc(train, valid, xval)
            for model in self.models
        }

    def aic(self, train=False, valid=False, xval=False):
        """
        Get the AIC(s).

        If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
        "valid", and "xval".

        :param bool train: If train is True, then return the AIC value for the training data.
        :param bool valid: If valid is True, then return the AIC value for the validation data.
        :param bool xval:  If xval is True, then return the AIC value for the cross-validation data.

        :returns: The AIC.
        """
        return {
            model.model_id: model.aic(train, valid, xval)
            for model in self.models
        }

    def gini(self, train=False, valid=False, xval=False):
        """
        Get the Gini Coefficient(s).

        If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
        "valid", and "xval".

        :param bool train: If train is True, then return the Gini Coefficient value for the training data.
        :param bool valid: If valid is True, then return the Gini Coefficient value for the validation data.
        :param bool xval:  If xval is True, then return the Gini Coefficient value for the cross validation data.

        :returns: The Gini Coefficient for the models in this grid.
        """
        return {
            model.model_id: model.gini(train, valid, xval)
            for model in self.models
        }

    # @alias('pr_auc')
    def aucpr(self, train=False, valid=False, xval=False):
        """
        Get the AUCPR (area under the Precision-Recall curve).

        If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
        "valid", and "xval".

        :param bool train: If train is True, then return the aucpr value for the training data.
        :param bool valid: If valid is True, then return the aucpr value for the validation data.
        :param bool xval:  If xval is True, then return the aucpr value for the cross-validation data.

        :returns: The AUCPR for the models in this grid.
        """
        return {
            model.model_id: model.aucpr(train, valid, xval)
            for model in self.models
        }

    @deprecated(replaced_by=aucpr)
    def pr_auc(self):
        pass

    def get_hyperparams(self, id, display=True):
        """
        Get the hyperparameters of a model explored by grid search.

        :param str id: The model id of the model with hyperparameters of interest.
        :param bool display: Flag to indicate whether to display the hyperparameter names.

        :returns: A list of the hyperparameters for the specified model.
        """
        idx = id if is_type(id, int) else self.model_ids.index(id)
        model = self[idx]

        # if cross-validation is turned on, the parameters of one of the fold models (not the main model
        # that is returned) actually contain the max_runtime_secs parameter.
        if model._is_xvalidated:
            model = h2o.get_model(model._xval_keys[0])

        res = [
            model.params[h]['actual'][0] if isinstance(
                model.params[h]['actual'], list) else model.params[h]['actual']
            for h in self.hyper_params
        ]
        if display:
            print('Hyperparameters: [' +
                  ', '.join(list(self.hyper_params.keys())) + ']')
        return res

    def get_hyperparams_dict(self, id, display=True):
        """
        Derive and return the model parameters used to train this particular grid search model.

        :param str id: The model id of the model with hyperparameters of interest.
        :param bool display: Flag to indicate whether to display the hyperparameter names.

        :returns: A dict of model parameters derived from the hyperparameters used to train this particular model.
        """
        idx = id if is_type(id, int) else self.model_ids.index(id)
        model = self[idx]

        model_params = dict()

        # if cross-validation is turned on, the parameters of one of the fold models (not the main model
        # that is returned) actually contain the max_runtime_secs parameter.
        if model._is_xvalidated:
            model = h2o.get_model(model._xval_keys[0])

        for param_name in self.hyper_names:
            model_params[param_name] = model.params[param_name]['actual'][0] if \
                isinstance(model.params[param_name]['actual'], list) else model.params[param_name]['actual']

        if display:
            print('Hyperparameters: [' +
                  ', '.join(list(self.hyper_params.keys())) + ']')
        return model_params

    def sorted_metric_table(self):
        """
        Retrieve summary table of an H2O Grid Search.

        :returns: The summary table as an H2OTwoDimTable or a Pandas DataFrame.
        """
        summary = self._grid_json["summary_table"]
        if summary is not None: return summary.as_data_frame()
        print("No sorted metric table for this grid search")

    @staticmethod
    def _metrics_class(model_json):
        model_type = model_json["output"]["model_category"]
        if model_type == "Binomial":
            model_class = H2OBinomialGridSearch
        elif model_type == "Clustering":
            model_class = H2OClusteringGridSearch
        elif model_type == "Regression":
            model_class = H2ORegressionGridSearch
        elif model_type == "Multinomial":
            model_class = H2OMultinomialGridSearch
        elif model_type == "Ordinal":
            model_class = H2OOrdinalGridSearch
        elif model_type == "AutoEncoder":
            model_class = H2OAutoEncoderGridSearch
        elif model_type == "DimReduction":
            model_class = H2ODimReductionGridSearch
        else:
            raise NotImplementedError(model_type)
        return model_class

    def get_grid(self, sort_by=None, decreasing=None):
        """
        Retrieve an H2OGridSearch instance.

        Optionally specify a metric by which to sort models and a sort order.
        Note that if neither cross-validation nor a validation frame is used in the grid search, then the
        training metrics will display in the "get grid" output. If a validation frame is passed to the grid, and
        ``nfolds = 0``, then the validation metrics will display. However, if ``nfolds`` > 1, then cross-validation
        metrics will display even if a validation frame is provided.

        :param str sort_by: A metric by which to sort the models in the grid space. Choices are: ``"logloss"``,
            ``"residual_deviance"``, ``"mse"``, ``"auc"``, ``"r2"``, ``"accuracy"``, ``"precision"``, ``"recall"``,
            ``"f1"``, etc.
        :param bool decreasing: Sort the models in decreasing order of metric if true, otherwise sort in increasing
            order (default).

        :returns: A new H2OGridSearch instance optionally sorted on the specified metric.
        """
        if sort_by is None and decreasing is None: return self

        grid_json = h2o.api("GET /99/Grids/%s" % self._id,
                            data={
                                "sort_by": sort_by,
                                "decreasing": decreasing
                            })
        grid = H2OGridSearch(self.model, self.hyper_params, self._id)
        grid.models = [
            h2o.get_model(key['name']) for key in grid_json['model_ids']
        ]  # reordered
        first_model_json = h2o.api(
            "GET /99/Models/%s" %
            grid_json['model_ids'][0]['name'])['models'][0]
        model_class = H2OGridSearch._metrics_class(first_model_json)
        m = model_class()
        m._id = self._id
        m._grid_json = grid_json
        # m._metrics_class = metrics_class
        m._parms = grid._parms
        H2OEstimator.mixin(grid, model_class)
        grid.__dict__.update(m.__dict__.copy())
        return grid

    @deprecated("grid.sort_by() is deprecated; use grid.get_grid() instead")
    def sort_by(self, metric, increasing=True):
        """Deprecated since 2016-12-12, use grid.get_grid() instead."""

        if metric[-1] != ')': metric += '()'
        c_values = [
            list(x) for x in zip(*sorted(eval('self.' + metric + '.items()'),
                                         key=lambda k_v: k_v[1]))
        ]
        c_values.insert(1, [
            self.get_hyperparams(model_id, display=False)
            for model_id in c_values[0]
        ])
        if not increasing:
            for col in c_values:
                col.reverse()
        if metric[-2] == '(': metric = metric[:-2]
        return H2OTwoDimTable(col_header=[
            'Model Id', 'Hyperparameters: [' +
            ', '.join(list(self.hyper_params.keys())) + ']', metric
        ],
                              table_header='Grid Search Results for ' +
                              self.model.__class__.__name__,
                              cell_values=[list(x) for x in zip(*c_values)])
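A minimal end-to-end sketch of how the class above is typically used (added for illustration; not part of the original file). It combines asynchronous building via start()/join() with the sorted view returned by get_grid(). The estimator, dataset path, column names, grid id, and hyperparameter values below are placeholder assumptions.

import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.grid.grid_search import H2OGridSearch

h2o.init()
# Placeholder dataset: any frame with a binary response works.
train = h2o.import_file("smalldata/logreg/prostate.csv")
train["CAPSULE"] = train["CAPSULE"].asfactor()

hyper_params = {"max_depth": [3, 5], "ntrees": [20, 50]}
criteria = {"strategy": "RandomDiscrete", "max_models": 3, "seed": 42}
gs = H2OGridSearch(H2OGradientBoostingEstimator,
                   hyper_params=hyper_params,
                   grid_id="gbm_grid1",          # explicit id so get_grid() below can re-query it
                   search_criteria=criteria)

# Build the grid asynchronously, then block until it finishes.
gs.start(x=["AGE", "RACE", "PSA", "GLEASON"], y="CAPSULE", training_frame=train)
gs.join()

# Re-fetch the grid sorted by AUC (best model first) and inspect the winner.
best_first = gs.get_grid(sort_by="auc", decreasing=True)
print(best_first.sorted_metric_table())
print(best_first.get_hyperparams_dict(best_first.model_ids[0]))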
Code example #2
File: connection.py  Project: timgates42/h2o-3
class H2OConnection(h2o_meta()):
    """
    Connection handle to an H2O cluster.

    Typically, you don't need to access this class directly. Instead, use :func:`h2o.connect` to
    establish a connection, and :func:`h2o.api` to make requests to the backend H2O server. However, if your
    use-case is not typical, then read on.

    Instances of this class may only be created through the static method :meth:`open`::

        hc = H2OConnection.open(...)

    Once opened, the connection remains active until the script exits (or until you explicitly :meth:`close` it).
    If the script exits with an exception, then the connection will fail to close, and the backend server will
    keep all the temporary frames and the open session.

    Alternatively, you can use this class as a context manager, which will ensure that the connection gets closed
    at the end of the ``with ...`` block even if an exception occurs::

        with H2OConnection.open() as hc:
            hc.info().pprint()

    Once the connection is established, you can send REST API requests to the server using :meth:`request`.
    """
    """
    Defines pattern matching URL in the following form ``schema://ip:port/context_path``.
    """
    url_pattern = r"^(https?)://((?:[\w-]+\.)*[\w-]+):(\d+)/?((/[\w-]+)+)?$"

    @staticmethod
    def open(server=None,
             url=None,
             ip=None,
             port=None,
             name=None,
             https=None,
             auth=None,
             verify_ssl_certificates=True,
             cacert=None,
             proxy=None,
             cookies=None,
             verbose=True,
             msgs=None,
             strict_version_check=True):
        r"""
        Establish connection to an existing H2O server.

        The connection is not kept alive, so what this method actually does is attempt to connect to the
        specified server, and check that the server is healthy and responds to REST API requests. If the H2O server
        cannot be reached, an :class:`H2OConnectionError` will be raised. On a success, this method returns a new
        :class:`H2OConnection` object, and it is the only "official" way to create instances of this class.

        There are 3 ways to specify the target to connect to (these settings are mutually exclusive):

            * pass a ``server`` option,
            * pass the full ``url`` for the connection,
            * provide a triple of parameters ``ip``, ``port``, ``https``.

        :param H2OLocalServer server: connect to the specified local server instance. There is a slight difference
            between connecting to a local server by specifying its IP address and port, and connecting through
            an H2OLocalServer instance: if the server becomes unresponsive, then having access to its process handle
            will allow us to query the server status through the OS, and potentially provide a snapshot of the server's
            error log in the exception information.
        :param url: full url of the server to connect to.
        :param ip: target server's IP address or hostname (default "localhost").
        :param port: H2O server's port (default 54321).
        :param name: H2O cluster name.
        :param https: if True then connect using https instead of http (default False).
        :param verify_ssl_certificates: if False then SSL certificate checking will be disabled (default True). This
            setting should rarely be disabled, as it makes your connection vulnerable to man-in-the-middle attacks. When
            disabled, it will generate a warning from the requests library. Has no effect when ``https`` is False.
        :param cacert: Path to a CA bundle file or a directory with certificates of trusted CAs (optional).
        :param auth: authentication token for connecting to the remote server. This can be either a
            (username, password) tuple, or an authenticator (AuthBase) object. Please refer to the documentation in
            the ``requests.auth`` module.
        :param proxy: url address of a proxy server. If you do not specify the proxy, then the requests module
            will attempt to use a proxy specified in the environment (in HTTP_PROXY / HTTPS_PROXY variables). We
            check for the presence of these variables and issue a warning if they are found. In order to suppress
            that warning and use proxy from the environment, pass ``proxy="(default)"``.
        :param cookies: Cookie (or list of) to add to requests.
        :param verbose: if True, then connection progress info will be printed to the stdout.
        :param strict_version_check: If True, an error will be raised if the client and server versions don't match.
        :param msgs: custom messages to display during connection. This is a tuple (initial message, success message,
            failure message).

        :returns: A new :class:`H2OConnection` instance.
        :raises H2OConnectionError: if the server cannot be reached.
        :raises H2OServerError: if the server is in an unhealthy state (although this might be a recoverable error, the
            client itself should decide whether it wants to retry or not).
        """
        if server is not None:
            assert_is_type(server, H2OLocalServer)
            assert_is_type(
                ip, None,
                "`ip` should be None when `server` parameter is supplied")
            assert_is_type(
                url, None,
                "`url` should be None when `server` parameter is supplied")
            assert_is_type(
                name, None,
                "`name` should be None when `server` parameter is supplied")
            if not server.is_running():
                raise H2OConnectionError(
                    "Unable to connect to server because it is not running")
            ip = server.ip
            port = server.port
            scheme = server.scheme
            context_path = ''
        elif url is not None:
            assert_is_type(url, str)
            assert_is_type(
                ip, None,
                "`ip` should be None when `url` parameter is supplied")
            assert_is_type(name, str, None)
            # We don't allow any Unicode characters in the URL. Maybe some day we will...
            match = assert_matches(url, H2OConnection.url_pattern)
            scheme = match.group(1)
            ip = match.group(2)
            port = int(match.group(3))
            context_path = '' if match.group(4) is None else "%s" % (
                match.group(4))
        else:
            if ip is None: ip = str("localhost")
            if port is None: port = 54321
            if https is None: https = False
            if is_type(port, str) and port.isdigit(): port = int(port)
            assert_is_type(ip, str)
            assert_is_type(port, int)
            assert_is_type(name, str, None)
            assert_is_type(https, bool)
            assert_matches(ip, r"(?:[\w-]+\.)*[\w-]+")
            assert_satisfies(port, 1 <= port <= 65535)
            scheme = "https" if https else "http"
            context_path = ''

        if verify_ssl_certificates is None: verify_ssl_certificates = True
        assert_is_type(verify_ssl_certificates, bool)
        assert_is_type(cacert, str, None)
        assert_is_type(proxy, str, None)
        assert_is_type(auth, AuthBase, (str, str), None)
        assert_is_type(cookies, str, [str], None)
        assert_is_type(msgs, None, (str, str, str))

        conn = H2OConnection()
        conn._verbose = bool(verbose)
        conn._local_server = server
        conn._base_url = "%s://%s:%d%s" % (scheme, ip, port, context_path)
        conn._name = server.name if server else name
        conn._verify_ssl_cert = bool(verify_ssl_certificates)
        conn._cacert = cacert
        conn._auth = auth
        conn._cookies = cookies
        conn._proxies = None
        if proxy and proxy != "(default)":
            conn._proxies = {scheme: proxy}
        elif not proxy:
            # Give user a warning if there are any "*_proxy" variables in the environment. [PUBDEV-2504]
            # To suppress the warning pass proxy = "(default)".
            for name in os.environ:
                if name.lower() == scheme + "_proxy":
                    warn("Proxy is defined in the environment: %s. "
                         "This may interfere with your H2O Connection." % name)

            if "localhost" in conn.ip() or "127.0.0.1" in conn.ip():
                # For localhost connections, explicitly set the proxies to None so that the
                # requests library does not pick up proxy settings from the environment.

                conn._proxies = {
                    "http": None,
                    "https": None,
                }

        try:
            retries = 20 if server else 5
            conn._stage = 1
            conn._timeout = 3.0
            conn._cluster = conn._test_connection(retries, messages=msgs)
            # If a server is unable to respond within 1s, it should be considered a bug. However we disable this
            # setting for now, for no good reason other than to ignore all those bugs :(
            conn._timeout = None

            # create a weakref to prevent the atexit callback from keeping hard ref
            # to the connection even after manual close.
            conn_ref = ref(conn)

            def exit_close():
                con = conn_ref()
                if con and con.connected:
                    print("Closing connection %s at exit" % con.session_id)
                    con.close()

            atexit.register(exit_close)
        except Exception:
            # Reset _session_id so that we know the connection was not initialized properly.
            conn._stage = 0
            raise

        conn._cluster.check_version(strict=strict_version_check)
        return conn

    def request(self,
                endpoint,
                data=None,
                json=None,
                filename=None,
                save_to=None):
        """
        Perform a REST API request to the backend H2O server.

        :param endpoint: (str) The endpoint's URL, for example "GET /4/schemas/KeyV4"
        :param data: data payload for POST (and sometimes GET) requests. This should be a dictionary of simple
            key/value pairs (values can also be arrays), which will be sent over in x-www-form-encoded format.
        :param json: also data payload, but it will be sent as a JSON body. Cannot be used together with `data`.
        :param filename: file to upload to the server. Cannot be used with `data` or `json`.
        :param save_to: if provided, will write the response to that file (additionally, the response will be
            streamed, so large files can be downloaded seamlessly). This parameter can be either a file name,
            or a folder name. If the folder doesn't exist, it will be created automatically.

        :returns: an H2OResponse object representing the server's response (unless ``save_to`` parameter is
            provided, in which case the output file's name will be returned).
        :raises H2OConnectionError: if the H2O server cannot be reached (or connection is not initialized).
        :raises H2OServerError: if there was a server error (http 500), or server returned malformed JSON.
        :raises H2OResponseError: if the server returned an H2OErrorV3 response (e.g. if the parameters were invalid).
        """
        if self._stage == 0:
            raise H2OConnectionError(
                "Connection not initialized; run .connect() first.")
        if self._stage == -1:
            raise H2OConnectionError(
                "Connection was closed, and can no longer be used.")

        # Prepare URL
        assert_is_type(endpoint, str)
        match = assert_matches(
            str(endpoint), r"^(GET|POST|PUT|DELETE|PATCH|HEAD|TRACE) (/.*)$")
        method = match.group(1)
        urltail = match.group(2)
        url = self._base_url + urltail

        # Prepare data
        if filename is not None:
            assert_is_type(filename, str)
            assert_is_type(
                json, None,
                "Argument `json` should be None when `filename` is used.")
            assert_is_type(
                data, None,
                "Argument `data` should be None when `filename` is used.")
            assert_satisfies(
                method, method == "POST",
                "File uploads can only be done via POST method, got %s" %
                method)
        elif data is not None:
            assert_is_type(data, dict)
            assert_is_type(
                json, None,
                "Argument `json` should be None when `data` is used.")
        elif json is not None:
            assert_is_type(json, dict)

        request_data = self._prepare_data_payload(
            data) if filename is None else self._prepare_file_payload(filename)

        params = None
        if (method == "GET" or method == "DELETE") and data:
            params = request_data
            request_data = None

        stream = False
        if save_to is not None:
            assert_is_type(save_to, str, types.FunctionType)
            stream = True

        if self._cookies is not None and isinstance(self._cookies, list):
            self._cookies = ";".join(self._cookies)

        # Make the request
        start_time = time.time()
        try:
            self._log_start_transaction(endpoint, request_data, json, filename,
                                        params)
            args = self._request_args()
            resp = requests.request(method=method,
                                    url=url,
                                    data=request_data,
                                    json=json,
                                    params=params,
                                    stream=stream,
                                    **args)
            if isinstance(save_to, types.FunctionType):
                save_to = save_to(resp)
            self._log_end_transaction(start_time, resp)
            return self._process_response(resp, save_to)

        except (requests.exceptions.ConnectionError,
                requests.exceptions.HTTPError) as e:
            if self._local_server and not self._local_server.is_running():
                self._log_end_exception("Local server has died.")
                raise H2OConnectionError(
                    "Local server has died unexpectedly. RIP.")
            else:
                self._log_end_exception(e)
                raise H2OConnectionError("Unexpected HTTP error: %s" % e)
        except requests.exceptions.Timeout as e:
            self._log_end_exception(e)
            elapsed_time = time.time() - start_time
            raise H2OConnectionError("Timeout after %.3fs" % elapsed_time)
        except H2OResponseError as e:
            err = e.args[0]
            if isinstance(err, H2OErrorV3):
                err.endpoint = endpoint
                err.payload = (request_data, json, filename, params)
            raise

    def _request_args(self):
        headers = {
            "User-Agent": "H2O Python client/" + sys.version.replace("\n", ""),
            "X-Cluster": self._cluster_id,
            "Cookie": self._cookies
        }
        verify = self._cacert if self._verify_ssl_cert and self._cacert else self._verify_ssl_cert
        return {
            'headers': headers,
            'timeout': self._timeout,
            'auth': self._auth,
            'verify': verify,
            'proxies': self._proxies
        }

    @staticmethod
    def save_to_detect(resp):
        """Derive the output filename from the response's Content-Disposition header."""
        disposition = resp.headers['Content-Disposition']
        return disposition.split("filename=")[1].strip()

    def close(self):
        """
        Close an existing connection; once closed it cannot be used again.

        Strictly speaking, it is not necessary to close all connections that you opened -- we have several mechanisms
        in place that will do so automatically (``__del__()``, ``__exit__()``, and ``atexit()`` handlers); however, there is
        also no good reason to make this method private.
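
        Example (illustrative sketch, not part of the original docstring; assumes an established connection ``conn``)::

            >>> conn.close()
            >>> conn.connected
            False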
        """
        if self._session_id:
            try:
                # If the server has gone bad, we don't want to wait forever...
                if self._timeout is None: self._timeout = 1
                self.request("DELETE /4/sessions/%s" % self._session_id)
                self._print("H2O session %s closed." % self._session_id)
            except Exception as e:
                self._print("H2O session %s was not closed properly." %
                            self._session_id)
                self._log_end_exception(e)
            self._session_id = None
        self._stage = -1

    @property
    def connected(self):
        return self._stage > 0

    @property
    def session_id(self):
        """
        Return the session id of the current connection.

        The session id is issued (through an API request) the first time it is requested, but no sooner. This is
        because generating a session id puts it into the DKV on the server, which effectively locks the cluster. Once
        issued, the session id will stay the same until the connection is closed.
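
        Example (illustrative sketch; ``conn`` denotes an established connection)::

            >>> sid = conn.session_id    # the first access issues "POST /4/sessions"
            >>> sid == conn.session_id   # later accesses reuse the cached id
            True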
        """
        if self._session_id is None:
            req = self.request("POST /4/sessions")
            self._session_id = req.get("session_key") or req.get("session_id")
        return CallableString(self._session_id)

    @property
    def cluster(self):
        """H2OCluster object describing the underlying cluster."""
        return self._cluster

    @property
    def base_url(self):
        """Base URL of the server, without trailing ``"/"``. For example: ``"https://example.com:54321"``."""
        return self._base_url

    @property
    def name(self):
        return self._name

    @property
    def proxy(self):
        """URL of the proxy server used for the connection (or None if there is no proxy)."""
        return self._proxies

    @property
    def local_server(self):
        """Handler to the H2OLocalServer instance (if connected to one)."""
        return self._local_server

    @property
    def requests_count(self):
        """Total number of request requests made since the connection was opened (used for debug purposes)."""
        return self._requests_counter

    @property
    def timeout_interval(self):
        """Timeout length for each request, in seconds."""
        return self._timeout

    @timeout_interval.setter
    def timeout_interval(self, v):
        assert_is_type(v, numeric, None)
        self._timeout = v

    def start_logging(self, dest=None):
        """
        Start logging all API requests to the provided destination.

        :param dest: Where to write the log: either a filename (str), or an open file handle (file). If not given,
            then a new temporary file will be created.
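
        Example (illustrative sketch; the log path is hypothetical and ``conn`` is an established connection)::

            >>> conn.start_logging("/tmp/h2o-requests.log")
            >>> about = conn.request("GET /3/About")   # this request is appended to the log file
            >>> conn.stop_logging()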
        """
        assert_is_type(dest, None, str, type(sys.stdout))
        if dest is None:
            dest = os.path.join(tempfile.mkdtemp(), "h2o-connection.log")
        self._print("Now logging all API requests to file %r" % dest)
        self._is_logging = True
        self._logging_dest = dest

    def stop_logging(self):
        """Stop logging API requests."""
        if self._is_logging:
            self._print("Logging stopped.")
            self._is_logging = False

    # ------------------------------------------------------------------------------------------------------------------
    # PRIVATE
    # ------------------------------------------------------------------------------------------------------------------

    def __init__(self):
        """[Private] Please use H2OConnection.connect() to create H2OConnection objects."""
        # for backward-compatibility: __H2OCONN__ is the latest instantiated object
        globals()["__H2OCONN__"] = self
        self._stage = 0  # 0 = not connected, 1 = connected, -1 = disconnected
        self._session_id = None  # Rapids session id; issued upon request only
        self._base_url = None  # "{scheme}://{ip}:{port}"
        self._name = None
        self._verify_ssl_cert = None
        self._cacert = None
        self._auth = None  # Authentication token
        self._proxies = None  # `proxies` dictionary in the format required by the requests module
        self._cluster_id = None
        self._cookies = None
        self._cluster = None  # H2OCluster object
        self._verbose = None  # Print detailed information about connection status
        self._requests_counter = 0  # how many API requests were made
        self._timeout = None  # timeout for a single request (in seconds)
        self._is_logging = False  # when True, log every request
        self._logging_dest = None  # where the log messages will be written, either filename or open file handle
        self._local_server = None  # H2OLocalServer instance to which we are connected (if known)
        # self.start_logging(sys.stdout)

    def _test_connection(self, max_retries=5, messages=None):
        """
        Test that the H2O cluster can be reached, and retrieve basic cluster status info.

        :param max_retries: Number of times to try to connect to the cluster (with 0.2s intervals).

        :returns: Cluster information (an H2OCluster object)
        :raises H2OConnectionError, H2OServerError:
        """
        if messages is None:
            messages = ("Connecting to H2O server at {url} ..", "successful.",
                        "failed.")
        self._print(messages[0].format(url=self._base_url), end="")
        cld = None
        errors = []
        for _ in range(max_retries):
            self._print(".", end="", flush=True)
            if self._local_server and not self._local_server.is_running():
                raise H2OServerError("Local server was unable to start")
            try:
                define_classes_from_schema(_classes_defined_from_schema_, self)
                cld = self.request("GET /3/Cloud")

                if self.name and cld.cloud_name != self.name:
                    raise H2OConnectionError(
                        "Connected to cloud %s but requested %s." %
                        (cld.cloud_name, self.name))
                if cld.consensus and cld.cloud_healthy:
                    self._print(" " + messages[1])
                    return cld
                else:
                    if cld.consensus and not cld.cloud_healthy:
                        msg = "in consensus but not healthy"
                    elif not cld.consensus and cld.cloud_healthy:
                        msg = "not in consensus but healthy"
                    else:
                        msg = "not in consensus and not healthy"
                    errors.append(
                        "Cloud is in a bad shape: %s (size = %d, bad nodes = %d)"
                        % (msg, cld.cloud_size, cld.bad_nodes))
            except (H2OConnectionError, H2OServerError) as e:
                message = str(e)
                if "\n" in message: message = message[:message.index("\n")]
                errors.append("[%s.%02d] %s: %s" %
                              (time.strftime("%M:%S"), int(time.time() * 100) %
                               100, e.__class__.__name__, message))
            # Cloud too small, or voting in progress, or server is not up yet; sleep then try again
            time.sleep(0.2)

        self._print(" " + messages[2])
        if cld and not cld.cloud_healthy:
            raise H2OServerError("Cluster reports unhealthy status")
        if cld and not cld.consensus:
            raise H2OServerError("Cluster cannot reach consensus")
        else:
            raise H2OConnectionError(
                "Could not establish link to the H2O cloud %s after %d retries\n%s"
                % (self._base_url, max_retries, "\n".join(errors)))

    @staticmethod
    def _prepare_data_payload(data):
        """
        Make a copy of the `data` object, preparing it to be sent to the server.

        The data will be sent via x-www-form-urlencoded or multipart/form-data mechanisms. Both of them work with
        plain lists of key/value pairs, so this method converts the data into such format.
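
        Illustrative sketch (not part of the original docstring)::

            >>> payload = H2OConnection._prepare_data_payload({"x": [1, 2], "y": None, "z": 3})
            >>> sorted(payload)   # "y" is dropped because it is None; the remaining values are stringified
            ['x', 'z']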
        """
        if not data: return None
        res = {}
        for key, value in viewitems(data):
            if value is None:
                continue  # don't send args set to None so backend defaults take precedence
            if isinstance(value, list):
                value = stringify_list(value)
            elif isinstance(value, dict):
                if "__meta" in value and value["__meta"][
                        "schema_name"].endswith("KeyV3"):
                    value = value["name"]
                else:
                    value = stringify_dict(value)
            else:
                value = str(value)
            res[key] = value
        return res

    @staticmethod
    def _prepare_file_payload(filename):
        """
        Prepare `filename` to be sent to the server.

        The "preparation" consists of creating a data structure suitable
        for passing to requests.request().
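
        Illustrative sketch (not part of the original docstring; the path is hypothetical and must exist locally)::

            >>> payload = H2OConnection._prepare_file_payload("smalldata/iris/iris.csv")
            >>> payload.mode
            'rb'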
        """
        if not filename: return None
        absfilename = os.path.abspath(filename)
        if not os.path.exists(absfilename):
            raise H2OValueError("File %s does not exist" % filename,
                                skip_frames=1)
        return open(absfilename, "rb")

    def _log_start_transaction(self, endpoint, data, json, filename, params):
        """Log the beginning of an API request."""
        # TODO: add information about the caller, i.e. which module + line of code called the .request() method
        #       This can be done by fetching current traceback and then traversing it until we find the request function
        self._requests_counter += 1
        if not self._is_logging: return
        msg = "\n---- %d --------------------------------------------------------\n" % self._requests_counter
        msg += "[%s] %s\n" % (time.strftime("%H:%M:%S"), endpoint)
        if params is not None:
            msg += "     params: {%s}\n" % ", ".join(
                "%s:%s" % item for item in viewitems(params))
        if json is not None:
            import json as j
            msg += "     json: %s\n" % j.dumps(json)
        if filename is not None:
            msg += "     file: %s\n" % filename
        elif data is not None:
            msg += "     body: {%s}\n" % ", ".join("%s:%s" % item
                                                   for item in viewitems(data))
        self._log_message(msg + "\n")

    def _log_end_transaction(self, start_time, response):
        """Log response from an API request."""
        if not self._is_logging: return
        elapsed_time = int((time.time() - start_time) * 1000)
        msg = "<<< HTTP %d %s   (%d ms)\n" % (response.status_code,
                                              response.reason, elapsed_time)
        if "Content-Type" in response.headers:
            msg += "    Content-Type: %s\n" % response.headers["Content-Type"]
        msg += response.text
        self._log_message(msg + "\n\n")

    def _log_end_exception(self, exception):
        """Log API request that resulted in an exception."""
        if not self._is_logging: return
        self._log_message(">>> %s\n\n" % str(exception))

    def _log_message(self, msg):
        """
        Log the message `msg` to the destination `self._logging_dest`.

        If this destination is a file name, then we append the message to the file and then close the file
        immediately. If the destination is an open file handle, then we simply write the message there and do not
        attempt to close it.
        """
        if is_type(self._logging_dest, str):
            with open(self._logging_dest, "at", encoding="utf-8") as f:
                f.write(msg)
        else:
            self._logging_dest.write(msg)

    @staticmethod
    def _process_response(response, save_to):
        """
        Given a response object, prepare it to be handed over to the external caller.

        Preparation steps include:
           * detect if the response has error status, and convert it to an appropriate exception;
           * detect Content-Type, and based on that either parse the response as JSON or return as plain text.
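
        Illustrative sketch using a hand-built ``requests.Response`` (not part of the original docstring)::

            >>> import requests
            >>> resp = requests.Response()
            >>> resp.status_code = 200
            >>> resp.headers["Content-Type"] = "text/plain"
            >>> resp._content = b"hello"
            >>> H2OConnection._process_response(resp, save_to=None)
            'hello'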
        """
        status_code = response.status_code
        if status_code == 200 and save_to:
            if save_to.startswith("~"): save_to = os.path.expanduser(save_to)
            if os.path.isdir(save_to) or save_to.endswith(os.path.sep):
                dirname = os.path.join(os.path.abspath(save_to), '')
                filename = H2OConnection._find_file_name(response)
            else:
                dirname, filename = os.path.split(os.path.abspath(save_to))
            fullname = os.path.join(dirname, filename)
            try:
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
                with open(fullname, "wb") as f:
                    for chunk in response.iter_content(chunk_size=65536):
                        if chunk:  # Empty chunks may occasionally happen
                            f.write(chunk)
            except OSError as e:
                raise H2OValueError("Cannot write to file %s: %s" %
                                    (fullname, e))
            return fullname

        content_type = response.headers.get("Content-Type", "")
        if ";" in content_type:  # Remove a ";charset=..." part
            content_type = content_type[:content_type.index(";")]

        # this is needed so that response.text() works correctly
        response.encoding = response.headers.get("Character-Encoding",
                                                 response.encoding)

        # Auto-detect response type by its content-type. Decode JSON, all other responses pass as-is.
        if content_type == "application/json":
            try:
                data = response.json(object_pairs_hook=H2OResponse)
            except (JSONDecodeError,
                    requests.exceptions.ContentDecodingError) as e:
                raise H2OServerError("Malformed JSON from server (%s):\n%s" %
                                     (str(e), response.text))
        else:
            data = response.text

        # Success (200 = "Ok", 201 = "Created", 202 = "Accepted", 204 = "No Content")
        if status_code in {200, 201, 202, 204}:
            return data

        # Client errors (400 = "Bad Request", 404 = "Not Found", 412 = "Precondition Failed")
        if status_code in {400, 404, 412} and isinstance(data, H2OErrorV3):
            data.show_stacktrace = False
            raise H2OResponseError(data)

        # Server errors (notably 500 = "Server Error")
        # Note that it is possible to receive valid H2OErrorV3 object in this case, however it merely means the server
        # did not provide the correct status code.
        raise H2OServerError("HTTP %d %s:\n%s" %
                             (status_code, response.reason, data))

    @staticmethod
    def _find_file_name(response):
        cd = response.headers.get("Content-Disposition", "")
        mm = re.search(r'filename="(.*)"$', cd)
        return mm.group(1) if mm else "unknown"

    def _print(self, msg, flush=False, end="\n"):
        """Helper function to print connection status messages when in verbose mode."""
        if self._verbose:
            print2(msg, end=end, flush=flush)

    def __repr__(self):
        if self._stage == 0:
            return "<H2OConnection uninitialized>"
        elif self._stage == 1:
            sess = "session %s" % self._session_id if self._session_id else "no session"
            return "<H2OConnection to %s, %s>" % (self._base_url, sess)
        else:
            return "<H2OConnection closed>"

    def __enter__(self):
        """Called when an H2OConnection object is created within the ``with ...`` statement."""
        return self

    def __exit__(self, *args):
        """Called at the end of the ``with ...`` statement."""
        self.close()
        assert len(args) == 3  # Avoid warning about unused args...
        return False  # ensure that any exception will be re-raised
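
Usage sketch (illustrative, not part of the original source): the connection class above is normally obtained
through the public ``h2o.connect()`` helper rather than constructed directly. Assuming an H2O server is
reachable at the URL below::

    >>> import h2o
    >>> conn = h2o.connect(url="http://localhost:54321")   # assumed server address
    >>> conn.start_logging()                               # logs API requests to a temporary file
    >>> about = conn.request("GET /3/About")
    >>> conn.close()
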
Code Example #3
File: metrics_base.py Project: Kendralabs/h2o-4
class MetricsBase(h2o_meta()):
    """
    A parent class to house common metrics available for the various Metrics types.

    The methods here are available across different model categories.
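
    Example (illustrative sketch; assumes a trained H2O model ``model`` and a validation frame ``valid``)::

        >>> perf = model.model_performance(valid)   # returns a MetricsBase subclass
        >>> perf.show()
        >>> perf.mse(), perf.rmse()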
    """

    def __init__(self, metric_json, on=None, algo=""):
        # Yep, it's messed up...
        if isinstance(metric_json, MetricsBase): metric_json = metric_json._metric_json
        self._metric_json = metric_json
        # train and valid and xval are not mutually exclusive -- could have a test. train and
        # valid only make sense at model build time.
        self._on_train = False
        self._on_valid = False
        self._on_xval = False
        self._algo = algo
        if on == "training_metrics":
            self._on_train = True
        elif on == "validation_metrics":
            self._on_valid = True
        elif on == "cross_validation_metrics":
            self._on_xval = True
        elif on is None:
            pass
        else:
            raise ValueError("on expected to be train,valid,or xval. Got: " + str(on))

    @classmethod
    def make(cls, kvs):
        """Factory method to instantiate a MetricsBase object from the list of key-value pairs."""
        return cls(metric_json=dict(kvs))

    def __repr__(self):
        # FIXME !!!  __repr__ should never print anything, but return a string
        self.show()
        return ""

    # TODO: convert to actual fields list
    def __getitem__(self, key):
        return self._metric_json.get(key)

    @staticmethod
    def _has(dictionary, key):
        return key in dictionary and dictionary[key] is not None

    def show(self):
        """Display a short summary of the metrics."""
        if self._metric_json is None:
            print("WARNING: Model metrics cannot be calculated and metric_json is empty due to the absence of the response column in your dataset.")
            return
        metric_type = self._metric_json['__meta']['schema_type']
        types_w_glm = ['ModelMetricsRegressionGLM', 'ModelMetricsRegressionGLMGeneric', 'ModelMetricsBinomialGLM',
                       'ModelMetricsBinomialGLMGeneric', 'ModelMetricsHGLMGaussianGaussian', 
                       'ModelMetricsHGLMGaussianGaussianGeneric']
        types_w_clustering = ['ModelMetricsClustering']
        types_w_mult = ['ModelMetricsMultinomial', 'ModelMetricsMultinomialGeneric']
        types_w_ord = ['ModelMetricsOrdinal', 'ModelMetricsOrdinalGeneric']
        types_w_bin = ['ModelMetricsBinomial', 'ModelMetricsBinomialGeneric', 'ModelMetricsBinomialGLM', 'ModelMetricsBinomialGLMGeneric']
        types_w_r2 = ['ModelMetricsRegressionGLM', 'ModelMetricsRegressionGLMGeneric']
        types_w_mean_residual_deviance = ['ModelMetricsRegressionGLM', 'ModelMetricsRegressionGLMGeneric',
                                          'ModelMetricsRegression', 'ModelMetricsRegressionGeneric']
        types_w_mean_absolute_error = ['ModelMetricsRegressionGLM', 'ModelMetricsRegressionGLMGeneric',
                                       'ModelMetricsRegression', 'ModelMetricsRegressionGeneric']
        types_w_logloss = types_w_bin + types_w_mult + types_w_ord
        types_w_dim = ["ModelMetricsGLRM"]
        types_w_anomaly = ['ModelMetricsAnomaly']

        print()
        print(metric_type + ": " + self._algo)
        reported_on = "** Reported on {} data. **"
        if self._on_train:
            print(reported_on.format("train"))
        elif self._on_valid:
            print(reported_on.format("validation"))
        elif self._on_xval:
            print(reported_on.format("cross-validation"))
        else:
            print(reported_on.format("test"))
        print()
        if metric_type not in types_w_anomaly:
            print("MSE: " + str(self.mse()))
            print("RMSE: " + str(self.rmse()))
        if metric_type in types_w_mean_absolute_error:
            print("MAE: " + str(self.mae()))
            print("RMSLE: " + str(self.rmsle()))
        if metric_type in types_w_r2:
            print("R^2: " + str(self.r2()))
        if metric_type in types_w_mean_residual_deviance:
            print("Mean Residual Deviance: " + str(self.mean_residual_deviance()))
        if metric_type in types_w_logloss:
            print("LogLoss: " + str(self.logloss()))
        if metric_type in ['ModelMetricsBinomial', 'ModelMetricsBinomialGeneric']:
            # second element for first threshold is the actual mean per class error
            print("Mean Per-Class Error: %s" % self.mean_per_class_error()[0][1])
        if metric_type in types_w_mult or metric_type in ['ModelMetricsOrdinal', 'ModelMetricsOrdinalGeneric']:
            print("Mean Per-Class Error: " + str(self.mean_per_class_error()))
        if metric_type in types_w_glm:
            if metric_type == 'ModelMetricsHGLMGaussianGaussian': # print something for HGLM
                print("Standard error of fixed columns: "+str(self.hglm_metric("sefe")))
                print("Standard error of random columns: "+str(self.hglm_metric("sere")))
                print("Coefficients for fixed columns: "+str(self.hglm_metric("fixedf")))
                print("Coefficients for random columns: "+str(self.hglm_metric("ranef")))
                print("Random column indices: "+str(self.hglm_metric("randc")))
                print("Dispersion parameter of the mean model (residual variance for LMM): "+str(self.hglm_metric("varfix")))
                print("Dispersion parameter of the random columns (variance of random columns): "+str(self.hglm_metric("varranef")))
                print("Convergence reached for algorithm: "+str(self.hglm_metric("converge")))
                print("Deviance degrees of freedom for mean part of the model: "+str(self.hglm_metric("dfrefe")))
                print("Estimates and standard errors of the linear prediction in the dispersion model: "+str(self.hglm_metric("summvc1")))
                print("Estimates and standard errors of the linear predictor for the dispersion parameter of the random columns: "+str(self.hglm_metric("summvc2")))
                print("Index of most influential observation (-1 if none): "+str(self.hglm_metric("bad")))
                print("H-likelihood: "+str(self.hglm_metric("hlik")))
                print("Profile log-likelihood profiled over random columns: "+str(self.hglm_metric("pvh")))
                print("Adjusted profile log-likelihood profiled over fixed and random effects: "+str(self.hglm_metric("pbvh")))
                print("Conditional AIC: "+str(self.hglm_metric("caic")))
            else:
                print("Null degrees of freedom: " + str(self.null_degrees_of_freedom()))
                print("Residual degrees of freedom: " + str(self.residual_degrees_of_freedom()))
                print("Null deviance: " + str(self.null_deviance()))
                print("Residual deviance: " + str(self.residual_deviance()))
                print("AIC: " + str(self.aic()))
        if metric_type in types_w_bin:
            print("AUC: " + str(self.auc()))
            print("AUCPR: " + str(self.aucpr()))
            print("Gini: " + str(self.gini()))
            self.confusion_matrix().show()
            self._metric_json["max_criteria_and_metric_scores"].show()
            if self.gains_lift():
                print(self.gains_lift())
        if metric_type in types_w_anomaly:
            print("Anomaly Score: " + str(self.mean_score()))
            print("Normalized Anomaly Score: " + str(self.mean_normalized_score()))
        if (metric_type in types_w_mult) or (metric_type in types_w_ord):
            self.confusion_matrix().show()
            self.hit_ratio_table().show()
        if metric_type in types_w_clustering:
            print("Total Within Cluster Sum of Square Error: " + str(self.tot_withinss()))
            print("Total Sum of Square Error to Grand Mean: " + str(self.totss()))
            print("Between Cluster Sum of Square Error: " + str(self.betweenss()))
            self._metric_json['centroid_stats'].show()

        if metric_type in types_w_dim:
            print("Sum of Squared Error (Numeric): " + str(self.num_err()))
            print("Misclassification Error (Categorical): " + str(self.cat_err()))
        if self.custom_metric_name():
            print("{}: {}".format(self.custom_metric_name(), self.custom_metric_value()))


    def r2(self):
        """The R squared coefficient."""
        return self._metric_json["r2"]


    def logloss(self):
        """Log loss."""
        return self._metric_json["logloss"]


    def nobs(self):
        """The number of observations."""
        return self._metric_json["nobs"]


    def mean_residual_deviance(self):
        """The mean residual deviance for this set of metrics."""
        return self._metric_json["mean_residual_deviance"]


    def auc(self):
        """The AUC for this set of metrics."""
        return self._metric_json['AUC']


    def aucpr(self):
        """The area under the precision recall curve."""
        return self._metric_json['pr_auc']


    @deprecated(replaced_by=aucpr)
    def pr_auc(self):
        pass


    def aic(self):
        """The AIC for this set of metrics."""
        return self._metric_json['AIC']


    def gini(self):
        """Gini coefficient."""
        return self._metric_json['Gini']


    def mse(self):
        """The MSE for this set of metrics."""
        return self._metric_json['MSE']


    def rmse(self):
        """The RMSE for this set of metrics."""
        return self._metric_json['RMSE']


    def mae(self):
        """The MAE for this set of metrics."""
        return self._metric_json['mae']


    def rmsle(self):
        """The RMSLE for this set of metrics."""
        return self._metric_json['rmsle']


    def residual_deviance(self):
        """The residual deviance if the model has it, otherwise None."""
        if MetricsBase._has(self._metric_json, "residual_deviance"):
            return self._metric_json["residual_deviance"]
        return None
    
    def hglm_metric(self, metric_string):
        if MetricsBase._has(self._metric_json, metric_string):
            return self._metric_json[metric_string]
        return None
    
    def residual_degrees_of_freedom(self):
        """The residual DoF if the model has residual deviance, otherwise None."""
        if MetricsBase._has(self._metric_json, "residual_degrees_of_freedom"):
            return self._metric_json["residual_degrees_of_freedom"]
        return None


    def null_deviance(self):
        """The null deviance if the model has residual deviance, otherwise None."""
        if MetricsBase._has(self._metric_json, "null_deviance"):
            return self._metric_json["null_deviance"]
        return None


    def null_degrees_of_freedom(self):
        """The null DoF if the model has residual deviance, otherwise None."""
        if MetricsBase._has(self._metric_json, "null_degrees_of_freedom"):
            return self._metric_json["null_degrees_of_freedom"]
        return None


    def mean_per_class_error(self):
        """The mean per class error."""
        return self._metric_json['mean_per_class_error']

    def custom_metric_name(self):
        """Name of custom metric or None."""
        if MetricsBase._has(self._metric_json, "custom_metric_name"):
            return self._metric_json['custom_metric_name']
        else:
            return None

    def custom_metric_value(self):
        """Value of custom metric or None."""
        if MetricsBase._has(self._metric_json, "custom_metric_value"):
            return self._metric_json['custom_metric_value']
        else:
            return None
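
Usage sketch (illustrative, not part of the original source): the accessors above simply read keys from the
underlying ``_metric_json`` dictionary, and entries guarded by ``_has()`` fall back to None when absent::

    >>> m = MetricsBase({"MSE": 0.04, "RMSE": 0.2, "nobs": 150}, algo="gbm")
    >>> m.mse(), m.rmse(), m.nobs()
    (0.04, 0.2, 150)
    >>> m.null_deviance() is None   # the key is missing, so the _has() guard returns None
    True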