def test_get_model_repr_pipeline_instance_model_in_pipeline(
        pipeline_instance_model_in_pipeline, ):
    """The repr of a pipeline wrapping a model is complete (not truncated)
    and matches the un-truncated repr with all whitespace stripped."""
    model_repr = get_estimator_repr(pipeline_instance_model_in_pipeline)

    # A truncated sklearn repr contains an ellipsis; a full one never does.
    assert "..." not in model_repr

    expected = pipeline_instance_model_in_pipeline.__repr__(N_CHAR_MAX=10000)
    expected = expected.replace("\n", "").replace(" ", "")
    assert model_repr == expected
def _score(self, method_caller, estimator, X, y_true, sample_weight=None):
    """Evaluate predicted target values for X relative to y_true.

    Parameters
    ----------
    method_caller : callable
        Returns predictions given an estimator, method name, and other
        arguments, potentially caching results.
    estimator : object
        Trained estimator to use for scoring. Must have a ``predict``
        method; its output is used to compute the score.
    X : array-like or sparse matrix
        Test data that will be fed to ``estimator.predict``.
    y_true : array-like
        Gold standard target values for X.
    sample_weight : array-like, default=None
        Sample weights.

    Returns
    -------
    score : float
        Score function applied to prediction of estimator on X, or
        ``np.nan`` when the predictions contain missing or infinite
        values.
    """
    y_pred = estimator.predict(X)

    # Record the estimator's identity and persist its predictions so the
    # score can later be traced back to the exact model that produced it.
    estimator_repr = get_estimator_repr(estimator)
    estimator_hash = generate_estimator_hash(estimator)
    self._upsert_estimator_hash(estimator_repr, estimator_hash)
    self._save_prediction(y_pred=y_pred, estimator_label=estimator_hash,
                          y_true=y_true)

    # NOTE(review): ``isna`` assumes ``y_pred`` is a pandas object
    # (Series/DataFrame) — a plain ndarray would raise AttributeError;
    # confirm against the estimator's predict output type.
    if y_pred.isna().any().any() or np.isinf(y_pred).any().any():
        return np.nan
    if sample_weight is not None:
        return self._sign * self._score_func(
            y_true, y_pred, sample_weight=sample_weight, **self._kwargs)
    return self._sign * self._score_func(y_true, y_pred, **self._kwargs)
def test_get_model_repr_single_model(wrapper_instance):
    """The repr of a single wrapped model is complete (not truncated) and
    matches the un-truncated repr with all whitespace stripped."""
    model_repr = get_estimator_repr(wrapper_instance)

    # A truncated sklearn repr contains an ellipsis; a full one never does.
    assert "..." not in model_repr

    expected = wrapper_instance.__repr__(N_CHAR_MAX=10000)
    expected = expected.replace("\n", "").replace(" ", "")
    assert model_repr == expected