def outlier_candidates(regressor=None, X=None, y=None):
    """Measures a datapoint's influence on regression model via cook's distance.

    Instances with high influences could potentially be outliers.

    Should only be called with a fitted regressor (otherwise an error is thrown).

    Please note this function fits the model on the training set when called.

    Arguments:
        regressor: (regressor) Takes in a fitted regressor.
        X: (arr) Training set features.
        y: (arr) Training set labels.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
            under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_outlier_candidates(model, X, y)
        ```
    """
    # Guard naming matches the sibling helpers (residuals, learning_curve, ...),
    # which all use `not_missing`.
    not_missing = utils.test_missing(regressor=regressor, X=X, y=y)
    correct_types = utils.test_types(regressor=regressor, X=X, y=y)
    is_fitted = utils.test_fitted(regressor)

    if not_missing and correct_types and is_fitted:
        # calculate.outlier_candidates works on an ndarray of labels.
        y = np.asarray(y)

        outliers_chart = calculate.outlier_candidates(regressor, X, y)
        wandb.log({"outlier_candidates": outliers_chart})
def residuals(regressor=None, X=None, y=None):
    """Measures and plots the regressor's predicted value against the residual.

    The marginal distribution of residuals is also calculated and plotted.

    Should only be called with a fitted regressor (otherwise an error is thrown).

    Please note this function fits variations of the model on the training set
    when called.

    Arguments:
        regressor: (regressor) Takes in a fitted regressor.
        X: (arr) Training set features.
        y: (arr) Training set labels.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
            under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_residuals(model, X, y)
        ```
    """
    # All three preconditions are evaluated up front, mirroring the other
    # plotting helpers in this module.
    checks = [
        utils.test_missing(regressor=regressor, X=X, y=y),
        utils.test_types(regressor=regressor, X=X, y=y),
        utils.test_fitted(regressor),
    ]

    if all(checks):
        labels = np.asarray(y)
        chart = calculate.residuals(regressor, X, labels)
        wandb.log({"residuals": chart})
def feature_importances(model=None, feature_names=None, title="Feature Importance", max_num_features=50):
    """Logs a plot depicting the relative importance of each feature for a classifier's decisions.

    Should only be called with a fitted classifer (otherwise an error is thrown).

    Only works with classifiers that have a feature_importances_ attribute, like trees.

    Arguments:
        model: (clf) Takes in a fitted classifier.
        feature_names: (list) Names for features. Makes plots easier to read by
            replacing feature indexes with corresponding names.
        title: (str) Title of the plot. Defaults to 'Feature Importance'.
            NOTE(review): currently not forwarded to the chart calculation.
        max_num_features: (int) Cap on the number of features shown. Defaults
            to 50. NOTE(review): currently not forwarded to the chart calculation.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
            under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_feature_importances(model, ['width', 'height', 'length'])
        ```
    """
    not_missing = utils.test_missing(model=model)
    correct_types = utils.test_types(model=model)
    model_fitted = utils.test_fitted(model)

    if not_missing and correct_types and model_fitted:
        feature_importance_chart = calculate.feature_importances(model, feature_names)
        wandb.log({"feature_importances": feature_importance_chart})
def class_proportions(y_train=None, y_test=None, labels=None):
    """Plots the distribution of target classes in training and test sets.

    Useful for detecting imbalanced classes.

    Arguments:
        y_train: (arr) Training set labels.
        y_test: (arr) Test set labels.
        labels: (list) Named labels for target variable (y). Makes plots easier to
            read by replacing target values with corresponding index.
            For example if `labels=['dog', 'cat', 'owl']` all 0s are
            replaced by dog, 1s by cat.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
            under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_class_proportions(y_train, y_test, ['dog', 'cat', 'owl'])
        ```
    """
    not_missing = utils.test_missing(y_train=y_train, y_test=y_test)
    correct_types = utils.test_types(y_train=y_train, y_test=y_test)

    if not_missing and correct_types:
        # calculate.class_proportions works on ndarrays of labels.
        y_train, y_test = np.array(y_train), np.array(y_test)

        class_proportions_chart = calculate.class_proportions(y_train, y_test, labels)
        wandb.log({"class_proportions": class_proportions_chart})
def calibration_curve(clf=None, X=None, y=None, clf_name="Classifier"):
    """Logs a plot depicting how well-calibrated the predicted probabilities of a classifier are.

    Also suggests how to calibrate an uncalibrated classifier. Compares estimated
    predicted probabilities by a baseline logistic regression model, the model passed
    as an argument, and by both its isotonic calibration and sigmoid calibrations.
    The closer the calibration curves are to a diagonal the better.
    A sine wave like curve represents an overfitted classifier, while a cosine wave
    like curve represents an underfitted classifier. By training isotonic and sigmoid
    calibrations of the model and comparing their curves we can figure out whether
    the model is over or underfitting and if so which calibration (sigmoid or
    isotonic) might help fix this.

    For more details, see https://scikit-learn.org/stable/auto_examples/calibration/plot_calibration_curve.html.

    Should only be called with a fitted classifer (otherwise an error is thrown).

    Please note this function fits variations of the model on the training set when called.

    Arguments:
        clf: (clf) Takes in a fitted classifier.
        X: (arr) Training set features.
        y: (arr) Training set labels.
        clf_name: (str) Model name. Defaults to 'Classifier'

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
            under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_calibration_curve(clf, X, y, 'RandomForestClassifier')
        ```
    """
    not_missing = utils.test_missing(clf=clf, X=X, y=y)
    correct_types = utils.test_types(clf=clf, X=X, y=y)
    is_fitted = utils.test_fitted(clf)

    if not_missing and correct_types and is_fitted:
        y = np.asarray(y)
        # Only binary 0/1 labels are supported: bail out on string dtypes ('U')
        # or any label outside {0, 1}.
        if y.dtype.char == "U" or not ((y == 0) | (y == 1)).all():
            wandb.termwarn(
                "This function only supports binary classification at the moment and therefore expects labels to be binary. Skipping calibration curve."
            )
            return

        calibration_curve_chart = calculate.calibration_curves(clf, X, y, clf_name)
        wandb.log({"calibration_curve": calibration_curve_chart})
def confusion_matrix(
    y_true=None,
    y_pred=None,
    labels=None,
    true_labels=None,
    pred_labels=None,
    normalize=False,
):
    """Logs a confusion matrix to W&B.

    Confusion matrices depict the pattern of misclassifications by a model.

    Arguments:
        y_true: (arr) Test set labels.
        y_pred: (arr) Test set predicted labels.
        labels: (list) Named labels for target variable (y). Makes plots easier to
            read by replacing target values with corresponding index.
            For example if `labels=['dog', 'cat', 'owl']` all 0s are
            replaced by dog, 1s by cat.
        true_labels: (list) Subset of labels to show on the true (row) axis.
        pred_labels: (list) Subset of labels to show on the predicted (column) axis.
        normalize: (bool) If True, normalize the matrix counts. Defaults to False.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
            under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_confusion_matrix(y_true, y_pred, labels)
        ```
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)

    not_missing = utils.test_missing(y_true=y_true, y_pred=y_pred)
    correct_types = utils.test_types(y_true=y_true, y_pred=y_pred)

    if not_missing and correct_types:
        confusion_matrix_chart = calculate.confusion_matrix(
            y_true,
            y_pred,
            labels,
            true_labels,
            pred_labels,
            normalize,
        )

        wandb.log({"confusion_matrix": confusion_matrix_chart})
def learning_curve(
    model=None,
    X=None,
    y=None,
    cv=None,
    shuffle=False,
    random_state=None,
    train_sizes=None,
    n_jobs=1,
    scoring=None,
):
    """Logs a plot depicting model performance against dataset size.

    Please note this function fits the model to datasets of varying sizes when called.

    Arguments:
        model: (clf or reg) Takes in a fitted regressor or classifier.
        X: (arr) Dataset features.
        y: (arr) Dataset labels.

    For details on the other keyword arguments, see the documentation for
    `sklearn.model_selection.learning_curve`.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
            under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_learning_curve(model, X, y)
        ```
    """
    inputs_present = utils.test_missing(model=model, X=X, y=y)
    types_ok = utils.test_types(model=model, X=X, y=y)

    if not (inputs_present and types_ok):
        return

    # Default to five evenly spaced fractions of the training set.
    if train_sizes is None:
        train_sizes = np.linspace(0.1, 1.0, 5)

    chart = calculate.learning_curve(
        model,
        X,
        np.asarray(y),
        cv,
        shuffle,
        random_state,
        train_sizes,
        n_jobs,
        scoring,
    )
    wandb.log({"learning_curve": chart})
def silhouette(
    clusterer=None,
    X=None,
    cluster_labels=None,
    labels=None,
    metric="euclidean",
    kmeans=True,
):
    """Measures & plots silhouette coefficients.

    Silhouette coefficients near +1 indicate that the sample is far away from
    the neighboring clusters. A value near 0 indicates that the sample is on or
    very close to the decision boundary between two neighboring clusters and
    negative values indicate that the samples might have been assigned to the
    wrong cluster.

    Should only be called with a fitted clusterer (otherwise an error is thrown).

    Please note this function fits the model on the training set when called.

    Arguments:
        clusterer: (clusterer) Takes in a fitted clusterer.
        X: (arr) Training set features.
        cluster_labels: (list) Names for cluster labels. Makes plots easier to
            read by replacing cluster indexes with corresponding names.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
            under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_silhouette(model, X_train, ['spam', 'not spam'])
        ```
    """
    # NOTE(review): only the clusterer itself is validated here; X is not
    # passed to test_missing/test_types, matching the original contract.
    preconditions = [
        utils.test_missing(clusterer=clusterer),
        utils.test_types(clusterer=clusterer),
        utils.test_fitted(clusterer),
    ]

    if all(preconditions):
        # calculate.silhouette expects a raw array, not a DataFrame.
        features = X.values if isinstance(X, (pd.DataFrame)) else X

        chart = calculate.silhouette(
            clusterer, features, cluster_labels, labels, metric, kmeans
        )
        wandb.log({"silhouette_plot": chart})
def elbow_curve(clusterer=None, X=None, cluster_ranges=None, n_jobs=1, show_cluster_time=True):
    """Measures and plots variance explained as a function of the number of clusters.

    Useful in picking the optimal number of clusters.

    Should only be called with a fitted clusterer (otherwise an error is thrown).

    Please note this function fits the model on the training set when called.

    Arguments:
        clusterer: (clusterer) Takes in a fitted clusterer.
        X: (arr) Training set features.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
            under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_elbow_curve(model, X_train)
        ```
    """
    # The elbow method sweeps n_clusters, so the estimator must expose it.
    if not hasattr(clusterer, "n_clusters"):
        wandb.termlog(
            "n_clusters attribute not in classifier. Cannot plot elbow method."
        )
        return

    not_missing = utils.test_missing(clusterer=clusterer)
    # BUG FIX: utils.test_types was previously assigned (not called), so the
    # type check always passed — a function object is truthy.
    correct_types = utils.test_types(clusterer=clusterer)
    is_fitted = utils.test_fitted(clusterer)

    if not_missing and correct_types and is_fitted:
        elbow_curve_chart = calculate.elbow_curve(
            clusterer, X, cluster_ranges, n_jobs, show_cluster_time
        )
        wandb.log({"elbow_curve": elbow_curve_chart})
def summary_metrics(model=None, X=None, y=None, X_test=None, y_test=None):
    """Logs a chart depicting summary metrics for a model.

    Should only be called with a fitted model (otherwise an error is thrown).

    Arguments:
        model: (clf or reg) Takes in a fitted regressor or classifier.
        X: (arr) Training set features.
        y: (arr) Training set labels.
        X_test: (arr) Test set features.
        y_test: (arr) Test set labels.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
            under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_summary_metrics(model, X_train, y_train, X_test, y_test)
        ```
    """
    # Bundle the arguments once so each precondition check sees the same inputs.
    kwargs = dict(model=model, X=X, y=y, X_test=X_test, y_test=y_test)

    inputs_present = utils.test_missing(**kwargs)
    types_ok = utils.test_types(**kwargs)
    fitted = utils.test_fitted(model)

    if inputs_present and types_ok and fitted:
        chart = calculate.summary_metrics(model, X, y, X_test, y_test)
        wandb.log({"summary_metrics": chart})
def decision_boundaries(binary_clf=None, X=None, y=None):
    """Visualizes decision boundaries of a binary classifier.

    Works by sampling from the feature space where the classifier's uncertainty
    if greater than > 0.5 and projecting these point to 2D space.

    Useful for measuring model (decision boundary) complexity, visualizing
    regions where the model falters, and to determine whether any over or
    underfitting occured.

    Should only be called with a fitted **binary** classifer (otherwise an error is
    thrown).

    Please note this function fits variations of the model on the training set when
    called.

    Arguments:
        binary_clf: (clf) Takes in a fitted binary classifier.
        X: (arr) Training set features.
        y: (arr) Training set labels.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
            under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_decision_boundaries(binary_classifier, X, y)
        ```
    """
    if utils.test_missing(binary_clf=binary_clf, X=X, y=y) and utils.test_types(
        binary_clf=binary_clf, X=X, y=y
    ):
        y = np.asarray(y)

        # plot high-dimensional decision boundary
        # BUG FIX: the plotter was being overwritten with None right after
        # construction (`db = None`), so db.fit() raised AttributeError.
        db = DBPlot(binary_clf)
        db.fit(X, y)
        (
            decision_boundary_x,
            decision_boundary_y,
            decision_boundary_color,
            train_x,
            train_y,
            train_color,
            test_x,
            test_y,
            test_color,
        ) = db.plot()

        wandb.log(
            {
                "decision_boundaries": calculate.decision_boundaries(
                    decision_boundary_x,
                    decision_boundary_y,
                    decision_boundary_color,
                    train_x,
                    train_y,
                    train_color,
                    test_x,
                    test_y,
                    test_color,
                )
            }
        )