Example #1
def build_classifier(X_train, y_train, escalate_classifier, tag, save_dir):
    # Under-sampling (disabled); the helper would be called as:
    # X_train_res, y_train_res = under_sampling(X_train, y_train)

    print("Training doc2vec_model...")
    vector_size, min_count, epochs = 300, 2, 20
    doc2vec_model = train_doc2vec(X_train, vector_size, min_count, epochs,
                                  save_dir, tag)

    X_train = feature_engineering(X_train, doc2vec_model)
    """
    # scale
    scaler = build_scaler(X_train)
    X_train = scale(scaler, X_train)
    """

    # Oversampling using SMOTE
    print("Oversampling...")
    X_train, y_train = smote_over_sampling(X_train, y_train)

    print("Fit the classifier")
    escalate_classifier.fit(X_train, y_train)

    # evaluating on training data
    fpr, tpr, model_auc = model_evaluate(escalate_classifier, X_train, y_train,
                                         False)

    print("Save the escalate classifier...")
    with open(os.path.join(save_dir, "{}.joblib".format(tag)), "wb") as model_file:
        dump(escalate_classifier, model_file)

    return escalate_classifier, doc2vec_model
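
The helpers called above (train_doc2vec, feature_engineering, smote_over_sampling) are not part of this example. A minimal sketch of what they might look like, assuming gensim and imbalanced-learn are available; only the names and signatures come from the calls above, everything else is an assumption:

import os

from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from imblearn.over_sampling import SMOTE


def train_doc2vec(docs, vector_size, min_count, epochs, save_dir, tag):
    # Assumed helper: train a Doc2Vec model on tokenized documents and save it.
    tagged = [TaggedDocument(words=doc.split(), tags=[i]) for i, doc in enumerate(docs)]
    model = Doc2Vec(vector_size=vector_size, min_count=min_count, epochs=epochs)
    model.build_vocab(tagged)
    model.train(tagged, total_examples=model.corpus_count, epochs=model.epochs)
    model.save(os.path.join(save_dir, "doc2vec_{}.model".format(tag)))
    return model


def feature_engineering(docs, doc2vec_model):
    # Assumed helper: turn each document into a fixed-length Doc2Vec vector.
    return [doc2vec_model.infer_vector(doc.split()) for doc in docs]


def smote_over_sampling(X, y):
    # Assumed helper: balance the classes by oversampling the minority class.
    return SMOTE(random_state=42).fit_resample(X, y)
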
    def select_model_best_parameters(self, model_flag, X_trainval, y_trainval, parameter_list, is_rf):
        X_train, X_val, y_train, y_val = train_test_split(X_trainval, y_trainval, random_state=42)

        # Scale sentiment metric features
        X_train, X_val = self.scale_features(X_train, X_val)

        # Oversampling
        X_train_res, y_train_res = smote_over_sampling(X_train, y_train)

        best_parameter = None
        best_auc = 0

        for param_val in parameter_list:
            model = self.set_model(model_flag, param_val)
            model_auc, model_precision_recall, _ = self.fit_classifier_model(model,
                                                                             X_train_res,
                                                                             X_val,
                                                                             y_train_res,
                                                                             y_val,
                                                                             is_rf)
            if model_auc > best_auc:
                best_auc = model_auc
                best_parameter = param_val

        return best_parameter
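
scale_features and fit_classifier_model are methods of the surrounding class and are not shown here. A minimal standalone sketch, assuming scaling uses a StandardScaler fit on the training split only and that the is_rf flag distinguishes models with predict_proba from those with only decision_function; none of this is confirmed by the source:

from sklearn.metrics import average_precision_score, roc_auc_score
from sklearn.preprocessing import StandardScaler


def scale_features(X_train, X_val):
    # Assumed helper: fit the scaler on the training split only, then apply it to both splits.
    scaler = StandardScaler().fit(X_train)
    return scaler.transform(X_train), scaler.transform(X_val)


def fit_classifier_model(model, X_train, X_val, y_train, y_val, is_rf):
    # Assumed helper: fit the model and score it on the held-out validation split.
    model.fit(X_train, y_train)
    if is_rf:
        preds = model.predict_proba(X_val)[:, 1]   # random forests expose class probabilities
    else:
        preds = model.decision_function(X_val)     # e.g. linear SVMs expose a margin instead
    model_auc = roc_auc_score(y_val, preds)
    model_precision_recall = average_precision_score(y_val, preds)
    return model_auc, model_precision_recall, preds
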
    def build_classifier(self, model_flag, X, y):
        X_trainval, X_test, y_trainval, y_test = train_test_split(X, y, random_state=42)

        X_trainval.columns = X.columns

        is_rf = (model_flag == RANDOM_FOREST)

        parameter_grid = self.get_parameter_list(model_flag)
        best_parameter = self.select_model_best_parameters(model_flag, X_trainval, y_trainval, parameter_grid, is_rf)

        # Applying the model with best parameters on all training data and evaluate the model on test data
        X_trainval, X_test = self.scale_features(X_trainval, X_test)
        X_trainval_res, y_trainval_res = smote_over_sampling(X_trainval, y_trainval)
        model = self.set_model(model_flag, best_parameter)
        model_auc, model_precision_recall, preds = self.fit_classifier_model(model,
                                                                             X_trainval_res,
                                                                             X_test,
                                                                             y_trainval_res,
                                                                             y_test,
                                                                             is_rf)
        print("The auc score of the model is {}, precision_recall: {} ".format(model_auc, model_precision_recall))
        fpr, tpr, thresholds = roc_curve(y_test, preds)
        title = "ROC curve for escalate classifier (AUC={:.3f})".format(model_auc)
        save_file = "figs/roc_escalation_classifier_" + MODEL_NAMES[model_flag] + ".png"
        draw_roc_curve(title, save_file, [fpr], [tpr], [model_auc], MODEL_NAMES[model_flag])

        return model, best_parameter
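
get_parameter_list, set_model, and the RANDOM_FOREST / MODEL_NAMES constants are defined elsewhere in the class. A hypothetical sketch of what they could look like; the flag values, parameter grids, and hyperparameters below are illustrative assumptions, not the author's values:

from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression

RANDOM_FOREST, LOGISTIC_REGRESSION = 0, 1
MODEL_NAMES = {RANDOM_FOREST: "random_forest", LOGISTIC_REGRESSION: "logistic_regression"}


def get_parameter_list(model_flag):
    # Assumed helper: candidate values swept by select_model_best_parameters.
    if model_flag == RANDOM_FOREST:
        return [50, 100, 200]        # n_estimators candidates
    return [0.01, 0.1, 1, 10]        # C candidates for logistic regression


def set_model(model_flag, param_val):
    # Assumed helper: instantiate the classifier for one candidate parameter value.
    if model_flag == RANDOM_FOREST:
        return RandomForestClassifier(n_estimators=param_val, random_state=42)
    return LogisticRegression(C=param_val, max_iter=1000)
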
    def multi_classifier(self, X, y, classifier, product_labels_name,
                         use_SMOTE):
        """
        Based on vectorized narrative X, and labels in y, build a multi-classifier
        to assign product type for complaints
        :param X: processed narratives
        :param y: product label
        :param classifier: the classifier to be used
        :param tf_idf_vectorizer: the tf_idf_vectorizer to be used
        :param product_labels_name: the product label names
        :param use_SMOTE: whether to use SMOTE or not
        :return: the classifier and the roc curve is drawn
        """
        X_trainval, X_test, y_trainval, y_test = train_test_split(
            X, y, random_state=0)

        print("tf-idf vectorizing...")
        X_trainval_vectorized = self.tf_idf_vectorizer.transform(X_trainval)
        X_test_vectorized = self.tf_idf_vectorizer.transform(X_test)

        multi_class_classifier = OneVsRestClassifier(classifier)

        print("Fit the model...")
        if use_SMOTE:
            X_trainval_res, y_trainval_res = smote_over_sampling(
                X_trainval_vectorized, y_trainval)
            y_score = multi_class_classifier.fit(
                X_trainval_res,
                y_trainval_res).decision_function(X_test_vectorized)
        else:
            y_score = multi_class_classifier.fit(
                X_trainval_vectorized,
                y_trainval).decision_function(X_test_vectorized)

        print("Draw the Roc curve...")
        # Compute ROC curve and ROC area for each class
        n_classes = len(product_labels_name)
        fpr = dict()
        tpr = dict()
        roc_auc = dict()
        for i in range(n_classes):
            fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])

        # Compute micro-average ROC curve and ROC area. Adopt micro-average ROC other
        # than macro-average ROC for imbalanced data
        fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(),
                                                  y_score.ravel())
        roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
        print("average AUC score: ", roc_auc["micro"])

        title = "ROC Curve of Product classifier AUC={:.3f}".format(
            roc_auc["micro"])
        save_file = "figs/ROC_Curve_Product.png"
        draw_micro = True
        draw_roc_curve(title, save_file, fpr, tpr, roc_auc,
                       product_labels_name, draw_micro)

        return multi_class_classifier
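
draw_roc_curve is not defined in this example. A minimal sketch matching the dict-based call at the end of multi_classifier, assuming matplotlib; the binary call in build_classifier passes lists instead, so the real helper is presumably more general:

import matplotlib.pyplot as plt


def draw_roc_curve(title, save_file, fpr, tpr, roc_auc, labels, draw_micro=False):
    # Assumed helper: plot one ROC curve per class and optionally the micro-average.
    plt.figure()
    for i, label in enumerate(labels):
        plt.plot(fpr[i], tpr[i], lw=1,
                 label="{} (AUC = {:.3f})".format(label, roc_auc[i]))
    if draw_micro:
        plt.plot(fpr["micro"], tpr["micro"], lw=2, linestyle=":",
                 label="micro-average (AUC = {:.3f})".format(roc_auc["micro"]))
    plt.plot([0, 1], [0, 1], linestyle="--", color="grey")
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.title(title)
    plt.legend(loc="lower right")
    plt.savefig(save_file)
    plt.close()
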
Example #5
def auc_analysis_with_cv(classifier, X, y, useSMOTE, use_under_sampling):
    """
    Draw roc curves using cross validation
    :param classifier:
    :param X:
    :param y:
    :param useSMOTE:
    :param under_sampling:
    :return:
    """
    cv = StratifiedKFold(n_splits=6)
    tprs = []
    aucs = []
    mean_fpr = np.linspace(0, 1, 100)

    i = 0
    X = np.array(X)
    y = np.array(y)
    for train, test in cv.split(X, y):
        if useSMOTE:
            X_train_res, y_train_res = smote_over_sampling(X[train], y[train])
            probas_ = classifier.fit(X_train_res, y_train_res).predict_proba(X[test])
        elif use_under_sampling:
            X_train_under, y_train_under = under_sampling(X[train], y[train])

            # scale
            #X_train_under, X_test = scale_features(X_train_under, X[test])

            # Feature engineering by vectorizing and generating sentiment metrics
            X_train_under, X_test = feature_engineer(X_train_under, X[test], "undersampling")

            probas_ = classifier.fit(X_train_under, y_train_under).predict_proba(X_test)
        else:
            probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
        # Compute ROC curve and area the curve
        fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
        tprs.append(np.interp(mean_fpr, fpr, tpr))
        tprs[-1][0] = 0.0
        roc_auc = auc(fpr, tpr)
        # Store roc_auc of each cv fold
        aucs.append(roc_auc)
        plt.plot(fpr, tpr, lw=1, alpha=0.3,
                 label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))

        i += 1

    plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
             label='Chance', alpha=.8)

    mean_tpr = np.mean(tprs, axis=0)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)

    std_auc = np.std(aucs)
    plt.plot(mean_fpr, mean_tpr, color='b',
             label=r'Mean ROC (AUC = %0.2f $\pm$ %0.3f)' % (mean_auc, std_auc),
             lw=2, alpha=.8)

    std_tpr = np.std(tprs, axis=0)
    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
    plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
                     label=r'$\pm$ 1 std. dev.')

    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right")
    plt.show()

    return mean_auc, std_auc
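
A hypothetical invocation on a toy imbalanced dataset, relying on the smote_over_sampling helper sketched earlier; make_classification and the 90/10 class weights are illustrative only:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X_demo, y_demo = make_classification(n_samples=600, n_features=20,
                                     weights=[0.9, 0.1], random_state=42)
mean_auc, std_auc = auc_analysis_with_cv(LogisticRegression(max_iter=1000),
                                         X_demo, y_demo,
                                         useSMOTE=True, use_under_sampling=False)
print("Mean CV AUC: {:.3f} +/- {:.3f}".format(mean_auc, std_auc))
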