Example #1
0
def gradeOfClassifer(kenel, slot):
    """Train a linear SVM on the features built for *slot* and print its
    evaluation metrics (precision, recall, F1 score, confusion matrix).

    Args:
        kenel: kernel selector; currently UNUSED — the classifier is fixed to
            a linear kernel. NOTE(review): the original kernel-selection
            if/elif chain was commented out; confirm whether it should be
            restored (see matrixOfClassification for the selecting variant).
        slot: forwarded to extrat.normData to build features and labels.

    Returns:
        The macro-averaged precision on the held-out test split.
    """
    np.set_printoptions(suppress=True)
    x, lable = extrat.normData(slot)

    # Split 40% train / 60% test with a fixed seed for reproducibility.
    x_train, x_test, lable_train, lable_test = train_test_split(
        x, lable, random_state=1, train_size=0.4, test_size=0.6)

    # gamma is ignored by sklearn when kernel="linear"; kept for parity
    # with the other SVM constructors in this file.
    clf = svm.SVC(C=0.8, kernel="linear", gamma=20, decision_function_shape='ovr')
    clf.fit(x_train, lable_train.ravel())  # train the SVM on the train split
    pre = clf.predict(x_test)  # predict on the test split

    # Precision, macro-averaged over the classes.
    P = metrics.precision_score(lable_test, pre, average='macro')
    # Recall, macro-averaged.
    R = metrics.recall_score(lable_test, pre, average='macro')
    # F1 score, weighted by class support.
    F1 = metrics.f1_score(lable_test, pre, average='weighted')
    # Confusion matrix over the four class labels.
    M = metrics.confusion_matrix(lable_test, pre, labels=[1.0, 2.0, 3.0, 4.0])
    print("查准率" + str(P))
    print("召回率" + str(R))
    print("F1分数" + str(F1))
    print("混淆矩阵")
    print(M)
    print()
    return P
Example #2
0
def gradeOfClassifer(k, slot):
    """Train a k-NN classifier on the features built for *slot* and print its
    evaluation metrics (precision, recall, F1 score, confusion matrix).

    Args:
        k: number of neighbours passed to KNeighborsClassifier.
        slot: forwarded to extrat.normData to build features and labels.

    Returns:
        The macro-averaged precision on the held-out test split.
    """
    np.set_printoptions(suppress=True)

    # Per the original author's note: x = [local distance, coordinate count,
    # variance] feature vectors, lable = class labels.
    x, lable = extrat.normData(slot)

    # Split 40% train / 60% test with a fixed seed for reproducibility.
    x_train, x_test, lable_train, lable_test = train_test_split(
        x, lable, random_state=1, train_size=0.4, test_size=0.6)

    knn = KNeighborsClassifier(k)
    knn.fit(x_train, lable_train.ravel())  # fit on the train split
    pre = knn.predict(x_test)  # predict on the test split

    # Precision, macro-averaged over the classes.
    P = metrics.precision_score(lable_test, pre, average='macro')
    # Recall, macro-averaged.
    R = metrics.recall_score(lable_test, pre, average='macro')
    # F1 score, weighted by class support.
    F1 = metrics.f1_score(lable_test, pre, average='weighted')
    # Confusion matrix over the three class labels.
    M = metrics.confusion_matrix(lable_test, pre, labels=[1.0, 2.0, 3.0])
    print("查准率" + str(P))
    print("召回率" + str(R))
    print("F1分数" + str(F1))
    print("混淆矩阵")
    print(M)
    print()
    return P
Example #3
0
def P_R(slot, k):
    """Plot a precision-recall curve for a k-NN classifier evaluated on the
    first of five stratified folds of the data built for *slot*.

    Args:
        slot: forwarded to extrat.normData to build features and labels.
        k: number of neighbours passed to KNeighborsClassifier.
    """
    np.set_printoptions(suppress=True)

    x, lable = extrat.normData(slot)  # preprocess the data

    knn = KNeighborsClassifier(k)

    # Take only the first stratified fold for the curve.
    kfold = StratifiedKFold(n_splits=5)
    train_idx, test_idx = next(kfold.split(x, lable))

    knn.fit(x[train_idx], lable[train_idx])
    probas_ = knn.predict_proba(x[test_idx])
    print(probas_)
    precision, recall, thresholds = precision_recall_curve(
        lable[test_idx], probas_[:, 1])
    plt.plot(recall, precision, lw=1)

    # Reference diagonal, as in the original plot.
    plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label="Luck")
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel("Recall Rate")
    plt.ylabel("Precision Rate")
    plt.show()
Example #4
0
def gradeOfClassifer(layer, slot):
    """Train an MLP classifier on the features built for *slot* and print its
    evaluation metrics (precision, recall, F1 score, confusion matrix).

    Args:
        layer: size of the second hidden layer; the first is fixed at 14.
        slot: forwarded to extrat.normData to build features and labels.

    Returns:
        The macro-averaged precision on the held-out test split.
    """
    np.set_printoptions(suppress=True)

    x, lable = extrat.normData(slot)

    # Split 40% train / 60% test with a fixed seed for reproducibility.
    x_train, x_test, lable_train, lable_test = train_test_split(
        x, lable, random_state=1, train_size=0.4, test_size=0.6)

    clf = MLPClassifier(solver='lbfgs',
                        alpha=1e-5,
                        hidden_layer_sizes=(14, layer),
                        random_state=1,
                        max_iter=100)

    clf.fit(x_train, lable_train.ravel())  # fit on the train split
    pre = clf.predict(x_test)  # predict on the test split

    # Precision, macro-averaged over the classes.
    P = metrics.precision_score(lable_test, pre, average='macro')
    # Recall, macro-averaged.
    R = metrics.recall_score(lable_test, pre, average='macro')
    # F1 score, weighted by class support.
    F1 = metrics.f1_score(lable_test, pre, average='weighted')
    # Confusion matrix over the four class labels.
    M = metrics.confusion_matrix(lable_test, pre, labels=[1.0, 2.0, 3.0, 4.0])
    print("查准率" + str(P))
    print("召回率" + str(R))
    print("F1分数" + str(F1))
    print("混淆矩阵")
    print(M)
    print()
    return P
Example #5
0
def getKnnClassifer(slot, k=9):
    """Return a k-NN classifier fitted on the full data set for *slot*.

    Args:
        slot: forwarded to extrat.normData to build features and labels.
        k: number of neighbours (default 9).
    """
    np.set_printoptions(suppress=True)

    features, labels = extrat.normData(slot)  # preprocess the data

    # Fit on every sample — no train/test split for this helper.
    classifier = KNeighborsClassifier(k)
    classifier.fit(features, labels.ravel())
    return classifier
Example #6
0
def getSvmClassifer(slot):
    """Return a linear SVM fitted on the full data set for *slot*.

    Args:
        slot: forwarded to extrat.normData to build features and labels.
    """
    np.set_printoptions(suppress=True)
    features, labels = extrat.normData(slot)

    # Fit on every sample — no train/test split for this helper.
    model = svm.SVC(C=2, kernel="linear", gamma=20, decision_function_shape='ovr')
    model.fit(features, labels.ravel())
    return model
Example #7
0
def matrixOfClassification(kenel, slot):
    """Train an SVM with the kernel chosen by *kenel* and print a per-class
    classification breakdown plus the overall accuracy.

    Args:
        kenel: 0 -> rbf, 1 -> linear, anything else -> sigmoid.
            NOTE(review): the poly branch for kenel == 2 was commented out,
            so 2 currently falls through to sigmoid — confirm this is intended.
        slot: forwarded to extrat.normData to build features and labels.

    Returns:
        Mean accuracy on the held-out test split.
    """
    np.set_printoptions(suppress=True)
    x, lable = extrat.normData(slot)

    # Split 40% train / 60% test with a fixed seed for reproducibility.
    x_train, x_test, lable_train, lable_test = train_test_split(
        x, lable, random_state=1, train_size=0.4, test_size=0.6)

    if kenel == 0:
        clf = svm.SVC(C=0.8, kernel="rbf", gamma=20, decision_function_shape='ovr')
    elif kenel == 1:
        clf = svm.SVC(C=0.8, kernel="linear", gamma=20, decision_function_shape='ovr')
    else:
        clf = svm.SVC(C=0.8, kernel="sigmoid", gamma=20, decision_function_shape='ovr')

    clf.fit(x_train, lable_train.ravel())  # train the SVM
    score = clf.score(x_test, lable_test)
    pre = clf.predict(x_test)

    # rows[c][p] counts samples of true class c predicted as class p;
    # index 0 of each row is unused because classes are 1..4.
    rows = {c: [0, 0, 0, 0, 0] for c in (1, 2, 3, 4)}
    for predicted, actual in zip(pre, lable_test):
        row = rows.get(int(actual))
        if row is not None:  # silently skip labels outside 1..4, as before
            row[int(predicted)] += 1

    print("正确率:" + str(score))
    print(rows[1])
    print(rows[2])
    print(rows[3])
    print(rows[4])
    return score
Example #8
0
def matrixOfClassification(layer, slot):
    """Train an MLP classifier and print a per-class classification breakdown
    plus the overall accuracy.

    Args:
        layer: size of the second hidden layer; the first is fixed at 14.
        slot: forwarded to extrat.normData to build features and labels.

    Returns:
        Mean accuracy on the held-out test split.
    """
    np.set_printoptions(suppress=True)

    x, lable = extrat.normData(slot)

    # Split 40% train / 60% test with a fixed seed for reproducibility.
    x_train, x_test, lable_train, lable_test = train_test_split(
        x, lable, random_state=1, train_size=0.4, test_size=0.6)

    clf = MLPClassifier(solver='lbfgs',
                        alpha=1e-5,
                        hidden_layer_sizes=(14, layer),
                        random_state=1,
                        max_iter=100)

    clf.fit(x_train, lable_train.ravel())
    score = clf.score(x_test, lable_test)
    pre = clf.predict(x_test)

    # rows[c][p] counts samples of true class c predicted as class p;
    # index 0 of each row is unused because classes are 1..4.
    rows = {c: [0, 0, 0, 0, 0] for c in (1, 2, 3, 4)}
    for predicted, actual in zip(pre, lable_test):
        row = rows.get(int(actual))
        if row is not None:  # silently skip labels outside 1..4, as before
            row[int(predicted)] += 1

    print("正确率:" + str(score))
    print(rows[1])
    print(rows[2])
    print(rows[3])
    print(rows[4])
    return score
Example #9
0
def matrixOfClassification(k, slot):
    """Train a k-NN classifier and print a per-class classification breakdown
    plus the overall accuracy.

    Args:
        k: number of neighbours passed to KNeighborsClassifier.
        slot: forwarded to extrat.normData to build features and labels.

    Returns:
        Mean accuracy on the held-out test split.
    """
    np.set_printoptions(suppress=True)

    x, lable = extrat.normData(slot)  # preprocess the data

    # x_train / lable_train: training samples and their labels;
    # x_test / lable_test: test samples and their labels.
    # Split 40% train / 60% test with a fixed seed for reproducibility.
    x_train, x_test, lable_train, lable_test = train_test_split(
        x, lable, random_state=1, train_size=0.4, test_size=0.6)

    knn = KNeighborsClassifier(k)
    knn.fit(x_train, lable_train.ravel())

    score = knn.score(x_test, lable_test, sample_weight=None)
    pre = knn.predict(x_test)

    # rows[c][p] counts samples of true class c predicted as class p;
    # index 0 of each row is unused because classes are 1..4.
    rows = {c: [0, 0, 0, 0, 0] for c in (1, 2, 3, 4)}
    for predicted, actual in zip(pre, lable_test):
        row = rows.get(int(actual))
        if row is not None:  # silently skip labels outside 1..4, as before
            row[int(predicted)] += 1

    print("正确率:" + str(score))
    print(rows[1])
    print(rows[2])
    print(rows[3])
    print(rows[4])
    return score
Example #10
0
def getMplClassifer(slot, layer=15):
    """Return an MLP classifier fitted on the full data set for *slot*.

    Args:
        slot: forwarded to extrat.normData to build features and labels.
        layer: size of the second hidden layer (default 15); the first is 14.
    """
    np.set_printoptions(suppress=True)

    features, labels = extrat.normData(slot)

    model = MLPClassifier(solver='lbfgs',
                          alpha=1e-5,
                          hidden_layer_sizes=(14, layer),
                          random_state=1,
                          max_iter=100)

    # Fit on every sample — no train/test split for this helper.
    model.fit(features, labels.ravel())
    return model