Example #1
 def score(self, x_test, y_test):
     """Return the accuracy of the current model on the test set.
     :param x_test: test feature matrix
     :param y_test: true labels for x_test
     :return: accuracy of the predictions
     """
     y_predict = self.predict(x_test)
     return accuracy_score(y_test, y_predict)
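All of these snippets delegate to accuracy_score, which returns the fraction of predictions that match the true labels. A minimal NumPy sketch of that computation (hypothetical name accuracy_score_sketch, assuming 1-D label arrays):

import numpy as np

def accuracy_score_sketch(y_true, y_pred):
    # Fraction of positions where the prediction equals the true label
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    return np.mean(y_true == y_pred)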
Example #2
    def score(self, x_test, y_test):
        """算法准确率"""
        y_predict = self.predict(x_test)
        y_predict = np.array(y_predict)

        return accuracy_score(y_test, y_predict)
"""
    
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess


### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()




#########################################################
### your code goes here ###
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

clf = GaussianNB()
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)
score = accuracy_score(labels_test, pred)
print(score)


#########################################################
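The snippet above imports time but never uses it; a sketch of how the fit and predict calls are typically timed (t0/t1 are new names, everything else reuses the variables from the snippet):

t0 = time()
clf.fit(features_train, labels_train)
print("training time:", round(time() - t0, 3), "s")

t1 = time()
pred = clf.predict(features_test)
print("predicting time:", round(time() - t1, 3), "s")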


Example #4
 def acc(self, y, p):
     return accuracy_score(np.argmax(y, axis=1), np.argmax(p, axis=1))
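Here y and p are assumed to be one-hot true labels and predicted probability rows; np.argmax(..., axis=1) turns each row into a class index before the comparison, for example:

import numpy as np

y = np.array([[0, 1, 0], [1, 0, 0]])               # one-hot true labels
p = np.array([[0.2, 0.7, 0.1], [0.6, 0.3, 0.1]])   # predicted probabilities
print(np.argmax(y, axis=1), np.argmax(p, axis=1))  # [1 0] [1 0] -> accuracy 1.0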
Example #5
 def score(self, X, y):
     predictions = self.predict(X)
     return accuracy_score(y, predictions)
Example #6
 def score(self, x_test, y_test):
     y_predict = self.predict(x_test)
     return accuracy_score(y_test, y_predict)
Example #7
                # Use the mean and variance of this feature under the current class
                sample_feature = sample[j]
                # Evaluate the Gaussian density for this feature value
                likelihood = self._calculate_likelihood(
                    params["mean"], params["var"], sample_feature)
                posterior *= likelihood
            posteriors.append(posterior)
        # Pick the class with the highest posterior probability
        index_of_max = np.argmax(posteriors)
        return self.classes[index_of_max]

    def predict(self, X):
        y_pred = []
        for sample in X:
            y = self._classify(sample)
            y_pred.append(y)
        return y_pred


if __name__ == '__main__':
    from sklearn import datasets

    data = datasets.load_digits()
    X = normalize(data.data)
    y = data.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
    clf = NaiveBayes()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    print(accuracy_score(y_pred, y_test))
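Example #7 calls a helper self._calculate_likelihood(mean, var, x) that is not shown in the excerpt. A plausible sketch, assuming it evaluates a univariate Gaussian density with a small epsilon for numerical stability:

import math

def calculate_likelihood(mean, var, x, eps=1e-4):
    # Univariate Gaussian density N(x | mean, var); eps guards against var == 0
    coeff = 1.0 / math.sqrt(2.0 * math.pi * var + eps)
    exponent = math.exp(-((x - mean) ** 2) / (2.0 * var + eps))
    return coeff * exponent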
Example #8
if __name__ == '__main__':
    X, y = gen_mult_ser(3000)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
    clf = NeuralNetwork(optimizer=optimizer, loss=CrossEntropy)
    clf.add(RNN(10, activation="tanh", bptt_trunc=5, input_shape=(10, 61)))
    clf.add(Activation('softmax'))

    tmp_X = np.argmax(X_train[0], axis=1)
    tmp_y = np.argmax(y_train[0], axis=1)
    print("Number Series Problem:")
    print("X = [" + " ".join(tmp_X.astype("str")) + "]")
    print("y = [" + " ".join(tmp_y.astype("str")) + "]")
    print()
    train_err, _ = clf.fit(X_train, y_train, n_epochs=500, batch_size=512)
    y_pred = np.argmax(clf.predict(X_test), axis=2)
    y_test = np.argmax(y_test, axis=2)
    accuracy = np.mean(accuracy_score(y_test, y_pred))
    print(accuracy)

    print()
    print("Results:")
    for i in range(5):
        tmp_X = np.argmax(X_test[i], axis=1)
        tmp_y1 = y_test[i]
        tmp_y2 = y_pred[i]
        print("X      = [" + " ".join(tmp_X.astype("str")) + "]")
        print("y_true = [" + " ".join(tmp_y1.astype("str")) + "]")
        print("y_pred = [" + " ".join(tmp_y2.astype("str")) + "]")
        print()
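In Example #8, X and y are assumed to be 3-D arrays of shape (batch, timesteps, classes), so np.argmax(..., axis=2) yields one class index per timestep and the accuracy is averaged over timesteps. A toy check of that reduction:

import numpy as np

probs = np.zeros((2, 3, 4))      # (batch=2, timesteps=3, classes=4)
probs[0, :, 1] = 1.0
probs[1, :, 2] = 1.0
print(np.argmax(probs, axis=2))  # [[1 1 1]
                                 #  [2 2 2]] -> one class index per timestep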
Example #9
 def score(self, x_test, y_test):
     """Classification accuracy of the current model on the test set"""
     y_predict = self.predict(x_test)
     return accuracy_score(y_test, y_predict)
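For comparison, scikit-learn estimators expose the same score interface, where score returns the mean accuracy on the given test data; a small self-contained example using KNeighborsClassifier (not taken from the snippets above):

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
clf = KNeighborsClassifier(n_neighbors=3)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))  # same value as accuracy_score(y_test, clf.predict(X_test))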