Esempio n. 1
0
 def Eval(self, test):
     """Evaluate the classifier on *test* and plot a precision-recall curve.

     Prints accuracy/recall/precision for the positive class and — by
     relabelling (-1 -> +1, else -> -1) — for the negative class, then
     sweeps a set of probability thresholds and plots precision vs recall.
     """
     Y_pred = self.PredictLabel(test.X)
     ev = Eval(Y_pred, test.Y)
     print("For Positive Class:")
     print("Test Accuracy: ", ev.Accuracy())
     print("Test Recall: ", ev.Recall())
     print("Test Precision: ", ev.Precision())
     print("\n")
     print("For Negative Class:")
     # Swap labels so the negative class plays the positive role.
     ev_neg = Eval([1 if i == -1 else -1 for i in Y_pred],
                   [1 if i == -1 else -1 for i in test.Y])
     print("Test Accuracy: ", ev_neg.Accuracy())
     print("Test Recall: ", ev_neg.Recall())
     print("Test Precision: ", ev_neg.Precision())
     # Start the curve with the default-threshold point, then sweep the
     # probability thresholds directly (no index arithmetic needed).
     precisions = [ev.Precision()]
     recalls = [ev.Recall()]
     for threshold in (0.2, 0.4, 0.6, 0.8):
         ev_t = Eval(self.PredictLabel(test.X, threshold), test.Y)
         precisions.append(ev_t.Precision())
         recalls.append(ev_t.Recall())
     plt.plot(precisions, recalls)
     plt.ylabel('Recall')
     plt.xlabel('Precision')
     plt.title('2-class Precision-Recall curve')
     plt.show()
 def Eval(self, test):
     """Report test metrics for the +1 and -1 classes, then draw the P-R curve."""
     predictions = self.PredictLabel(test.X)
     positive = Eval(predictions, test.Y)
     print("For Positive Class:")
     print("Test Accuracy: ", positive.Accuracy())
     print("Test Recall: ", positive.Recall())
     print("Test Precision: ", positive.Precision())
     print("\n")
     print("For Negative Class:")
     # Swap the roles of the labels: -1 becomes +1, everything else -1.
     swapped_pred = [1 if v == -1 else -1 for v in predictions]
     swapped_gold = [1 if v == -1 else -1 for v in test.Y]
     negative = Eval(swapped_pred, swapped_gold)
     print("Test Accuracy: ", negative.Accuracy())
     print("Test Recall: ", negative.Recall())
     print("Test Precision: ", negative.Precision())
     positive.PvRcurve()
Esempio n. 3
0
 def Eval(self, test):
     """Evaluate on *test*: sweep probability thresholds, save per-class
     precision-recall curves (Pos_class.png / Neg_class.png), and return
     the accuracy at the default threshold.
     """
     Y_pred = self.PredictLabel(test.X, 0, True)
     accy = Eval(Y_pred, test.Y).Accuracy()
     recall_pos, recall_neg = [], []
     precision_pos, precision_neg = [], []
     for threshold in (0.2, 0.4, 0.6, 0.8):
         pred = self.PredictLabel(test.X, threshold, False)
         ev = Eval(pred, test.Y)
         r_pos, r_neg = ev.EvalRecall()
         recall_pos.append(r_pos)
         recall_neg.append(r_neg)
         p_pos, p_neg = ev.EvalPrecision()
         precision_pos.append(p_pos)
         precision_neg.append(p_neg)
     # One figure per class. Label/title each figure AFTER creating it so
     # the text attaches to the right Axes — the original labelled the
     # previously-active figure and re-used figure 1 for both plots, so
     # Neg_class.png overdrew the positive curve.
     fig_pos = plt.figure()
     plt.plot(recall_pos, precision_pos, 'r')
     plt.xlabel('Recall')
     plt.ylabel('Precision')
     plt.title('For +1 Label')
     fig_pos.savefig('Pos_class.png')
     fig_neg = plt.figure()
     plt.plot(recall_neg, precision_neg, 'b')
     plt.xlabel('Recall')
     plt.ylabel('Precision')
     plt.title('For -1 Label')
     fig_neg.savefig('Neg_class.png')
     return accy
    def plotLimitGraph(self, test):
        """Sweep thresholds 0.1..0.9 and plot the recall and precision curves.

        NOTE(review): the plotted series come from self.pos_recall /
        self.neg_recall / self.pos_precision / self.neg_precision, which are
        presumably filled as a side effect of PredictLabel — confirm against
        that method before relying on this plot.
        """
        x_axis = []
        accuracy = []  # computed for reference only; not plotted below
        for i in range(9):
            threshold = (i + 1) / 10
            Y_pred = self.PredictLabel(test.X, threshold)
            ev = Eval(Y_pred, test.Y)
            accuracy.append(ev.Accuracy())
            x_axis.append(threshold)

        # Recall curves for both classes against the threshold.
        plt.plot(x_axis, self.pos_recall, label="Recall Positive")
        plt.plot(x_axis, self.neg_recall, label="Recall Negative")
        plt.xlabel('Threshold')
        plt.ylabel('Recall')
        plt.title('Recall Plot')
        plt.legend()
        plt.show()

        # Precision curves for both classes against the threshold.
        plt.plot(x_axis, self.pos_precision, label="Precision Positive")
        plt.plot(x_axis, self.neg_precision, label="Precision Negative")
        plt.xlabel('Threshold')
        plt.ylabel('Precision')
        plt.title('Precision Plot')
        plt.legend()
        plt.show()
Esempio n. 5
0
 def Eval(self, X_test, Y_test):
     """Return the accuracy of this model's predictions on (X_test, Y_test)."""
     return Eval(self.Predict(X_test), Y_test).Accuracy()
Esempio n. 6
0
# Drop categorical columns before fitting. keyword form: the positional
# `axis` argument to DataFrame.drop was deprecated and removed in pandas 2.0.
X_train = X_train.drop(columns=['attacking_work_rate'])
X_test = X_test.drop(columns=['preferred_foot'])
X_train = X_train.drop(columns=['preferred_foot'])

from sklearn.naive_bayes import GaussianNB

# Fit once and reuse the fitted model (the original called fit() twice).
gnb = GaussianNB()
model = gnb.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_pred.dtype  # notebook-style inspection; no effect in a script
model.score(X_test, y_test)
print("Number of mislabeled points out of a total %d points : %d"  % (X_test.shape[0],(y_test != y_pred).sum()))
from Eval import Eval
eval1 = Eval(y_pred, np.array(y_test))

print("Positive Class:")
print("Accuracy: ",eval1.Accuracy())
from sklearn.metrics import recall_score,precision_score,accuracy_score
print(recall_score(y_test,y_pred,average=None))
print(precision_score(y_test,y_pred,average=None))
print(accuracy_score(y_test,y_pred))

import pickle

# Persist the fitted model and sanity-check that it round-trips.
# `with` closes the file handles the original script leaked.
filename = 'finalized_model.sav'
with open(filename, 'wb') as fh:
    pickle.dump(model, fh)
with open(filename, 'rb') as fh:
    loaded_model = pickle.load(fh)
loaded_model.score(X_test, y_test)
 def EvalProbabilities(self, test, indexes):
     """Accuracy of probability-based predictions restricted to *indexes*."""
     predicted = self.PredictProb(test, indexes)
     return Eval(predicted, test.Y[indexes]).Accuracy()
    def EvalLabels(self, test):
        """Return the accuracy of the predicted labels on the whole test set."""
        return Eval(self.PredictLabel(test.X), test.Y).Accuracy()
Esempio n. 9
0
 def Eval(self, test):
     """Predict labels for test.X and return the resulting accuracy."""
     predicted = self.PredictLabel(test.X)
     scorer = Eval(predicted, test.Y)
     return scorer.Accuracy()
 def Eval(self, test):
     """Return the accuracy of predictlabel() on the test set."""
     return Eval(self.predictlabel(test.X), test.Y).Accuracy()
Esempio n. 11
0
 def Eval(self, X_test, Y_test):
     """Predict on X_test and return accuracy against Y_test."""
     # Parenthesized print: identical output, but valid in Python 2 AND 3
     # (the original `print "testing..."` is a Python-2-only statement).
     print("testing...")
     Y_pred = self.Predict(X_test)
     ev = Eval(Y_pred, Y_test)
     return ev.Accuracy()
Esempio n. 12
0
 def Eval(self, test):
     """Evaluate the NaiveBayes model; returns test-set accuracy."""
     print("Predicting test reviews labels ...")
     labels = self.PredictLabel(test.X)
     print("Evaluate the NaiveBayes model ...")
     return Eval(labels, test.Y).Accuracy()
Esempio n. 13
0
 def Eval(self, test):
     """Return [accuracy, precision, recall] of the model on *test*.

     Note: `EvalPrecition` is the (misspelled) name of the project Eval
     class's precision method — kept to match that API.
     """
     Y_pred = self.PredictLabel(test.X)
     ev = Eval(Y_pred, test.Y)
     return [ev.Accuracy(), ev.EvalPrecition(), ev.EvalRecall()]
Esempio n. 14
0
    # Build the model with the second CLI argument (presumably a smoothing
    # or prior parameter — confirm against NaiveBayes.__init__).
    nb = NaiveBayes(traindata, float(sys.argv[2]))
    # Print the predicted probability for the first 10 test examples.
    for i in range(10):
        res = nb.PredictProb(testdata, i)
        print(i, res)

    print("Predict Label with Threshold")
    # Collect precision ("precition" is the project's spelling) and recall
    # at each decision threshold in 0.1 .. 0.9.
    precition = []
    recall = []

    for i in np.arange(0.1, 1.0, 0.1):
        pred = nb.PredictLabelTh(testdata, i)
        ev = Eval(pred, testdata.Y)
        precition.append(ev.EvalPrecition())
        recall.append(ev.EvalRecall())
        # ev = Eval(Y_pred.pred, test.Y)
        print('Threshold:', i, ' Accuracy: ', ev.Accuracy(), ' Precition: ',
              ev.EvalPrecition(), ' Recall: ', ev.EvalRecall())
    '''
    # Create some mock data
    t = recall
    data1 = precition
    data2 = np.arange(0.05, 1.0, 0.05)
    fig, ax1 = plt.subplots()
    color = 'tab:blue'
    ax1.set_xlabel('Recall')
    ax1.set_ylabel('Precition', color=color)
    ax1.plot(t, data1, color=color)
    ax1.tick_params(axis='y', labelcolor=color)
    ax2 = ax1.twinx()
    color = 'tab:red'
    ax2.set_ylabel('Threshold', color=color)
Esempio n. 15
0
 def Eval(self, test):
     """Predict labels for test.X and return accuracy against test.Y."""
     # Parenthesized print: identical output, but valid in Python 2 AND 3
     # (the original `print "testing..."` is a Python-2-only statement).
     print("testing...")
     Y_pred = self.PredictLabel(test.X)
     ev = Eval(Y_pred, test.Y)
     return ev.Accuracy()