def test_model(self, X, Y):
    """Evaluate the model on a labelled dataset.

    Uses the model's batched ``evals`` method when available (single
    vectorized call); otherwise falls back to calling ``eval`` once per
    sample.

    Args:
        X: Sequence of test inputs.
        Y: Sequence of ground-truth labels, parallel to ``X``.

    Returns:
        A tuple ``(accuracy, confusion_matrix)`` where ``accuracy`` is a
        plain float in [0, 1] and the confusion matrix comes from
        ``getConfusionMatrix(labels, p_labels)``.

    Raises:
        ZeroDivisionError: If ``X`` is empty (no samples to score).
    """
    if hasattr(self.model, "evals"):
        # Vectorized path: one batched prediction, element-wise compare.
        labels = np.array(Y)
        p_labels = self.model.evals(X)
        # Cast to int so the accuracy below is a plain float in both
        # branches (np.sum returns a numpy integer scalar).
        success = int(np.sum(labels == p_labels))
        total_count = len(X)
    else:
        # Fallback path: score one sample at a time.
        labels = []       # actual labels, in the order processed
        p_labels = []     # corresponding model predictions
        success = 0       # number of correct predictions
        total_count = 0   # number of samples scored
        for x, y in zip(X, Y):
            y_ = self.model.eval(x)  # model's prediction for this sample
            labels.append(y)
            p_labels.append(y_)
            if y == y_:
                success += 1
            total_count += 1
    # 1.0 * ... keeps the division a true (float) division even on Python 2.
    return 1.0 * success / total_count, getConfusionMatrix(labels, p_labels)
def test_model(self, X, Y):
    """Score the model on (X, Y) one sample at a time via ``model.eval``.

    Returns a tuple ``(accuracy, confusion_matrix)`` where accuracy is
    the fraction of samples whose prediction matched the true label.
    """
    labels = []    # ground-truth labels, in processing order
    p_labels = []  # model predictions, parallel to labels
    success = 0.0
    total_count = 0.0
    # Index into Y explicitly so a too-short Y fails loudly, exactly as
    # the positional lookup would.
    for idx, sample in enumerate(X):
        truth = Y[idx]
        prediction = self.model.eval(sample)
        labels.append(truth)
        p_labels.append(prediction)
        if truth == prediction:
            success += 1.0
        total_count += 1.0
    accuracy = success / total_count
    return accuracy, getConfusionMatrix(labels, p_labels)