def test_PRED_MAE_load_test(self):
    """Loading only test data leaves ground truth empty, so compute() raises."""
    metric = MAE()
    metric.load_test(self.TEST_DATA)
    expected_size = len(self.TEST_DATA)
    assert_equal(len(metric.get_test()), expected_size)
    assert_equal(len(metric.get_ground_truth()), 0)
    # Raise: with no ground truth loaded there is nothing to compare against
    assert_raises(ValueError, metric.compute)
def test_PRED_MAE_nan(self):
    """A NaN prediction must be discarded: both datasets remain empty."""
    metric = MAE()
    metric.add(2.0, nan)
    assert_equal(metric.get_test(), [])
    assert_equal(metric.get_ground_truth(), [])
class TestPrediction(Test):
    """Tests for the prediction-based evaluation metrics (MAE, RMSE).

    Fixtures: ``self.mae`` / ``self.rmse`` are pre-loaded with
    ``self.DATA_PRED``; ``self.R`` / ``self.R_PRED`` form one
    (ground-truth, prediction) pair with absolute error 0.9.
    """

    def __init__(self):
        super(TestPrediction, self).__init__()
        # Prediction-based metrics: MAE, RMSE, Pearson
        self.mae = MAE(self.DATA_PRED)
        self.rmse = RMSE(self.DATA_PRED)
        self.R = 3         # Real rating (ground truth)
        self.R_PRED = 2.1  # Predicted rating

    # test_PRED MAE
    def test_PRED_MAE_compute_one(self):
        # |3 - 2.1| = 0.9
        assert_equal(self.mae.compute(self.R, self.R_PRED), 0.9)

    def test_PRED_MAE_compute_one_empty_datasets(self):
        # compute(gt, pred) must work even when the metric holds no data
        mae = MAE()
        assert_equal(mae.compute(self.R, self.R_PRED), 0.9)

    def test_PRED_MAE_compute_all(self):
        assert_equal(self.mae.compute(), 0.7)

    def test_PRED_MAE_nan(self):
        # A NaN prediction must be discarded, leaving both datasets empty
        mae = MAE()
        mae.add(2.0, nan)
        assert_equal(mae.get_test(), [])
        assert_equal(mae.get_ground_truth(), [])

    def test_PRED_MAE_load(self):
        mae = MAE()
        mae.load(self.GT_DATA, self.TEST_DATA)
        assert_equal(mae.compute(), 0.7)

    def test_PRED_MAE_load_test(self):
        mae = MAE()
        mae.load_test(self.TEST_DATA)
        assert_equal(len(mae.get_test()), len(self.TEST_DATA))
        assert_equal(len(mae.get_ground_truth()), 0)
        assert_raises(ValueError, mae.compute)  # Raise: GT is empty!

    def test_PRED_MAE_load_test_and_ground_truth(self):
        mae = MAE()
        mae.load_test(self.TEST_DATA)
        mae.load_ground_truth(self.GT_DATA)
        assert_equal(mae.compute(), 0.7)

    def test_PRED_MAE_add_entry(self):
        self.mae.add(1, 4)  # 1: GT rating, 4: predicted rating
        assert_equal(len(self.mae.get_test()), len(self.DATA_PRED) + 1)
        assert_equal(self.mae.compute(), 1.083333)

    def test_PRED_MAE_different_list_sizes(self):
        mae = MAE()
        GT = [3, 1, 5, 2]  # GT list has one element less than self.TEST_DATA
        mae.load(GT, self.TEST_DATA)
        assert_raises(ValueError, mae.compute)

    # test_PRED RMSE
    def test_PRED_RMSE_compute_one(self):
        # Even though rmse has data, we only compute these two param values
        assert_equal(self.rmse.compute(self.R, self.R_PRED), 0.9)

    def test_PRED_RMSE_compute_one_empty_datasets(self):
        rmse = RMSE()
        assert_equal(rmse.compute(self.R, self.R_PRED), 0.9)

    def test_PRED_RMSE_compute_all(self):
        assert_equal(self.rmse.compute(), 0.891067)

    def test_PRED_RMSE_load_test(self):
        rmse = RMSE()
        # FIX: use a local list instead of rebinding self.TEST_DATA — the
        # original rebound the shared fixture, leaking a different dataset
        # into later tests depending on run order.
        test_data = [2.3, 0.9, 4.9, 0.9, 1.5]
        rmse.load_test(test_data)
        assert_equal(len(rmse.get_test()), len(test_data))

    def test_PRED_RMSE_add_entry(self):
        self.rmse.add(1, 4)
        assert_equal(len(self.rmse.get_test()), len(self.DATA_PRED) + 1)
        assert_equal(self.rmse.compute(), 1.470261)

    def test_PRED_RMSE_different_list_sizes(self):
        rmse = RMSE()
        GT = [3, 1, 5, 2]  # GT list has one element less than self.TEST_DATA
        rmse.load(GT, self.TEST_DATA)
        assert_raises(ValueError, rmse.compute)

    def test_PRED_RMSE_numpy_array(self):
        rmse = RMSE()
        rmse.load(array(self.GT_DATA), array(self.TEST_DATA))
        # FIX: was `assert(rmse.compute(), 0.891067)` — asserting a non-empty
        # tuple is always truthy, so the check could never fail.
        assert_equal(rmse.compute(), 0.891067)
def test_PRED_MAE_load_test(self):
    """compute() must raise when only test data (no ground truth) is loaded."""
    metric = MAE()
    metric.load_test(self.TEST_DATA)
    assert_equal(len(metric.get_test()), len(self.TEST_DATA))
    assert_equal(len(metric.get_ground_truth()), 0)
    # Raise: the ground-truth dataset is still empty
    assert_raises(ValueError, metric.compute)
def test_PRED_MAE_nan(self):
    """Adding a (rating, NaN) pair must leave both datasets untouched."""
    metric = MAE()
    metric.add(2.0, nan)
    assert_equal(metric.get_test(), [])
    assert_equal(metric.get_ground_truth(), [])
class TestPrediction(Test):
    """Tests for the prediction-based evaluation metrics (MAE, RMSE).

    Fixtures: ``self.mae`` / ``self.rmse`` are pre-loaded with
    ``self.DATA_PRED``; ``self.R`` / ``self.R_PRED`` form one
    (ground-truth, prediction) pair with absolute error 0.9.
    """

    def __init__(self):
        super(TestPrediction, self).__init__()
        # Prediction-based metrics: MAE, RMSE, Pearson
        self.mae = MAE(self.DATA_PRED)
        self.rmse = RMSE(self.DATA_PRED)
        self.R = 3         # Real rating (ground truth)
        self.R_PRED = 2.1  # Predicted rating

    # test_PRED MAE
    def test_PRED_MAE_compute_one(self):
        # |3 - 2.1| = 0.9
        assert_equal(self.mae.compute(self.R, self.R_PRED), 0.9)

    def test_PRED_MAE_compute_one_empty_datasets(self):
        # compute(gt, pred) must work even when the metric holds no data
        mae = MAE()
        assert_equal(mae.compute(self.R, self.R_PRED), 0.9)

    def test_PRED_MAE_compute_all(self):
        assert_equal(self.mae.compute(), 0.7)

    def test_PRED_MAE_nan(self):
        # A NaN prediction must be discarded, leaving both datasets empty
        mae = MAE()
        mae.add(2.0, nan)
        assert_equal(mae.get_test(), [])
        assert_equal(mae.get_ground_truth(), [])

    def test_PRED_MAE_load(self):
        mae = MAE()
        mae.load(self.GT_DATA, self.TEST_DATA)
        assert_equal(mae.compute(), 0.7)

    def test_PRED_MAE_load_test(self):
        mae = MAE()
        mae.load_test(self.TEST_DATA)
        assert_equal(len(mae.get_test()), len(self.TEST_DATA))
        assert_equal(len(mae.get_ground_truth()), 0)
        assert_raises(ValueError, mae.compute)  # Raise: GT is empty!

    def test_PRED_MAE_load_test_and_ground_truth(self):
        mae = MAE()
        mae.load_test(self.TEST_DATA)
        mae.load_ground_truth(self.GT_DATA)
        assert_equal(mae.compute(), 0.7)

    def test_PRED_MAE_add_entry(self):
        self.mae.add(1, 4)  # 1: GT rating, 4: predicted rating
        assert_equal(len(self.mae.get_test()), len(self.DATA_PRED) + 1)
        assert_equal(self.mae.compute(), 1.083333)

    def test_PRED_MAE_different_list_sizes(self):
        mae = MAE()
        GT = [3, 1, 5, 2]  # GT list has one element less than self.TEST_DATA
        mae.load(GT, self.TEST_DATA)
        assert_raises(ValueError, mae.compute)

    # test_PRED RMSE
    def test_PRED_RMSE_compute_one(self):
        # Even though rmse has data, we only compute these two param values
        assert_equal(self.rmse.compute(self.R, self.R_PRED), 0.9)

    def test_PRED_RMSE_compute_one_empty_datasets(self):
        rmse = RMSE()
        assert_equal(rmse.compute(self.R, self.R_PRED), 0.9)

    def test_PRED_RMSE_compute_all(self):
        assert_equal(self.rmse.compute(), 0.891067)

    def test_PRED_RMSE_load_test(self):
        rmse = RMSE()
        # FIX: use a local list instead of rebinding self.TEST_DATA — the
        # original rebound the shared fixture, leaking a different dataset
        # into later tests depending on run order.
        test_data = [2.3, 0.9, 4.9, 0.9, 1.5]
        rmse.load_test(test_data)
        assert_equal(len(rmse.get_test()), len(test_data))

    def test_PRED_RMSE_add_entry(self):
        self.rmse.add(1, 4)
        assert_equal(len(self.rmse.get_test()), len(self.DATA_PRED) + 1)
        assert_equal(self.rmse.compute(), 1.470261)

    def test_PRED_RMSE_different_list_sizes(self):
        rmse = RMSE()
        GT = [3, 1, 5, 2]  # GT list has one element less than self.TEST_DATA
        rmse.load(GT, self.TEST_DATA)
        assert_raises(ValueError, rmse.compute)

    def test_PRED_RMSE_numpy_array(self):
        rmse = RMSE()
        rmse.load(array(self.GT_DATA), array(self.TEST_DATA))
        # FIX: was `assert (rmse.compute(), 0.891067)` — asserting a non-empty
        # tuple is always truthy, so the check could never fail.
        assert_equal(rmse.compute(), 0.891067)