def test_score_for_python_version(self):
    """get_score(user, item) should equal the dot product of the planted factor rows."""
    model = PyTensorCoFi(n_factors=2)
    observations = [{"user": 10, "item": 100}, {"user": 10, "item": 110}, {"user": 12, "item": 120}]
    model.fit(pd.DataFrame(observations))
    # Dense indices assigned by fit() for raw user 10 and raw item 100.
    user_index = model.data_map[model.get_user_column()][10]
    item_index = model.data_map[model.get_item_column()][100]
    # Plant known factor values so the expected score is fully determined.
    model.factors[0][user_index, 0] = 0
    model.factors[0][user_index, 1] = 1
    model.factors[1][item_index, 0] = 1
    model.factors[1][item_index, 1] = 5
    self.assertEqual(0 * 1 + 1 * 5, model.get_score(10, 100))
def test_tensor_score_against_testfm(self):
    """
    [recommendation.models.TensorCoFi] Test tensorcofi scores with test.fm benchmark
    """
    evaluator = Evaluator()
    n_users = len(self.df.user.unique())
    n_items = len(self.df.item.unique())
    native_model = TensorCoFi(n_users=n_users, n_items=n_items, n_factors=2)
    python_model = PyTensorCoFi()
    training, testing = testfm.split.holdoutByRandom(self.df, 0.9)
    candidate_items = training.item.unique()
    # Fit both implementations on the same split and compare evaluation scores.
    native_model.fit(training)
    python_model.fit(training)
    native_score = evaluator.evaluate_model(native_model, testing, all_items=candidate_items)[0]
    python_score = evaluator.evaluate_model(python_model, testing, all_items=candidate_items)[0]
    assert abs(native_score - python_score) < .15, \
        "TensorCoFi score is not close enough to testfm benchmark (%.3f != %.3f)" % (native_score, python_score)
def test_ids_returns_for_python_version(self):
    """Fitting maps raw ids to dense 0-based indices and factor rows have n_factors entries."""
    tf = PyTensorCoFi(n_factors=2)
    inp = [{"user": 10, "item": 100}, {"user": 10, "item": 110}, {"user": 12, "item": 120}]
    inp = pd.DataFrame(inp)
    tf.fit(inp)
    # Test the id in map
    uid = tf.data_map[tf.get_user_column()][10]
    iid = tf.data_map[tf.get_item_column()][100]
    # Fix: assertEquals is a deprecated alias of assertEqual (removed in Python 3.12).
    self.assertEqual(uid, 0)
    self.assertEqual(iid, 0)
    # Test number of factors
    self.assertEqual(len(tf.factors[0][uid, :]), tf.number_of_factors)
    self.assertEqual(len(tf.factors[1][iid, :]), tf.number_of_factors)
def test_nogil_against_std_05(self):
    """
    [EVALUATOR] Test the groups measure differences between python and c implementations for 5% training
    """
    df = pd.read_csv(resource_filename(testfm.__name__, 'data/movielenshead.dat'), sep="::", header=None,
                     names=['user', 'item', 'rating', 'date', 'title'])
    model = PyTensorCoFi()
    ev = Evaluator(False)  # presumably False selects the pure-Python measure — confirm against Evaluator
    ev_nogil = Evaluator()
    results = {"implementation": [], "measure": []}
    for i in range(SAMPLE_SIZE_FOR_TEST):
        training, testing = testfm.split.holdoutByRandom(df, 0.05)
        model.fit(training)
        # Fix: the original joined each pair of appends with a comma, building and
        # discarding (None, None) tuples; plain statements express the intent.
        results["implementation"].append("Cython")
        results["measure"].append(ev_nogil.evaluate_model(model, testing)[0])
        results["implementation"].append("Python")
        results["measure"].append(ev.evaluate_model(model, testing)[0])
    #####################
    # ANOVA over result #
    #####################
    assert_equality_in_groups(results, alpha=ALPHA, groups="implementation", test_var="measure")
def test_score_for_python_version(self):
    """Plant known factor values and check that get_score returns their dot product."""
    # NOTE(review): a test with this same name appears earlier in this file;
    # if both live in one class only the later definition runs — confirm intent.
    model = PyTensorCoFi(n_factors=2)
    data = pd.DataFrame([{"user": 10, "item": 100}, {"user": 10, "item": 110}, {"user": 12, "item": 120}])
    model.fit(data)
    uid = model.data_map[model.get_user_column()][10]
    iid = model.data_map[model.get_item_column()][100]
    # Overwrite the learned factors with fixed values.
    user_vector = (0, 1)
    item_vector = (1, 5)
    for k in range(2):
        model.factors[0][uid, k] = user_vector[k]
        model.factors[1][iid, k] = item_vector[k]
    expected = 0 * 1 + 1 * 5
    self.assertEqual(expected, model.get_score(10, 100))
def test_ids_returns_for_python_version(self):
    """Fitting maps raw ids to dense 0-based indices and factor rows have n_factors entries."""
    # NOTE(review): a test with this same name appears earlier in this file;
    # if both live in one class only the later definition runs — confirm intent.
    tf = PyTensorCoFi(n_factors=2)
    inp = [{"user": 10, "item": 100}, {"user": 10, "item": 110}, {"user": 12, "item": 120}]
    inp = pd.DataFrame(inp)
    tf.fit(inp)
    # Test the id in map
    uid = tf.data_map[tf.get_user_column()][10]
    iid = tf.data_map[tf.get_item_column()][100]
    # Fix: assertEquals is a deprecated alias of assertEqual (removed in Python 3.12).
    self.assertEqual(uid, 0)
    self.assertEqual(iid, 0)
    # Test number of factors
    self.assertEqual(len(tf.factors[0][uid, :]), tf.number_of_factors)
    self.assertEqual(len(tf.factors[1][iid, :]), tf.number_of_factors)
def test_fit_for_python_version(self):
    """After fitting, each factor matrix has one row per distinct user/item."""
    model = PyTensorCoFi(n_factors=2)
    model.fit(self.df)
    # item and user factors are row vectors: one row per entity
    expected_users = len(self.df.user.unique())
    expected_items = len(self.df.item.unique())
    self.assertEqual(expected_users, model.factors[0].shape[0])
    self.assertEqual(expected_items, model.factors[1].shape[0])
from testfm.evaluation.evaluator import Evaluator from pkg_resources import resource_filename from testfm.evaluation.parameter_tuning import ParameterTuning if __name__ == "__main__": eval = Evaluator( ) # Call this before loading the data to save memory (fork of process takes place) # Prepare the data df = pd.read_csv(resource_filename(testfm.__name__, 'data/movielenshead.dat'), sep="::", header=None, names=['user', 'item', 'rating', 'date', 'title']) print df.head() training, testing = testfm.split.holdoutByRandom(df, 0.9) print "Tuning the parameters." tr, validation = testfm.split.holdoutByRandom(training, 0.7) pt = ParameterTuning() pt.set_max_iterations(100) pt.set_z_value(90) tf_params = pt.get_best_params(TensorCoFi, tr, validation) print tf_params tf = TensorCoFi() tf.set_params(**tf_params) tf.fit(training) print tf.get_name().ljust(50), print eval.evaluate_model(tf, testing, all_items=training.item.unique())