def train(data, user_features=None, item_features=None, use_features=False):
    loss_type = "warp"  # alternatively "bpr"
    model = LightFM(learning_rate=0.05, loss=loss_type, max_sampled=100)

    if use_features:
        model.fit_partial(data, epochs=20,
                          user_features=user_features,
                          item_features=item_features)
        train_precision = precision_at_k(model, data, k=10,
                                         user_features=user_features,
                                         item_features=item_features).mean()
        train_auc = auc_score(model, data,
                              user_features=user_features,
                              item_features=item_features).mean()
    else:
        model.fit_partial(data, epochs=20)
        train_precision = precision_at_k(model, data, k=10).mean()
        train_auc = auc_score(model, data).mean()

    print(f'Precision: train {train_precision:.2f}')
    print(f'AUC: train {train_auc:.2f}')
    return model
def evaluate_fm(model_, te_, tr_, items_features=None, users_features=None):
    if not tr_.multiply(te_).nnz == 0:
        print('Train and test interactions are not fully disjoint')

    # Compute and print the AUC score
    train_auc = auc_score(model_, tr_,
                          item_features=items_features,
                          user_features=users_features,
                          num_threads=NUM_THREADS).mean()
    print('Collaborative filtering train AUC: %s' % train_auc)
    test_auc = auc_score(model_, te_, train_interactions=tr_,
                         item_features=items_features,
                         user_features=users_features,
                         num_threads=NUM_THREADS).mean()
    print('Collaborative filtering test AUC: %s' % test_auc)

    p_at_k_train = precision_at_k(model_, tr_,
                                  item_features=items_features,
                                  user_features=users_features,
                                  k=5, num_threads=NUM_THREADS).mean()
    p_at_k_test = precision_at_k(model_, te_, train_interactions=tr_,
                                 item_features=items_features,
                                 user_features=users_features,
                                 k=5, num_threads=NUM_THREADS).mean()
    print("Train precision: %.2f" % p_at_k_train)
    print("Test precision: %.2f" % p_at_k_test)
def main():
    movielens = fetch_movielens()

    train = movielens['train']
    print(type(train))
    print(train.toarray()[:5, :])

    test = movielens['test']
    print(type(test))
    print(test.toarray()[:5, :])

    model = LightFM(learning_rate=0.05, loss='bpr')
    model.fit(train, epochs=10)

    train_precision = precision_at_k(model, train, k=10).mean()
    test_precision = precision_at_k(model, test, k=10, train_interactions=train).mean()
    train_auc = auc_score(model, train).mean()
    test_auc = auc_score(model, test, train_interactions=train).mean()

    print(f'train precision: {train_precision}')
    print(f'test precision: {test_precision}')
    print(f'train auc: {train_auc}')
    print(f'test auc: {test_auc}')
    print('DONE')
def evaluate(model, train, test, hybrid=False, features=None):
    if hybrid:
        auc_train = np.mean(auc_score(model, train, item_features=features))
        pre_train = np.mean(precision_at_k(model, train, item_features=features))
        mrr_train = np.mean(reciprocal_rank(model, train, item_features=features))
        auc_test = np.mean(auc_score(model, test, item_features=features))
        pre_test = np.mean(precision_at_k(model, test, item_features=features))
        mrr_test = np.mean(reciprocal_rank(model, test, item_features=features))
    else:
        auc_train = np.mean(auc_score(model, train))
        pre_train = np.mean(precision_at_k(model, train))
        mrr_train = np.mean(reciprocal_rank(model, train))
        auc_test = np.mean(auc_score(model, test))
        pre_test = np.mean(precision_at_k(model, test))
        mrr_test = np.mean(reciprocal_rank(model, test))

    res_dict = {'auc_train': auc_train, 'pre_train': pre_train,
                'mrr_train': mrr_train, 'auc_test': auc_test,
                'pre_test': pre_test, 'mrr_test': mrr_test}

    print('The AUC score in training/validation is: ', auc_train, ' / ', auc_test)
    print('The mean precision at k in training/validation is: ', pre_train, ' / ', pre_test)
    print('The mean reciprocal rank in training/validation is: ', mrr_train, ' / ', mrr_test)
    print('_________________________________________________________')
    return res_dict
def lightfm_model(data, prec_at_k=100, train_split=0.8, epochs=10):
    """
    Evaluate a LightFM model.

    `data` is a scipy sparse interaction matrix.
    Paper: https://arxiv.org/abs/1507.08439
    """
    model = LightFM(learning_rate=0.05, loss='logistic')
    train, test = random_train_test_split(data, test_percentage=1 - train_split)
    model.fit(train, epochs=epochs)  # , num_threads=1)

    train_precision = precision_at_k(model, train, k=prec_at_k)
    test_precision = precision_at_k(model, test, k=prec_at_k,
                                    train_interactions=train)
    train_auc = auc_score(model, train)
    test_auc = auc_score(model, test, train_interactions=train)

    print('Performance of LightFM model\n')
    print(f'Precision \t Train: {train_precision.mean():.2f} \t '
          f'Test: {test_precision.mean():.2f}')
    print(f'AUC \t\t Train: {train_auc.mean():.2f} \t '
          f'Test: {test_auc.mean():.2f}')

    return (train_auc, test_auc, train_precision, test_precision, prec_at_k)
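# Usage sketch for lightfm_model above. The synthetic interaction matrix is an
# illustrative assumption: any scipy sparse user-item matrix works, per the
# docstring. Imports are repeated so the sketch is self-contained.
import numpy as np
import scipy.sparse as sp
from lightfm import LightFM
from lightfm.cross_validation import random_train_test_split
from lightfm.evaluation import auc_score, precision_at_k

interactions = sp.random(500, 1000, density=0.02, format='coo',
                         random_state=42, data_rvs=lambda n: np.ones(n))
train_auc, test_auc, train_prec, test_prec, k = lightfm_model(
    interactions, prec_at_k=10, train_split=0.8, epochs=10)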
def handler(context):
    data = fetch_movielens(min_rating=5.0)
    model = LightFM(loss='warp')

    epochs = 50
    for epoch in range(1, epochs + 1):
        print('Epoch: {}'.format(epoch))
        model.fit_partial(data['train'], epochs=1, num_threads=1)

        train_acc = precision_at_k(model, data['train'], k=5).mean()
        test_acc = precision_at_k(model, data['test'], k=5).mean()
        print("Train precision: {}".format(train_acc))
        print("Test precision: {}".format(test_acc))

        statistics = ABEJAStatistics(num_epochs=epochs, epoch=epoch)
        statistics.add_stage(ABEJAStatistics.STAGE_TRAIN, float(train_acc), None)
        statistics.add_stage(ABEJAStatistics.STAGE_VALIDATION, float(test_acc), None)
        try:
            client.update_statistics(statistics)
        except Exception:
            pass

    np.save(os.path.join(ABEJA_TRAINING_RESULT_DIR, 'model.npy'), model.__dict__)
def evaluate(self, model, user_items_train):
    print("Splitting the data into train/test set...\n")
    train, test = cross_validation.random_train_test_split(user_items_train)
    print(train, test)

    print("Evaluating methods...\n")
    train_recall_10 = recall_at_k(model, train, k=10).mean()
    test_recall_10 = recall_at_k(model, test, k=10).mean()
    train_recall_20 = recall_at_k(model, train, k=20).mean()
    test_recall_20 = recall_at_k(model, test, k=20).mean()
    train_precision_10 = precision_at_k(model, train, k=10).mean()
    test_precision_10 = precision_at_k(model, test, k=10).mean()
    train_precision_20 = precision_at_k(model, train, k=20).mean()
    test_precision_20 = precision_at_k(model, test, k=20).mean()

    print("Train: Recall@10:{0:.3f}, Recall@20:{1:.3f}".format(
        train_recall_10, train_recall_20))
    print("Test: Recall@10:{0:.3f}, Recall@20:{1:.3f}".format(
        test_recall_10, test_recall_20))
    print("Train: Precision@10:{0:.3f}, Precision@20:{1:.3f}".format(
        train_precision_10, train_precision_20))
    print("Test: Precision@10:{0:.3f}, Precision@20:{1:.3f}".format(
        test_precision_10, test_precision_20))
def train():
    # Import data.
    # https://lyst.github.io/lightfm/docs/datasets.html
    # min_rating (float, optional) - minimum rating to include in the
    # interaction matrix.
    data = fetch_movielens(min_rating=5.0)

    # Set model hyperparameters.
    # https://lyst.github.io/lightfm/docs/lightfm.html
    # learning_schedule (string, optional) - one of ('adagrad', 'adadelta').
    alpha = 1e-05
    epochs = 70
    num_components = 32

    warp_model = LightFM(no_components=num_components,
                         loss='warp',
                         learning_schedule='adagrad',
                         max_sampled=100,
                         user_alpha=alpha,
                         item_alpha=alpha)

    # Fit the model.
    warp_model.fit(data['train'], epochs=epochs, num_threads=2)

    # Precision on train and test data.
    print("Train precision: %.2f" %
          precision_at_k(warp_model, data['train'], k=5).mean())
    print("Test precision: %.2f" %
          precision_at_k(warp_model, data['test'], k=5).mean())

    model = warp_model
    return model, data
def main():
    movielens = fetch_movielens()
    train = movielens['train']
    test = movielens['test']
    print(train.shape)
    print(test.shape)

    model = LightFM(learning_rate=0.05, loss='bpr')
    model.fit(train, epochs=10)

    k = 10
    train_precision = precision_at_k(model, train, k=k).mean()
    test_precision = precision_at_k(model, test, k=k).mean()
    print(f'precision_at_{k}(train): {train_precision}')
    print(f'precision_at_{k}(test) : {test_precision}')

    train_auc = auc_score(model, train).mean()
    test_auc = auc_score(model, test).mean()
    print(f'auc_score(train): {train_auc}')
    print(f'auc_score(test) : {test_auc}')

    # sklearn's ndcg_score expects higher-is-better relevance scores, whereas
    # predict_rank returns 0-based ranks (lower is better), so raw model
    # scores are used here instead of ranks.
    n_users, n_items = train.shape
    user_ids = np.repeat(np.arange(n_users), n_items)
    item_ids = np.tile(np.arange(n_items), n_users)
    scores = model.predict(user_ids, item_ids).reshape(n_users, n_items)
    train_ndcg = ndcg_score(train.toarray(), scores)
    test_ndcg = ndcg_score(test.toarray(), scores)
    print(f'ndcg_score(train): {train_ndcg}')
    print(f'ndcg_score(test) : {test_ndcg}')

    print('DONE')
    return 0
def tune():
    # Import data.
    data = fetch_movielens(min_rating=5.0)

    # Hyperparameter grid.
    alpha_ = [1e-05, 1.5e-05, 2e-05]
    epochs_ = [50, 60, 70]
    num_components_ = [30, 32, 34]

    for alpha in alpha_:
        for epochs in epochs_:
            for num_components in num_components_:
                warp_model = LightFM(no_components=num_components,
                                     loss='warp',
                                     learning_schedule='adagrad',
                                     max_sampled=100,
                                     user_alpha=alpha,
                                     item_alpha=alpha)
                warp_model.fit(data['train'], epochs=epochs, num_threads=2)

                print('alpha =', alpha,
                      'epochs =', epochs,
                      'num_components =', num_components)
                print("Train precision: %.2f" %
                      precision_at_k(warp_model, data['train'], k=5).mean())
                print("Test precision: %.2f" %
                      precision_at_k(warp_model, data['test'], k=5).mean())
def test_precision_at_k():
    no_users, no_items = (10, 100)
    train, test = _generate_data(no_users, no_items)

    model = LightFM(loss='bpr')
    model.fit_partial(train)

    k = 10

    # Without omitting train interactions
    precision = evaluation.precision_at_k(model, test, k=k)
    expected_mean_precision = _precision_at_k(model, test, k)
    assert np.allclose(precision.mean(), expected_mean_precision)
    assert len(precision) == (test.getnnz(axis=1) > 0).sum()
    assert len(evaluation.precision_at_k(model, train,
                                         preserve_rows=True)) == test.shape[0]

    # With omitting train interactions
    precision = evaluation.precision_at_k(model, test, k=k,
                                          train_interactions=train)
    expected_mean_precision = _precision_at_k(model, test, k, train=train)
    assert np.allclose(precision.mean(), expected_mean_precision)
def evaluate(self, model, train, test, k=10):
    train_precision = precision_at_k(model, train, k=k).mean()
    test_precision = precision_at_k(model, test, k=k).mean()
    train_auc = auc_score(model, train).mean()
    test_auc = auc_score(model, test).mean()
    return train_precision, test_precision, train_auc, test_auc
def lightfm_model(data, prec_at_k=10, train_split=0.8):
    """
    Evaluate a LightFM model.

    `data` is a scipy sparse interaction matrix.
    Paper: https://arxiv.org/abs/1507.08439
    """
    model = LightFM(learning_rate=0.05, loss='bpr')
    train, test = random_train_test_split(data, test_percentage=1 - train_split)
    model.fit(train, epochs=10)

    train_precision = precision_at_k(model, train, k=prec_at_k)
    test_precision = precision_at_k(model, test, k=prec_at_k,
                                    train_interactions=train)
    train_auc = auc_score(model, train)
    test_auc = auc_score(model, test, train_interactions=train)

    print('Performance of LightFM model\n')
    print(f'Precision \t Train: {train_precision.mean():.2f} \t '
          f'Test: {test_precision.mean():.2f}')
    print(f'AUC \t\t Train: {train_auc.mean():.2f} \t '
          f'Test: {test_auc.mean():.2f}')

    # Plot the per-user score distributions.
    fig, ax = plt.subplots(2, 2, figsize=(15, 10))
    ax[0, 0].hist(train_auc, bins='auto')
    ax[0, 0].title.set_text('Distribution of Train AUC score over users')
    ax[0, 0].set_ylabel('Count')
    ax[0, 0].set_xlabel('AUC Score')
    ax[0, 1].hist(test_auc, bins='auto')
    ax[0, 1].title.set_text('Distribution of Test AUC score over users')
    ax[0, 1].set_ylabel('Count')
    ax[0, 1].set_xlabel('AUC Score')
    ax[1, 0].hist(train_precision, bins='auto')
    ax[1, 0].title.set_text(
        f'Distribution of Train Precision @ {prec_at_k} for all users')
    ax[1, 0].set_ylabel('Count')
    ax[1, 0].set_xlabel(f'Precision @ {prec_at_k}')
    ax[1, 1].hist(test_precision, bins='auto')
    ax[1, 1].title.set_text(
        f'Distribution of Test Precision @ {prec_at_k} for all users')
    ax[1, 1].set_ylabel('Count')
    ax[1, 1].set_xlabel(f'Precision @ {prec_at_k}')
    plt.show()
    print('\n')
def collab_filtering():
    """
    Implements the collaborative filtering version, using only the rating data
    from the MovieLens dataset.
    :return:
    """
    data = fetch_movielens()
    for key, value in data.items():
        print(key, type(value), value.shape)

    train = data['train']
    test = data['test']
    print('The dataset has %s users and %s items, '
          'with %s interactions in the test and %s interactions in the training set.'
          % (train.shape[0], train.shape[1], test.getnnz(), train.getnnz()))

    model = LightFM(learning_rate=0.05, loss='bpr')
    model.fit(train, epochs=50, num_threads=5)

    train_precision = precision_at_k(model, train, k=10).mean()
    test_precision = precision_at_k(model, test, k=10).mean()
    train_recall = recall_at_k(model, train, k=10).mean()
    test_recall = recall_at_k(model, test, k=10).mean()
    train_auc = auc_score(model, train).mean()
    test_auc = auc_score(model, test).mean()
    print('Precision: train %.2f, test %.2f.' % (train_precision, test_precision))
    print('Recall: train %.2f, test %.2f.' % (train_recall, test_recall))
    print('AUC: train %.2f, test %.2f.' % (train_auc, test_auc))

    # Train a fresh WARP model; on a new model the first call to fit_partial
    # is equivalent to fit.
    model = LightFM(learning_rate=0.05, loss='warp')
    model.fit_partial(train, epochs=50, num_threads=5)

    train_precision = precision_at_k(model, train, k=10).mean()
    test_precision = precision_at_k(model, test, k=10).mean()
    train_recall = recall_at_k(model, train, k=10).mean()
    test_recall = recall_at_k(model, test, k=10).mean()
    train_auc = auc_score(model, train).mean()
    test_auc = auc_score(model, test).mean()
    print("*****************")
    print("After re-training")
    print('Precision: train %.2f, test %.2f.' % (train_precision, test_precision))
    print('Recall: train %.2f, test %.2f.' % (train_recall, test_recall))
    print('AUC: train %.2f, test %.2f.' % (train_auc, test_auc))

    # Check a sample recommendation.
    sample_recommendation(model, data, [3, 25, 450])
def eval(model, train, val):
    # auc
    print("Train auc: %.2f" % auc_score(model, train).mean())
    print("Val auc: %.2f" % auc_score(model, val).mean())
    # precision_at_k
    print("Train precision: %.2f" % precision_at_k(model, train, k=5).mean())
    print("Val precision: %.2f" % precision_at_k(model, val, k=5).mean())
    # recall_at_k
    print("Train recall: %.2f" % recall_at_k(model, train, k=5).mean())
    print("Val recall: %.2f" % recall_at_k(model, val, k=5).mean())
def patk_learning_curve(model, train, test, iterarray, user_features=None,
                        item_features=None, k=5, **fit_params):
    old_epoch = 0
    train_patk = []
    test_patk = []
    warp_duration = []
    train_warp_auc = []
    test_warp_auc = []

    headers = ['Epoch', 'train p@5', 'train_auc', 'test p@5', 'test_auc']
    print_log(headers, header=True)

    for epoch in iterarray:
        # Train only the additional epochs needed to reach this checkpoint.
        more = epoch - old_epoch
        start = time.time()
        model.fit_partial(train, user_features=user_features, epochs=more,
                          item_features=item_features, **fit_params)
        warp_duration.append(time.time() - start)

        train_warp_auc.append(
            auc_score(model, train, item_features=item_features).mean())
        test_warp_auc.append(
            auc_score(model, test, item_features=item_features,
                      train_interactions=train).mean())

        this_test = precision_at_k(model, test, train_interactions=train,
                                   item_features=item_features, k=k)
        this_train = precision_at_k(model, train, train_interactions=None,
                                    item_features=item_features, k=k)
        train_patk.append(np.mean(this_train))
        test_patk.append(np.mean(this_test))

        row = [epoch, train_patk[-1], train_warp_auc[-1],
               test_patk[-1], test_warp_auc[-1]]
        print_log(row)
        old_epoch = epoch

    return model, train_patk, test_patk, warp_duration, train_warp_auc, test_warp_auc
def evaluate_model(model, train, test, item_features=None, user_features=None,
                   num_threads=1):
    train_precision = precision_at_k(model, train, k=10,
                                     user_features=user_features,
                                     item_features=item_features,
                                     num_threads=num_threads).mean()
    test_precision = precision_at_k(model, test, train_interactions=train, k=10,
                                    user_features=user_features,
                                    item_features=item_features,
                                    num_threads=num_threads).mean()
    train_auc = auc_score(model, train,
                          user_features=user_features,
                          item_features=item_features,
                          num_threads=num_threads).mean()
    test_auc = auc_score(model, test, train_interactions=train,
                         user_features=user_features,
                         item_features=item_features,
                         num_threads=num_threads).mean()

    print('Precision: train %.2f, test %.2f.' % (train_precision, test_precision))
    print('AUC: train %.2f, test %.2f.' % (train_auc, test_auc))
    return train_precision, test_precision, train_auc, test_auc
def measure_accuracies(model, data):
    print("\nMeasuring accuracies of the model...")

    # evaluate the precision@k metric
    training_precision = precision_at_k(model, data["train"], k=PRECISION_K).mean()
    test_precision = precision_at_k(model, data["test"], k=PRECISION_K).mean()

    # evaluate the AUROC metric
    training_auc = auc_score(model, data["train"]).mean()
    test_auc = auc_score(model, data["test"]).mean()

    # print them out
    print("Precision@k: training %.2f, test %.2f" % (training_precision, test_precision))
    print("AUC: training %.2f, test %.2f" % (training_auc, test_auc))
def _get_metrics(model, train_set, test_set):
    train_set = train_set.tocsr()
    test_set = test_set.tocsr()

    # Drop negative interactions before scoring.
    train_set.data[train_set.data < 0] = 0.0
    test_set.data[test_set.data < 0] = 0.0
    train_set.eliminate_zeros()
    test_set.eliminate_zeros()

    return (precision_at_k(model, train_set).mean(),
            precision_at_k(model, test_set).mean(),
            auc_score(model, train_set).mean(),
            auc_score(model, test_set).mean())
def evaluate_model(model, metric, test, train):
    """
    Evaluate a trained model on the test set, using one of the three available
    accuracy metrics.

    AUC: the probability that a randomly chosen positive example has a higher
        score than a randomly chosen negative example.
    Precision: the fraction of known positives in the first k positions of the
        ranked list of results.
    Recall: the number of positive items in the first k positions of the
        ranked list of results, divided by the number of positive items in the
        test period.

    :param model: (LightFM, required) - model to be evaluated
    :param metric: (string, required) - accuracy metric to be used, one of
        ['auc', 'precision', 'recall']
    :param test: (COO matrix, required) - known positives used to test the model
    :param train: (COO matrix, required) - training set; these interactions
        will be omitted from the score calculations to avoid re-recommending
        known positives.
    :return: test_score (float) - score computed on the test set
    """
    try:
        # Make sure the metric is valid.
        assert metric in ['auc', 'precision', 'recall']
        if metric == 'auc':
            test_score = auc_score(model, test, train).mean()
        elif metric == 'precision':
            test_score = precision_at_k(model, test, train, k=5).mean()
        else:
            test_score = recall_at_k(model, test, train, k=5).mean()
        return test_score
    except AssertionError:
        print('The metric provided is not correct or available!')
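# Minimal usage sketch for evaluate_model above. The MovieLens data is an
# illustrative assumption; any disjoint train/test interaction matrices work.
from lightfm import LightFM
from lightfm.datasets import fetch_movielens
from lightfm.evaluation import auc_score, precision_at_k, recall_at_k

data = fetch_movielens(min_rating=4.0)
model = LightFM(loss='warp')
model.fit(data['train'], epochs=10)

# Each call scores the test set while masking train interactions.
for metric in ['auc', 'precision', 'recall']:
    print(metric, evaluate_model(model, metric, data['test'], data['train']))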
def best_reccomendation():
    # Define variables.
    best = 0.0
    best_model = ''
    for model in models:
        score = 0.0
        pak_score = evaluation.precision_at_k(model, data2['test'])
        score += np.mean(pak_score)
        rak_score = evaluation.recall_at_k(model, data2['test'])
        score += np.mean(rak_score)
        auc = evaluation.auc_score(model, data2['test'])
        score += np.mean(auc)
        rr_score = evaluation.reciprocal_rank(model, data2['test'])
        score += np.mean(rr_score)
        print(score)

        if score >= best:
            best = score
            best_model = model

    return best_model
def test_LightFM_model(model, test_interactions, train_interactions,
                       user_features, movie_features, k=5):
    test_precision = precision_at_k(model, test_interactions, train_interactions,
                                    k=k, user_features=user_features,
                                    item_features=movie_features,
                                    num_threads=2).mean()
    test_recall = recall_at_k(model, test_interactions, train_interactions,
                              k=k, user_features=user_features,
                              item_features=movie_features,
                              num_threads=2).mean()
    test_auc = auc_score(model, test_interactions, train_interactions,
                         user_features=user_features,
                         item_features=movie_features,
                         num_threads=2).mean()

    print('Model')
    print('Precision at k=', str(k), ': ', round(test_precision, 3), sep='')
    print('Recall at k=', str(k), ': ', round(test_recall, 3), sep='')
    print('AUC: ', round(test_auc, 3), sep='')

    return {'precision': round(test_precision, 3),
            'recall': round(test_recall, 3),
            'auc': round(test_auc, 3)}
def objective(self, params):
    epochs, learning_rate, no_components, alpha = params
    user_alpha = alpha
    item_alpha = alpha

    model = LightFM(loss='warp',
                    random_state=2016,
                    learning_rate=learning_rate,
                    no_components=no_components,
                    user_alpha=user_alpha,
                    item_alpha=item_alpha)
    model.fit(self.train_test_user[0], epochs=epochs, num_threads=4, verbose=True)

    patks = evaluation.precision_at_k(model, self.train_test_user[1],
                                      train_interactions=None,
                                      k=5, num_threads=4)
    print("running hyperparameter.." +
          datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))
    mapatk = np.mean(patks)

    # Negate because we want to _minimize_ the objective.
    out = -mapatk
    # Guard against numerical edge cases.
    if np.abs(out + 1) < 0.01 or out < -1.0:
        return 0.0
    else:
        return out
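# Hedged sketch of wiring the objective above into scikit-optimize. The search
# space and the `searcher` instance are assumptions for illustration; they are
# not taken from the original code.
from skopt import forest_minimize

space = [(10, 60),                         # epochs
         (10**-3, 1.0, 'log-uniform'),     # learning_rate
         (20, 64),                         # no_components
         (10**-6, 10**-3, 'log-uniform')]  # alpha

result = forest_minimize(searcher.objective, space, n_calls=50, random_state=0)
print('Best p@5: {:.4f}'.format(-result.fun))
print('Best params:', result.x)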
def run_BPR(split, user_item, prec_rec_at):
    # Hold out the interactions in `split` for testing and zero them out in
    # the training copy (copy to avoid mutating the caller's DataFrame).
    train = user_item.copy()
    test = user_item * 0
    for index, row in split.iterrows():
        test.loc['m' + str(row['movieId']), 'u' + str(row['userId'])] = row['rating']
        train.loc['m' + str(row['movieId']), 'u' + str(row['userId'])] = 0

    train_sparse = scipy.sparse.csr_matrix(train.values)
    test_sparse = scipy.sparse.csr_matrix(test.values)

    model = LightFM(loss='bpr')
    model.fit(train_sparse, epochs=30, num_threads=4)

    precision = precision_at_k(model, test_sparse, k=prec_rec_at).mean()
    recall = recall_at_k(model, test_sparse, k=prec_rec_at).mean()
    return precision, recall
def objective(params):
    no_components, learning_rate = params

    global model
    model = LightFM(no_components=no_components,
                    learning_schedule='adagrad',
                    loss='warp',
                    learning_rate=learning_rate,
                    random_state=0)
    model.fit(interactions=train,
              item_features=item_features,
              sample_weight=train_weights,
              epochs=3,
              verbose=False)

    test_precision = precision_at_k(model, test, k=5,
                                    item_features=item_features).mean()
    print("no_comp: {}, lrn_rate: {:.5f}, precision: {:.5f}".format(
        no_components, learning_rate, test_precision))
    # test_auc = auc_score(model, test, item_features=item_features).mean()

    # Negate because the optimizer minimizes; clamp numerically suspect values.
    output = -test_precision
    if np.abs(output + 1) < 0.01 or output < -1.0:
        output = 0.0
    return output
def train(self, interactions, test_percentage=0.25, n_components=30,
          learning_rate=0.5, loss='warp', model_k=15, n_jobs=4, epoch=30,
          evaluate_k=50):
    from lightfm.evaluation import precision_at_k
    from lightfm.evaluation import recall_at_k
    from lightfm.cross_validation import random_train_test_split

    train, test = random_train_test_split(interactions,
                                          test_percentage=test_percentage,
                                          random_state=None)
    mf_model = self.runMF(interactions=train,
                          n_components=n_components,
                          learning_rate=learning_rate,
                          loss=loss,
                          k=model_k,
                          epoch=epoch,
                          n_jobs=n_jobs)

    precise = precision_at_k(mf_model, test_interactions=test, k=evaluate_k)
    recall = recall_at_k(mf_model, test_interactions=test, k=evaluate_k)
    precise_test = precise.mean()
    recall_test = recall.mean()
    return mf_model, precise_test, recall_test
def score(self, X, y, **kwargs):
    test = to_sparse_matrix(X[:, 0], X[:, 1], y, self.shape)
    return precision_at_k(self.model, test,
                          train_interactions=self.train, k=10).mean()
def resultados_colaborativo(self):
    """
    resultados_colaborativo method.

    Obtains the results of the collaborative model.
    This method is only used in the text interface.
    """
    global train, test, modelo

    # Compute the results.
    precision = precision_at_k(modelo, test, train_interactions=train,
                               k=10, num_threads=self.CPU_THREADS).mean()
    auc = auc_score(modelo, test, train_interactions=train,
                    num_threads=self.CPU_THREADS).mean()
    recall = recall_at_k(modelo, test, train_interactions=train,
                         k=10, num_threads=self.CPU_THREADS).mean()
    reciprocal = reciprocal_rank(modelo, test, train_interactions=train,
                                 num_threads=self.CPU_THREADS).mean()

    # Print the results.
    imprimir_resultados_clasico(precision, auc, recall, reciprocal)
def main():
    # Find the optimal learning rate and rank.
    bounds = [(10**-4, 1.0, 'log-uniform'), (10**-6, 10**-1, 'log-uniform')]
    opt_params = forest_minimize(optimal, bounds, verbose=True)
    opt_lr, opt_rank = opt_params.x[0], opt_params.x[1]

    # Times and precisions for three datasets.
    data_1_train, _, data_1_test = read_train_data("0.1_percent")
    data_2_train, _, data_2_test = read_train_data("0.5_percent")
    data_3_train, _, data_3_test = read_train_data("1_percent")
    dataset = [[data_1_train, data_1_test],
               [data_2_train, data_2_test],
               [data_3_train, data_3_test]]

    times = []
    precisions = []
    for data in dataset:
        train, test = sparse_train_test(data[0], data[1])
        start = time.time()
        model = LightFM(loss='warp', learning_rate=opt_lr, no_components=opt_rank)
        model.fit(train, epochs=10, verbose=True)
        precision = precision_at_k(model, test, k=50).mean()
        minutes = (time.time() - start) / 60
        print("TIME:", minutes, "PREC:", precision)
        times.append(minutes)
        precisions.append(precision)

    print("times:", times, "precisions:", precisions)
def experiment(type_exp):
    global train, test

    model = LightFM(learning_rate=0.05, loss=type_exp)
    model.fit(train, epochs=10)

    train_precision = precision_at_k(model, train, k=10).mean()
    test_precision = precision_at_k(model, test, k=10).mean()
    train_auc = auc_score(model, train).mean()
    test_auc = auc_score(model, test).mean()

    with open("mid100mostRaiting5.txt", 'w') as output:
        output.write("Training set precision: " + str(train_precision) + "\n")
        output.write("Test set precision: " + str(test_precision) + "\n")
        output.write("Training set AUC: " + str(train_auc) + "\n")
        output.write("Test set AUC: " + str(test_auc) + "\n")