Example #1
    def get_performance(user_pos_test, r, auc, Ks):
        '''
        :param user_pos_test: items the user actually interacted with in the test set
        :param r:             binary hit vector for the predicted top-K list, e.g. r = [1, 0, 1]
        :param auc:           AUC score (scalar), e.g. auc = 0
        :param Ks:            list of top-K cutoff values
        :return:              dict of per-K metric arrays plus the AUC
        '''
        precision, recall, ndcg, hit_ratio, MAP = [], [], [], [], []

        for K in Ks:
            precision.append(metrics.precision_at_k(r, K))
            recall.append(metrics.recall_at_k(r, K, len(user_pos_test)))
            ndcg.append(metrics.ndcg_at_k(r, K))
            hit_ratio.append(metrics.hit_at_k(r, K))
            MAP.append(metrics.AP_at_k(r, K, len(user_pos_test)))

        return {'recall': np.array(recall), 'precision': np.array(precision), 'ndcg': np.array(ndcg),
                'hit_ratio': np.array(hit_ratio), 'MAP': np.array(MAP), 'auc': auc}
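The metrics.*_at_k helpers called above are not shown in this example. Below is a minimal sketch of what they might look like, assuming r is the binary hit vector described in the docstring and the standard definitions of the metrics; the signatures are taken from the calls above, but the project's own metrics module may compute NDCG and AP slightly differently.

import numpy as np

def precision_at_k(r, k):
    # fraction of the top-k positions that are hits
    return np.asarray(r)[:k].sum() / k

def recall_at_k(r, k, n_pos):
    # fraction of the user's test items recovered within the top-k
    return np.asarray(r)[:k].sum() / n_pos

def hit_at_k(r, k):
    # 1.0 if at least one test item shows up in the top-k, else 0.0
    return float(np.asarray(r)[:k].sum() > 0)

def dcg_at_k(r, k):
    # discounted cumulative gain of the first k positions
    r = np.asarray(r, dtype=float)[:k]
    if r.size == 0:
        return 0.0
    return float(np.sum(r / np.log2(np.arange(2, r.size + 2))))

def ndcg_at_k(r, k):
    # DCG normalised by the DCG of an ideal (all hits first) ordering
    ideal = dcg_at_k(sorted(r, reverse=True), k)
    return dcg_at_k(r, k) / ideal if ideal > 0 else 0.0

def AP_at_k(r, k, n_pos):
    # average precision over the hit positions within the top-k
    r = np.asarray(r)[:k]
    precisions = [r[:i + 1].sum() / (i + 1) for i in range(len(r)) if r[i]]
    return float(np.sum(precisions) / min(n_pos, k)) if precisions else 0.0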
Example #2

            preds[u] = list(top_n)
        # get actual interaction info. of test users
        test_ur = defaultdict(list)
        for u in test_set.user.unique():
            test_ur[u] = test_set.loc[test_set.user == u,
                                      'item'].values.tolist()
        for u in preds.keys():
            preds[u] = [1 if e in test_ur[u] else 0 for e in preds[u]]

        # calculate metrics
        precision_k = np.mean(
            [precision_at_k(r, args.topk) for r in preds.values()])
        fnl_precision.append(precision_k)

        recall_k = np.mean([
            recall_at_k(r, len(test_ur[u]), args.topk)
            for u, r in preds.items()
        ])
        fnl_recall.append(recall_k)

        map_k = map_at_k(list(preds.values()))
        fnl_map.append(map_k)

        ndcg_k = np.mean([ndcg_at_k(r, args.topk) for r in preds.values()])
        fnl_ndcg.append(ndcg_k)

        hr_k = hr_at_k(list(preds.values()), list(preds.keys()), test_ur)
        fnl_hr.append(hr_k)

        mrr_k = mrr_at_k(list(preds.values()))
        fnl_mrr.append(mrr_k)
Example #3
                actual_cands = set(candidates[u])
                neg_item_pool = set(item_pool) - set(test_ur[u]) - set(ur[u])
                # random.sample needs a sequence, not a set, on Python 3.11+
                neg_cands = random.sample(list(neg_item_pool), max_i_num - len(candidates[u]))
                cands = actual_cands | set(neg_cands)
            else:
                cands = random.sample(candidates[u], max_i_num)
            pred_rates = algo.user_vec[u, :].dot(algo.item_vec).toarray()[0, list(cands)]
            rec_idx = np.argsort(pred_rates)[::-1][:args.topk]
            preds[u] = list(np.array(list(cands))[rec_idx])
        for u in preds.keys():
            preds[u] = [1 if i in test_ur[u] else 0 for i in preds[u]]
    
        precision_k = np.mean([precision_at_k(r, args.topk) for r in preds.values()])
        fnl_precision.append(precision_k)

        recall_k = np.mean([recall_at_k(r, len(test_ur[u]), args.topk) for u, r in preds.items()])
        fnl_recall.append(recall_k)

        map_k = map_at_k(list(preds.values()))
        fnl_map.append(map_k)

        ndcg_k = np.mean([ndcg_at_k(r, args.topk) for r in preds.values()])
        fnl_ndcg.append(ndcg_k)

        hr_k = hr_at_k(list(preds.values()), list(preds.keys()), test_ur)
        fnl_hr.append(hr_k)

        mrr_k = mrr_at_k(list(preds.values()))
        fnl_mrr.append(mrr_k)

    for i in range(len(val_kpi)):
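Example #3 ranks only a padded candidate set per user: the true candidates are topped up with negatives the user never interacted with, every candidate is scored with the user and item factor matrices, and the list is cut to the top-k via argsort. Below is a compact, hypothetical helper for that ranking step; it assumes dense numpy factor arrays rather than the scipy sparse matrices the example converts with toarray(), and the name and signature are illustrative only.

import numpy as np

def rank_candidates(user_vec, item_vec, cands, topk):
    # user_vec: (n_factors,) dense latent vector for one user
    # item_vec: (n_factors, n_items) dense item factor matrix
    # cands:    iterable of candidate item ids to rank
    cands = np.asarray(sorted(cands))
    scores = user_vec @ item_vec          # predicted score for every item
    order = np.argsort(scores[cands])[::-1][:topk]
    return cands[order].tolist()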
Example #4
        # get top-N list for test users
        preds = reco.predict(test_set)
        # get actual interaction info. of test users
        test_ur = defaultdict(list)
        for u in test_set.user.unique():
            test_ur[u] = test_set.loc[test_set.user == u,
                                      'item'].values.tolist()
        for u in preds.keys():
            preds[u] = [1 if e in test_ur[u] else 0 for e in preds[u]]

        # calculate metrics
        precision_k = np.mean([precision_at_k(r, k) for r in preds.values()])
        fnl_precision.append(precision_k)

        recall_k = np.mean(
            [recall_at_k(r, len(test_ur[u]), k) for u, r in preds.items()])
        fnl_recall.append(recall_k)

        map_k = map_at_k(list(preds.values()))
        fnl_map.append(map_k)

        ndcg_k = np.mean([ndcg_at_k(r, k) for r in preds.values()])
        fnl_ndcg.append(ndcg_k)

        hr_k = hr_at_k(list(preds.values()), list(preds.keys()), test_ur)
        fnl_hr.append(hr_k)

        mrr_k = mrr_at_k(list(preds.values()))
        fnl_mrr.append(mrr_k)

    print('---------------------------------')
Example #5
            top_n = np.array(test_u_is[u])[rec_idx]
            preds[u] = list(top_n)
        # get actual interaction info. of test users
        ur = defaultdict(list)
        for u in test_set.user.unique():
            ur[u] = test_set.loc[test_set.user == u, 'item'].values.tolist()
        for u in preds.keys():
            preds[u] = [1 if e in ur[u] else 0 for e in preds[u]]

        # calculate metrics
        precision_k = np.mean(
            [precision_at_k(r, args.topk) for r in preds.values()])
        fnl_precision.append(precision_k)

        recall_k = np.mean(
            [recall_at_k(r, len(ur[u]), args.topk) for u, r in preds.items()])
        fnl_recall.append(recall_k)

        map_k = map_at_k(list(preds.values()))
        fnl_map.append(map_k)

        ndcg_k = np.mean([ndcg_at_k(r, args.topk) for r in preds.values()])
        fnl_ndcg.append(ndcg_k)

        hr_k = hr_at_k(list(preds.values()), list(preds.keys()), ur)
        fnl_hr.append(hr_k)

        mrr_k = mrr_at_k(list(preds.values()))
        fnl_mrr.append(mrr_k)

        gc.collect()
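Putting the pieces together, a tiny synthetic run shows how the evaluation flow in these examples fits end to end, using the helper sketches above (all ids and values are made up). Note that Example #1 passes the cutoff K before the positive count to recall_at_k, while Examples #2 to #5 pass the positive count first; the call below follows the sketch's order (r, k, n_pos).

import numpy as np

test_ur = {0: [10, 11, 12], 1: [20]}        # ground-truth test items per user
preds = {0: [10, 33, 12, 44, 55],           # top-5 recommended item ids per user
         1: [21, 22, 20, 23, 24]}

# turn each recommendation list into a binary hit vector, as the examples do
hits = {u: [1 if i in test_ur[u] else 0 for i in items] for u, items in preds.items()}

k = 5
print('precision@5:', np.mean([precision_at_k(r, k) for r in hits.values()]))
print('recall@5:   ', np.mean([recall_at_k(r, k, len(test_ur[u])) for u, r in hits.items()]))
print('ndcg@5:     ', np.mean([ndcg_at_k(r, k) for r in hits.values()]))
print('map@5:      ', map_at_k(list(hits.values())))
print('mrr@5:      ', mrr_at_k(list(hits.values())))
print('hr@5:       ', hr_at_k(list(hits.values()), list(hits.keys()), test_ur))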