Code example #1
import numpy as np

def test_Model(U, V):
    # evaluate F1 and NDCG on the test set
    # (Test, top_k, train_data_aux, M, N are module-level globals)
    k_num = len(top_k)
    # k_num-length arrays to record F1 and NDCG
    F1 = np.zeros(k_num)
    NDCG = np.zeros(k_num)

    # test all test samples
    user_num = 0
    for u in range(M):
        # the data in the test set is [[i, i, i, i], ..., [i, i]]
        test_item = Test[u]
        if len(test_item) > 0:
            user_num += 1
            # score all items for user u
            score = np.dot(V, U[u])
            # rank items by score in descending order
            b = sorted(zip(score, range(N)), key=lambda x: x[0], reverse=True)
            order = [x[1] for x in b]
            # remove the training samples from the recommendations
            train_positive = train_data_aux[u]
            for item in train_positive:
                order.remove(item)

            # accumulate the top-k metrics for each k
            for i in range(len(top_k)):
                F1[i] += evaluation_F1(order, top_k[i], test_item)
                NDCG[i] += evaluation_NDCG(order, top_k[i], test_item)
    # average over the evaluated users
    F1 = (F1 / user_num).tolist()
    NDCG = (NDCG / user_num).tolist()

    return F1, NDCG
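
Both examples call evaluation_F1 and evaluation_NDCG, whose implementations are not shown here. The following is a minimal sketch, assuming F1@k and binary-relevance NDCG@k over a ranked item list; it is an illustrative assumption, not the original code.

import math

def evaluation_F1(order, k, test_item):
    # assumed sketch: F1@k for one user
    # order: ranked item list (best first); test_item: ground-truth items
    hits = len(set(order[:k]) & set(test_item))
    if hits == 0:
        return 0.0
    precision = hits / float(k)
    recall = hits / float(len(test_item))
    return 2 * precision * recall / (precision + recall)

def evaluation_NDCG(order, k, test_item):
    # assumed sketch: NDCG@k with binary relevance
    relevant = set(test_item)
    dcg = sum(1.0 / math.log2(rank + 2)
              for rank, item in enumerate(order[:k]) if item in relevant)
    ideal_hits = min(k, len(test_item))
    idcg = sum(1.0 / math.log2(rank + 2) for rank in range(ideal_hits))
    return dcg / idcg if idcg > 0 else 0.0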
Code example #2
import math
import random
import numpy as np
from numpy import mat

def test_DCFA(U, Vu, Vt, T, M, N, F):
    # evaluate F1 and NDCG on the test set
    # (Test, top_k, train_data_aux, batch_size_test are module-level globals)
    U = mat(U)
    Vu = mat(Vu)
    Vt = mat(Vt)
    T = mat(T)
    F = mat(F)
    M = mat(M)
    N = mat(N)
    k_num = len(top_k)
    # k_num-length arrays to record F1 and NDCG
    F1 = np.zeros(k_num)
    NDCG = np.zeros(k_num)
    num_item = len(Test)

    # choose batch_size_test test samples at random
    for i in range(batch_size_test):
        j = int(math.floor(num_item * random.random()))
        # test data: [u, [i, i, i, i], [r, r, r]], where u, i, r denote user, item, and time, respectively
        u = Test[j][0]
        test_item = Test[j][1]
        # build one ranking per test time
        Order = []
        for r in Test[j][2]:
            # for each time r, score all items
            UV = U[u] * Vu.T + M[u] * F.T
            VT = T[r] * Vt.T + N[r] * F.T
            UV = np.array(UV.tolist()[0])
            VT = np.array(VT.tolist()[0])
            score = (UV * VT).tolist()
            # rank items by score in descending order
            b = sorted(zip(score, range(len(score))), key=lambda x: x[0], reverse=True)
            order = [x[1] for x in b]
            Order.append(order)
        # training samples of this user
        train_positive = train_data_aux[u][0]
        # there are len(train_data_aux[u][1]) k-length recommendation lists per user;
        # to compare fairly with the other baselines, they are merged into a single k-length list per user.
        # at most len(train_positive) training samples are removed from order below,
        # so get_order returns top_k[-1] + len(train_positive) items
        order = get_order(Order, top_k[-1] + len(train_positive))
        # remove the training samples from the recommendations
        for item in train_positive:
            try:
                order.remove(item)
            except ValueError:
                continue
        # also remove the training samples from the test samples
        test_item = list(set(test_item) - set(train_positive))
        # accumulate F1 and NDCG for each k
        for i in range(len(top_k)):
            F1[i] += evaluation_F1(order, top_k[i], test_item)
            NDCG[i] += evaluation_NDCG(order, top_k[i], test_item)
    # average over the sampled test cases
    F1 = (F1 / batch_size_test).tolist()
    NDCG = (NDCG / batch_size_test).tolist()
    return F1, NDCG
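
Example #2 also relies on get_order, which is not shown. The sketch below assumes it merges the per-time ranked lists round-robin (rank by rank) into a single list of the requested length, skipping duplicates; this is an assumed implementation for illustration only, not the original helper.

def get_order(Order, length):
    # assumed sketch: interleave several ranked lists into one list of
    # `length` unique items, taking items rank by rank across the lists
    merged = []
    seen = set()
    for rank in range(max(len(order) for order in Order)):
        for order in Order:
            if rank < len(order) and order[rank] not in seen:
                seen.add(order[rank])
                merged.append(order[rank])
                if len(merged) == length:
                    return merged
    return merged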