예제 #1
0
def evaluate_model(model, item_valid_input, valid_labels, num_items, topK):
    """
    Evaluate top-K recommendation performance (Hit Ratio, NDCG).

    Args:
        model: trained Keras model expecting [one-hot item ids, item
            feature vectors] as its two inputs.
        item_valid_input: array of validation item indices.
        valid_labels: ground-truth label for each validation sample.
        num_items: total number of distinct items (one-hot width).
        topK: number of top-ranked predictions to score.

    Returns:
        (hits, ndcgs): per-sample hit-ratio and NDCG score lists.
    """
    hits, ndcgs = [], []

    # One-hot encode the input item ids -> shape (batch, 1, num_items).
    valid_id_input = to_categorical(item_valid_input, num_classes=num_items)
    valid_id_input = np.expand_dims(valid_id_input, axis=1)

    # Pre-trained feature vector per item -> shape (batch, 1, feature_dim).
    # NOTE(review): relies on module-level `embedding_matrix`.
    valid_item_feature_input = embedding_matrix[item_valid_input, :]
    valid_item_feature_input = np.expand_dims(valid_item_feature_input, axis=1)

    predictions = model.predict([valid_id_input, valid_item_feature_input],
                                batch_size=args.batch_size)
    # Column indices of the topK highest-scoring items, best first.
    topk_ind = predictions.argsort()[:, ::-1][:, :topK]
    for ranked, label in zip(topk_ind, valid_labels):
        hits.append(ProjectUtility.getHitRatio(ranked, label))
        ndcgs.append(ProjectUtility.getNDCG(ranked, label))
    # Bug fix: removed the stray duplicate appends that followed the loop;
    # they double-counted the last sample and raised NameError when the
    # validation set was empty.
    return (hits, ndcgs)
예제 #2
0
def evaluate_model(model, user_valid_input, item_valid_input, valid_labels, num_items, topK):
    """
    Evaluate top-K recommendation performance (Hit Ratio, NDCG) plus the
    model's evaluation loss on the validation data.

    Args:
        model: trained Keras model taking [user ids, item ids] as input.
        user_valid_input: validation user indices.
        item_valid_input: validation item indices.
        valid_labels: ground-truth label for each validation sample.
        num_items: total number of distinct items (one-hot width).
        topK: number of top-ranked predictions to score.

    Returns:
        (hits, ndcgs, evaluate_loss): per-sample scores and the value
        returned by model.evaluate.
    """
    print('\n# Evaluate on test data')
    # One-hot targets for the loss computation.
    slice_y_val = keras.utils.to_categorical(valid_labels, num_items)
    evaluate_loss = model.evaluate(
        [np.array(user_valid_input), np.array(item_valid_input)],
        np.array(slice_y_val),
        batch_size=args.batch_size)

    hits, ndcgs = [], []
    predictions = model.predict(
        [np.array(user_valid_input), np.array(item_valid_input)],
        batch_size=args.batch_size,
        verbose=0)
    print(len(predictions))
    # Column indices of the topK highest-scoring items, best first.
    topk_ind = predictions.argsort()[:, ::-1][:, :topK]
    for ranked, label in zip(topk_ind, valid_labels):
        hits.append(ProjectUtility.getHitRatio(ranked, label))
        ndcgs.append(ProjectUtility.getNDCG(ranked, label))
    # Bug fix: removed the stray duplicate appends that followed the loop;
    # they double-counted the last sample and raised NameError when the
    # validation set was empty.
    return (hits, ndcgs, evaluate_loss)
def evaluate_model(model, user_valid_input, item_valid_input,
                   valid_item_feature_input, valid_labels, topK):
    """
    Evaluate top-K recommendation performance (Hit Ratio, NDCG).

    Args:
        model: trained Keras model taking [user ids, item ids, item
            feature vectors] as its three inputs.
        user_valid_input: validation user indices.
        item_valid_input: validation item indices.
        valid_item_feature_input: per-item feature vectors aligned with
            item_valid_input.
        valid_labels: ground-truth label for each validation sample.
        topK: number of top-ranked predictions to score.

    Returns:
        (hits, ndcgs): per-sample hit-ratio and NDCG score lists.
    """
    hits, ndcgs = [], []
    predictions = model.predict(
        [user_valid_input, item_valid_input, valid_item_feature_input],
        batch_size=args.batch_size,
        verbose=0)
    # Column indices of the topK highest-scoring items, best first.
    topk_ind = predictions.argsort()[:, ::-1][:, :topK]
    for ranked, label in zip(topk_ind, valid_labels):
        hits.append(ProjectUtility.getHitRatio(ranked, label))
        ndcgs.append(ProjectUtility.getNDCG(ranked, label))
    # Bug fix: removed the stray duplicate appends that followed the loop;
    # they double-counted the last sample and raised NameError when the
    # validation set was empty.
    return (hits, ndcgs)
예제 #4
0
def evaluate_model(model, item_valid_input, valid_labels, num_items, topK):
    """
    Evaluate top-K recommendation performance (Hit Ratio, NDCG).

    Args:
        model: trained Keras model taking a one-hot item-id tensor of
            shape (batch, 1, num_items) as input.
        item_valid_input: array of validation item indices.
        valid_labels: ground-truth label for each validation sample.
        num_items: total number of distinct items (one-hot width).
        topK: number of top-ranked predictions to score.

    Returns:
        (hits, ndcgs): per-sample hit-ratio and NDCG score lists.
    """
    hits, ndcgs = [], []
    # One-hot encode the input item ids -> shape (batch, 1, num_items).
    # (An unused one-hot encoding of valid_labels was removed here.)
    input_oh = to_categorical(item_valid_input, num_classes=num_items)
    input_oh = np.expand_dims(input_oh, axis=1)

    predictions = model.predict(input_oh, batch_size=args.batch_size)
    # Column indices of the topK highest-scoring items, best first.
    topk_ind = predictions.argsort()[:, ::-1][:, :topK]
    for ranked, label in zip(topk_ind, valid_labels):
        hits.append(ProjectUtility.getHitRatio(ranked, label))
        ndcgs.append(ProjectUtility.getNDCG(ranked, label))
    # Bug fix: removed the stray duplicate appends that followed the loop;
    # they double-counted the last sample and raised NameError when the
    # validation set was empty.
    return (hits, ndcgs)
예제 #5
0
    # Data/checkpoint path options.
    parser.add_argument('--valid_mask_path', type=str, default='data/preprocessed_data/valid_mask.dat')
    parser.add_argument('--valid_target_path', type=str, default='data/preprocessed_data/valid_target.dat')
    parser.add_argument('--item_dic_path', type=str, default='data/preprocessed_data/item_dic.txt')
    parser.add_argument('--visitor_dic_path', type=str, default='data/preprocessed_data/visitor_dic.txt')
    parser.add_argument('--item_features_path', type=str, default='data/preprocessed_data/item_features.csv')
    parser.add_argument('--checkpoint_path', type=str, default='checkpoint/')
    # Numeric hyper-parameters. Bug fix: these were declared type=str, so
    # any value supplied on the command line arrived as a string and broke
    # downstream numeric use (e.g. batch_size passed to model.predict);
    # the int/float defaults only masked this when the flag was omitted.
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--slice_size', type=int, default=256)
    parser.add_argument('--topK', type=int, default=50)
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--latent_dim', type=int, default=128)
    parser.add_argument('--learning_rate', type=float, default=0.1)
    parser.add_argument('--patience', type=int, default=5)
    args = parser.parse_args()

    # Dictionaries mapping raw ids -> contiguous indices; their sizes give
    # the user/item vocabulary sizes.
    visitor_dic = ProjectUtility.readItemDic(args.base_path + args.visitor_dic_path)
    num_users = len(visitor_dic)

    item_dic = ProjectUtility.readItemDic(args.base_path + args.item_dic_path)
    num_items = len(item_dic)

    # Read training input data: rows of (user index, item index).
    with open(args.base_path + args.train_input_path, 'rb') as f:
        train_inputlist = pickle.load(f)
    train_inputlist = np.array(train_inputlist)
    user_train_input = train_inputlist[:, 0]
    item_train_input = train_inputlist[:, 1]

    # Read training input mask data.
    with open(args.base_path + args.train_mask_path, 'rb') as f:
        train_mask_inputlist = pickle.load(f)
예제 #6
0
                        type=str,
                        default='data/preprocessed_data/visitor_dic.txt')
    parser.add_argument('--item_features_path',
                        type=str,
                        default='data/preprocessed_data/item_features.csv')
    parser.add_argument('--checkpoint_path', type=str, default='checkpoint/')
    # Numeric hyper-parameters. Bug fix: these were declared type=str, so
    # any value supplied on the command line arrived as a string and broke
    # downstream numeric use (e.g. batch_size passed to model.predict);
    # the int/float defaults only masked this when the flag was omitted.
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--slice_size', type=int, default=256)
    parser.add_argument('--topK', type=int, default=50)
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--latent_dim', type=int, default=128)
    parser.add_argument('--learning_rate', type=float, default=0.1)
    parser.add_argument('--patience', type=int, default=5)
    args = parser.parse_args()

    # Dictionaries mapping raw ids -> contiguous indices; their sizes give
    # the user/item vocabulary sizes.
    visitor_dic = ProjectUtility.readItemDic(args.base_path +
                                             args.visitor_dic_path)
    num_users = len(visitor_dic)

    item_dic = ProjectUtility.readItemDic(args.base_path + args.item_dic_path)
    num_items = len(item_dic)

    # Read training input data: rows of (user index, item index).
    with open(args.base_path + args.train_input_path, 'rb') as f:
        train_inputlist = pickle.load(f)
    train_inputlist = np.array(train_inputlist)
    user_train_input = train_inputlist[:, 0]
    item_train_input = train_inputlist[:, 1]

    # Read training input mask data.
    with open(args.base_path + args.train_mask_path, 'rb') as f:
        train_mask_inputlist = pickle.load(f)