Example #1
def my_eval(eval_data_df, model):
    """Score every (user, item) pair in eval_data_df and report ranking metrics."""
    u_indices = eval_data_df[Constants.DEFAULT_USER_COL].to_numpy()
    i_indices = eval_data_df[Constants.DEFAULT_ITEM_COL].to_numpy()
    # model.score returns a 0-d tensor/array, so .item() extracts a Python scalar.
    r_preds = np.fromiter(
        (model.score(user_idx, item_idx).item()
         for user_idx, item_idx in zip(u_indices, i_indices)),
        dtype=float,
        count=len(u_indices),
    )

    pred_df = pd.DataFrame({
        Constants.DEFAULT_USER_COL: u_indices,
        Constants.DEFAULT_ITEM_COL: i_indices,
        Constants.DEFAULT_PREDICTION_COL: r_preds,
    })

    result_dic = {}
    TOP_K = [5, 10, 20]
    if not isinstance(TOP_K, list):
        TOP_K = [TOP_K]
    if 10 not in TOP_K:  # always report metrics at k=10
        TOP_K.append(10)
    metrics = ["ndcg_at_k", "precision_at_k", "recall_at_k", "map_at_k"]

    # eval_model, config, and save_to_csv are assumed to be provided by the enclosing module.
    for k in TOP_K:
        for metric in metrics:
            eval_metric = getattr(eval_model, metric)
            result = eval_metric(eval_data_df, pred_df, k=k)
            result_dic[metric + "@" + str(k)] = result
    result_dic.update(config)
    result_df = pd.DataFrame(result_dic, index=[0])
    save_to_csv(result_df, config["result_file"])
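A minimal usage sketch for this snippet, assuming numpy/pandas are imported as np/pd and that Constants, eval_model, config, and save_to_csv come from the surrounding project; DummyModel and the ground-truth column name are hypothetical:

import numpy as np
import pandas as pd

class DummyModel:
    """Hypothetical stand-in whose score() returns a NumPy scalar, so .item() works."""

    def score(self, user_idx, item_idx):
        return np.float64((user_idx * 7 + item_idx) % 5) / 5.0

eval_df = pd.DataFrame({
    Constants.DEFAULT_USER_COL: [0, 0, 1, 1],
    Constants.DEFAULT_ITEM_COL: [10, 11, 10, 12],
    Constants.DEFAULT_RATING_COL: [1, 0, 1, 1],  # assumed ground-truth column
})
my_eval(eval_df, DummyModel())  # appends metric@k rows to config["result_file"]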
Example #2
def test_eval_worker(testEngine, eval_data_df, prediction):
    """Start a worker for evaluation during training.

    Runs prediction and evaluation on the test set.
    """
    result_para = {
        "run_time": [testEngine.config["run_time"]],
    }
    testEngine.n_worker += 1
    for cfg in ["model", "dataset"]:
        for col in testEngine.config[cfg]["result_col"]:
            result_para[col] = [testEngine.config[cfg][col]]

    test_result_dic = evaluate(eval_data_df, prediction, testEngine.metrics,
                               testEngine.k)
    print_dict_as_table(
        test_result_dic,
        tag="performance on test",
        columns=["metrics", "values"],
    )
    test_result_dic.update(result_para)
    with lock_test_eval:  # serialize concurrent writes to the shared result file
        result_df = pd.DataFrame(test_result_dic)
        save_to_csv(result_df, testEngine.config["system"]["result_file"])
    testEngine.n_worker -= 1
    return test_result_dic
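Since this function increments and decrements testEngine.n_worker and guards the CSV write with lock_test_eval, it appears intended to run concurrently. A minimal sketch of launching it in a background thread, assuming evaluate, print_dict_as_table, save_to_csv, and lock_test_eval come from the snippet's own module; the stand-in engine, eval_data_df, and prediction values are hypothetical:

import threading
from types import SimpleNamespace

# Hypothetical engine exposing only the attributes the worker reads.
engine = SimpleNamespace(
    n_worker=0,
    metrics=["ndcg", "precision"],
    k=[5, 10],
    config={
        "run_time": 42.0,
        "model": {"result_col": ["model"], "model": "NCF"},
        "dataset": {"result_col": ["dataset"], "dataset": "ml-100k"},
        "system": {"result_file": "test_result.csv"},
    },
)

worker = threading.Thread(
    target=test_eval_worker, args=(engine, eval_data_df, prediction)
)
worker.start()
worker.join()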
Example #3
def test_eval_worker(testEngine, eval_data_df, prediction, k_li=None):
    """Run prediction and evaluation on the test set."""
    if k_li is None:  # avoid sharing a mutable default argument across calls
        k_li = [5, 10, 20]
    result_para = {
        "model": [testEngine.config["model"]],
        "dataset": [testEngine.config["dataset"]],
        "data_split": [testEngine.config["data_split"]],
        "emb_dim": [int(testEngine.config["emb_dim"])],
        "lr": [testEngine.config["lr"]],
        "batch_size": [int(testEngine.config["batch_size"])],
        "optimizer": [testEngine.config["optimizer"]],
        "max_epoch": [testEngine.config["max_epoch"]],
        "model_run_id": [testEngine.config["model_run_id"]],
        "run_time": [testEngine.config["run_time"]],
    }
    if "late_dim" in testEngine.config:
        result_para["late_dim"] = [int(testEngine.config["late_dim"])]
    if "remark" in testEngine.config:
        result_para["remark"] = [testEngine.config["remark"]]
    if "alpha" in testEngine.config:
        result_para["alpha"] = [testEngine.config["alpha"]]
    if "activator" in testEngine.config:
        result_para["activator"] = [testEngine.config["activator"]]
    if "item_fea_type" in testEngine.config:
        result_para["item_fea_type"] = [testEngine.config["item_fea_type"]]
    if "n_sample" in testEngine.config:
        result_para["n_sample"] = [testEngine.config["n_sample"]]
    if "time_step" in testEngine.config:
        result_para["time_step"] = [testEngine.config["time_step"]]

    test_result_dic = evaluate(eval_data_df, prediction, testEngine.metrics,
                               k_li)
    print_dict_as_table(
        test_result_dic,
        tag=f"performance on test",
        columns=["metrics", "values"],
    )
    test_result_dic.update(result_para)
    with lock_test_eval:  # serialize concurrent writes to the shared result file
        result_df = pd.DataFrame(test_result_dic)
        save_to_csv(result_df, testEngine.config["result_file"])
    return test_result_dic
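Compared with example #2, this variant reads a flat config dictionary and takes the cut-off list as a parameter, so callers can override the defaults directly. A hedged sketch, assuming testEngine, eval_data_df, and prediction already exist as in the snippet above (with testEngine.config being the flat dictionary this variant expects):

# Evaluate at custom cut-offs instead of the default 5/10/20.
results = test_eval_worker(testEngine, eval_data_df, prediction, k_li=[10, 50])
print(results)  # metric values plus the run parameters recorded above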