# NOTE: the snippet begins mid-call in the source; the surrounding setup
# (args, params, X_test, y_test, MODEL_DIR) is omitted there. The `Trainer`
# name below is an assumption inferred from the `trainer` variable that is
# fitted and queried afterwards.
import numpy as np
import pandas as pd

trainer = Trainer(model_path=MODEL_DIR,
                  checkpoint=args.checkpoint,
                  lmbda=params["lambda"],
                  max_iter=params["max_iter"],
                  num_workers=params["num_workers"])

# Train with the objective named in the config: utility loss or plain MSE.
if params['loss'] == 'utility':
    print("utility loss")
    trainer.fit_utility_loss()
else:
    print("mse loss")
    trainer.fit()

# Split the test matrix into user-ID and item-ID columns (one 2-D column each).
users_test = X_test[:, 0].reshape(-1, 1)
items_test = X_test[:, 1].reshape(-1, 1)
y_test = y_test.reshape(-1, 1)

# Score every (user, item) test pair in batches.
preds = trainer.predict(users=users_test,
                        items=items_test,
                        y=y_test,
                        batch_size=TEST_BATCH_SIZE).reshape(-1, 1)

# Collect user IDs, predictions, and ground truth in one frame for evaluation.
output = pd.DataFrame(np.concatenate((users_test, preds, y_test), axis=1),
                      columns=['user_id', 'pred', 'y_true'])

# Compute RMSE over all predictions and DCG at the configured cutoff.
output, rmse, dcg = get_eval_metrics(output, at_k=params['eval_k'])

print("rmse: {:.4f}".format(rmse))
print("dcg: {:.4f}".format(dcg))

# Log this run's parameters and final metrics.
log_output(MODEL_DIR, MODEL_NAME, params, output=[rmse, dcg])
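
For context, here is a minimal sketch of what an evaluation helper like
`get_eval_metrics` could compute from the `output` frame above. The per-user
grouping and the DCG formula are illustrative assumptions, not the project's
actual implementation.

import numpy as np
import pandas as pd

def sketch_eval_metrics(output: pd.DataFrame, at_k: int):
    # RMSE over all test predictions.
    rmse = float(np.sqrt(np.mean((output["pred"] - output["y_true"]) ** 2)))

    # Mean DCG@k: rank each user's items by predicted score and apply a
    # logarithmic discount to the true relevance at each rank.
    def dcg_at_k(group: pd.DataFrame) -> float:
        top = group.sort_values("pred", ascending=False).head(at_k)
        gains = top["y_true"].to_numpy()
        discounts = np.log2(np.arange(2, len(gains) + 2))
        return float(np.sum(gains / discounts))

    dcg = output.groupby("user_id").apply(dcg_at_k).mean()
    return output, rmse, dcg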
Example #2
# NOTE: this example also begins mid-script in the source; `users_test` is
# restored here to match its later use, mirroring Example #1.
users_test = X_test[:, 0].reshape(-1, 1)
items_test = X_test[:, 1].reshape(-1, 1)
y_test = y_test.reshape(-1, 1)

# Wrap the trained model for batched inference over the test pairs.
predictor = Predictor(model=model,
                      batch_size=TEST_BATCH_SIZE,
                      users=users_test,
                      items=items_test,
                      y=y_test,
                      use_cuda=args.cuda,
                      n_items=stats["n_items"])

preds = predictor.predict().reshape(-1, 1)

# Assemble user IDs, predictions, and ground truth for evaluation.
output = pd.DataFrame(np.concatenate((users_test, preds, y_test), axis=1),
                      columns=['user_id', 'pred', 'y_true'])

if args.task == "choice":
    # Implicit-feedback (choice) task: rank-based metrics only.
    output, hit_ratio, ndcg = get_choice_eval_metrics(output, at_k=EVAL_K)

    print("hit ratio: {:.4f}".format(hit_ratio))
    print("ndcg: {:.4f}".format(ndcg))
else:
    # Explicit-rating task: error metric plus rank metric.
    output, rmse, dcg = get_eval_metrics(output, at_k=EVAL_K)

    print("rmse: {:.4f}".format(rmse))
    print("dcg: {:.4f}".format(dcg))