    cuda = None  # the gpu id (e.g., 0); None runs on cpu -- assumed here, since the fragment uses it below without defining it
    debug = False  # in debug mode, we just check whether the model can run -- assumed for the same reason
    config_with_json = True  # whether to specify the configuration with json files

    models_to_run = [
        #'RankMSE',
        #'RankNet',
        'LambdaRank',
        #'ListNet',
        #'ListMLE',
        #'RankCosine',
        #'ApproxNDCG',
        'WassRank',
        #'STListNet',
        #'LambdaLoss'
    ]

    evaluator = LTREvaluator(cuda=cuda)

    if config_with_json:  # specify configuration with json files
        # the directory of json files
        dir_json = '/Users/dryuhaitao/WorkBench/Dropbox/CodeBench/GitPool/wildltr_ptranking/testing/ltr_adhoc/json/'
        #dir_json = '/Users/solar/WorkBench/Dropbox/CodeBench/GitPool/wildltr_ptranking/testing/ltr_adhoc/json/'
        #dir_json = '/home/dl-box/WorkBench/Dropbox/CodeBench/GitPool/wildltr_ptranking/testing/ltr_adhoc/json/'

        for model_id in models_to_run:
            evaluator.run(debug=debug,
                          model_id=model_id,
                          config_with_json=config_with_json,
                          dir_json=dir_json)

    else:  # specify configuration manually
        ''' Selected dataset '''
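The JSON-driven branch above can be condensed into a self-contained helper, shown below as a minimal sketch. The import path and the assumption that dir_json holds the parameter json files consumed by evaluator.run() are mine, not confirmed by this snippet:

import os

# Assumed import path for LTREvaluator; verify against your installed
# ptranking version.
from ptranking.ltr_adhoc.eval.ltr import LTREvaluator

def run_models_with_json(model_ids, dir_json, debug=False, cuda=None):
    # Fail early on a wrong directory instead of deep inside run().
    assert os.path.isdir(dir_json), 'dir_json does not exist: ' + dir_json
    evaluator = LTREvaluator(cuda=cuda)
    for model_id in model_ids:
        evaluator.run(debug=debug, model_id=model_id,
                      config_with_json=True, dir_json=dir_json)

if __name__ == '__main__':
    run_models_with_json(['LambdaRank', 'WassRank'], dir_json='./json/')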
Example #2
    #dir_data = '/home/dl-box/WorkBench/Datasets/L2R/ISTELLA_L2R/Istella_X/'

    #data_id = 'Istella'
    #dir_data = '/home/dl-box/WorkBench/Datasets/L2R/ISTELLA_L2R/Istella/'

    data_id = 'Istella_S'  # the active selection; any data_id from the commented pairs above works, provided dir_data matches
    dir_data = '/home/dl-box/WorkBench/Datasets/L2R/ISTELLA_L2R/Istella_S/'
    ''' output directory '''
    dir_output = '/Users/dryuhaitao/WorkBench/CodeBench/Bench_Output/NeuralLTR/Listwise/'
    #dir_output = '/home/dl-box/WorkBench/CodeBench/PyCharmProject/Project_output/Out_L2R/Listwise/'
    #dir_output = '/Users/solar/WorkBench/CodeBench/PyCharmProject/Project_output/Out_L2R/'

    debug = False  # in debug mode, we just check whether the model can run
    grid_search = False  # with grid_search, we explore the effects of a model's different hyper-parameters

    evaluator = LTREvaluator()

    to_run_models = [
        #'RankMSE', 'RankNet',
        'RankNet',
        #'ListNet', 'ListMLE', 'RankCosine',
        #'ApproxNDCG',
        #'WassRank',
        #'STListNet', 'LambdaLoss'
    ]

    for model_id in to_run_models:
        evaluator.run(debug=debug,
                      grid_search=grid_search,
                      model_id=model_id,
                      data_id=data_id,
                      dir_data=dir_data,
                      dir_output=dir_output)
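The grid_search flag above hands hyper-parameter exploration to the evaluator. Conceptually it reduces to a Cartesian-product loop like the following sketch; the parameter names and values are hypothetical placeholders, not ptranking's actual per-model grids:

from itertools import product

# Hypothetical grid; ptranking defines the real grids per model internally.
grid = {'lr': [1e-3, 1e-4], 'num_layers': [2, 4], 'batch_size': [64, 128]}

def iter_grid(grid):
    # Yield one {name: value} dict per point of the Cartesian product.
    keys = list(grid)
    for values in product(*(grid[k] for k in keys)):
        yield dict(zip(keys, values))

for setting in iter_grid(grid):
    print('would train and evaluate with', setting)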
Example #3
    | LETOR     | MQ2007_Super %  MQ2008_Super %  MQ2007_Semi %  MQ2008_Semi                |
    -----------------------------------------------------------------------------------------
    | MSLRWEB   | MSLRWEB10K %  MSLRWEB30K                                                  |
    -----------------------------------------------------------------------------------------
    | Yahoo_LTR | Set1 % Set2                                                               |
    -----------------------------------------------------------------------------------------
    | ISTELLA_LTR | Istella_S | Istella | Istella_X                                         |
    -----------------------------------------------------------------------------------------

    """

    args_obj = ArgsUtil(given_root='./')
    l2r_args = args_obj.get_l2r_args()

    if l2r_args.model in LTR_ADHOC_MODEL:
        evaluator = LTREvaluator(cuda=l2r_args.cuda)

    elif l2r_args.model in LTR_ADVERSARIAL_MODEL:
        evaluator = AdLTREvaluator(cuda=l2r_args.cuda)

    elif l2r_args.model in LTR_TREE_MODEL:
        evaluator = TreeLTREvaluator()

    else:
        args_obj.args_parser.print_help()
        sys.exit()

    print('Started evaluation with pt_ranking!')
    evaluator.run(model_id=l2r_args.model,
                  dir_json=l2r_args.dir_json,
                  debug=l2r_args.debug,
                  config_with_json=True)  # json-based configuration assumed, since dir_json is passed (cf. the first example)
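The if/elif chain above that maps a model id to an evaluator family is plain dispatch; an equivalent table-driven form (a sketch reusing the model sets and evaluator classes already imported by the original script) is:

def build_evaluator(model, cuda=None):
    # Each entry pairs a model set with a factory for its evaluator family.
    for model_set, factory in (
            (LTR_ADHOC_MODEL, lambda: LTREvaluator(cuda=cuda)),
            (LTR_ADVERSARIAL_MODEL, lambda: AdLTREvaluator(cuda=cuda)),
            (LTR_TREE_MODEL, lambda: TreeLTREvaluator())):
        if model in model_set:
            return factory()
    raise ValueError('unknown model: ' + model)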
Example #4
    | MSLRWEB   | MSLRWEB10K %  MSLRWEB30K                                                  |
    -----------------------------------------------------------------------------------------
    | Yahoo_LTR | Set1 % Set2                                                               |
    -----------------------------------------------------------------------------------------
    | ISTELLA_LTR | Istella_S | Istella | Istella_X                                         |
    -----------------------------------------------------------------------------------------

    """

    print('Started PT_Ranking ...')

    args_obj = ArgsUtil(given_root='./')
    l2r_args = args_obj.get_l2r_args()

    if l2r_args.model in LTR_ADHOC_MODEL:
        evaluator = LTREvaluator(cuda=l2r_args.cuda)

    elif l2r_args.model in LTR_ADVERSARIAL_MODEL:
        evaluator = AdLTREvaluator(cuda=l2r_args.cuda)

    elif l2r_args.model in LTR_TREE_MODEL:
        evaluator = TreeLTREvaluator()
    else:
        raise NotImplementedError

    evaluator.run(debug=True,
                  model_id=l2r_args.model,
                  data_id=l2r_args.data_id,
                  dir_data=l2r_args.dir_data,
                  dir_output=l2r_args.dir_output,
                  grid_search=False)
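ArgsUtil is ptranking's own argument wrapper, and its exact flags are not shown in these snippets. As a rough mental model only, get_l2r_args() returns a namespace like the one this hypothetical argparse stand-in would produce; the flag names below are illustrative, not ptranking's real CLI:

import argparse

# Hypothetical stand-in for ArgsUtil, for illustration only.
parser = argparse.ArgumentParser(description='learning-to-rank runner')
parser.add_argument('-model', type=str, required=True)
parser.add_argument('-data_id', type=str, default=None)
parser.add_argument('-dir_data', type=str, default=None)
parser.add_argument('-dir_output', type=str, default=None)
parser.add_argument('-dir_json', type=str, default=None)
parser.add_argument('-cuda', type=int, default=None)
parser.add_argument('-debug', action='store_true')
l2r_args = parser.parse_args()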