]

    evaluator = AdLTREvaluator(cuda=cuda)

    if config_with_json:  # specify configuration with json files
        # the directory of json files (uncomment exactly one of the following)
        #dir_json = '/home/dl-box/WorkBench/ExperimentBench/ALTR/ecir2021/irgan/mq2008_json/'
        #dir_json = '/home/dl-box/WorkBench/ExperimentBench/ALTR/ecir2021/irgan/ms30k_json/'

        #dir_json = '/home/dl-box/WorkBench/ExperimentBench/ALTR/ecir2021/irfgan/mq2008_json/'
        dir_json = '/home/dl-box/WorkBench/ExperimentBench/ALTR/ecir2021/irfgan/ms30k_json/'

        #dir_json = '/Users/dryuhaitao/WorkBench/Dropbox/CodeBench/GitPool/irgan_ptranking/testing/ltr_adversarial/json/'
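        # For orientation, a json-config directory is assumed to hold the parameter
        # files read by AdLTREvaluator; the filenames below are illustrative
        # assumptions only and vary across ptranking versions:
        #   dir_json/
        #       Data_Eval_ScoringFunction.json   # data / evaluation / scoring-function settings
        #       IRGAN_PairParameter.json         # model-specific hyper-parameters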

        for model_id in models_to_run:
            evaluator.run(debug=debug, model_id=model_id, config_with_json=config_with_json, dir_json=dir_json)

    else:  # specify configuration manually
        data_id = 'MQ2008_Super'

        ''' Location of the adopted data '''
        dir_data = '/Users/dryuhaitao/WorkBench/Corpus/LETOR4.0/MQ2008/'
        #dir_data = '/home/dl-box/WorkBench/Datasets/L2R/LETOR4.0/MQ2008/'
        #dir_data = '/Users/solar/WorkBench/Datasets/L2R/LETOR4.0/MQ2008/'

        ''' Output directory '''
        dir_output = '/Users/dryuhaitao/WorkBench/CodeBench/Bench_Output/NeuralLTR/ALTR/'
        #dir_output = '/home/dl-box/WorkBench/CodeBench/PyCharmProject/Project_output/Out_L2R/Listwise/'
        #dir_output = '/Users/solar/WorkBench/CodeBench/PyCharmProject/Project_output/Out_L2R/'

        grid_search = False  # with grid_search, we can explore the effects of different hyper-parameters of a model

        for model_id in models_to_run:
            evaluator.run(debug=debug, model_id=model_id, data_id=data_id,
                          dir_data=dir_data, dir_output=dir_output, grid_search=grid_search)

    """
    A second run, configured inline (without json files), follows. Provided example
    settings include, e.g.:
    -----------------------------------------------------------------------------------------
    | IRGAN_MQ2008_Semi                                                                      |
    -----------------------------------------------------------------------------------------
    """
    ''' selected dataset '''
    data_id = 'MQ2007_Super'
    ''' location of the adopted data '''
    dir_data = os.path.join(DATASET_DIR, 'MQ2007/')
    #dir_data = '/home/dl-box/WorkBench/Datasets/L2R/LETOR4.0/MQ2008/'  # note: MQ2008 path; adjust data_id to match
    #dir_data = '/Users/solar/WorkBench/Datasets/L2R/LETOR4.0/MQ2008/'  # note: MQ2008 path; adjust data_id to match
    ''' output directory '''
    dir_output = os.path.join(PROJECT_OUTPUT_DIR, 'NeuralLTR/Listwise/')
    #dir_output = '/home/dl-box/WorkBench/CodeBench/PyCharmProject/Project_output/Out_L2R/Listwise/'
    #dir_output = '/Users/solar/WorkBench/CodeBench/PyCharmProject/Project_output/Out_L2R/'
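    # NOTE: DATASET_DIR and PROJECT_OUTPUT_DIR are assumed to be defined near the
    # top of this script; hypothetical example values:
    #   DATASET_DIR = os.path.expanduser('~/WorkBench/Datasets/L2R/LETOR4.0/')
    #   PROJECT_OUTPUT_DIR = os.path.expanduser('~/WorkBench/Project_output/')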

    debug = True  # debug mode performs a quick test run, e.g., to check that the model operates end-to-end

    grid_search = False  # with grid_search, we can explore the effects of different hyper-parameters of a model

    evaluator = AdLTREvaluator()  # as above, AdLTREvaluator(cuda=...) can be used to select a GPU

    to_run_models = ['IRGAN_Pair']
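    # Several models can be evaluated back-to-back; ids other than IRGAN_Pair
    # (e.g., 'IRGAN_Point', 'IRGAN_List') are assumed variants from the IRGAN family:
    #to_run_models = ['IRGAN_Point', 'IRGAN_Pair', 'IRGAN_List']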

    for model_id in to_run_models:
        evaluator.run(debug=debug,
                      model_id=model_id,
                      data_id=data_id,
                      dir_data=dir_data,
                      dir_output=dir_output,
                      grid_search=grid_search)
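
    # A grid-search sketch (assumption): the same call with grid_search=True is
    # expected to sweep the hyper-parameter grids defined for the chosen model:
    #for model_id in to_run_models:
    #    evaluator.run(debug=False, model_id=model_id, data_id=data_id,
    #                  dir_data=dir_data, dir_output=dir_output, grid_search=True)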