Example #1
def main():
    setMemoryLimit(10000000000)

    # create the top-level parser
    parser = argparse.ArgumentParser(
        prog='FA*IR',
        description='a fair Top-k ranking algorithm',
        epilog="=== === === end === === ===")
    parser.add_argument(
        "-c",
        "--create",
        nargs='*',
        help="creates a ranking from the raw data and dumps it to disk")
    parser.add_argument("-e",
                        "--evaluate",
                        nargs='*',
                        help="evaluates rankings and writes results to disk")
    subparsers = parser.add_subparsers(help='sub-command help')

    # create the parser for the "create" command
    parser_create = subparsers.add_parser('dataset_create',
                                          help='choose a dataset to generate')
    parser_create.add_argument(
        dest='dataset_to_create',
        choices=["sat", "compas", "germancredit", "xing", "csat"])

    # create the parser for the "evaluate" command
    parser_evaluate = subparsers.add_parser(
        'dataset_evaluate', help='choose a dataset to evaluate')
    parser_evaluate.add_argument(dest='dataset_to_evaluate',
                                 choices=[
                                     "sat", "xing"
                                     "compas_gender", "compas_race",
                                     "germancredit_25", "germancredit_35",
                                     "germancredit_gender"
                                 ])

    args = parser.parse_args()

    if args.create == []:
        print("creating rankings for all datasets...")
        createDataAndRankings()
    elif args.create == ['sat']:
        createAndRankSATData(1500)
    elif args.create == ['compas']:
        createAndRankCOMPASData(1000)
    elif args.create == ['germancredit']:
        createAndRankGermanCreditData(100)
    elif args.create == ['xing']:
        createAndRankXingData(40)
    elif args.create == ['csat']:
        createAndRankChileData(1500)
    elif args.create == ['syntheticsat']:
        createSyntheticSAT(1000)
    #=======================================================
    elif args.evaluate == []:
        evaluator = Evaluator()
        evaluator.printResults()
    elif args.evaluate == ['compas_gender']:
        evaluator = Evaluator('compas_gender')
        evaluator.printResults()
        evaluator.plotOrderingUtilityVsPercentageOfProtected()
    elif args.evaluate == ['compas_race']:
        evaluator = Evaluator('compas_race')
        evaluator.printResults()
    elif args.evaluate == ['germancredit_25']:
        evaluator = Evaluator('germancredit_25')
        evaluator.printResults()
        evaluator.plotOrderingUtilityVsPercentageOfProtected()
    elif args.evaluate == ['germancredit_35']:
        evaluator = Evaluator('germancredit_35')
        evaluator.printResults()
    elif args.evaluate == ['germancredit_gender']:
        evaluator = Evaluator('germancredit_gender')
        evaluator.printResults()
    elif args.evaluate == ['xing']:
        evaluator = Evaluator('xing')
        evaluator.printResults()
    elif args.evaluate == ['sat']:
        evaluator = Evaluator('sat')
        evaluator.printResults()

    else:
        print(
            "FA*IR \n running the full program \n Press ctrl+c to abort \n \n")
        createDataAndRankings()
        evaluator = Evaluator()
        evaluator.printResults()

        if EVALUATE_FAILURE_PROBABILITY:
            determineFailProbOfGroupFairnessTesterForStoyanovichRanking()
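
A note on the dispatch above: because both --create and --evaluate are declared with nargs='*', argparse stores None when a flag is absent, an empty list when the flag is given without a value, and a list of the supplied values otherwise; the if/elif chain relies on exactly that distinction. The following minimal sketch (standalone, not part of the repository) reproduces just this behaviour:

import argparse

# stripped-down copy of the two optional flags used above
parser = argparse.ArgumentParser(prog='FA*IR')
parser.add_argument("-c", "--create", nargs='*')
parser.add_argument("-e", "--evaluate", nargs='*')

print(parser.parse_args([]).create)                # None    -> no branch matches, falls through to the final else
print(parser.parse_args(["-c"]).create)            # []      -> "creating rankings for all datasets..."
print(parser.parse_args(["-c", "sat"]).create)     # ['sat'] -> createAndRankSATData(...)
print(parser.parse_args(["-e", "xing"]).evaluate)  # ['xing'] -> Evaluator('xing')
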
Example #2
def main():
    setMemoryLimit(10000000000)

    # create the top-level parser
    parser = argparse.ArgumentParser(prog='FA*IR', description='a fair Top-k ranking algorithm',
                                     epilog="=== === === end === === ===")
    parser.add_argument("-c", "--create", nargs='*', help="creates a ranking from the raw data and dumps it to disk")
    parser.add_argument("-e", "--evaluate", nargs='*', help="evaluates rankings and writes results to disk")
    subparsers = parser.add_subparsers(help='sub-command help')

    # create the parser for the "create" command
    parser_create = subparsers.add_parser('dataset_create', help='choose a dataset to generate')
    parser_create.add_argument(dest='dataset_to_create', choices=["sat", "compas", "germancredit", "xing", "chilesat", "lsat"])

    # create the parser for the "evaluate" command
    parser_evaluate = subparsers.add_parser('dataset_evaluate', help='choose a dataset to evaluate')
    parser_evaluate.add_argument(dest='dataset_to_evaluate', choices=["sat", "xing",
                                                                  "compas_gender", "compas_race",
                                                                  "germancredit_25", "germancredit_35", "germancredit_gender"])

    args = parser.parse_args()

    if args.create == []:
        print("creating rankings for all datasets...")
        createDataAndRankings()
    elif args.create == ['sat']:
        createAndRankSATData()
    elif args.create == ['compas']:
        createAndRankCOMPASData()
    elif args.create == ['germancredit']:
        createAndRankGermanCreditData()
    elif args.create == ['xing']:
        createAndRankXingData()
    elif args.create == ['chilesat']:
        createAndRankChileData()
        # convert the ranking pickles of every cross-validation fold to CSV,
        # once for the gender splits and once for the highschool splits
        for group in ["gender", "highschool"]:
            for fold in ["fold_1/", "fold_2/", "fold_3/", "fold_4/", "fold_5/"]:
                convertFAIRPicklesToCSV("../results/rankingDumps/ChileSAT/" + group + "/" + fold,
                                        "../../Meike-FairnessInL2R-Code/octave-src/sample/ChileUni/NoSemi/" + group + "/",
                                        fold=fold)
    elif args.create == ['lsat']:
        # createAndRankLSATData()
        # convert the LSAT ranking pickles of each protected-group setting to CSV
        for group in ["gender", "race_asian", "race_black", "race_hispanic",
                      "race_mexican", "race_puertorican"]:
            convertFAIRPicklesToCSV("../results/rankingDumps/LSAT/" + group + "/",
                                    "../../Meike-FairnessInL2R-Code/octave-src/sample/LawStudents/" + group + "/")
    elif args.create == ['trec']:
        createAndRankTRECData()
        convertFAIRPicklesToCSV("../results/rankingDumps/TREC/fold_1/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/TREC/",
                                fold="fold_1/")
        convertFAIRPicklesToCSV("../results/rankingDumps/TREC/fold_2/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/TREC/",
                                fold="fold_2/")
        convertFAIRPicklesToCSV("../results/rankingDumps/TREC/fold_3/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/TREC/",
                                fold="fold_3/")
        convertFAIRPicklesToCSV("../results/rankingDumps/TREC/fold_4/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/TREC/",
                                fold="fold_4/")
        convertFAIRPicklesToCSV("../results/rankingDumps/TREC/fold_5/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/TREC/",
                                fold="fold_5/")
        convertFAIRPicklesToCSV("../results/rankingDumps/TREC/fold_6/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/TREC/",
                                fold="fold_6/")
    elif args.create == ['syntheticsat']:
        createSyntheticSAT()
    #=======================================================
    elif args.evaluate == []:
        evaluator = Evaluator()
        evaluator.printResults()
    elif args.evaluate == ['compas_gender']:
        evaluator = Evaluator('compas_gender')
        evaluator.printResults()
        evaluator.plotOrderingUtilityVsPercentageOfProtected()
    elif args.evaluate == ['compas_race']:
        evaluator = Evaluator('compas_race')
        evaluator.printResults()
    elif args.evaluate == ['germancredit_25']:
        evaluator = Evaluator('germancredit_25')
        evaluator.printResults()
        evaluator.plotOrderingUtilityVsPercentageOfProtected()
    elif args.evaluate == ['germancredit_35']:
        evaluator = Evaluator('germancredit_35')
        evaluator.printResults()
    elif args.evaluate == ['germancredit_gender']:
        evaluator = Evaluator('germancredit_gender')
        evaluator.printResults()
    elif args.evaluate == ['xing']:
        evaluator = Evaluator('xing')
        evaluator.printResults()
    elif args.evaluate == ['sat']:
        evaluator = Evaluator('sat')
        evaluator.printResults()

    else:
        print("FA*IR \n running the full program \n Press ctrl+c to abort \n \n")
        createDataAndRankings()
        evaluator = Evaluator()
        evaluator.printResults()

        if EVALUATE_FAILURE_PROBABILITY:
            determineFailProbOfGroupFairnessTesterForStoyanovichRanking()
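
setMemoryLimit is defined elsewhere in the repository and is not shown in these excerpts. On Unix-like systems such a limiter is typically a thin wrapper around the standard resource module; the sketch below is only an assumption about its shape, not the repository's actual implementation:

import resource

def setMemoryLimit(max_bytes):
    # hypothetical sketch: cap the virtual address space of the current process
    # at max_bytes while keeping the existing hard limit (Unix-only; the
    # resource module is unavailable on Windows)
    _, hard = resource.getrlimit(resource.RLIMIT_AS)
    resource.setrlimit(resource.RLIMIT_AS, (max_bytes, hard))
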
Example #3
def main():
    setMemoryLimit(10000000000)

    # create the top-level parser
    parser = argparse.ArgumentParser(prog='FA*IR', description='a fair Top-k ranking algorithm',
                                     epilog="=== === === end === === ===")
                                     # argument_default="-a")
    parser.add_argument("-c", "--create", nargs='*', help="creates a ranking and dumps it to disk")
    parser.add_argument("-e", "--evaluate", nargs='*', help="evaluates and transposes results")
    parser.add_argument("-r", "--rank", nargs='*', help="ranks")
    # parser.set_defaults(func='run_whole_prog')
    subparsers = parser.add_subparsers(help='sub-command help')

    # create the parser for the "create" command
    parser_create = subparsers.add_parser('dataset_create', help='choose a dataset to generate')
    parser_create.add_argument(dest='dataset_to_create', choices=["sat", "compas", "germancredit", "xing"])

    # create the parser for the "evaluate" command
    parser_evaluate = subparsers.add_parser('dataset_evaluate', help='choose a dataset to evaluate')
    parser_evaluate.add_argument(dest='dataset_to_evaluate',
                                 choices=["sat", "xing", "compas_gender", "compas_race",
                                          "germancredit_25", "germancredit_35", "germancredit_gender"])

    # create the parser for the "rank" command
    # parser_evaluate = subparsers.add_parser('dataset_to_rank', help='choose a dataset to rank')
    # parser_evaluate.add_argument("-d", "--dataset", choices=["sat", "compas", "germancredit", "xing"])

    args = parser.parse_args()

    if args.create == []:
        print("creating rankings for all datasets...")
        createRankingsAndWriteToDisk()
    elif args.create == ['sat']:
        createSATData(1500)
    elif args.create == ['compas']:
        createCOMPASData(1000)
    elif args.create == ['germancredit']:
        createGermanCreditData(100)
    elif args.create == ['xing']:
        createXingData(40)
    elif args.evaluate == []:
        evaluator = Evaluator()
        evaluator.printResults()
    elif args.evaluate == ['compas_gender']:
        evaluator = Evaluator('compas_gender')
        evaluator.printResults()
    elif args.evaluate == ['compas_race']:
        evaluator = Evaluator('compas_race')
        evaluator.printResults()
    elif args.evaluate == ['germancredit_25']:
        evaluator = Evaluator('germancredit_25')
        evaluator.printResults()
    elif args.evaluate == ['germancredit_35']:
        evaluator = Evaluator('germancredit_35')
        evaluator.printResults()
    elif args.evaluate == ['germancredit_gender']:
        evaluator = Evaluator('germancredit_gender')
        evaluator.printResults()
    elif args.evaluate == ['xing']:
        evaluator = Evaluator('xing')
        evaluator.printResults()
    elif args.evaluate == ['sat']:
        evaluator = Evaluator('sat')
        evaluator.printResults()

    else:
        print("FA*IR \n running the full program \n Press ctrl+c to abort \n \n")
        createRankingsAndWriteToDisk()
        evaluator = Evaluator()
        evaluator.printResults()
#       in between commits
        if EVALUATE_FAILURE_PROBABILITY:
            determineFailProbOfGroupFairnessTesterForStoyanovichRanking()
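
Each excerpt above defines only main(); in the repository the module presumably ends with the standard Python entry-point guard, which the excerpts do not show:

if __name__ == '__main__':
    main()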