def benchmark_synthetic_d2(selectors, alg_n_half=5, problem_n=1000):
    """
    This synthetic benchmark evaluates AlgorithmSelectors on a very specifically crafted task.
    The benchmark measures how well the AS method can exploit a clear logical relationship.

    The set of 2 * alg_n_half algorithms has the following properties (matching the callback below):

    BEST = random( alg_n_half )
    Algorithms 0 .. alg_n_half-1 are cheap probing algorithms:
        cost(probe_i)  = 0.1 / alg_n_half
        score(probe_i) = 0.1 if i == BEST else 0.0
    Algorithms alg_n_half .. 2*alg_n_half-1 are expensive scoring algorithms:
        cost(score_i)  = 1.0
        score(score_i) = 1.0 if i == BEST else 0.0
    budget = 2.9

    A selector can therefore probe cheaply to identify BEST and then spend the bulk of the budget on the single expensive algorithm that actually scores.

    Parameters
    ----------
    selectors : list
        list of tuples, each consisting of a name and a reference to an AlgorithmSelector class.
        Note that the benchmark creates instances of these classes itself and passes them the set of algorithms.
    alg_n_half : integer
        half of the number of synthetic algorithms; alg_n_half cheap probing and alg_n_half expensive scoring algorithms are created.
    problem_n : integer
        number of problems to evaluate on. Should be much larger than the number of algorithms.

    Returns
    -------
    None
        Nothing is returned; progress statistics are printed and per-problem records are saved to './results/' via the AlgorithmSelectionProcessRecorder.

    Example usage
    -------------
    selectors = list()
    selectors.append( ("MonteCarlo", MonteCarloAlgorithmSelector) )
    selectors.append( ("Conservative", ConservativeAlgorithmSelector) )   
    benchmark_synthetic_d2(selectors = selectors, problem_n = 100000 )

    A minimal sketch of the selector interface assumed by this benchmark can be found directly after this function.
    """
    algorithms = np.arange(alg_n_half * 2)
    problems = np.random.rand(
        problem_n
    )  #random vector of size problem_n with values uniformly distributed in [0, 1)

    #initialize the selector instances from the provided classes
    selectors2 = list()
    selectorNames = list()
    for s in selectors:
        selectors2.append((s[0], s[1](algorithms=algorithms)))
        selectorNames.append(s[0])
    selectors = selectors2

    resultRecorder = AlgorithmSelectionProcessRecorder(
        experiment_id=os.path.basename(__file__),
        noOfAlgorithms=len(selectors),
        noOfProblems=len(problems),
        namesOfAlgorithms=selectorNames)
    summedScores_auc = np.zeros(len(selectors))
    summedScores_finalscore = np.zeros(len(selectors))
    decayingScores_auc = np.zeros(len(selectors))
    decayingScores_finalscore = np.zeros(len(selectors))
    decayFactor = 0.1

    problemCounter = 0
    printDecayStatsAfterNProblems = 100
    printSummedStatsEveryNProblems = 100

    for p in problems:
        problemCounter += 1
        ground_truth = np.random.randint(0, alg_n_half)

        def callback(algorithm):
            if (algorithm < alg_n_half):
                #algorithms 0 through alg_n_half-1 are the cheap probing algorithms

                cost = 0.1 / alg_n_half
                if (algorithm == ground_truth):
                    return cost, 0.1
                else:
                    return cost, 0
            else:
                #algorithms alg_n_half through 2*alg_n_half-1 are the actual (expensive) scoring algorithms
                cost = 1.0
                if ((algorithm - alg_n_half) == ground_truth):
                    return cost, 1.0
                else:
                    return cost, 0.0

        budget = 2.9  # 1.0 for the best scoring algorithm, 0.1 for probing, and the rest as slack to absorb small inaccuracies while still collecting enough reward from the chosen algorithm. Note: a budget of 1.9 would still not allow running two of the expensive scoring algorithms.

        (oCost, oScore, oAlgorithm, oSequence,
         oAUC) = oracle(algorithms, callback, budget)

        counter = 0
        for (selectorName, selector) in selectors:
            asprocess = AlgorithmSelectionProcess(budget=budget,
                                                  callback=callback,
                                                  measuretime=False)
            selector.process(budget=budget, callback=asprocess.callback)
            (bScore, bAlgorithm) = asprocess.getBest()
            nAUC = asprocess.normalizedAUC(maxAUCScore=oAUC)
            nBestScore = bScore / oScore

            resultRecorder.addRecord(counter, problemCounter - 1, {
                'nauc': nAUC,
                'nBestScore': nBestScore
            })
            summedScores_auc[counter] += nAUC
            summedScores_finalscore[counter] += nBestScore
            decayingScores_auc[counter] = decayingScores_auc[counter] * (
                1.0 - decayFactor) + decayFactor * nAUC
            decayingScores_finalscore[counter] = decayingScores_finalscore[
                counter] * (1.0 - decayFactor) + decayFactor * nBestScore

            counter += 1
        printDecayingPerformances = (problemCounter %
                                     printDecayStatsAfterNProblems == 0)
        printSummedPerformances = (problemCounter %
                                   printSummedStatsEveryNProblems == 0)

        #if this was the last problem, print both stats:
        if (p == problems[-1]):
            printDecayingPerformances = True
            printSummedPerformances = True
            resultRecorder.saveRecords(filedir='./results/')

        if (printDecayingPerformances or printSummedPerformances):
            print("=== Round " + str(problemCounter) + " ===")
            print(selectorNames)
        if printDecayingPerformances:
            print("Decaying Ratios:")
            print("rel AUC       : " +
                  str(decayingScores_auc / np.max(decayingScores_auc)) +
                  " BEST: " + str(decayingScores_finalscore /
                                  np.max(decayingScores_finalscore)))
            print("AUC vs. Oracle: " + str(decayingScores_auc) + " BEST: " +
                  str(decayingScores_finalscore))
        if printSummedPerformances:
            print("Average Ratios:")
            print("rel AUC       : " +
                  str(summedScores_auc / np.max(summedScores_auc)) +
                  " BEST: " + str(summedScores_finalscore /
                                  np.max(summedScores_finalscore)))
            print("AUC vs. Oracle: " + str(summedScores_auc / problemCounter) +
                  " BEST: " + str(summedScores_finalscore / problemCounter))
def benchmark_synthetic_d(selectors, problem_n=1000):
    """
    This synthetic benchmark evaluates AlgorithmSelectors on a very specifically crafted task.
    The benchmark measures how well the AS method can exploit a clear logical relationship.
    
    The set of three algorithms (a, b, c) has the following properties:
    
    score(algorithm_a) = score(algorithm_b) * 0.001
    score(algorithm_c) = 1.0 - score(algorithm_b)
    cost(algorithm_a) + max( cost(algorithm_b), cost(algorithm_c) ) < budget < cost(algorithm_a) + cost(algorithm_b) + cost(algorithm_c)
    
    budget = 0.9
    running a: 0.899 left
    running b: 0.1 left
    running c: 0.1 left
    
    Case p <= 0.5 (b is the scoring algorithm, c scores 0):
        a, b -> [0->0.001]=0, [0.001->0.801]=0.8 * 0.001, [0.801->0.9]=0.099 * 1, SUM = 0.0998
        b, c -> [0->0.8]=0, [0.8->0.9]=0.1 * 1, SUM = 0.1 (c does not finish within the budget)
        a, c -> [0->0.001]=0, [0.001->0.9]=0.899 * 0.001, SUM = 0.000899
        c, a -> [0->0.8]=0, [0.8->0.801]=0, [0.801->0.9]=0.099 * 0.001, SUM = 0.000099
        b    -> [0->0.8]=0, [0.8->0.9]=0.1 * 1, SUM = 0.1
        c    -> SUM = 0
        all other: 0
    Case p > 0.5
        symmetric, with b and c swapped

    A small helper that recomputes this case analysis can be found directly after this function.

    Parameters
    ----------
    selectors : list
        list of tuples, each consisting of a name and a reference to an AlgorithmSelector class.
        Note that the benchmark creates instances of these classes itself and passes them the set of algorithms.
    problem_n : integer
        number of problems to evaluate on. Should be much larger than the number of algorithms.

    Returns
    -------
    None
        Nothing is returned; progress statistics are printed and per-problem records are saved to './results/' via the AlgorithmSelectionProcessRecorder.

    Example usage
    -------------
    selectors = list()
    selectors.append( ("MonteCarlo", MonteCarloAlgorithmSelector) )
    selectors.append( ("Conservative", ConservativeAlgorithmSelector) )   
    benchmark_synthetic_d(selectors = selectors, problem_n = 100000 )
    """
    algorithms = ['a', 'b', 'c']
    problems = np.random.rand(
        problem_n
    )  #random vector of size problem_n with values uniformly distributed in [0, 1)
    border = 0.5

    #initialize the selector instances from the provided classes
    selectors2 = list()
    selectorNames = list()
    for s in selectors:
        selectors2.append((s[0], s[1](algorithms=algorithms)))
        selectorNames.append(s[0])
    selectors = selectors2

    resultRecorder = AlgorithmSelectionProcessRecorder(
        experiment_id=os.path.basename(__file__),
        noOfAlgorithms=len(selectors),
        noOfProblems=len(problems),
        namesOfAlgorithms=selectorNames)
    summedScores_auc = np.zeros(len(selectors))
    summedScores_finalscore = np.zeros(len(selectors))
    decayingScores_auc = np.zeros(len(selectors))
    decayingScores_finalscore = np.zeros(len(selectors))
    decayFactor = 0.1

    problemCounter = 0
    printDecayStatsAfterNProblems = 10
    printSummedStatsEveryNProblems = 10

    for p in problems:
        problemCounter += 1

        def callback(algorithm):
            if (algorithm == 'a'):
                return (0.001, (p <= border) * 0.001)
            if (algorithm == 'b'):
                return (0.8, p <= border)
            if (algorithm == 'c'):
                return (0.8, p > border)

        budget = 0.9

        (oCost, oScore, oAlgorithm, oSequence,
         oAUC) = oracle(algorithms, callback, budget)

        counter = 0
        for (selectorName, selector) in selectors:
            asprocess = AlgorithmSelectionProcess(budget=budget,
                                                  callback=callback,
                                                  measuretime=False)
            selector.process(budget=budget, callback=asprocess.callback)
            (bScore, bAlgorithm) = asprocess.getBest()
            nAUC = asprocess.normalizedAUC(maxAUCScore=oAUC)
            nBestScore = bScore / oScore

            resultRecorder.addRecord(counter, problemCounter - 1, {
                'nauc': nAUC,
                'nBestScore': nBestScore
            })
            summedScores_auc[counter] += nAUC
            summedScores_finalscore[counter] += nBestScore
            decayingScores_auc[counter] = decayingScores_auc[counter] * (
                1.0 - decayFactor) + decayFactor * nAUC
            decayingScores_finalscore[counter] = decayingScores_finalscore[
                counter] * (1.0 - decayFactor) + decayFactor * nBestScore

            counter += 1
        printDecayingPerformances = (problemCounter %
                                     printDecayStatsAfterNProblems == 0)
        printSummedPerformances = (problemCounter %
                                   printSummedStatsEveryNProblems == 0)

        #if this was the last problem, print both stats:
        if (p == problems[-1]):
            printDecayingPerformances = True
            printSummedPerformances = True
            resultRecorder.saveRecords(filedir='./results/')

        if (printDecayingPerformances or printSummedPerformances):
            print("=== Round " + str(problemCounter) + " ===")
            print(selectorNames)
        if printDecayingPerformances:
            print("Decaying Ratios:")
            print("rel AUC       : " +
                  str(decayingScores_auc / np.max(decayingScores_auc)) +
                  " BEST: " + str(decayingScores_finalscore /
                                  np.max(decayingScores_finalscore)))
            print("AUC vs. Oracle: " + str(decayingScores_auc) + " BEST: " +
                  str(decayingScores_finalscore))
        if printSummedPerformances:
            print("Average Ratios:")
            print("rel AUC       : " +
                  str(summedScores_auc / np.max(summedScores_auc)) +
                  " BEST: " + str(summedScores_finalscore /
                                  np.max(summedScores_finalscore)))
            print("AUC vs. Oracle: " + str(summedScores_auc / problemCounter) +
                  " BEST: " + str(summedScores_finalscore / problemCounter))
def benchmark_synthetic_a(selectors,
                          alg_n=100,
                          problem_n=10000,
                          budgetfactor=0.25):
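    """
    Synthetic benchmark on alg_n generic algorithms with a simple cost/score model
    (benchmark_synthetic_b is a closely related, documented variant).

    For a problem p, an algorithm's cost is (algorithm % 10) + 1 and its score is
    9 / (1 + |p - algorithm|) + 1, so algorithms whose id is close to the problem
    value score highest.  The budget per problem is alg_n * 10 * budgetfactor.
    Selectors are passed as a list of (name, AlgorithmSelector class) tuples, as
    in the other benchmarks; results are printed and saved to './results/'.
    """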
    algorithms = np.arange(0, alg_n)  #alg_n algorithm ids
    np.random.shuffle(
        algorithms
    )  #shuffle the algorithms in place to destroy their smoothness
    problems = np.random.rand(problem_n) * len(
        algorithms
    )  #random vector of size problem_n with values between 0 and alg_n.

    noOfSelectors = len(selectors)

    #initialize the selector instances from the provided classes
    selectors2 = list()
    selectorNames = list()
    for s in selectors:
        selectors2.append((s[0], s[1](algorithms=algorithms)))
        selectorNames.append(s[0])
    selectors = selectors2

    summedScores_auc = np.zeros(noOfSelectors)
    summedScores_finalscore = np.zeros(noOfSelectors)
    decayingScores_auc = np.zeros(noOfSelectors)
    decayingScores_finalscore = np.zeros(noOfSelectors)
    decayFactor = 0.1

    problemCounter = 0
    printDecayStatsAfterNProblems = 10
    printSummedStatsEveryNProblems = 10

    resultRecorder = AlgorithmSelectionProcessRecorder(
        experiment_id=os.path.basename(__file__),
        noOfAlgorithms=noOfSelectors,
        noOfProblems=problem_n,
        namesOfAlgorithms=selectorNames)

    for p in problems:
        problemCounter += 1

        def callback(algorithm):
            simulatedCost = (algorithm %
                             10) + 1  #cost in {1, ..., 10}, determined by the algorithm id modulo 10
            simulatedScore = (1.0 / (1 + np.abs(p - algorithm))
                              ) * 9 + 1  #score in (1, 10], higher for algorithms closer to the problem p
            return (simulatedCost, simulatedScore)

        budget = len(
            algorithms
        ) * 10 * budgetfactor  #alg_n * 10 is an upper bound on the cost of running every algorithm (max cost is 10), so budgetfactor is roughly the fraction of that worst case available per problem.

        #print("=== Problem: "+str(p)+" b:"+str(budget)+" ===")

        (oCost, oScore, oAlgorithm, oSequence,
         oAUC) = oracle(algorithms, callback, budget)

        #print("oracle oAUC: "+str(oAUC)+" sequence: "+str(oSequence))

        counter = 0
        for (selectorName, selector) in selectors:
            asprocess = AlgorithmSelectionProcess(budget=budget,
                                                  callback=callback,
                                                  measuretime=False)
            selector.process(budget=budget, callback=asprocess.callback)
            (bScore, bAlgorithm) = asprocess.getBest()
            nAUC = asprocess.normalizedAUC(maxAUCScore=oAUC)
            nBestScore = bScore / oScore

            resultRecorder.addRecord(counter, problemCounter - 1, {
                'nauc': nAUC,
                'nBestScore': nBestScore
            })
            summedScores_auc[counter] += nAUC
            summedScores_finalscore[counter] += nBestScore
            decayingScores_auc[counter] = decayingScores_auc[counter] * (
                1.0 - decayFactor) + decayFactor * nAUC
            decayingScores_finalscore[counter] = decayingScores_finalscore[
                counter] * (1.0 - decayFactor) + decayFactor * nBestScore
            counter += 1

        printDecayingPerformances = (problemCounter %
                                     printDecayStatsAfterNProblems == 0)
        printSummedPerformances = (problemCounter %
                                   printSummedStatsEveryNProblems == 0)

        #if this was the last problem, print both stats:
        if (p == problems[-1]):
            printDecayingPerformances = True
            printSummedPerformances = True
            resultRecorder.saveRecords(filedir='./results/')

        if (printDecayingPerformances or printSummedPerformances):
            print("=== Round " + str(problemCounter) + " ===")
            print(selectorNames)
        if printDecayingPerformances:
            print("Decaying Ratios:")
            print("rel AUC       : " +
                  str(decayingScores_auc / np.max(decayingScores_auc)) +
                  " BEST: " + str(decayingScores_finalscore /
                                  np.max(decayingScores_finalscore)))
            print("AUC vs. Oracle: " + str(decayingScores_auc) + " BEST: " +
                  str(decayingScores_finalscore))
        if printSummedPerformances:
            print("Average Ratios:")
            print("rel AUC       : " +
                  str(summedScores_auc / np.max(summedScores_auc)) +
                  " BEST: " + str(summedScores_finalscore /
                                  np.max(summedScores_finalscore)))
            print("AUC vs. Oracle: " + str(summedScores_auc / problemCounter) +
                  " BEST: " + str(summedScores_finalscore / problemCounter))
def benchmark_synthetic_b(selectors, alg_n = 100, problem_n = 10000, budgetfactor = 0.25):
    """
    This synthetic benchmark evaluates AlgorithmSelectors on a set of generic synthetic algorithms and problems.

    The algorithms are designed with the following properties:
    The score is normalized to lie in (1, 10].
    The cost (runtime) is normalized to lie in [1, 10].
    There is no guarantee that any algorithm achieves the maximum or minimum score on a given problem.
    We simulate "hard" problems that require algorithms with higher cost.
    We simulate relations between algorithms, so a selector can learn to map the performance of cheap algorithms to that of well-performing, more expensive ones.

    Parameters
    ----------
    selectors : list
        list of tuples, each consisting of a name and a reference to an AlgorithmSelector class.
        Note that the benchmark creates instances of these classes itself and passes them the set of algorithms.
    alg_n : integer
        number of synthetic algorithms created and used in the benchmark.
    problem_n : integer
        number of problems to evaluate on. Should be much larger than the number of algorithms.
    budgetfactor : float
        how much time with respect to the overall required time to run all algorithms is provided (per problem)

    Returns
    -------
    None
        Nothing is returned; progress statistics are printed and per-problem records are saved to './results/' via the AlgorithmSelectionProcessRecorder.

    Example usage
    -------------
    selectors = list()
    selectors.append( ("MonteCarlo", MonteCarloAlgorithmSelector) )
    selectors.append( ("Conservative", ConservativeAlgorithmSelector) )   
    benchmark_synthetic_b(selectors = selectors, alg_n = 100, problem_n = 100000 )
    """
    algorithms = np.arange(0,alg_n) #alg_n algorithm ids
    np.random.shuffle(algorithms) #shuffle the algorithms in place to destroy their smoothness
    problems = np.random.rand(problem_n)*len(algorithms) #random vector of size=problem_n with numbers between 0 and alg_n.
    
    #initialize the selector instances from the provided classes
    selectors2 = list()
    selectorNames = list()
    for s in selectors:
        selectors2.append((s[0],s[1](algorithms = algorithms)))
        selectorNames.append(s[0])
    selectors = selectors2
    
    resultRecorder = AlgorithmSelectionProcessRecorder(experiment_id = os.path.basename(__file__), noOfAlgorithms = alg_n, noOfProblems = len(problems), namesOfAlgorithms = selectorNames )
    summedScores_auc = np.zeros(len(selectors))
    summedScores_finalscore = np.zeros(len(selectors))
    decayingScores_auc = np.zeros(len(selectors))
    decayingScores_finalscore = np.zeros(len(selectors))
    decayFactor = 0.1

    problemCounter = 0
    printDecayStatsAfterNProblems = 10
    printSummedStatsEveryNProblems = 10
    
    for p in problems:
        problemCounter += 1
        def callback(algorithm):
            simulatedCost = np.power( 10, (p / alg_n) * (algorithm % 100)/100 ) #cost in [1, 10), exponentially distributed; "harder" problems (larger p) make the high-index algorithms more expensive
            simulatedScore = 10 * ( 1.0 - ( np.abs(p - algorithm) / alg_n ) ) #score up to 10, decreasing linearly with the distance |p - algorithm| (a milder distance penalty than in benchmark_synthetic_a)
            return ( simulatedCost, simulatedScore )
            
        
        def getMaxBudget():
            tCost = 0
            for a in algorithms:
                (cost, score) = callback(a)
                tCost += cost
            return tCost
            
        budget = getMaxBudget() * budgetfactor #budgetfactor is the fraction of the total cost of running every algorithm that is available per problem
        
        #print("=== Problem: "+str(p)+" b:"+str(budget)+" ===")
        
        (oCost, oScore, oAlgorithm, oSequence, oAUC) = oracle(algorithms, callback, budget)


        counter = 0
        for (selectorName, selector) in selectors:
            asprocess = AlgorithmSelectionProcess( budget = budget, callback = callback, measuretime = False )
            selector.process(budget = budget, callback = asprocess.callback)
            (bScore, bAlgorithm) = asprocess.getBest()
            nAUC = asprocess.normalizedAUC( maxAUCScore = oAUC )
            nBestScore = bScore / oScore
            
            resultRecorder.addRecord(counter, problemCounter-1, {'nauc':nAUC, 'nBestScore':nBestScore})
            summedScores_auc[counter] += nAUC
            summedScores_finalscore[counter] += nBestScore
            decayingScores_auc[counter] = decayingScores_auc[counter] * (1.0-decayFactor) + decayFactor * nAUC
            decayingScores_finalscore[counter] = decayingScores_finalscore[counter] * (1.0-decayFactor) + decayFactor * nBestScore
            
            counter += 1
        printDecayingPerformances = (problemCounter % printDecayStatsAfterNProblems == 0 )
        printSummedPerformances = (problemCounter % printSummedStatsEveryNProblems == 0 )
        
        #if this was the last problem, print both stats:
        if (p == problems[-1]):
            printDecayingPerformances = True
            printSummedPerformances = True
            resultRecorder.saveRecords(filedir = './results/')
            
        if (printDecayingPerformances or printSummedPerformances):
            print("=== Round "+str(problemCounter)+" ===")
            print(selectorNames)
        if printDecayingPerformances:
            print("Decaying Ratios:")
            print("rel AUC       : "+str( decayingScores_auc / np.max(decayingScores_auc)) + " BEST: " + str( decayingScores_finalscore / np.max(decayingScores_finalscore) ) )
            print("AUC vs. Oracle: "+str( decayingScores_auc ) + " BEST: " + str( decayingScores_finalscore ) )
        if printSummedPerformances:
            print("Average Ratios:")
            print("rel AUC       : "+str( summedScores_auc / np.max(summedScores_auc)) + " BEST: " + str( summedScores_finalscore / np.max(summedScores_finalscore) ) )
            print("AUC vs. Oracle: "+str( summedScores_auc / problemCounter) + " BEST: " + str( summedScores_finalscore / problemCounter ) )
def run(selectors,
        selectorNames,
        algorithms,
        algorithm_names,
        problems,
        costs_matrix,
        scores_matrix,
        budgetfactor,
        resultPrefix=''):
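    """
    Evaluates already-instantiated selectors on problems whose costs and scores
    are given as precomputed matrices, and counts how often each algorithm was
    the oracle's best choice.

    Parameters
    ----------
    selectors : list
        list of tuples, each consisting of a name and an already-instantiated selector
        (unlike the synthetic benchmarks above, which instantiate the classes themselves).
    selectorNames : list
        names of the selectors, in the same order as selectors.
    algorithms : array
        integer algorithm ids, used as row indices into the matrices.
    algorithm_names : list
        human-readable algorithm names, indexed by algorithm id.
    problems : iterable
        integer problem ids, used as column indices into the matrices.
    costs_matrix, scores_matrix : arrays
        cost and score of each algorithm on each problem, indexed as [algorithm, problem].
    budgetfactor : float
        fraction of the total cost of running all algorithms that is available per problem.
    resultPrefix : string
        prefix for the experiment_id used by the result recorder.

    Returns
    -------
    AlgorithmSelectionProcessRecorder
        recorder holding the per-problem 'nauc' and 'nBestScore' records.
    """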
    summedScores_auc = np.zeros(len(selectors))
    summedScores_finalscore = np.zeros(len(selectors))
    decayingScores_auc = np.zeros(len(selectors))
    decayingScores_finalscore = np.zeros(len(selectors))
    decayFactor = 0.1
    regularization = 1e-5  # small positive constant to avoid division by zero below

    problemCounter = 0
    printDecayStatsAfterNProblems = 100
    printSummedStatsEveryNProblems = 100

    noOfSelectors = len(selectors)
    bestAlgorithms = np.zeros(len(
        algorithms))  #for counting how often one algorithm was the best one.

    resultRecorder = AlgorithmSelectionProcessRecorder(
        experiment_id=str(resultPrefix) + str(os.path.basename(__file__)),
        noOfAlgorithms=noOfSelectors,
        noOfProblems=len(problems),
        namesOfAlgorithms=selectorNames)

    for p in problems:
        problemCounter += 1

        def callback(algorithm):
            result = (costs_matrix[algorithm, p], scores_matrix[algorithm, p])
            return result

        def getMaxBudget():
            tCost = 0
            for a in algorithms:
                (cost, score) = callback(a)
                tCost += cost
            return tCost

        budget = getMaxBudget(
        ) * budgetfactor  #budgetfactor is the fraction of the total cost of running every algorithm that is available per problem

        #print("=== Problem: "+str(p)+" b:"+str(budget)+" ===")

        (oCost, oScore, oAlgorithm, oSequence,
         oAUC) = oracle(algorithms, callback, budget)
        #oNormAUC = ( budget - oCost ) / budget
        bestAlgorithms[oAlgorithm] += 1
        #print("Oracle:   score "+str(oScore)+" algorithm: "+str(oAlgorithm)+" cost: "+str(oCost)+" NormAUC: "+str(oNormAUC))

        counter = 0
        for (selectorName, selector) in selectors:
            asprocess = AlgorithmSelectionProcess(budget=budget,
                                                  callback=callback,
                                                  measuretime=False)
            selector.process(budget=budget, callback=asprocess.callback)
            (bScore, bAlgorithm) = asprocess.getBest()
            nAUC = asprocess.normalizedAUC(maxAUCScore=oAUC)
            if (bScore is None):
                bScore = 0

            nBestScore = 0
            if (oScore <= 0):
                oScore = regularization

            if not (oScore is None):
                nBestScore = bScore / oScore

            resultRecorder.addRecord(counter, problemCounter - 1, {
                'nauc': nAUC,
                'nBestScore': nBestScore
            })
            summedScores_auc[counter] += nAUC
            summedScores_finalscore[counter] += nBestScore
            decayingScores_auc[counter] = decayingScores_auc[counter] * (
                1.0 - decayFactor) + decayFactor * nAUC
            decayingScores_finalscore[counter] = decayingScores_finalscore[
                counter] * (1.0 - decayFactor) + decayFactor * nBestScore

            counter += 1
        printDecayingPerformances = (problemCounter %
                                     printDecayStatsAfterNProblems == 0)
        printSummedPerformances = (problemCounter %
                                   printSummedStatsEveryNProblems == 0)

        #if this was the last problem, print both stats:
        if (p == problems[-1]):
            printDecayingPerformances = True
            printSummedPerformances = True

        if (printDecayingPerformances or printSummedPerformances):
            print("=== Round " + str(problemCounter) + " ===")
            print(selectorNames)
        if printDecayingPerformances:
            print("Decaying Ratios:")
            print("rel AUC       : " +
                  str(decayingScores_auc / np.max(decayingScores_auc)) +
                  " BEST: " + str(decayingScores_finalscore /
                                  np.max(decayingScores_finalscore)))
            print("AUC vs. Oracle: " + str(decayingScores_auc) + " BEST: " +
                  str(decayingScores_finalscore))
        if printSummedPerformances:
            print("Average Ratios:")
            print("rel AUC       : " +
                  str(summedScores_auc / np.max(summedScores_auc)) +
                  " BEST: " + str(summedScores_finalscore /
                                  np.max(summedScores_finalscore)))
            print("AUC vs. Oracle: " + str(summedScores_auc / problemCounter) +
                  " BEST: " + str(summedScores_finalscore / problemCounter))

    sortedIndexes = np.argsort(bestAlgorithms)
    algorithms = np.flip((algorithms[sortedIndexes]))
    for alg_index in algorithms:
        if (bestAlgorithms[alg_index] > 0):
            print(
                str(algorithm_names[alg_index]) + " was " +
                str(int(bestAlgorithms[alg_index])) + " times the best one.")

    return resultRecorder
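

# --- Illustration only (not part of the experiments) ----------------------
# A hypothetical sketch of how run() can be driven from precomputed cost and
# score matrices.  RandomOrderSelectorSketch is the illustrative selector
# defined earlier in this file; in a real experiment it would be replaced by
# the actual selector classes, and the random matrices by measured costs and
# scores.
def _run_demo(n_algorithms=5, n_problems=50, budgetfactor=0.25):
    algorithms = np.arange(n_algorithms)
    algorithm_names = ["alg_%d" % a for a in algorithms]
    problems = np.arange(n_problems)  # column indices into the matrices
    costs_matrix = np.random.rand(n_algorithms, n_problems) * 9 + 1  # costs in [1, 10)
    scores_matrix = np.random.rand(n_algorithms, n_problems) * 10    # scores in [0, 10)
    selectors = [("RandomSketch", RandomOrderSelectorSketch(algorithms=algorithms))]
    selectorNames = [name for (name, _) in selectors]
    return run(selectors, selectorNames, algorithms, algorithm_names, problems,
               costs_matrix, scores_matrix, budgetfactor, resultPrefix='demo_')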