Example #1

# Imports assumed from usage in this excerpt: 'promethee' (aliased as 'prom')
# is the module mentioned in the docstrings, 'data_reader' (aliased as 'dr')
# is assumed to be the project module providing open_raw(), and kendalltau
# comes from scipy.stats.
import random

from scipy import stats

import data_reader as dr
import promethee as prom


def compare_refflows():
    """Check if the ref-flow computed with ReferencedPII object is correct."""
    data_set = 'HDI'
    random.seed()
    seed = random.randint(1, 1000)
    print(seed)
    alt_num = 20
    ref_number = 4
    strategy = prom.strategy2

    input_file = 'data/' + str(data_set) + '/raw.csv'
    alternatives = dr.open_raw(input_file)[0]

    referenced = prom.ReferencedPII(alternatives, strategy=strategy, seed=seed)

    SRP = referenced.SRP
    ref_scores = referenced.scores
    for i, alt in enumerate(alternatives):
        SRP_alt = SRP[:]
        SRP_alt.append(alt)
        promethee = prom.PrometheeII(SRP_alt, seed=seed)
        scores = promethee.scores
        if abs(scores[-1] - ref_scores[i]) < 1e-5:
            print("ok")
        else:
            print("There is something wrong")
            print(scores)


def count_draws(threshold=0.001):
    """Test with the EPI, SHA and GEQ datasets.

    This test counts the number of draws.
    """
    data_sets = ['SHA', 'EPI', 'GEQ']
    output = "res/ReferencedPII/reference_quantity/thresh_" + str(threshold) \
        + ".txt"

    # Change these parameters if needed
    ref_numbers = [2, 3, 5, 10, 15, 25]
    alternative_numbers = [10, 20, 40, 80]
    seed_list = range(20)

    ref_set_strategy = prom.strategy1

    all_res = []
    for ref_number in ref_numbers:
        res = []
        for alt_number in alternative_numbers:
            tot = 0
            for seed in seed_list:
                for data_set in data_sets:
                    source = "data/" + data_set + "/raw.csv"
                    alts = dr.open_raw(source)[0]
                    ref_prom = prom.ReferencedPII(alts,
                                                  alt_num=alt_number,
                                                  strategy=ref_set_strategy,
                                                  seed=seed,
                                                  ref_num=ref_number)
                    tot += ref_prom.draws_quantity(ref_prom.scores, threshold)
            res.append(tot)
        all_res.append(res)
    print_to_file(output, ref_numbers, alternative_numbers, seed_list, all_res)
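
# The draws_quantity method and the print_to_file helper used above are not
# included in this excerpt. As an illustration only, and under the assumption
# that a "draw" means two alternatives whose referenced net flows lie within
# 'threshold' of each other, a self-contained sketch of such a count could look
# like this (hypothetical helper, not the project's implementation):
def count_score_draws(scores, threshold=0.001):
    """Count pairs of scores closer to each other than 'threshold'."""
    draws = 0
    for i in range(len(scores)):
        for j in range(i + 1, len(scores)):
            if abs(scores[i] - scores[j]) < threshold:
                draws += 1
    return draws

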
def compare(tests_qty=3):
    """Compare the different stratiegies."""
    output = "res/ReferencedPII/strategies/comparisons.txt"
    data_sets = ['EPI', 'SHA', 'GEQ']
    # data_sets = ['HDI']
    range_seed = range(0, 0 + tests_qty)
    alt_num = 30
    ref_number = 4
    strategies = [
        prom.strategy1, prom.strategy2, prom.strategy3, prom.strategy4
    ]
    # strategies = [prom.strategy2]

    kendall_taus = [[] for _ in strategies]  # One list for each strategy
    titles = []

    for data_set in data_sets:
        input_file = 'data/' + str(data_set) + '/raw.csv'
        alternatives = dr.open_raw(input_file)[0]

        for seed in range_seed:
            promethee = prom.PrometheeII(alternatives,
                                         seed=seed,
                                         alt_num=alt_num)
            prom_ranking = promethee.ranking

            title = data_set + str(seed)
            titles.append(title)

            for i, strategy in enumerate(strategies):
                referenced = prom.ReferencedPII(alternatives,
                                                seed=seed,
                                                strategy=strategy,
                                                alt_num=alt_num)
                refrank = referenced.ranking
                tau = stats.kendalltau(refrank, prom_ranking)[0]
                tau = int(tau * 1000) / 1000  # truncate to three decimals
                kendall_taus[i].append(tau)

    print_to_file(output, titles, kendall_taus, tests_qty)
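
# A minimal usage sketch (not part of the original excerpt): the three checks
# defined above could be run as a script, assuming the 'data/<set>/raw.csv'
# files, the 'res/ReferencedPII/...' output directories and the print_to_file
# helper (not shown here) are available.
if __name__ == '__main__':
    compare_refflows()
    count_draws(threshold=0.001)
    compare(tests_qty=3)
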
Example #4

# Imports assumed from usage in this excerpt: both 'prom' (in genetic_search)
# and 'PII' (in the constructor further below) appear to be aliases of the
# promethee module referenced in the docstrings; the genetic-algorithm helpers
# called below are defined elsewhere in the original source.
import random

import promethee as prom
import promethee as PII


def genetic_search(alternatives, seed=None, weights=None, ceils=None,
                   coefficients=None, alt_num=-1, SRP_size=4, pop_size=600,
                   mut_prob=0.01, MAXIT=50):
    """Search for references sets reproducing PII with a genetic algorithm.

    Inputs:
        alternatives - matrix composed of one list of evaluations for each
                       alternative.

        seed - seed provided to the Python pseudo-random number generator. It
               is used to create some random (w, F) for the method if these are
               not provided as arguments. See promethee.py for how this is
               done.

        weights - list of the relative importance (or weights) of all criteria.

        ceils - list of the values of the strict preference thresholds for all
                criteria (p).

        coefficients - if 'ceils' is not provided, some new ceils will be
                       computed as these coefficients times the amplitude
                       between the highest and lowest evaluation of each
                       criterion.

        alt_num - quantity of alternatives from 'alternatives' which must be
                  kept.

        SRP_size - quantity of reference profiles searched.

        pop_size - size of the population.

        mut_prob - probability of mutation of each evaluation of each
                   individual.

        MAXIT - maximal number of iterations of the procedure.
    """
    # Initialisation of the PrometheeII, ReferencedPII objects
    promethee = prom.PrometheeII(alternatives, seed=seed, alt_num=alt_num,
                                 ceils=ceils, weights=weights,
                                 coefficients=coefficients)
    prom_ranking = promethee.ranking
    random.seed()

    population = initial_population(alternatives, pop_size, SRP_size)
    referenced = prom.ReferencedPII(alternatives, seed=seed, alt_num=alt_num,
                                    ceils=ceils, weights=weights,
                                    ref_set=population[0],
                                    coefficients=coefficients)

    evaluations = compute_evaluations(population, prom_ranking, referenced)

    best_score = max(evaluations)
    best_SRP_ever = population[evaluations.index(best_score)]

    it = 0
    while abs(best_score - 1) > 1e-5 and it < MAXIT:
        # print("it:" + str(it) + '  best score:' + str(best_score))
        parents = chose_parents(population, evaluations, pop_size)
        population = combine_parents(parents)
        population = mutate_population(population, mut_prob)
        evaluations = compute_evaluations(population, prom_ranking, referenced)
        if max(evaluations) > best_score:
            best_score = max(evaluations)
            best_SRP_ever = population[evaluations.index(best_score)]
        it += 1

    return best_score
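
# The genetic-algorithm helpers called above (initial_population,
# compute_evaluations, chose_parents, combine_parents, mutate_population) are
# defined elsewhere in the original source. As an illustration only, a minimal
# sketch of what initial_population might look like, assuming each individual
# is a set of SRP_size reference profiles whose evaluations are drawn uniformly
# between the per-criterion minimum and maximum of the alternatives:
def initial_population_sketch(alternatives, pop_size, SRP_size):
    """Hypothetical stand-in for initial_population (an assumption, not the
    original implementation)."""
    crit_number = len(alternatives[0])
    mins = [min(alt[c] for alt in alternatives) for c in range(crit_number)]
    maxs = [max(alt[c] for alt in alternatives) for c in range(crit_number)]
    population = []
    for _ in range(pop_size):
        individual = [[random.uniform(mins[c], maxs[c])
                       for c in range(crit_number)]
                      for _ in range(SRP_size)]
        population.append(individual)
    return population


# The __init__ method below belongs to a class whose definition is not included
# in this excerpt; judging from its attributes, it sets up the PrometheeII and
# ReferencedPII objects used by an iterative procedure that searches for
# admissible reference profiles.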
    def __init__(self,
                 init_alternatives,
                 seed=0,
                 alt_num=30,
                 ref_number=4,
                 pts_per_random_it=200,
                 random_add_it=500,
                 divide_it=5,
                 desired_points=3000):
        """Constructor.

        Inputs:
            init_alternatives - matrix composed of one list of evaluations for
                                each alternative.
            seed - used to generate some pseudo random parameters.
            alt_num - maximal number of alternatives on which the procedure
                      must be applied.
            ref_number - number of reference profiles in each set.
            pts_per_random_it - minimal number of points that the procedure
                                tries to add at random 'simultaneously'. This
                                is repeated 'random_add_it' times at each
                                iteration of the procedure.
            random_add_it - number of times per iteration of the procedure that
                            'pts_per_random_it' points are considered for
                            addition to the set of all admissible points.
            divide_it - number of times we try to add a new point near an
                        admissible one (for each of the admissible ones).
            desired_points - desired size of the set of admissible points after
                             each iteration.

            These last four arguments are needed because it is computationally
            infeasible to start with a large enough set of admissible points.
            Therefore, some new points are added at each iteration. More
            information can be found in the 'round_add_points' function.
        """
        self.ref_number = ref_number
        self.pts_per_random_it = pts_per_random_it
        self.desired_points = desired_points
        self.seed = seed
        self.random_add_it = random_add_it
        self.divide_it = divide_it
        self.promethee = PII.PrometheeII(init_alternatives,
                                         seed=self.seed,
                                         alt_num=alt_num)
        self.PII_ranking = self.promethee.ranking
        self.alternatives = self.promethee.alternatives

        # Used to add new points
        self.min_per_crit = [
            min(crit) for crit in self.promethee.eval_per_crit
        ]
        self.max_per_crit = [
            max(crit) for crit in self.promethee.eval_per_crit
        ]
        self.delta_per_crit = [
            self.max_per_crit[crit] - self.min_per_crit[crit]
            for crit in range(len(self.max_per_crit))
        ]

        self.crit_number = len(self.promethee.alternatives[0])

        # SRP only used to initialise the referenced promethee object
        SRP = [[1 for i in range(self.crit_number)] for r in range(ref_number)]
        self.referenced = PII.ReferencedPII(init_alternatives,
                                            seed=self.seed,
                                            alt_num=alt_num,
                                            ref_set=SRP)

        if not PII.check_parameters(self.promethee, self.referenced):
            print('parameters not equal between methods')
            exit()

        # This list contains all points which are still admissible at any given
        # iteration but which do not exactly reproduce the PII ranking. Points
        # reproducing the PII ranking are kept in another list for performance
        # reasons.
        self.admissible_points = []
        self.correct_points = []
        self.constraints = []

        # Matrix that keeps track of the Kendall taus (one list per iteration)
        self.kendall_taus = []

        self.add_initial_points()

        # define the template for printing the iteration analysis
        self.it_template = "{:^3d}|{: ^9d}|{: ^10d}|" \
            + "{:^7d}|{: ^7.3f}|{: ^7.3f}|{: ^7.3f}|{: ^7.3f}|{: ^10s}|{: ^9d}"
        self.iteration = 0
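
# A minimal usage sketch (not part of the original excerpt). The class owning
# the constructor above is not named in this excerpt; 'Questioning' below is a
# hypothetical placeholder for it, and the data file mirrors the examples
# higher up.
#
#     alts = dr.open_raw('data/EPI/raw.csv')[0]
#     procedure = Questioning(alts, seed=0, alt_num=30, ref_number=4,
#                             pts_per_random_it=200, random_add_it=500,
#                             divide_it=5, desired_points=3000)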