Code example #1
# imports assumed from the surrounding test module; remove_algorithm_info and
# remove_solver_output are helper functions defined elsewhere in that module
import pytest
from pathlib import Path

from abcvoting.output import output, WARNING


@pytest.fixture
def check_output(capfd, request):
    """
    Pytest fixture to compare output (stdout) with stored text file.

    The output might depend on installed packages and might need to be adjusted
    to make the test work on all platforms.
    If a test fails, the actual output is copied to a file called <testname>.new, so it should
    be easy to accept changes by `mv expected_output/<testname>.new expected_output/<testname>`.
    """
    # reset verbosity in case it has been modified elsewhere (just paranoia)
    output.set_verbosity(WARNING)

    yield

    # reset verbosity again, because the examples modify the verbosity level
    output.set_verbosity(WARNING)

    stdout = capfd.readouterr().out
    test_name = request.node.name
    fname = Path(__file__).parent / "expected_output" / test_name
    try:
        with open(fname, encoding="utf8") as file:
            expected_output = file.read()
        expected_output = remove_algorithm_info(str(expected_output))
    except FileNotFoundError:
        expected_output = None

    stdout = remove_solver_output(str(stdout))
    stdout = remove_algorithm_info(stdout)

    if expected_output != stdout:
        with open(f"{fname}.new", "w", encoding="utf8") as file:
            file.write(stdout)

    assert expected_output == stdout, f"Unexpected output, output written to {fname}.new"
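
Any test in the module can use the fixture simply by listing it as an argument; after the test body runs, whatever was printed to stdout is compared against expected_output/<testname>. Below is a minimal, purely illustrative sketch (the profile and the compute_pav call are not from the original test module, and Profile, abcrules, and INFO are assumed to be imported there):

def test_example_output(check_output):
    output.set_verbosity(INFO)         # the test sets its own verbosity
    profile = Profile(2)
    profile.add_voters([[0], [0, 1]])
    # everything printed here is compared against expected_output/test_example_output
    abcrules.compute_pav(profile, 1)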
Code example #2
File: properties.py Project: martinlackner/abcvoting
def full_analysis(profile, committee):
    """
    Test all implemented properties for the given committee.

    Returns a dictionary with the following keys: "pareto", "jr", "pjr", and "ejr".
    The values are `True` or `False`, depending on whether the respective property is satisfied.

    Parameters
    ----------
    profile : abcvoting.preferences.Profile
        A profile.
    committee : iterable of int
        A committee.

    Returns
    -------
    dict
    """
    results = {}

    # temporarily no output
    current_verbosity = output.verbosity
    output.set_verbosity(WARNING)

    results["pareto"] = check_pareto_optimality(profile, committee)
    results["jr"] = check_JR(profile, committee)
    results["pjr"] = check_PJR(profile, committee)
    results["ejr"] = check_EJR(profile, committee)

    description = {
        "pareto": "Pareto optimality",
        "jr": "Justified representation (JR)",
        "pjr": "Proportional justified representation (PJR)",
        "ejr": "Extended justified representation (EJR)",
    }

    # restore output verbosity
    output.set_verbosity(current_verbosity)

    for prop, value in results.items():
        output.info(f"{description[prop]:50s} : {value}")

    return results
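
A brief usage sketch (illustrative only, not part of properties.py; the profile and the PAV call are assumptions): full_analysis can be applied to any committee, for instance one computed by a rule from abcrules, and the returned dictionary inspected directly.

from abcvoting.preferences import Profile
from abcvoting import abcrules, properties

profile = Profile(num_cand=4)
profile.add_voters([[0, 1], [0, 1], [1, 2], [3]])
committee = abcrules.compute_pav(profile, 2)[0]  # first winning committee

results = properties.full_analysis(profile, committee)
print(results)  # dict with keys "pareto", "jr", "pjr", "ejr" mapping to booleans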
Code example #3
"""
Example 2.5 (PAV, seq-PAV, revseq-PAV).

From "Multi-Winner Voting with Approval Preferences"
by Martin Lackner and Piotr Skowron
https://arxiv.org/abs/2007.01795
"""

from abcvoting import abcrules
from abcvoting.preferences import Profile, Voter
from abcvoting import misc
from abcvoting.output import output, DETAILS

output.set_verbosity(DETAILS)

print(misc.header("Example 5", "*"))

# Approval profile
num_cand = 4
a, b, c, d = range(4)  # a = 0, b = 1, c = 2, ...
cand_names = "abcd"

approval_sets = [[a, b]] * 3 + [[a, d]] * 6 + [[b]] * 4 + [[c]] * 5 + [[c, d]] * 5
profile = Profile(num_cand, cand_names=cand_names)
profile.add_voters(approval_sets)

print(misc.header("Input:"))
print(profile.str_compact())

committees_pav = abcrules.compute_pav(profile, 2)
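
# The excerpt stops after PAV, although the example's title also covers seq-PAV and
# revseq-PAV. A plausible continuation, assuming the analogous helpers
# abcrules.compute_seqpav and abcrules.compute_revseqpav, would be:
committees_seqpav = abcrules.compute_seqpav(profile, 2)
committees_revseqpav = abcrules.compute_revseqpav(profile, 2)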
Code example #4
"""
Compute winning committees with PAV.
"""

from abcvoting.preferences import Profile
from abcvoting import abcrules
from abcvoting.output import output, INFO

output.set_verbosity(INFO)

profile = Profile(num_cand=5)
profile.add_voters([{0, 1, 2}, {0, 1}, {0, 1}, {1, 2}, {3, 4}, {3, 4}])
committeesize = 3
print(f"Computing winning committees of size {committeesize}\n"
      f"with the Proportional Approval Voting (PAV) rule\n"
      f"given the following {profile}")
committees = abcrules.compute_pav(profile, committeesize)
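
# Illustrative follow-up, not part of the original script: at INFO verbosity
# compute_pav already prints its result, but the returned committees can also
# be inspected directly.
print(f"PAV found {len(committees)} winning committee(s):")
for committee in committees:
    print(sorted(committee))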
Code example #5
"""
Unit tests for abcvoting/properties.py.
"""

import pytest
import os

import abcvoting.misc
from abcvoting.output import DETAILS, output
from abcvoting.preferences import Profile
from abcvoting import abcrules, properties, fileio

# set verbosity to DETAILS to increase unittest coverage
output.set_verbosity(verbosity=DETAILS)


# Test from literature: Lackner and Skowron 2020
# With the given input profile, the committee returned by the Monroe rule
# is not Pareto optimal
@pytest.mark.parametrize(
    "algorithm",
    ["brute-force",
     pytest.param("gurobi", marks=pytest.mark.gurobipy)])
def test_pareto_optimality_methods(algorithm):
    # profile with 4 candidates: a, b, c, d
    profile = Profile(4)

    # add voters in the profile
    profile.add_voters([[0]] * 2 + [[0, 2]] + [[0, 3]] + [[1, 2]] * 10 +
                       [[1, 3]] * 10)
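
    # The excerpt breaks off here. A hedged sketch of how the test presumably
    # continues; the Monroe rule id, the committee size, and the algorithm keyword
    # of check_pareto_optimality are assumptions, not taken from the source:
    committees = abcrules.compute("monroe", profile, 3, resolute=True)
    assert not properties.check_pareto_optimality(
        profile, committees[0], algorithm=algorithm
    )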
Code example #6
File: test_abcrules.py Project: lumbric/abcvoting
def test_output(capfd, rule_id, algorithm, resolute, verbosity):
    if algorithm == "fastest":
        # not necessary: output for "fastest" is the same as whatever algorithm
        # is selected as fastest (and "fastest" depends on the available solvers)
        return

    if algorithm == "cvxpy_glpk_mi":
        # TODO unfortunately GLPK_MI prints "Long-step dual simplex will be used" to stderr and it
        #  would be very complicated to capture this on all platforms reliably, changing
        #  sys.stderr doesn't help.
        #  This seems to be fixed in GLPK 5.0 but not in GLPK 4.65. For some weird reason this
        #  test succeeds and does not need to be skipped when using conda-forge, although the
        #  version from conda-forge is given as glpk 4.65 he80fd80_1002.
        #  This could help to introduce a workaround: https://github.com/xolox/python-capturer
        #  Sage math is fighting the same problem: https://trac.sagemath.org/ticket/24824
        pytest.skip("GLPK_MI prints something to stderr, not easy to capture")

    output.set_verbosity(verbosity=verbosity)

    try:
        profile = Profile(2)
        profile.add_voters([[0]])
        committeesize = 1

        committees = abcrules.compute(
            rule_id, profile, committeesize, algorithm=algorithm, resolute=resolute
        )
        out = str(capfd.readouterr().out)

        # remove unwanted solver output
        out = remove_solver_output(out)

        if verbosity >= WARNING:
            assert out == ""
        else:
            assert len(out) > 0
            rule = abcrules.get_rule(rule_id)
            start_output = misc.header(rule.longname) + "\n"
            if resolute and rule.resolute_values[0] is False:
                # only if irresolute is default but resolute is chosen
                start_output += "Computing only one winning committee (resolute=True)\n\n"
            if not resolute and rule.resolute_values[0] is True:
                # only if resolute is default but resolute=False is chosen
                start_output += (
                    "Computing all possible winning committees for any tiebreaking order\n"
                    " (aka parallel universes tiebreaking) (resolute=False)\n\n"
                )
            if verbosity <= DETAILS:
                start_output += "Algorithm: " + abcrules.ALGORITHM_NAMES[algorithm] + "\n"
            if verbosity <= DEBUG:
                assert start_output in out
            else:
                assert out.startswith(start_output)
            end_output = (
                f"{misc.str_committees_header(committees, winning=True)}\n"
                f"{misc.str_sets_of_candidates(committees, cand_names=profile.cand_names)}\n"
            )
            if verbosity == INFO:
                assert out.endswith(end_output)
            else:
                assert end_output in out

    finally:
        output.set_verbosity(verbosity=WARNING)
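
The rule_id, algorithm, resolute, and verbosity arguments are supplied by pytest parametrization defined elsewhere in test_abcrules.py. A rough, illustrative sketch of such a driver follows; the actual parametrization in the project is far more extensive, and the rule/algorithm pair shown here is an assumption:

@pytest.mark.parametrize("verbosity", [DEBUG, DETAILS, INFO, WARNING])
@pytest.mark.parametrize("resolute", [True, False])
@pytest.mark.parametrize("rule_id, algorithm", [("pav", "brute-force")])
def test_output(capfd, rule_id, algorithm, resolute, verbosity):
    ...  # body as shown above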