Esempio n. 1
0
def test_random_trace_equilibria(base):
    """Traced equilibria of random games keep low regret along the path."""
    game0 = gamegen.poly_aggfn(base.num_role_players, base.num_role_strats, 6)
    game1 = gamegen.poly_aggfn(base.num_role_players, base.num_role_strats, 6)

    # Trace from each endpoint game toward the other
    for source, start, end in [(game0, 0, 1), (game1, 1, 0)]:
        candidates = source.trim_mixture_support(nash.mixed_nash(
            source, regret_thresh=1e-4))
        for eqm in candidates:
            if regret.mixture_regret(source, eqm) > 1e-3:
                # trimmed equilibrium had too high of regret...
                continue  # pragma: no cover
            probs, mixes = trace.trace_equilibrium(
                game0, game1, start, eqm, end)
            for prob, mix in zip(probs, mixes):
                mixed_game = rsgame.mix(game0, game1, prob)
                assert regret.mixture_regret(mixed_game, mix) <= 1.1e-3
Esempio n. 2
0
def test_at_least_one():
    """at_least_one must return a candidate even when the search fails."""
    # Equilibrium of game is not at a starting point for equilibria finding
    game = gamegen.sym_2p2s_known_eq(1/math.sqrt(2))
    # Zero iterations means replicator dynamics never converges
    no_iter = {'max_iters': 0}
    eqa = nash.mixed_nash(game, processes=1, replicator=no_iter)
    assert eqa.size == 0, "found an equilibrium normally"
    eqa = nash.mixed_nash(
        game, replicator=no_iter, processes=1, at_least_one=True)
    assert eqa.shape[0] == 1, "at_least_one didn't return anything"
Esempio n. 3
0
def test_mixed_roshambo(methods):
    """Mixed nash on rock-paper-scissors finds the unique uniform mixture."""
    game = gamegen.rock_paper_scissors()
    eqa = nash.mixed_nash(game, dist_thresh=1e-2, processes=1, **methods)
    num_found = eqa.shape[0]
    assert num_found == 1, (
        "didn't find right number of equilibria in roshambo")
    assert np.allclose(1/3, eqa), (
        "roshambo equilibria wasn't uniform")
Esempio n. 4
0
def test_hard_roshambo():
    """Find the known skewed equilibrium of roshambo with asymmetric losses."""
    game = gamegen.rock_paper_scissors(loss=[-2, -3, -3])
    eqa = nash.mixed_nash(game)
    assert eqa.shape[0] == 1, \
        "didn't find right number of equilibria in roshambo"
    # With asymmetric losses the equilibrium is skewed away from uniform;
    # the old message claiming it "wasn't uniform" was a copy-paste error.
    assert np.allclose([0.3125, 0.40625, 0.28125], eqa), \
        "didn't find the known skewed roshambo equilibrium"
Esempio n. 5
0
async def run(args):
    """Brute force entry point"""
    # Build the scheduler and any player reduction from CLI arguments
    sched = await schedspec.parse_scheduler(args.scheduler)
    red, red_players = utils.parse_reduction(sched, args)

    # Restriction mask over strategies: everything allowed unless a
    # restriction file was supplied
    rest = (
        np.ones(sched.num_strats, bool)
        if args.restrict is None
        else sched.restriction_from_json(json.load(args.restrict))
    )

    # Sample all profiles needed for deviation payoffs of the restriction
    async with sched:
        data = await schedgame.schedgame(sched, red, red_players).get_deviation_game(
            rest
        )

    # now find equilibria
    eqa = sched.trim_mixture_support(
        restrict.translate(
            nash.mixed_nash(
                data.restrict(rest),
                regret_thresh=args.regret_thresh,
                dist_thresh=args.dist_thresh,
                at_least_one=args.one,
                min_reg=args.min_reg,
            ),
            rest,
        ),
        thresh=args.supp_thresh,
    )
    # Pair each equilibrium with its regret and best-response (role, strat)
    reg_info = []
    for eqm in eqa:
        gains = regret.mixture_deviation_gains(data, eqm)
        bri = np.argmax(gains)
        reg_info.append((gains[bri],) + sched.role_strat_names[bri])

    # NOTE(review): summary is logged at error level, presumably so it shows
    # at any log verbosity -- confirm this is intentional
    logging.error(
        "brute sampling finished finding %d equilibria:\n%s",
        eqa.shape[0],
        "\n".join(
            "{:d}) {} with regret {:g} to {} {}".format(
                i, sched.mixture_to_repr(eqm), reg, role, strat
            )
            for i, (eqm, (reg, role, strat)) in enumerate(zip(eqa, reg_info), 1)
        ),
    )

    # Machine-readable output: one json object per equilibrium
    json.dump(
        [
            {
                "equilibrium": sched.mixture_to_json(eqm),
                "regret": reg,
                "best_response": {"role": role, "strat": strat},
            }
            for eqm, (reg, role, strat) in zip(eqa, reg_info)
        ],
        args.output,
    )
    args.output.write("\n")
Esempio n. 6
0
def test_old_nash():
    """Test old nash functions appropriately"""
    eq_prob = 1 / np.sqrt(2)
    game = gamegen.sym_2p2s_known_eq(eq_prob)
    eqa = nash.mixed_nash(game, processes=2)
    assert eqa.shape == (1, 2)
    expected = [eq_prob, 1 - eq_prob]
    assert np.allclose(eqa[0], expected, atol=1e-3)
Esempio n. 7
0
def test_old_nash():
    """Legacy mixed_nash recovers the known symmetric 2x2 equilibrium."""
    target = 1 / np.sqrt(2)
    game = gamegen.sym_2p2s_known_eq(target)
    eqa = nash.mixed_nash(game, processes=2)
    assert eqa.shape == (1, 2)
    found, = eqa
    assert np.allclose(found, [target, 1 - target], atol=1e-3)
Esempio n. 8
0
def test_fixed_point_always_eq(_):
    """Fixed point search should always locate some equilibrium."""
    # Draw a random game configuration (RNG call order matters for replay)
    num_roles = np.random.randint(1, 4)
    players = np.random.randint(2, 5, num_roles)
    strategies = np.random.randint(2, 5, num_roles)
    num_functions = np.random.randint(2, 8)
    agame = agggen.random_aggfn(players, strategies, num_functions)
    eqa = nash.mixed_nash(agame, fixedpoint=None)
    assert eqa.size, "didn't find equilibrium but should always find one"
Esempio n. 9
0
def test_mixed_known_eq(methods, eq_prob):
    """Search recovers the known equilibrium of a symmetric 2x2 game."""
    game = gamegen.sym_2p2s_known_eq(eq_prob)
    eqa = nash.mixed_nash(game, processes=1, **methods)
    assert eqa.shape[0] >= 1, "didn't find equilibrium"
    expected = [eq_prob, 1 - eq_prob]
    matches = np.isclose(eqa, expected, atol=1e-3, rtol=1e-3).all(1)
    assert matches.any(), \
        "didn't find correct equilibrium {} instead of {}".format(
            eqa, expected)
Esempio n. 10
0
def test_old_nash_at_least_one():
    """Test old nash functions appropriately"""
    eq_prob = 1 / np.sqrt(2)
    game = gamegen.sym_2p2s_known_eq(eq_prob)
    # Replicator can't converge in zero iterations, so only the
    # at_least_one fallback can produce a candidate
    eqa = nash.mixed_nash(
        game, replicator=dict(max_iters=0), at_least_one=True)
    assert eqa.shape == (1, 2)
    assert np.allclose(eqa[0], [eq_prob, 1 - eq_prob], atol=1e-3)
Esempio n. 11
0
def test_old_nash_min_reg():
    """Test old nash functions appropriately"""
    eq_prob = 1 / np.sqrt(2)
    game = gamegen.sym_2p2s_known_eq(eq_prob)
    # With zero iterations nothing converges; min_reg returns the best
    # candidate anyway, which therefore has nontrivial regret
    eqa = nash.mixed_nash(
        game, replicator=dict(max_iters=0), min_reg=True)
    assert eqa.shape == (1, 2)
    candidate, = eqa
    assert regret.mixture_regret(game, candidate) > 1e-3
Esempio n. 12
0
def test_old_nash_min_reg():
    """min_reg yields a (high-regret) candidate when search can't converge."""
    target = 1 / np.sqrt(2)
    game = gamegen.sym_2p2s_known_eq(target)
    eqa = nash.mixed_nash(game, replicator=dict(max_iters=0), min_reg=True)
    assert eqa.shape == (1, 2)
    mix, = eqa
    mix_regret = regret.mixture_regret(game, mix)
    assert mix_regret > 1e-3
Esempio n. 13
0
def main(args):
    """Entry point for learning script"""
    # Record warnings so a poor-fit warning can be surfaced in the report
    with warnings.catch_warnings(record=True) as warns:
        game = learning.rbfgame_train(gamereader.load(args.input))
    methods = {'replicator': {'max_iters': args.max_iters,
                              'converge_thresh': args.converge_thresh},
               'optimize': {}}

    # Find mixed equilibria, dropping strategies with negligible probability
    mixed_equilibria = game.trim_mixture_support(
        nash.mixed_nash(game, regret_thresh=args.regret_thresh,
                        dist_thresh=args.dist_thresh, processes=args.processes,
                        at_least_one=args.one, **methods),
        thresh=args.supp_thresh)

    # Pair each equilibrium with its regret for the report
    equilibria = [(eqm, regret.mixture_regret(game, eqm))
                  for eqm in mixed_equilibria]

    # Output game
    args.output.write('Game Learning\n')
    args.output.write('=============\n')
    args.output.write(str(game))
    args.output.write('\n\n')

    # Surface the specific "lengths at bounds" UserWarning from training,
    # which indicates the learned representation may be poor
    if any(w.category == UserWarning and
           w.message.args[0] == (
               'some lengths were at their bounds, this may indicate a poor '
               'fit') for w in warns):
        args.output.write('Warning\n')
        args.output.write('=======\n')
        args.output.write(
            'Some length scales were at their limit. This is a strong\n'
            'indication that a good representation was not found.\n')
        args.output.write('\n\n')

    # Output Equilibria
    args.output.write('Equilibria\n')
    args.output.write('----------\n')

    if equilibria:
        args.output.write('Found {:d} equilibri{}\n\n'.format(
            len(equilibria), 'um' if len(equilibria) == 1 else 'a'))
        for i, (eqm, reg) in enumerate(equilibria, 1):
            args.output.write('Equilibrium {:d}:\n'.format(i))
            args.output.write(game.mixture_to_str(eqm))
            args.output.write('\nRegret: {:.4f}\n\n'.format(reg))
    else:
        args.output.write('Found no equilibria\n\n')
    args.output.write('\n')

    # Output json data
    args.output.write('Json Data\n')
    args.output.write('=========\n')
    json_data = {
        'equilibria': [game.mixture_to_json(eqm) for eqm, _ in equilibria]}
    json.dump(json_data, args.output)
    args.output.write('\n')
Esempio n. 14
0
def test_old_nash_at_least_one():
    """at_least_one still returns the known equilibrium without convergence."""
    target = 1 / np.sqrt(2)
    game = gamegen.sym_2p2s_known_eq(target)
    eqa = nash.mixed_nash(game,
                          replicator=dict(max_iters=0),
                          at_least_one=True)
    assert eqa.shape == (1, 2)
    found, = eqa
    expected = [target, 1 - target]
    assert np.allclose(found, expected, atol=1e-3)
Esempio n. 15
0
def test_mixed_prisoners_dilemma(methods):
    """Search finds the pure defect equilibrium of prisoners dilemma."""
    game = gamegen.prisoners_dilemma()
    eqa = nash.mixed_nash(game, dist_thresh=1e-3, processes=1, **methods)

    assert eqa.shape[0] >= 1, \
        "didn't find at least one equilibria in pd {}".format(eqa)
    low_regret = all(
        regret.mixture_regret(game, eqm) < 1e-3 for eqm in eqa)
    assert low_regret, "returned equilibria with high regret"
    expected = [0, 1]
    hits = np.isclose(eqa, expected, atol=1e-3, rtol=1e-3).all(1)
    assert hits.any(), "didn't find pd equilibrium {}".format(eqa)
Esempio n. 16
0
def test_hard_nash():
    """Equilibrium search succeeds on a known hard game instance."""
    with open('test/hard_nash_game_1.json') as fil:
        game, conv = gameio.read_game(json.load(fil))
    eqa = nash.mixed_nash(game)
    expected = conv.from_prof_json({
        'background': {
            'markov:rmin_30000_rmax_30000_thresh_0.001_priceVarEst_1e6':
            0.5407460907477768,
            'markov:rmin_500_rmax_1000_thresh_0.8_priceVarEst_1e9':
            0.45925390925222315
        },
        'hft': {
            'trend:trendLength_5_profitDemanded_50_expiration_50': 1.0
        }
    })
    trimmed = game.trim_mixture_support(eqa)
    matches = np.isclose(trimmed, expected, atol=1e-4, rtol=1e-4).all(1)
    assert matches.any(), \
        "Didn't find equilibrium in known hard instance"
Esempio n. 17
0
def main(args):
    """Dispatch on args.type to find equilibria and dump them as json.

    Reads a game from ``args.input`` and writes a json list of
    profiles/mixtures to ``args.output``.
    """
    game, serial = gameio.read_game(json.load(args.input))

    if args.type == 'pure':
        # Pure-strategy equilibria; optionally fall back to the minimum
        # regret profile when none exist
        equilibria = nash.pure_nash(game, args.regret)
        if args.one and not equilibria:
            equilibria = nash.min_regret_profile(game)[None]

    elif args.type == 'mixed':
        rep_args = {
            'max_iters': args.max_iterations,
            'converge_thresh': args.convergence
        }
        equilibria = nash.mixed_nash(game, args.regret, args.distance,
                                     random_restarts=args.random_mixtures,
                                     grid_points=args.grid_points,
                                     at_least_one=args.one,
                                     processes=args.processes,
                                     replicator=rep_args, optimize={})
        equilibria = game.trim_mixture_support(equilibria, args.support)

    elif args.type == 'min-reg-prof':
        # Single minimum-regret pure profile ([None] adds a leading axis)
        equilibria = nash.min_regret_profile(game)[None]

    elif args.type == 'min-reg-grid':
        # Minimum-regret mixture over a grid of mixtures
        equilibria = nash.min_regret_grid_mixture(
            game, args.grid_points)[None]
        equilibria = game.trim_mixture_support(equilibria, args.support)

    elif args.type == 'min-reg-rand':
        # Minimum-regret mixture over random candidate mixtures
        equilibria = nash.min_regret_rand_mixture(
            game, args.random_mixtures)[None]
        equilibria = game.trim_mixture_support(equilibria, args.support)

    elif args.type == 'rand':
        # Just sample random mixtures without any equilibrium criterion
        equilibria = game.random_mixtures(args.random_mixtures)
        equilibria = game.trim_mixture_support(equilibria, args.support)

    else:
        raise ValueError('Unknown command given: {0}'.format(args.type))  # pragma: no cover # noqa

    json.dump([serial.to_prof_json(eqm) for eqm in equilibria], args.output)
    args.output.write('\n')
Esempio n. 18
0
def test_random_trace_interpolate(game0, game1): # pylint: disable=too-many-locals
    """Test random trace interpolation"""
    # Find equilibria of a random convex mixture of the two games
    prob = np.random.random()
    eqa = game0.trim_mixture_support(nash.mixed_nash(
        rsgame.mix(game0, game1, prob),
        regret_thresh=1e-4))
    for eqm in eqa:
        if regret.mixture_regret(rsgame.mix(game0, game1, prob), eqm) > 1e-3:
            # trimmed equilibrium had too high of regret...
            continue  # pragma: no cover

        for target in [0, 1]:
            # Test that interpolate recovers missing equilibria
            probs, mixes = trace.trace_equilibrium(
                game0, game1, prob, eqm, target)
            if probs.size < 3:
                # not enough to test leave one out
                continue # pragma: no cover

            # Leave-one-out: drop an interior point and check interpolation
            # reconstructs it from its neighbors
            start, interp, end = np.sort(np.random.choice(
                probs.size, 3, replace=False))
            interp_mix, = trace.trace_interpolate(
                game0, game1, [probs[start], probs[end]],
                [mixes[start], mixes[end]], [probs[interp]])
            assert np.allclose(interp_mix, mixes[interp], rtol=1e-2, atol=2e-2)

            # Test interp at first
            mix, = trace.trace_interpolate(
                game0, game1, probs, mixes, [probs[0]])
            assert np.allclose(mix, mixes[0], rtol=1e-2, atol=2e-2)

            # Test interp at last
            mix, = trace.trace_interpolate(
                game0, game1, probs, mixes, [probs[-1]])
            assert np.allclose(mix, mixes[-1], rtol=1e-2, atol=2e-2)

            # Test random t
            p_interp = np.random.uniform(probs[0], probs[-1])
            mix, = trace.trace_interpolate(
                game0, game1, probs, mixes, [p_interp])
            assert regret.mixture_regret(rsgame.mix(
                game0, game1, p_interp), mix) <= 1.1e-3
Esempio n. 19
0
 def analyze_subgame(unsched_subgames, sub):
     """Process a subgame: find equilibria or schedule more observations.

     Relies on enclosing-scope names (nash, subgame, serial, log,
     regret_thresh, reschedule_limit, observation_increment, all_devs,
     add_mixture) -- this is a closure, not a standalone function.
     """
     if sub.is_complete():
         subg = sub.get_subgame()
         sub_eqa = nash.mixed_nash(subg, regret_thresh=regret_thresh)
         eqa = subgame.translate(subg.trim_mixture_support(sub_eqa),
                                 sub.subgame_mask)
         if eqa.size == 0:  # No equilibria
             if sub.counts < reschedule_limit * observation_increment:
                 # Still under the resampling budget: request more data
                 log.info(
                     'Found no equilibria in subgame:\n%s\n',
                     json.dumps(
                         {r: list(s) for r, s
                          in serial.to_prof_json(sub.subgame_mask).items()},
                         indent=2))
                 sub.update_counts(sub.counts + observation_increment)
                 unsched_subgames.append(sub)
             else:
                 # FIX: `subm` was an undefined name, raising NameError on
                 # this path; use sub.subgame_mask like the parallel branch
                 log.error(
                     'Failed to find equilibria in subgame:\n%s\n',
                     json.dumps(
                         {r: list(s)
                          for r, s in serial.to_prof_json(
                              sub.subgame_mask).items()},
                         indent=2))
         else:
             log.debug(
                 'Found candidate equilibria:\n%s\nin subgame:\n%s\n',
                 json.dumps(list(map(serial.to_prof_json, eqa)), indent=2),
                 json.dumps(
                     {r: list(s) for r, s in
                      serial.to_prof_json(sub.subgame_mask).items()},
                     indent=2))
             if all_devs:
                 for eqm in eqa:
                     add_mixture(eqm)
             else:
                 for eqm in eqa:
                     add_mixture(eqm, 0)
     else:
         # Data still incomplete; requeue for later processing
         unsched_subgames.append(sub)
Esempio n. 20
0
def test_old_nash_failure():
    """Test old nash functions appropriately"""
    eq_prob = 1 / np.sqrt(2)
    game = gamegen.sym_2p2s_known_eq(eq_prob)
    # Zero replicator iterations means nothing can converge
    eqa = nash.mixed_nash(game, replicator=dict(max_iters=0))
    assert eqa.size == 0
Esempio n. 21
0
def test_nash_finding(players, facilities, required):
    """Test that nash works on congestion games"""
    cgame = congestion.gen_congestion_game(players, facilities, required)
    found = nash.mixed_nash(cgame)
    assert found.size > 0, "didn't find any equilibria"
Esempio n. 22
0
def test_nash_finding(players, strategies, functions, by_role):
    """Nash search succeeds on random aggregation-function games."""
    agame = agggen.random_aggfn(
        players, strategies, functions, by_role=by_role)
    found = nash.mixed_nash(agame)
    assert found.size > 0, "didn't find any equilibria"
Esempio n. 23
0
def main(args):
    """Learn a GP game, find its equilibria, and write a full text report.

    Reads a game from ``args.input``; writes the game, max social welfare,
    equilibria with regrets, and a trailing json payload to ``args.output``.
    """
    game, serial = gameio.read_game(json.load(args.input))

    # create gpgame
    lgame = gpgame.PointGPGame(game)

    # mixed strategy nash equilibria search
    methods = {
        'replicator': {
            'max_iters': args.max_iters,
            'converge_thresh': args.converge_thresh}}

    # Search the learned game, then trim negligible-probability strategies
    mixed_equilibria = game.trim_mixture_support(
        nash.mixed_nash(lgame, regret_thresh=args.regret_thresh,
                        dist_thresh=args.dist_thresh, processes=args.processes,
                        at_least_one=True, **methods),
        args.supp_thresh)

    # Regret is measured against the learned game, not the raw data game
    equilibria = [(eqm, regret.mixture_regret(lgame, eqm))
                  for eqm in mixed_equilibria]

    # Output game
    args.output.write('Game Learning\n')
    args.output.write('=============\n')
    args.output.write(game.to_str(serial))
    args.output.write('\n\n')

    # Output social welfare
    args.output.write('Social Welfare\n')
    args.output.write('--------------\n')
    welfare, profile = game.get_max_social_welfare()
    if profile is None:
        args.output.write('There was no profile with complete payoff data\n\n')
    else:
        args.output.write('\nMaximum social welfare profile:\n')
        args.output.write(serial.to_prof_printstring(profile))
        args.output.write('Welfare: {:.4f}\n\n'.format(welfare))

        # Per-role welfare breakdown only makes sense with multiple roles
        if game.num_roles > 1:
            for role, welfare, profile in zip(
                    serial.role_names, *game.get_max_social_welfare(True)):
                args.output.write('Maximum "{}" welfare profile:\n'.format(
                    role))
                args.output.write(serial.to_prof_printstring(profile))
                args.output.write('Welfare: {:.4f}\n\n'.format(welfare))

    args.output.write('\n')

    # Output Equilibria
    args.output.write('Equilibria\n')
    args.output.write('----------\n')

    if equilibria:
        args.output.write('Found {:d} equilibri{}\n\n'.format(
            len(equilibria), 'um' if len(equilibria) == 1 else 'a'))
        for i, (eqm, reg) in enumerate(equilibria, 1):
            args.output.write('Equilibrium {:d}:\n'.format(i))
            args.output.write(serial.to_prof_printstring(eqm))
            args.output.write('Regret: {:.4f}\n\n'.format(reg))
    else:
        args.output.write('Found no equilibria\n\n')
    args.output.write('\n')

    # Output json data
    args.output.write('Json Data\n')
    args.output.write('=========\n')
    json_data = {
        'equilibria': [serial.to_prof_json(eqm) for eqm, _ in equilibria]}
    json.dump(json_data, args.output)
    args.output.write('\n')
Esempio n. 24
0
def test_mixed_nash(methods, strategies):
    """With at_least_one, search must always return some equilibrium."""
    game = gamegen.role_symmetric_game(1, strategies)
    found = nash.mixed_nash(game, at_least_one=True, processes=1, **methods)
    assert found.size > 0, "Didn't find an equilibria with at_least_one on"
Esempio n. 25
0
def main(args):
    """Find equilibria of a pickled GP game and dump them as json mixtures."""
    # The input stream holds two pickles: the base game, then its serializer
    base_game = pickle.load(args.input)
    serial = pickle.load(args.input)
    game = gpgame.PointGPGame(base_game)
    equilibria = nash.mixed_nash(game, replicator=None)
    json.dump([serial.to_mix_json(eqm) for eqm in equilibria], args.output)
    args.output.write('\n')
Esempio n. 26
0
def test_old_nash_failure():
    """Without iterations or fallbacks, search returns no equilibria."""
    target = 1 / np.sqrt(2)
    game = gamegen.sym_2p2s_known_eq(target)
    found = nash.mixed_nash(game, replicator=dict(max_iters=0))
    assert not found.size
Esempio n. 27
0
def main(args):
    """Analyze a game: reduce, prune, search subgames, and report findings.

    Reads a game from ``args.input``, optionally applies a DPR reduction and
    iterated dominance elimination, searches (maximal) subgames for mixed
    equilibria, classifies candidates as confirmed / unconfirmed / needing
    further exploration, and writes a text report plus json payload to
    ``args.output``.
    """
    game, serial = gameio.read_game(json.load(args.input))

    if args.dpr:
        # args.dpr alternates role names with reduced player counts
        red_players = serial.from_role_json(dict(zip(
            args.dpr[::2], map(int, args.dpr[1::2]))))
        red = reduction.DeviationPreserving(game.num_strategies,
                                            game.num_players, red_players)
        redgame = red.reduce_game(game, True)
    else:
        redgame = game
    redserial = serial

    if args.dominance:
        # Remove iteratively strictly dominated strategies before searching
        domsub = dominance.iterated_elimination(redgame, 'strictdom')
        redgame = subgame.subgame(redgame, domsub)
        redserial = subgame.subserializer(redserial, domsub)

    if args.subgames:
        subgames = subgame.maximal_subgames(redgame)
    else:
        # Single "subgame" covering every strategy
        subgames = np.ones(redgame.num_role_strats, bool)[None]

    methods = {
        'replicator': {
            'max_iters': args.max_iters,
            'converge_thresh': args.converge_thresh},
        'optimize': {}}
    noeq_subgames = []
    candidates = []
    for submask in subgames:
        subg = subgame.subgame(redgame, submask)
        subeqa = nash.mixed_nash(
            subg, regret_thresh=args.regret_thresh,
            dist_thresh=args.dist_thresh, processes=args.processes, **methods)
        eqa = subgame.translate(subg.trim_mixture_support(
            subeqa, supp_thresh=args.supp_thresh), submask)
        if eqa.size:
            # Deduplicate candidates that are within dist_thresh of each other
            for eqm in eqa:
                if not any(linalg.norm(eqm - eq) < args.dist_thresh
                           for eq in candidates):
                    candidates.append(eqm)
        else:
            noeq_subgames.append(submask)  # pragma: no cover

    # Classify each candidate by its deviation gains in the full reduced game
    equilibria = []
    unconfirmed = []
    unexplored = []
    for eqm in candidates:
        support = eqm > 0
        gains = regret.mixture_deviation_gains(redgame, eqm)
        # Largest gain within each role (nan means missing payoff data)
        role_gains = redgame.role_reduce(gains, ufunc=np.fmax)
        gain = np.nanmax(role_gains)

        if np.isnan(gains).any() and gain <= args.regret_thresh:
            # Not fully explored but might be good
            unconfirmed.append((eqm, gain))

        elif np.any(role_gains > args.regret_thresh):
            # There are deviations, did we explore them?
            dev_inds = ([np.argmax(gs == mg) for gs, mg
                         in zip(redgame.role_split(gains), role_gains)] +
                        redgame.role_starts)[role_gains > args.regret_thresh]
            for dind in dev_inds:
                # Support extended by the beneficial deviation; if no
                # searched subgame covers it, flag it as unexplored
                devsupp = support.copy()
                devsupp[dind] = True
                if not np.all(devsupp <= subgames, -1).any():
                    unexplored.append((devsupp, dind, gains[dind], eqm))

        else:
            # Equilibrium!
            equilibria.append((eqm, np.max(gains)))

    # Output Game
    args.output.write('Game Analysis\n')
    args.output.write('=============\n')
    args.output.write(serial.to_game_printstr(game))
    args.output.write('\n\n')
    if args.dpr is not None:
        args.output.write('With DPR reduction: ')
        args.output.write(' '.join(args.dpr))
        args.output.write('\n\n')
    if args.dominance:
        num = np.sum(~domsub)
        if num:
            args.output.write('Found {:d} dominated strateg{}\n'.format(
                num, 'y' if num == 1 else 'ies'))
            args.output.write(serial.to_subgame_printstr(~domsub))
            args.output.write('\n')
        else:
            args.output.write('Found no dominated strategies\n\n')
    if args.subgames:
        num = subgames.shape[0]
        if num:
            args.output.write(
                'Found {:d} maximal complete subgame{}\n\n'.format(
                    num, '' if num == 1 else 's'))
        else:
            args.output.write('Found no complete subgames\n\n')
    args.output.write('\n')

    # Output social welfare
    args.output.write('Social Welfare\n')
    args.output.write('--------------\n')
    welfare, profile = regret.max_pure_social_welfare(game)
    if profile is None:
        args.output.write('There was no profile with complete payoff data\n\n')
    else:
        args.output.write('\nMaximum social welfare profile:\n')
        args.output.write(serial.to_prof_printstr(profile))
        args.output.write('Welfare: {:.4f}\n\n'.format(welfare))

        # Per-role breakdown only for multi-role games
        if game.num_roles > 1:
            for role, welfare, profile in zip(
                    serial.role_names,
                    *regret.max_pure_social_welfare(game, True)):
                args.output.write('Maximum "{}" welfare profile:\n'.format(
                    role))
                args.output.write(serial.to_prof_printstr(profile))
                args.output.write('Welfare: {:.4f}\n\n'.format(welfare))

    args.output.write('\n')

    # Output Equilibria
    args.output.write('Equilibria\n')
    args.output.write('----------\n')
    if equilibria:
        args.output.write('Found {:d} equilibri{}\n\n'.format(
            len(equilibria), 'um' if len(equilibria) == 1 else 'a'))
        for i, (eqm, reg) in enumerate(equilibria, 1):
            args.output.write('Equilibrium {:d}:\n'.format(i))
            args.output.write(redserial.to_mix_printstr(eqm))
            args.output.write('Regret: {:.4f}\n\n'.format(reg))
    else:
        args.output.write('Found no equilibria\n\n')  # pragma: no cover
    args.output.write('\n')

    # Output No-equilibria Subgames
    args.output.write('No-equilibria Subgames\n')
    args.output.write('----------------------\n')
    if noeq_subgames:  # pragma: no cover
        args.output.write('Found {:d} no-equilibria subgame{}\n\n'.format(
            len(noeq_subgames), '' if len(noeq_subgames) == 1 else 's'))
        # Smallest-support subgames first
        noeq_subgames.sort(key=lambda x: x.sum())
        for i, subg in enumerate(noeq_subgames, 1):
            args.output.write('No-equilibria subgame {:d}:\n'.format(i))
            args.output.write(redserial.to_subgame_printstr(subg))
            args.output.write('\n')
    else:
        args.output.write('Found no no-equilibria subgames\n\n')
    args.output.write('\n')

    # Output Unconfirmed Candidates
    args.output.write('Unconfirmed Candidate Equilibria\n')
    args.output.write('--------------------------------\n')
    if unconfirmed:
        args.output.write('Found {:d} unconfirmed candidate{}\n\n'.format(
            len(unconfirmed), '' if len(unconfirmed) == 1 else 's'))
        # Sort by support size, then by regret bound
        unconfirmed.sort(key=lambda x: ((x[0] > 0).sum(), x[1]))
        for i, (eqm, reg_bound) in enumerate(unconfirmed, 1):
            args.output.write('Unconfirmed candidate {:d}:\n'.format(i))
            args.output.write(redserial.to_mix_printstr(eqm))
            args.output.write('Regret at least: {:.4f}\n\n'.format(reg_bound))
    else:
        args.output.write('Found no unconfirmed candidate equilibria\n\n')
    args.output.write('\n')

    # Output Unexplored Subgames
    args.output.write('Unexplored Best-response Subgames\n')
    args.output.write('---------------------------------\n')
    if unexplored:
        min_supp = min(supp.sum() for supp, _, _, _ in unexplored)
        args.output.write(
            'Found {:d} unexplored best-response subgame{}\n'.format(
                len(unexplored), '' if len(unexplored) == 1 else 's'))
        args.output.write(
            'Smallest unexplored subgame has support {:d}\n\n'.format(
                min_supp))

        # Smallest support first, then largest deviation gain first
        unexplored.sort(key=lambda x: (x[0].sum(), -x[2]))
        for i, (sub, dev, gain, eqm) in enumerate(unexplored, 1):
            args.output.write('Unexplored subgame {:d}:\n'.format(i))
            args.output.write(redserial.to_subgame_printstr(sub))
            args.output.write('{:.4f} for deviating to {} from:\n'.format(
                gain, redserial.strat_name(dev)))
            args.output.write(redserial.to_mix_printstr(eqm))
            args.output.write('\n')
    else:
        args.output.write('Found no unexplored best-response subgames\n\n')
    args.output.write('\n')

    # Output json data
    args.output.write('Json Data\n')
    args.output.write('=========\n')
    json_data = {
        'equilibria': [redserial.to_mix_json(eqm) for eqm, _ in equilibria]}
    json.dump(json_data, args.output)
    args.output.write('\n')