Example #1
0
 async def add_restriction(rest):
     """Schedule evaluation of a restriction, skipping repeats.

     Pure restrictions bypass equilibrium search entirely; otherwise the
     restricted game is scheduled and its equilibria are computed in the
     executor before their deviations are queued.
     """
     if not exp_restrictions.add(rest):
         # already queued by an earlier call
         return
     if agame.is_pure_restriction(rest):
         # A pure restriction is its own degenerate mixture, so no
         # equilibrium computation is needed
         return await add_deviations(
             rest, rest.astype(float), init_role_dev)
     data = await agame.get_restricted_game(rest)
     find_eqa = functools.partial(
         nash.mixed_equilibria, data, regret_thresh=regret_thresh,
         dist_thresh=dist_thresh, style=style, processes=1)
     reqa = await loop.run_in_executor(executor, find_eqa)
     if not reqa.size:
         logging.warning(
             "couldn't find equilibria in %s with restriction %s. This is "
             'likely due to high variance in payoffs which means '
             'quiesce should be re-run with more samples per profile. '
             'This could also be fixed by performing a more expensive '
             'equilibria search to always return one.', agame,
             agame.restriction_to_repr(rest))
         return
     trimmed = data.trim_mixture_support(reqa, thresh=support_thresh)
     eqa = restrict.translate(trimmed, rest)
     await asyncio.gather(*(
         add_deviations(rest, eqm, init_role_dev) for eqm in eqa))
Example #2
0
 def _rprofs(self, rest):
     """Return the full-game profiles needed for a restriction.

     Profiles of the reduced restricted game are expanded through the
     reduction and then translated back into full-game strategy space.
     """
     target = rsgame.empty_copy(self).restrict(rest)
     reduced_profs = self._rgame.restrict(rest).all_profiles()
     expanded = self._red.expand_profiles(target, reduced_profs)
     return restrict.translate(expanded, rest)
Example #3
0
async def run(args):
    """Brute force entry point"""
    sched = await schedspec.parse_scheduler(args.scheduler)
    red, red_players = utils.parse_reduction(sched, args)

    # Default to the full game when no explicit restriction was given
    if args.restrict is None:
        rest = np.ones(sched.num_strats, bool)
    else:
        rest = sched.restriction_from_json(json.load(args.restrict))

    async with sched:
        sgame = schedgame.schedgame(sched, red, red_players)
        data = await sgame.get_deviation_game(rest)

    # now find equilibria
    raw_eqa = nash.mixed_nash(
        data.restrict(rest), regret_thresh=args.regret_thresh,
        dist_thresh=args.dist_thresh, at_least_one=args.one,
        min_reg=args.min_reg)
    eqa = sched.trim_mixture_support(
        restrict.translate(raw_eqa, rest), thresh=args.supp_thresh)

    # Pair every equilibrium with its regret and best-response strategy
    reg_info = []
    for eqm in eqa:
        gains = regret.mixture_deviation_gains(data, eqm)
        bri = np.argmax(gains)
        reg_info.append((gains[bri],) + sched.role_strat_names[bri])

    summary = "\n".join(
        "{:d}) {} with regret {:g} to {} {}".format(
            i, sched.mixture_to_repr(eqm), reg, role, strat)
        for i, (eqm, (reg, role, strat))
        in enumerate(zip(eqa, reg_info), 1))
    logging.error(
        "brute sampling finished finding %d equilibria:\n%s",
        eqa.shape[0], summary)

    json.dump(
        [{"equilibrium": sched.mixture_to_json(eqm),
          "regret": reg,
          "best_response": {"role": role, "strat": strat}}
         for eqm, (reg, role, strat) in zip(eqa, reg_info)],
        args.output)
    args.output.write("\n")
Example #4
0
def test_rbfgame_restriction(base):  # pylint: disable=too-many-locals
    """Test rbf game restriction

    Verifies that restricting a trained rbf game agrees with the full
    game on payoffs and deviation payoffs, survives a json round trip,
    and can itself be restricted again.
    """
    game = gamegen.gen_num_profiles(base, 13)
    reggame = learning.rbfgame_train(game)

    rest = game.random_restriction()
    rreg = reggame.restrict(rest)

    # Restricted payoffs must match the full game's payoffs on the
    # supported strategies for the same (translated) profiles
    subpays = rreg.payoffs()
    fullpays = reggame.get_payoffs(restrict.translate(rreg.profiles(),
                                                      rest))[:, rest]
    assert np.allclose(subpays, fullpays)

    # Same agreement for deviation payoffs of sampled role profiles
    mix = rreg.random_mixture()
    sub_dev_profs = rreg.random_role_deviation_profiles(20, mix)
    sub_pays = rreg.get_dev_payoffs(sub_dev_profs)
    pays = reggame.get_dev_payoffs(restrict.translate(sub_dev_profs,
                                                      rest))[:, rest]
    assert np.allclose(sub_pays, pays)

    # And for expected deviation payoffs of random mixtures
    for mix in rreg.random_mixtures(20):
        dev_pay = rreg.deviation_payoffs(mix)
        full_pay = reggame.deviation_payoffs(restrict.translate(mix,
                                                                rest))[rest]
        assert np.allclose(dev_pay, full_pay)

    assert rreg.min_strat_payoffs().shape == (rreg.num_strats, )
    assert rreg.max_strat_payoffs().shape == (rreg.num_strats, )

    # json round trip preserves equality and hashing
    jgame = json.dumps(rreg.to_json())
    copy = learning.rbfgame_json(json.loads(jgame))
    assert hash(copy) == hash(rreg)
    assert copy == rreg

    # Restricting the restricted game again still works
    rrest = rreg.random_restriction()
    rrreg = rreg.restrict(rrest)

    assert rrreg.min_strat_payoffs().shape == (rrreg.num_strats, )
    assert rrreg.max_strat_payoffs().shape == (rrreg.num_strats, )

    jgame = json.dumps(rrreg.to_json())
    copy = learning.rbfgame_json(json.loads(jgame))
    assert hash(copy) == hash(rrreg)
    assert copy == rrreg
Example #5
0
 def dev_profs(red_players, full_players, mask, rst):
     """Deviation profiles for a particular role"""
     # Enumerate every restricted profile and lift it to the full game
     reduced = rsgame.empty(red_players, support)
     translated = restrict.translate(reduced.all_profiles(), rest)
     full = rsgame.empty(full_players, full_game.num_role_strats)
     non_devs = hierarchical.expand_profiles(full, translated)
     # One unit deviation to each unsupported strategy of this role;
     # rst is the role's offset into the flat strategy array
     num_devs = int(np.sum(~mask))
     deviations = np.zeros((num_devs, full_game.num_strats), int)
     deviations[:, rst:rst + mask.size][:, ~mask] = np.eye(num_devs, dtype=int)
     # Cross every non-deviating profile with every single deviation
     return (non_devs[:, None] + deviations).reshape(
         -1, full_game.num_strats)
 def dev_profs(red_players, full_players, mask, rst):
     """Deviation profiles for a particular role

     NOTE(review): this definition is byte-identical to the one directly
     above and shadows it — confirm the duplication is intentional.
     """
     # All profiles of the reduced restricted game, in full strategy space
     rgame = rsgame.empty(red_players, support)
     sub_profs = restrict.translate(rgame.all_profiles(), rest)
     game = rsgame.empty(full_players, full_game.num_role_strats)
     non_devs = hierarchical.expand_profiles(game, sub_profs)
     # One unit deviation per unsupported strategy of this role; rst is
     # the role's offset into the flat strategy array
     ndevs = np.sum(~mask)
     devs = np.zeros((ndevs, full_game.num_strats), int)
     devs[:, rst:rst + mask.size][:, ~mask] = np.eye(ndevs, dtype=int)
     # Broadcast: every non-deviating profile paired with every deviation
     profs = non_devs[:, None] + devs
     profs.shape = (-1, full_game.num_strats)
     return profs
Example #7
0
 def get_payoffs(self, profiles):
     """Return payoffs for every strategy played in ``profiles``.

     Entries for strategies with zero count stay zero; each played
     strategy is scored by its own regressor on the opponent profile.
     """
     utils.check(
         self.is_profile(profiles).all(), 'must pass valid profiles')
     payoffs = np.zeros(profiles.shape)
     params = zip(self._offset, self._scale, self._regressors)
     for strat, (off, scale, reg) in enumerate(params):
         played = profiles[..., strat] > 0
         profs = profiles[played]
         # Predict from the opponent profile: drop one copy of own strategy
         profs[:, strat] -= 1
         if profs.size:
             preds = reg.predict(restrict.translate(profs, self._rest))
             payoffs[played, strat] = preds.ravel() * scale + off
     return payoffs
Example #8
0
    def get_dev_payoffs(self, dev_profs):
        """Compute the payoff for deviating

        This implementation is more efficient than the default since we don't
        need to compute the payoff for non deviators."""
        # Flatten leading axes, translate into full strategy space, then
        # move the role axis first: prof_view[r] is the stack of opponent
        # profiles faced when a member of role r deviates
        prof_view = np.rollaxis(restrict.translate(dev_profs.reshape(
            (-1, self.num_roles, self.num_strats)), self._rest), 1, 0)
        payoffs = np.empty(dev_profs.shape[:-2] + (self.num_strats,))
        # Transposed flat view: pay_view[s] aliases strategy s's payoffs
        # across all profiles, so np.copyto below writes into `payoffs`
        pay_view = payoffs.reshape((-1, self.num_strats)).T
        # One regressor per strategy; utils.repeat presumably yields each
        # role's profile block once per strategy of that role — TODO confirm
        for pays, profs, reg in zip(
                pay_view, utils.repeat(prof_view, self.num_role_strats),
                self._regressors):
            np.copyto(pays, reg.predict(profs))
        # Undo the per-strategy normalization applied at training time
        return payoffs * self._scale + self._offset
Example #9
0
 def get_payoffs(self, profiles):
     """Return payoffs for every strategy played in ``profiles``.

     Entries for unplayed strategies stay zero.  Each strategy has its
     own regressor, queried on the opponent profile (own count reduced
     by one) and rescaled by the stored scale and offset.
     """
     utils.check(
         self.is_profile(profiles).all(), 'must pass valid profiles')
     payoffs = np.zeros(profiles.shape)
     for i, (off, scale, reg) in enumerate(
             zip(self._offset, self._scale, self._regressors)):
         # Only profiles where strategy i is actually played
         mask = profiles[..., i] > 0
         profs = profiles[mask]
         # Remove one copy of own strategy: predict from opponents
         profs[:, i] -= 1
         if profs.size:
             payoffs[mask,
                     i] = reg.predict(restrict.translate(
                         profs, self._rest)).ravel() * scale + off
     return payoffs
Example #10
0
async def test_basic_asyncgame():
    """Test that wrapped async games work"""
    base = gamegen.game([4, 3], [3, 4])
    agame = asyncgame.wrap(base)
    rest = agame.random_restriction()

    rgame = await agame.get_restricted_game(rest)
    assert rgame.is_complete()
    expected = rsgame.empty_copy(base.restrict(rest))
    assert rsgame.empty_copy(rgame) == expected

    dgame = await agame.get_deviation_game(rest)
    full_mix = restrict.translate(rgame.random_mixture(), rest)
    assert not np.isnan(dgame.deviation_payoffs(full_mix)).any()

    # Wrapping the same game twice yields equal, equally-hashed objects
    clone = asyncgame.wrap(base)
    assert hash(clone) == hash(agame)
    assert clone == agame
Example #11
0
    def get_dev_payoffs(self, dev_profs):
        """Compute the payoff for deviating

        This implementation is more efficient than the default since we don't
        need to compute the payoff for non deviators."""
        # Flatten leading axes, translate into full strategy space, then
        # move the role axis first: prof_view[r] is the stack of opponent
        # profiles faced when a member of role r deviates
        prof_view = np.rollaxis(
            restrict.translate(
                dev_profs.reshape((-1, self.num_roles, self.num_strats)),
                self._rest), 1, 0)
        payoffs = np.empty(dev_profs.shape[:-2] + (self.num_strats, ))
        # Transposed flat view: pay_view[s] aliases strategy s's payoffs
        # across all profiles, so np.copyto below writes into `payoffs`
        pay_view = payoffs.reshape((-1, self.num_strats)).T
        # One regressor per strategy; utils.repeat presumably yields each
        # role's profile block once per strategy of that role — TODO confirm
        for pays, profs, reg in zip(
                pay_view, utils.repeat(prof_view, self.num_role_strats),
                self._regressors):
            np.copyto(pays, reg.predict(profs))
        # Undo the per-strategy normalization applied at training time
        return payoffs * self._scale + self._offset
Example #12
0
async def test_mix_asyncgame():
    """Test that mixture async games work"""
    left = gamegen.game([4, 3], [3, 4])
    right = gamegen.game([4, 3], [3, 4])
    agame = asyncgame.mix(asyncgame.wrap(left), asyncgame.wrap(right), 0.4)
    assert agame.get_game() == rsgame.mix(left, right, 0.4)
    assert str(agame) == "{} - 0.4 - {}".format(repr(left), repr(right))

    rest = agame.random_restriction()
    rgame = await agame.get_restricted_game(rest)
    assert rgame.is_complete()
    expected = rsgame.empty_copy(left.restrict(rest))
    assert rsgame.empty_copy(rgame) == expected

    dgame = await agame.get_deviation_game(rest)
    full_mix = restrict.translate(rgame.random_mixture(), rest)
    assert not np.isnan(dgame.deviation_payoffs(full_mix)).any()

    # An identically constructed mixture compares and hashes equal
    clone = asyncgame.mix(asyncgame.wrap(left), asyncgame.wrap(right), 0.4)
    assert hash(clone) == hash(agame)
    assert clone == agame
Example #13
0
def test_missing_data_maximal_restrictions(base, prob):
    """Test missing data"""
    game = gamegen.game_replace(base, prob)
    rests = restrict.maximal_restrictions(game)

    if rests.size:
        # No returned restriction may be a (strict) subset of another
        dominated = np.all(rests <= rests[:, None], -1)
        np.fill_diagonal(dominated, False)
        assert not dominated.any(), \
            'One maximal restriction dominated another'

    for rest in rests:
        # Every profile of the restricted game must be present in the data
        rgame = rsgame.empty_copy(game).restrict(rest)
        present = restrict.translate(rgame.all_profiles(), rest)
        assert all(p in game for p in present), \
            "Maximal restriction didn't have all profiles"
        # Adding any unsupported strategy must lose at least one profile,
        # otherwise the restriction wasn't maximal
        for dev in np.nonzero(~rest)[0]:
            extra = restrict.additional_strategy_profiles(game, rest, dev)
            assert not all(p in game for p in extra), (  # pragma: no branch
                'Maximal restriction could be bigger {} {}'.format(
                    dev, rest))
Example #14
0
def test_translate():
    """Test translate"""
    # Six positive entries spread over a ten-strategy support mask:
    # translate must place them at the supported indices, zeros elsewhere
    profile = np.arange(6) + 1
    support = np.array([1, 0, 0, 1, 1, 0, 1, 1, 0, 1], bool)
    translated = restrict.translate(profile, support)
    assert np.all(translated == [1, 0, 0, 2, 3, 0, 4, 5, 0, 6])
Example #15
0
def main(args):  # pylint: disable=too-many-statements,too-many-branches,too-many-locals
    """Entry point for analysis.

    Loads a game, optionally reduces it and strips dominated strategies,
    finds equilibria of (maximal) restricted games, classifies every
    candidate by regret, and writes a human readable report followed by
    json data to ``args.output``.
    """
    game = gamereader.load(args.input)

    # Optional player reduction before analysis; dpr takes precedence
    if args.dpr is not None:
        red_players = game.role_from_repr(args.dpr, dtype=int)
        game = reduction.deviation_preserving.reduce_game(game, red_players)
    elif args.hr is not None:
        red_players = game.role_from_repr(args.hr, dtype=int)
        game = reduction.hierarchical.reduce_game(game, red_players)

    if args.dominance:
        # domsub is reused below when reporting dominated strategies
        domsub = dominance.iterated_elimination(game, 'strictdom')
        game = game.restrict(domsub)

    if args.restrictions:
        restrictions = restrict.maximal_restrictions(game)
    else:
        # Single trivial restriction covering every strategy
        restrictions = np.ones((1, game.num_strats), bool)

    # Collect equilibrium candidates from every restricted game; note
    # restrictions where no equilibria were found for the report
    noeq_restrictions = []
    candidates = []
    for rest in restrictions:
        rgame = game.restrict(rest)
        reqa = nash.mixed_equilibria(rgame,
                                     style=args.style,
                                     regret_thresh=args.regret_thresh,
                                     dist_thresh=args.dist_thresh,
                                     processes=args.processes)
        eqa = restrict.translate(
            rgame.trim_mixture_support(reqa, thresh=args.support), rest)
        if eqa.size:
            candidates.extend(eqa)
        else:
            noeq_restrictions.append(rest)

    # Classify candidates: confirmed equilibria, unconfirmed (missing
    # data), or pointers to unexplored best-response restricted games
    equilibria = collect.mcces(args.dist_thresh * np.sqrt(2 * game.num_roles))
    unconfirmed = collect.mcces(args.dist_thresh * np.sqrt(2 * game.num_roles))
    unexplored = {}
    for eqm in candidates:
        support = eqm > 0
        # FIXME This treats trimming support differently than quiesce does,
        # which means quiesce could find an equilibria, and this would fail to
        # find it.
        gains = regret.mixture_deviation_gains(game, eqm)
        role_gains = np.fmax.reduceat(gains, game.role_starts)
        gain = np.nanmax(role_gains)

        if np.isnan(gains).any() and gain <= args.regret_thresh:
            # Not fully explored but might be good
            unconfirmed.add(eqm, gain)

        elif np.any(role_gains > args.regret_thresh):
            # There are deviations, did we explore them?
            # Index of the best deviation per role with significant gain
            dev_inds = ([
                np.argmax(gs == mg) for gs, mg in zip(
                    np.split(gains, game.role_starts[1:]), role_gains)
            ] + game.role_starts)[role_gains > args.regret_thresh]
            for dind in dev_inds:
                devsupp = support.copy()
                devsupp[dind] = True
                if not np.all(devsupp <= restrictions, -1).any():
                    # Keep the largest-gain deviation per restriction id,
                    # counting how many candidates pointed at it
                    ind = restrict.to_id(game, devsupp)
                    old_info = unexplored.get(ind, (0, 0, 0, None))
                    new_info = (gains[dind], dind, old_info[2] + 1, eqm)
                    unexplored[ind] = max(new_info, old_info)

        else:
            # Equilibrium!
            equilibria.add(eqm, np.max(gains))

    # Output Game
    args.output.write('Game Analysis\n')
    args.output.write('=============\n')
    args.output.write(str(game))
    args.output.write('\n\n')
    if args.dpr is not None:
        args.output.write('With deviation preserving reduction: ')
        args.output.write(args.dpr.replace(';', ' '))
        args.output.write('\n\n')
    elif args.hr is not None:
        args.output.write('With hierarchical reduction: ')
        args.output.write(args.hr.replace(';', ' '))
        args.output.write('\n\n')
    if args.dominance:
        num = np.sum(~domsub)
        if num:
            args.output.write('Found {:d} dominated strateg{}\n'.format(
                num, 'y' if num == 1 else 'ies'))
            args.output.write(game.restriction_to_str(~domsub))
            args.output.write('\n\n')
        else:
            args.output.write('Found no dominated strategies\n\n')
    if args.restrictions:
        num = restrictions.shape[0]
        if num:
            args.output.write(
                'Found {:d} maximal complete restricted game{}\n\n'.format(
                    num, '' if num == 1 else 's'))
        else:
            args.output.write('Found no complete restricted games\n\n')
    args.output.write('\n')

    # Output social welfare
    args.output.write('Social Welfare\n')
    args.output.write('--------------\n')
    welfare, profile = regret.max_pure_social_welfare(game)
    if profile is None:
        args.output.write('There was no profile with complete payoff data\n\n')
    else:
        args.output.write('\nMaximum social welfare profile:\n')
        args.output.write(game.profile_to_str(profile))
        args.output.write('\nWelfare: {:.4f}\n\n'.format(welfare))

        if game.num_roles > 1:
            # Also report the best profile per role
            for role, welfare, profile in zip(
                    game.role_names,
                    *regret.max_pure_social_welfare(game, by_role=True)):
                args.output.write(
                    'Maximum "{}" welfare profile:\n'.format(role))
                args.output.write(game.profile_to_str(profile))
                args.output.write('\nWelfare: {:.4f}\n\n'.format(welfare))

    args.output.write('\n')

    # Output Equilibria
    args.output.write('Equilibria\n')
    args.output.write('----------\n')
    if equilibria:
        args.output.write('Found {:d} equilibri{}\n\n'.format(
            len(equilibria), 'um' if len(equilibria) == 1 else 'a'))
        for i, (eqm, reg) in enumerate(equilibria, 1):
            args.output.write('Equilibrium {:d}:\n'.format(i))
            args.output.write(game.mixture_to_str(eqm))
            args.output.write('\nRegret: {:.4f}\n\n'.format(reg))
    else:
        args.output.write('Found no equilibria\n\n')
    args.output.write('\n')

    # Output No-equilibria Subgames
    args.output.write('No-equilibria Subgames\n')
    args.output.write('----------------------\n')
    if noeq_restrictions:
        args.output.write(
            'Found {:d} no-equilibria restricted game{}\n\n'.format(
                len(noeq_restrictions),
                '' if len(noeq_restrictions) == 1 else 's'))
        noeq_restrictions.sort(key=lambda x: x.sum())
        for i, subg in enumerate(noeq_restrictions, 1):
            args.output.write(
                'No-equilibria restricted game {:d}:\n'.format(i))
            args.output.write(game.restriction_to_str(subg))
            args.output.write('\n\n')
    else:
        args.output.write('Found no no-equilibria restricted games\n\n')
    args.output.write('\n')

    # Output Unconfirmed Candidates
    args.output.write('Unconfirmed Candidate Equilibria\n')
    args.output.write('--------------------------------\n')
    if unconfirmed:
        args.output.write('Found {:d} unconfirmed candidate{}\n\n'.format(
            len(unconfirmed), '' if len(unconfirmed) == 1 else 's'))
        # Sorted by support size, then regret bound
        ordered = sorted((sum(e > 0 for e in m), r, m) for m, r in unconfirmed)
        for i, (_, reg_bound, eqm) in enumerate(ordered, 1):
            args.output.write('Unconfirmed candidate {:d}:\n'.format(i))
            args.output.write(game.mixture_to_str(eqm))
            args.output.write(
                '\nRegret at least: {:.4f}\n\n'.format(reg_bound))
    else:
        args.output.write('Found no unconfirmed candidate equilibria\n\n')
    args.output.write('\n')

    # Output Unexplored Subgames
    args.output.write('Unexplored Best-response Subgames\n')
    args.output.write('---------------------------------\n')
    if unexplored:
        min_supp = min(restrict.from_id(game, sid).sum() for sid in unexplored)
        args.output.write(
            'Found {:d} unexplored best-response restricted game{}\n'.format(
                len(unexplored), '' if len(unexplored) == 1 else 's'))
        args.output.write(
            'Smallest unexplored restricted game has support {:d}\n\n'.format(
                min_supp))

        # Sorted by support size, then descending gain
        ordered = sorted((
            restrict.from_id(game, sind).sum(),
            -gain,
            dev,
            restrict.from_id(game, sind),
            eqm,
        ) for sind, (gain, dev, _, eqm) in unexplored.items())
        for i, (_, ngain, dev, sub, eqm) in enumerate(ordered, 1):
            args.output.write('Unexplored restricted game {:d}:\n'.format(i))
            args.output.write(game.restriction_to_str(sub))
            args.output.write('\n{:.4f} for deviating to {} from:\n'.format(
                -ngain, game.strat_name(dev)))
            args.output.write(game.mixture_to_str(eqm))
            args.output.write('\n\n')
    else:
        args.output.write(
            'Found no unexplored best-response restricted games\n\n')
    args.output.write('\n')

    # Output json data
    args.output.write('Json Data\n')
    args.output.write('=========\n')
    json_data = {
        'equilibria': [game.mixture_to_json(eqm) for eqm, _ in equilibria]
    }
    json.dump(json_data, args.output)
    args.output.write('\n')
Example #16
0
def main(args): # pylint: disable=too-many-statements,too-many-branches,too-many-locals
    """Entry point for analysis.

    Loads a game, optionally reduces it and strips dominated strategies,
    finds equilibria of (maximal) restricted games, classifies every
    candidate by regret, and writes a human readable report followed by
    json data to ``args.output``.
    """
    game = gamereader.load(args.input)

    # Optional player reduction before analysis; dpr takes precedence
    if args.dpr is not None:
        red_players = game.role_from_repr(args.dpr, dtype=int)
        game = reduction.deviation_preserving.reduce_game(game, red_players)
    elif args.hr is not None:
        red_players = game.role_from_repr(args.hr, dtype=int)
        game = reduction.hierarchical.reduce_game(game, red_players)

    if args.dominance:
        # domsub is reused below when reporting dominated strategies
        domsub = dominance.iterated_elimination(game, 'strictdom')
        game = game.restrict(domsub)

    if args.restrictions:
        restrictions = restrict.maximal_restrictions(game)
    else:
        # Single trivial restriction covering every strategy
        restrictions = np.ones((1, game.num_strats), bool)

    # Collect equilibrium candidates from every restricted game; note
    # restrictions where no equilibria were found for the report
    noeq_restrictions = []
    candidates = []
    for rest in restrictions:
        rgame = game.restrict(rest)
        reqa = nash.mixed_equilibria(
            rgame, style=args.style, regret_thresh=args.regret_thresh,
            dist_thresh=args.dist_thresh, processes=args.processes)
        eqa = restrict.translate(rgame.trim_mixture_support(
            reqa, thresh=args.support), rest)
        if eqa.size:
            candidates.extend(eqa)
        else:
            noeq_restrictions.append(rest)

    # Classify candidates: confirmed equilibria, unconfirmed (missing
    # data), or pointers to unexplored best-response restricted games
    equilibria = collect.mcces(args.dist_thresh * np.sqrt(2 * game.num_roles))
    unconfirmed = collect.mcces(args.dist_thresh * np.sqrt(2 * game.num_roles))
    unexplored = {}
    for eqm in candidates:
        support = eqm > 0
        # FIXME This treats trimming support differently than quiesce does,
        # which means quiesce could find an equilibria, and this would fail to
        # find it.
        gains = regret.mixture_deviation_gains(game, eqm)
        role_gains = np.fmax.reduceat(gains, game.role_starts)
        gain = np.nanmax(role_gains)

        if np.isnan(gains).any() and gain <= args.regret_thresh:
            # Not fully explored but might be good
            unconfirmed.add(eqm, gain)

        elif np.any(role_gains > args.regret_thresh):
            # There are deviations, did we explore them?
            # Index of the best deviation per role with significant gain
            dev_inds = ([np.argmax(gs == mg) for gs, mg
                         in zip(np.split(gains, game.role_starts[1:]),
                                role_gains)] +
                        game.role_starts)[role_gains > args.regret_thresh]
            for dind in dev_inds:
                devsupp = support.copy()
                devsupp[dind] = True
                if not np.all(devsupp <= restrictions, -1).any():
                    # Keep the largest-gain deviation per restriction id,
                    # counting how many candidates pointed at it
                    ind = restrict.to_id(game, devsupp)
                    old_info = unexplored.get(ind, (0, 0, 0, None))
                    new_info = (gains[dind], dind, old_info[2] + 1, eqm)
                    unexplored[ind] = max(new_info, old_info)

        else:
            # Equilibrium!
            equilibria.add(eqm, np.max(gains))

    # Output Game
    args.output.write('Game Analysis\n')
    args.output.write('=============\n')
    args.output.write(str(game))
    args.output.write('\n\n')
    if args.dpr is not None:
        args.output.write('With deviation preserving reduction: ')
        args.output.write(args.dpr.replace(';', ' '))
        args.output.write('\n\n')
    elif args.hr is not None:
        args.output.write('With hierarchical reduction: ')
        args.output.write(args.hr.replace(';', ' '))
        args.output.write('\n\n')
    if args.dominance:
        num = np.sum(~domsub)
        if num:
            args.output.write('Found {:d} dominated strateg{}\n'.format(
                num, 'y' if num == 1 else 'ies'))
            args.output.write(game.restriction_to_str(~domsub))
            args.output.write('\n\n')
        else:
            args.output.write('Found no dominated strategies\n\n')
    if args.restrictions:
        num = restrictions.shape[0]
        if num:
            args.output.write(
                'Found {:d} maximal complete restricted game{}\n\n'.format(
                    num, '' if num == 1 else 's'))
        else:
            args.output.write('Found no complete restricted games\n\n')
    args.output.write('\n')

    # Output social welfare
    args.output.write('Social Welfare\n')
    args.output.write('--------------\n')
    welfare, profile = regret.max_pure_social_welfare(game)
    if profile is None:
        args.output.write('There was no profile with complete payoff data\n\n')
    else:
        args.output.write('\nMaximum social welfare profile:\n')
        args.output.write(game.profile_to_str(profile))
        args.output.write('\nWelfare: {:.4f}\n\n'.format(welfare))

        if game.num_roles > 1:
            # Also report the best profile per role
            for role, welfare, profile in zip(
                    game.role_names,
                    *regret.max_pure_social_welfare(game, by_role=True)):
                args.output.write('Maximum "{}" welfare profile:\n'.format(
                    role))
                args.output.write(game.profile_to_str(profile))
                args.output.write('\nWelfare: {:.4f}\n\n'.format(welfare))

    args.output.write('\n')

    # Output Equilibria
    args.output.write('Equilibria\n')
    args.output.write('----------\n')
    if equilibria:
        args.output.write('Found {:d} equilibri{}\n\n'.format(
            len(equilibria), 'um' if len(equilibria) == 1 else 'a'))
        for i, (eqm, reg) in enumerate(equilibria, 1):
            args.output.write('Equilibrium {:d}:\n'.format(i))
            args.output.write(game.mixture_to_str(eqm))
            args.output.write('\nRegret: {:.4f}\n\n'.format(reg))
    else:
        args.output.write('Found no equilibria\n\n')
    args.output.write('\n')

    # Output No-equilibria Subgames
    args.output.write('No-equilibria Subgames\n')
    args.output.write('----------------------\n')
    if noeq_restrictions:
        args.output.write(
            'Found {:d} no-equilibria restricted game{}\n\n'.format(
                len(noeq_restrictions),
                '' if len(noeq_restrictions) == 1 else 's'))
        noeq_restrictions.sort(key=lambda x: x.sum())
        for i, subg in enumerate(noeq_restrictions, 1):
            args.output.write(
                'No-equilibria restricted game {:d}:\n'.format(i))
            args.output.write(game.restriction_to_str(subg))
            args.output.write('\n\n')
    else:
        args.output.write('Found no no-equilibria restricted games\n\n')
    args.output.write('\n')

    # Output Unconfirmed Candidates
    args.output.write('Unconfirmed Candidate Equilibria\n')
    args.output.write('--------------------------------\n')
    if unconfirmed:
        args.output.write('Found {:d} unconfirmed candidate{}\n\n'.format(
            len(unconfirmed), '' if len(unconfirmed) == 1 else 's'))
        # Sorted by support size, then regret bound
        ordered = sorted(
            (sum(e > 0 for e in m), r, m) for m, r in unconfirmed)
        for i, (_, reg_bound, eqm) in enumerate(ordered, 1):
            args.output.write('Unconfirmed candidate {:d}:\n'.format(i))
            args.output.write(game.mixture_to_str(eqm))
            args.output.write('\nRegret at least: {:.4f}\n\n'.format(
                reg_bound))
    else:
        args.output.write('Found no unconfirmed candidate equilibria\n\n')
    args.output.write('\n')

    # Output Unexplored Subgames
    args.output.write('Unexplored Best-response Subgames\n')
    args.output.write('---------------------------------\n')
    if unexplored:
        min_supp = min(restrict.from_id(game, sid).sum() for sid in unexplored)
        args.output.write(
            'Found {:d} unexplored best-response restricted game{}\n'.format(
                len(unexplored), '' if len(unexplored) == 1 else 's'))
        args.output.write(
            'Smallest unexplored restricted game has support {:d}\n\n'.format(
                min_supp))

        # Sorted by support size, then descending gain
        ordered = sorted((
            restrict.from_id(game, sind).sum(),
            -gain, dev,
            restrict.from_id(game, sind),
            eqm,
        ) for sind, (gain, dev, _, eqm) in unexplored.items())
        for i, (_, ngain, dev, sub, eqm) in enumerate(ordered, 1):
            args.output.write('Unexplored restricted game {:d}:\n'.format(i))
            args.output.write(game.restriction_to_str(sub))
            args.output.write('\n{:.4f} for deviating to {} from:\n'.format(
                -ngain, game.strat_name(dev)))
            args.output.write(game.mixture_to_str(eqm))
            args.output.write('\n\n')
    else:
        args.output.write(
            'Found no unexplored best-response restricted games\n\n')
    args.output.write('\n')

    # Output json data
    args.output.write('Json Data\n')
    args.output.write('=========\n')
    json_data = {
        'equilibria': [game.mixture_to_json(eqm) for eqm, _ in equilibria]}
    json.dump(json_data, args.output)
    args.output.write('\n')