Example #1
def rbfgame_json(json):
    """Read an rbf game from json"""
    utils.check(json['type'].split('.', 1)[0] == 'rbf', 'incorrect type')
    base = rsgame.empty_json(json)

    offsets = base.payoff_from_json(json['offsets'])
    coefs = base.payoff_from_json(json['coefs'])

    lengths = np.empty((base.num_strats, ) * 2)
    for role, strats in json['lengths'].items():
        for strat, pay in strats.items():
            ind = base.role_strat_index(role, strat)
            base.payoff_from_json(pay, lengths[ind])

    profiles = [None] * base.num_strats
    for role, strats in json['profiles'].items():
        for strat, profs in strats.items():
            ind = base.role_strat_index(role, strat)
            profiles[ind] = np.stack(
                [base.profile_from_json(p, verify=False) for p in profs])

    alphas = [None] * base.num_strats
    for role, strats in json['alphas'].items():
        for strat, alph in strats.items():
            ind = base.role_strat_index(role, strat)
            alphas[ind] = np.array(alph)

    sizes = np.fromiter(  # pragma: no branch
        (a.size for a in alphas), int, base.num_strats)

    return _RbfGpGame(base.role_names, base.strat_names, base.num_role_players,
                      offsets, coefs, lengths, sizes, np.concatenate(profiles),
                      np.concatenate(alphas))
Example #2
    async def sample_payoffs(self, profile):
        gu.check(self._is_open, "not open")
        self._check_fetcher()
        hprof = gu.hash_array(profile)
        data = self._profiles.setdefault(
            hprof, ([0], [0], [0], [None], asyncio.Queue()))
        scheduled, _, claimed, prof_id, pays = data
        claimed[0] += 1
        if scheduled[0] < claimed[0]:
            scheduled[0] += self._simult_obs
            async with self._sched_lock:
                for _ in range(self._simult_obs):
                    await self._scheduled.acquire()
                pid = prof_id[0]
                if pid is not None:
                    await self._sched.remove_profile(pid)
                assignment = self._game.profile_to_repr(profile)
                prof_id[0] = (await
                              self._sched.add_profile(assignment,
                                                      scheduled[0]))["id"]
                if pid is None:
                    self._prof_ids[prof_id[0]] = data
        pay = await pays.get()
        self._check_fetcher()
        return pay
Example #3
def parse_index_spec(game, spec):
    """Parse restriction index specification"""
    rest = np.zeros(game.num_strats, bool)
    rest[list(map(int, spec.split(',')))] = True
    utils.check(game.is_restriction(rest),
                '"{}" does not define a valid restriction', spec)
    return rest
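
For illustration, a minimal standalone sketch of the index-spec parsing above, using plain numpy and a hypothetical five-strategy game (the validity check against the game is omitted):

import numpy as np

# Hypothetical game with 5 total strategies; the spec selects indices 0, 2 and 3.
num_strats = 5
spec = '0,2,3'
rest = np.zeros(num_strats, bool)
rest[list(map(int, spec.split(',')))] = True
print(rest)  # [ True False  True  True False]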
Example #4
def dump(game, filelike):
    """Dump game to gambit file"""
    utils.check(game.is_complete(), 'gambit games must be complete')
    game = matgame.matgame_copy(game)
    filelike.write('NFG 1 R "gameanalysis game"\n{ ')

    for role in game.role_names:
        filelike.write('"')
        filelike.write(role.replace('"', '\\"'))
        filelike.write('" ')
    filelike.write('}\n{\n')
    for strats in game.strat_names:
        filelike.write('  { ')
        for strat in strats:
            filelike.write('"')
            filelike.write(strat.replace('"', '\\"'))
            filelike.write('" ')
        filelike.write('}\n')
    filelike.write('}\n\n{\n')

    perm = tuple(range(game.num_roles - 1, -1, -1)) + (game.num_roles, )
    pays = np.transpose(game.payoff_matrix(), perm)
    for outcome in pays.reshape((-1, game.num_roles)):
        filelike.write('  { "" ')
        filelike.write(', '.join(map(str, outcome)))
        filelike.write(' }\n')
    filelike.write('}\n1')
    for i in range(2, game.num_profiles + 1):
        filelike.write(' ')
        filelike.write(str(i))
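
The transpose above reorders the payoff tensor so that, once flattened, the first role's strategy varies fastest, which matches the outcome ordering of gambit's NFG format. A minimal numpy sketch of that reordering with hypothetical shapes:

import numpy as np

# Hypothetical 2-role payoff matrix: role 0 has 2 strategies, role 1 has 3,
# and the trailing axis holds one payoff per role.
num_roles = 2
pays = np.arange(2 * 3 * num_roles).reshape(2, 3, num_roles)
perm = tuple(range(num_roles - 1, -1, -1)) + (num_roles,)
# Reversing the role axes makes role 0's strategy change fastest when flattened.
outcomes = np.transpose(pays, perm).reshape(-1, num_roles)
print(outcomes.shape)  # (6, 2)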
Example #5
    async def aopen(self):  # pylint: disable=too-many-locals
        """Open the eosched"""
        gu.check(not self._is_open, "already open")
        try:
            game = await self._api.get_game(self._game_id)
            obs = await game.get_observations()
            gu.check(
                rsgame.empty_copy(self._game) == rsgame.empty_json(obs),
                "egtaonline game didn't match specified game",
            )
            conf = dict(obs.get("configuration", ()) or ())
            profiles = obs.get("profiles", ()) or ()

            # Parse profiles
            num_profs = len(profiles)
            num_pays = 0
            for jprof in profiles:
                pid = jprof["id"]
                prof, spays = self._game.profsamplepay_from_json(jprof)
                spays.setflags(write=False)
                hprof = gu.hash_array(prof)
                pays = asyncio.Queue()
                num_spays = len(spays)
                num_pays += num_spays
                for pay in spays:
                    pays.put_nowait(pay)
                data = ([num_spays], [num_spays], [0], [pid], pays)
                self._profiles[hprof] = data
                self._prof_ids[pid] = data
            logging.info(
                "found %d existing profiles with %d payoffs in game %d",
                num_profs,
                num_pays,
                self._game_id,
            )

            # Create and start scheduler
            self._sched = await obs.create_generic_scheduler(
                "egta_" + eu.random_string(20),
                True,
                self._obs_memory,
                self._obs_time,
                self._simult_obs,
                1,
                conf,
            )
            logging.warning(
                "created scheduler %d for running simulations of game %d: "
                "https://%s/generic_schedulers/%d",
                self._sched["id"],
                self._game_id,
                self._api.domain,
                self._sched["id"],
            )
            self._fetcher = asyncio.ensure_future(self._fetch())
            self._is_open = True
        except Exception as ex:
            await self.aclose()
            raise ex
        return self
Example #6
    def open(self):
        """Open the zip scheduler"""
        utils.check(not self._is_open, "can't be open")
        try:
            self._num = 0
            self._sim_dir = tempfile.TemporaryDirectory()
            self._prof_dir = tempfile.TemporaryDirectory()
            with zipfile.ZipFile(self.zipf) as zfil:
                zfil.extractall(self._sim_dir.name)
            sim_files = [
                d for d in os.listdir(self._sim_dir.name)
                if d not in {"__MACOSX"}
            ]
            utils.check(
                len(sim_files) == 1,
                "improper zip format, only one file should exist in root",
            )
            self._sim_root = os.path.join(self._sim_dir.name, sim_files[0])
            os.chmod(os.path.join(self._sim_root, "script", "batch"), 0o700)

            with open(os.path.join(self._sim_root, "defaults.json")) as fil:
                self._base["configuration"] = json.load(fil).get(
                    "configuration", {})
            self._base["configuration"].update(self.conf)

            self._is_open = True
        except Exception as ex:
            self.close()
            raise ex
Example #7
def neighbor_json(json):
    """Read neighbor game from json"""
    utils.check(
        json['type'].split('.', 1)[0] == 'neighbor', 'incorrect type')
    return _NeighborDeviationGame(
        gamereader.loadj(json['model']),
        num_neighbors=json.get('neighbors', json.get('devs', None)))
Example #8
def rock_paper_scissors(win=1, loss=-1):
    """Return an instance of rock paper scissors"""
    if isinstance(win, abc.Iterable):
        win = list(win)
    else:
        win = [win] * 3
    if isinstance(loss, abc.Iterable):
        loss = list(loss)
    else:
        loss = [loss] * 3
    utils.check(
        all(l < 0 for l in loss) and all(w > 0 for w in win) and len(loss) == 3
        and len(win) == 3,
        'win must be greater than zero and loss must be less than zero')
    profiles = [[2, 0, 0],
                [1, 1, 0],
                [1, 0, 1],
                [0, 2, 0],
                [0, 1, 1],
                [0, 0, 2]]
    payoffs = [[0., 0., 0.],
               [loss[0], win[0], 0.],
               [win[1], 0., loss[1]],
               [0., 0., 0.],
               [0., loss[2], win[2]],
               [0., 0., 0.]]
    return paygame.game_names(['all'], 2, [['paper', 'rock', 'scissors']],
                              profiles, payoffs)
Example #9
def dump(game, filelike):
    """Dump game to gambit file"""
    utils.check(game.is_complete(), 'gambit games must be complete')
    game = matgame.matgame_copy(game)
    filelike.write('NFG 1 R "gameanalysis game"\n{ ')

    for role in game.role_names:
        filelike.write('"')
        filelike.write(role.replace('"', '\\"'))
        filelike.write('" ')
    filelike.write('}\n{\n')
    for strats in game.strat_names:
        filelike.write('  { ')
        for strat in strats:
            filelike.write('"')
            filelike.write(strat.replace('"', '\\"'))
            filelike.write('" ')
        filelike.write('}\n')
    filelike.write('}\n\n{\n')

    perm = tuple(range(game.num_roles - 1, -1, -1)) + (game.num_roles,)
    pays = np.transpose(game.payoff_matrix(), perm)
    for outcome in pays.reshape((-1, game.num_roles)):
        filelike.write('  { "" ')
        filelike.write(', '.join(map(str, outcome)))
        filelike.write(' }\n')
    filelike.write('}\n1')
    for i in range(2, game.num_profiles + 1):
        filelike.write(' ')
        filelike.write(str(i))
Example #10
def aggfn_json(json):  # pylint: disable=too-many-locals
    """Read an Aggfn from json

    Json versions of the game will generally have 'type': 'aggfn...' in them,
    but as long as the proper fields exist, this will succeed."""
    base = rsgame.empty_json(json)

    _, version = json.get('type', '.3').split('.', 1)
    utils.check(version == '3',
                'parsing versions below 3 is currently unsupported')

    num_functions = len(json['function_tables'])
    function_inputs = np.empty((base.num_strats, num_functions), bool)
    action_weights = np.empty((num_functions, base.num_strats))
    function_table = np.empty((num_functions, ) +
                              tuple(base.num_role_players + 1))
    offsets = np.empty(base.num_strats)

    base.payoff_from_json(json.get('offsets', {}), offsets)

    for inps, jinps in zip(function_inputs.T, json['function_inputs']):
        base.restriction_from_json(jinps, inps, verify=False)

    for weights, jweights in zip(action_weights, json['action_weights']):
        base.payoff_from_json(jweights, weights)

    function_table.fill(0)
    for table, jtable in zip(function_table, json['function_tables']):
        for elem in jtable:
            copy = elem.copy()
            value = copy.pop('value')
            table[tuple(int(i) for i in base.role_from_json(copy))] = value

    return aggfn_replace(base, action_weights, function_inputs, function_table,
                         offsets)
Example #11
    def __init__(self, model_game):
        super().__init__(model_game.role_names, model_game.strat_names,
                         model_game.num_role_players)
        utils.check(
            model_game.is_complete(),
            'deviation models must be complete games')
        self.model = model_game
Example #12
def parse_sorted(red, game):
    """Parser reduction input for roles in sorted order"""
    players = red.split(',')
    utils.check(
        len(players) == game.num_roles,
        'Must input a reduced count for every role')
    return np.fromiter(map(int, players), int, len(players))
Example #13
def reduce_game(full_game, red_players):
    """Reduce a game using hierarchical reduction

    Parameters
    ----------
    full_game : Game
        The game to reduce.
    red_players : ndarray-like
        The reduced number of players for each role. This will be coerced
        into the proper shape if necessary.
    """
    red_game = rsgame.empty_names(full_game.role_names, red_players,
                                  full_game.strat_names)
    utils.check(np.all(red_game.num_role_players > 0),
                'all reduced players must be greater than zero')
    utils.check(
        np.all(full_game.num_role_players >= red_game.num_role_players),
        'all full counts must not be less than reduced counts')

    if full_game.is_empty():
        return red_game
    elif full_game.num_profiles < red_game.num_all_profiles:
        profiles = full_game.profiles()
        payoffs = full_game.payoffs()
    else:
        profiles = expand_profiles(full_game, red_game.all_profiles())
        payoffs = full_game.get_payoffs(profiles)
        valid = ~np.all(np.isnan(payoffs) | (profiles == 0), 1)
        profiles = profiles[valid]
        payoffs = payoffs[valid]

    red_profiles, mask = _common.reduce_profiles(
        full_game, red_game.num_role_players[None], profiles)
    return paygame.game_replace(red_game, red_profiles, payoffs[mask])
Example #14
def rbfgame_json(json):
    """Read an rbf game from json"""
    utils.check(json['type'].split('.', 1)[0] == 'rbf', 'incorrect type')
    base = rsgame.empty_json(json)

    offsets = base.payoff_from_json(json['offsets'])
    coefs = base.payoff_from_json(json['coefs'])

    lengths = np.empty((base.num_strats,) * 2)
    for role, strats in json['lengths'].items():
        for strat, pay in strats.items():
            ind = base.role_strat_index(role, strat)
            base.payoff_from_json(pay, lengths[ind])

    profiles = [None] * base.num_strats
    for role, strats in json['profiles'].items():
        for strat, profs in strats.items():
            ind = base.role_strat_index(role, strat)
            profiles[ind] = np.stack([
                base.profile_from_json(p, verify=False) for p in profs])

    alphas = [None] * base.num_strats
    for role, strats in json['alphas'].items():
        for strat, alph in strats.items():
            ind = base.role_strat_index(role, strat)
            alphas[ind] = np.array(alph)

    sizes = np.fromiter(  # pragma: no branch
        (a.size for a in alphas), int, base.num_strats)

    return _RbfGpGame(
        base.role_names, base.strat_names, base.num_role_players, offsets,
        coefs, lengths, sizes, np.concatenate(profiles),
        np.concatenate(alphas))
Example #15
def mix(agame0, agame1, prob):
    """Mix two async games"""
    utils.check(
        rsgame.empty_copy(agame0) == rsgame.empty_copy(agame1),
        "games must have identically structure",
    )
    return _MixedAsyncGame(agame0, agame1, prob)
Example #16
    def __init__(self, model, num_samples=100):
        super().__init__(model)
        utils.check(num_samples > 0, 'num samples must be greater than 0')
        # TODO It might be interesting to play with a sample schedule, i.e.
        # change the number of samples based on the number of deviation
        # payoff queries made so far (i.e. reduce variance as we get close
        # to convergence)
        self.num_samples = num_samples
Example #17
    def __init__(self, model, num_samples=100):
        super().__init__(model)
        utils.check(num_samples > 0, 'num samples must be greater than 0')
        # TODO It might be interesting to play with a sample schedule, i.e.
        # change the number of samples based on the number of deviation
        # payoff queries made so far (i.e. reduce variance as we get close
        # to convergence)
        self.num_samples = num_samples
Example #18
def trace_interpolate(game0, game1, peqs, eqa, targets, **kwargs):  # pylint: disable=too-many-locals
    """Get an equilibrium at a specific time

    Parameters
    ----------
    game0 : RsGame
        The game to get data from when the mixture probability is 0.
    game1 : RsGame
        The game to get data from when the mixture probability is 1.
    peqs : [float]
        A parallel list of probabilities for each equilibrium in a continuous
        trace.
    eqa : [eqm]
        A parallel list of equilibria, one per probability, representing
        continuous equilibria for probability mixture games.
    targets : [float]
        The probabilities to compute equilibria at.
    kwargs : options
        The same options as `trace_equilibrium`.
    """
    peqs = np.asarray(peqs, float)
    eqa = np.asarray(eqa, float)
    targets = np.asarray(targets, float)

    # Make everything sorted
    if np.all(np.diff(peqs) <= 0):
        peqs = peqs[::-1]
        eqa = eqa[::-1]
    order = np.argsort(targets)
    targets = targets[order]

    utils.check(np.all(np.diff(peqs) >= 0),
                'trace probabilities must be sorted')
    utils.check(peqs[0] <= targets[0] and targets[-1] <= peqs[-1],
                'targets must be internal to trace')

    result = np.empty((targets.size, game0.num_strats))
    scan = zip(utils.subsequences(peqs), utils.subsequences(eqa))
    (pi1, pi2), (eqm1, eqm2) = next(scan)
    for target, i in zip(targets, order):
        while target > pi2:
            (pi1, pi2), (eqm1, eqm2) = next(scan)
        (*_, pt1), (*_, eqt1) = trace_equilibrium(  # pylint: disable=too-many-star-expressions
            game0, game1, pi1, eqm1, target, **kwargs)
        (*_, pt2), (*_, eqt2) = trace_equilibrium(  # pylint: disable=too-many-star-expressions
            game0, game1, pi2, eqm2, target, **kwargs)
        if np.isclose(pt1, target) and np.isclose(pt2, target):
            mixgame = rsgame.mix(game0, game1, target)
            _, _, result[i] = min(
                (regret.mixture_regret(mixgame, eqt1), 0, eqt1),
                (regret.mixture_regret(mixgame, eqt2), 1, eqt2))
        elif np.isclose(pt1, target):
            result[i] = eqt1
        elif np.isclose(pt2, target):
            result[i] = eqt2
        else:  # pragma: no cover
            raise ValueError('ode solving failed to reach prob')
    return result
Example #19
def translate(profiles, rest):
    """Translate a strategy object to the full game"""
    utils.check(profiles.shape[-1] == rest.sum(),
                'profiles must be valid for the restriction')
    if rest.all():
        return profiles
    new_profs = np.zeros(profiles.shape[:-1] + (rest.size, ), profiles.dtype)
    new_profs[..., rest] = profiles
    return new_profs
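
A minimal sketch of the translation above with a hypothetical four-strategy restriction; profiles over the supported strategies are scattered back into full-game positions:

import numpy as np

# Hypothetical restriction: of 4 strategies, only indices 1 and 3 are supported.
rest = np.array([False, True, False, True])
profiles = np.array([[2, 0],
                     [1, 1]])  # profiles over the restricted strategies
full = np.zeros(profiles.shape[:-1] + (rest.size,), profiles.dtype)
full[..., rest] = profiles
print(full)
# [[0 2 0 0]
#  [0 1 0 1]]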
Example #20
def reduce_game(full_game, red_players):  # pylint: disable=too-many-locals
    """Reduce a game using deviation preserving reduction

    Parameters
    ----------
    full_game : Game
        The game to reduce.
    red_players : ndarray-like
        The reduced number of players for each role. This will be coerced
        into the proper shape if necessary.
    """
    red_game = rsgame.empty_names(full_game.role_names, red_players,
                                  full_game.strat_names)
    utils.check(
        np.all((red_game.num_role_players > 1)
               | (full_game.num_role_players == 1)),
        'all reduced players must be greater than zero')
    utils.check(
        np.all(full_game.num_role_players >= red_game.num_role_players),
        'all full counts must not be less than reduced counts')

    if full_game.is_empty():
        return red_game
    elif full_game.num_profiles < red_game.num_all_dpr_profiles:
        full_profiles = full_game.profiles()
        full_payoffs = full_game.payoffs()
    else:
        full_profiles = expand_profiles(full_game, red_game.all_profiles())
        full_payoffs = full_game.get_payoffs(full_profiles)
        valid = ~np.all(np.isnan(full_payoffs) | (full_profiles == 0), 1)
        full_profiles = full_profiles[valid]
        full_payoffs = full_payoffs[valid]

    # Reduce
    red_profiles, red_inds, full_inds, strat_inds = _reduce_profiles(
        red_game, full_profiles, True)

    if red_profiles.size == 0:  # Empty reduction
        return red_game

    # Build mapping from payoffs to reduced profiles, and use bincount
    # to count the number of payoffs mapped to a specific location, and
    # sum the number of payoffs mapped to a specific location
    cum_inds = red_inds * full_game.num_strats + strat_inds
    payoff_vals = full_payoffs[full_inds, strat_inds]
    red_payoffs = np.bincount(cum_inds, payoff_vals,
                              red_profiles.size).reshape(red_profiles.shape)
    red_payoff_counts = np.bincount(
        cum_inds, minlength=red_profiles.size).reshape(red_profiles.shape)
    mask = red_payoff_counts > 1
    red_payoffs[mask] /= red_payoff_counts[mask]

    unknown = (red_profiles > 0) & (red_payoff_counts == 0)
    red_payoffs[unknown] = np.nan
    valid = ~np.all((red_profiles == 0) | np.isnan(red_payoffs), 1)
    return paygame.game_replace(red_game, red_profiles[valid],
                                red_payoffs[valid])
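
The bincount bookkeeping above averages every full-game payoff that maps to the same reduced location. A minimal standalone sketch of that trick with hypothetical indices:

import numpy as np

# Three contributed payoffs map to two reduced locations (0, 0, 1); weighted and
# unweighted bincounts give each location's sum and count, whose ratio is the mean.
cum_inds = np.array([0, 0, 1])
payoff_vals = np.array([1.0, 3.0, 5.0])
red_payoffs = np.bincount(cum_inds, payoff_vals, 2)
counts = np.bincount(cum_inds, minlength=2)
mask = counts > 1
red_payoffs[mask] /= counts[mask]
print(red_payoffs)  # [2. 5.]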
Example #21
def is_pure_profile(game, prof):
    """Returns true of the profile is pure"""
    # For an asymmetric game, this will always return false, but then it
    # shouldn't be an issue, because pure strategy regret will be more
    # informative.
    pure = np.any(np.add.reduceat(prof, game.role_starts) > 1.5)
    utils.check(
        game.is_profile(np.asarray(prof, int)) if pure else
        game.is_mixture(prof), 'profile must be valid')
    return pure
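
A minimal sketch of the purity test above, assuming a hypothetical two-role game with two players and two strategies per role (role_starts = [0, 2]): a pure profile has some per-role count above 1.5, while a mixture never does:

import numpy as np

role_starts = np.array([0, 2])
pure_prof = np.array([2, 0, 0, 2])        # player counts per strategy
mixture = np.array([0.5, 0.5, 1.0, 0.0])  # probabilities per strategy
print(np.any(np.add.reduceat(pure_prof, role_starts) > 1.5))  # True
print(np.any(np.add.reduceat(mixture, role_starts) > 1.5))    # False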
Example #22
def nngame_train(  # pylint: disable=too-many-arguments,too-many-locals
        game,
        epochs=100,
        layer_sizes=(32, 32),
        dropout=0.2,
        verbosity=0,
        optimizer='sgd',
        loss='mean_squared_error'):
    """Train a neural network regression model

    This mostly exists as a proof of concept; individual testing should be done
    to make sure it is working sufficiently. This API will likely change to
    support more general architectures and training.
    """
    utils.check(layer_sizes, 'must have at least one layer')
    utils.check(0 <= dropout < 1, 'dropout must be a valid probability')
    # This is for delayed importing of tensorflow
    from keras import models, layers

    model = models.Sequential()
    lay_iter = iter(layer_sizes)
    model.add(
        layers.Dense(next(lay_iter),
                     input_shape=[game.num_strats],
                     activation='relu'))
    for units in lay_iter:
        model.add(layers.Dense(units, activation='relu'))
        if dropout:
            model.add(layers.Dropout(dropout))
    model.add(layers.Dense(1, activation='sigmoid'))

    regs = []
    offsets = np.empty(game.num_strats)
    scales = np.empty(game.num_strats)
    for i, profs, pays in _dev_profpay(game):
        # XXX Payoff normalization specific to sigmoid. If we accept alternate
        # models, we need a way to compute how to potentially normalize
        # payoffs.
        min_pay = pays.min()
        offsets[i] = min_pay
        max_pay = pays.max()
        scale = 1 if np.isclose(max_pay, min_pay) else max_pay - min_pay
        scales[i] = scale
        reg = models.clone_model(model)
        reg.compile(optimizer=optimizer, loss=loss)
        reg.fit(profs, (pays - min_pay) / scale,
                epochs=epochs,
                verbose=verbosity)
        regs.append(reg)

    return _DevRegressionGame(game, tuple(regs), offsets, scales,
                              game.min_strat_payoffs(),
                              game.max_strat_payoffs(),
                              np.ones(game.num_strats, bool))
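
The per-strategy normalization above exists because the final sigmoid layer can only produce values in [0, 1]; payoffs are shifted and scaled into that range for training, and the regression get_payoffs methods elsewhere in this listing undo it with `* scale + off`. A minimal numpy sketch:

import numpy as np

# Hypothetical deviation payoffs for one strategy.
pays = np.array([3.0, 7.0, 5.0])
offset = pays.min()
scale = 1 if np.isclose(pays.max(), offset) else pays.max() - offset
targets = (pays - offset) / scale      # what the sigmoid network is fit to
restored = targets * scale + offset    # how predictions are rescaled later
print(np.allclose(restored, pays))     # True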
Example #23
def min_regret_profile(game):
    """Finds the profile with the confirmed lowest regret

    An error will be raised if there are no profiles with a defined regret.
    """
    utils.check(not game.is_empty(), 'Game must have a profile')
    reg, _, prof = min(
        (_nan_to_inf(regret.pure_strategy_regret(game, prof)), i, prof)
        for i, prof in enumerate(game.profiles()))
    utils.check(not np.isinf(reg), 'No profiles had valid regret')
    return prof
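
The helper `_nan_to_inf` is not shown in this listing; below is a minimal sketch of the selection pattern, with a hypothetical stand-in that maps undefined (NaN) regrets to infinity so they are never chosen, and with the index as a tie-breaker so the profiles themselves are never compared:

import math

def _nan_to_inf(val):
    """Hypothetical stand-in: treat a NaN regret as infinitely bad."""
    return math.inf if math.isnan(val) else val

regrets = [float('nan'), 0.25, 0.1]
best_reg, best_ind = min((_nan_to_inf(r), i) for i, r in enumerate(regrets))
print(best_reg, best_ind)  # 0.1 2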
Example #24
def min_regret_profile(game):
    """Finds the profile with the confirmed lowest regret

    An error will be raised if there are no profiles with a defined regret.
    """
    utils.check(not game.is_empty(), 'Game must have a profile')
    reg, _, prof = min(
        (_nan_to_inf(regret.pure_strategy_regret(game, prof)), i, prof)
        for i, prof in enumerate(game.profiles()))
    utils.check(not np.isinf(reg), 'No profiles had valid regret')
    return prof
Example #25
def translate(profiles, rest):
    """Translate a strategy object to the full game"""
    utils.check(
        profiles.shape[-1] == rest.sum(),
        'profiles must be valid for the restriction')
    if rest.all():
        return profiles
    new_profs = np.zeros(
        profiles.shape[:-1] + (rest.size,), profiles.dtype)
    new_profs[..., rest] = profiles
    return new_profs
Example #26
def _dev_profpay(game):
    """Iterate over deviation profiles and payoffs"""
    sgame = paygame.samplegame_copy(game)
    profiles = sgame.flat_profiles()
    payoffs = sgame.flat_payoffs()

    for i, pays in enumerate(payoffs.T):
        mask = (profiles[:, i] > 0) & ~np.isnan(pays)
        utils.check(mask.any(), "couldn't find deviation data for a strategy")
        profs = profiles[mask]
        profs[:, i] -= 1
        yield i, profs, pays[mask]
Example #27
    def compress_profile(self, profile):
        """Compress profile in array of ints

        Normal profiles are an array of number of players playing a strategy.
        Since matrix games always have one player per role, this compresses
        each roles counts into a single int representing the played strategy
        per role.
        """
        utils.check(self.is_profile(profile).all(), 'must pass vaid profiles')
        profile = np.asarray(profile, int)
        return np.add.reduceat(np.cumsum(self._prof_offset - profile, -1),
                               self.role_starts, -1)
Example #28
    def get_payoffs(self, profiles):
        utils.check(
            self.is_profile(profiles).all(), 'must pass valid profiles')
        payoffs = np.zeros(profiles.shape)
        for i, (off, scale, reg) in enumerate(zip(
                self._offset, self._scale, self._regressors)):
            mask = profiles[..., i] > 0
            profs = profiles[mask]
            profs[:, i] -= 1
            if profs.size:
                payoffs[mask, i] = reg.predict(restrict.translate(
                    profs, self._rest)).ravel() * scale + off
        return payoffs
Example #29
def num_deviation_profiles(game, rest):
    """Returns the number of deviation profiles

    This is a closed form way to compute `deviation_profiles(game,
    rest).shape[0]`.
    """
    rest = np.asarray(rest, bool)
    utils.check(game.is_restriction(rest), 'restriction must be valid')
    num_role_strats = np.add.reduceat(rest, game.role_starts)
    num_devs = game.num_role_strats - num_role_strats
    dev_players = game.num_role_players - np.eye(game.num_roles, dtype=int)
    return np.sum(
        utils.game_size(dev_players, num_role_strats).prod(1) * num_devs)
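
A minimal single-role sanity check of the closed form above (written independently of `utils.game_size`): with N players, R supported strategies and D unsupported ones, each deviation profile pairs one of the D deviations with a profile of N - 1 players over the R supported strategies:

import itertools
from math import comb

players, supported, total = 3, 2, 4
devs = total - supported
closed_form = comb(players - 1 + supported - 1, players - 1) * devs
brute_force = len(list(itertools.combinations_with_replacement(
    range(supported), players - 1))) * devs
print(closed_form, brute_force)  # 6 6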
Example #30
    def __init__(  # pragma: no branch # noqa
            self,
            sgame,
            noise_dist=lambda: 0,
            param_dist=lambda: ()):
        super().__init__(sgame.role_names, sgame.strat_names,
                         sgame.num_role_players)
        utils.check(hasattr(sgame, "get_sample_payoffs"),
                    "sgame not a sample game")
        self._noise_dist = noise_dist
        self._param_dist = param_dist
        self._sgame = sgame
        self._paymap = {}
Example #31
def num_deviation_profiles(game, rest):
    """Returns the number of deviation profiles

    This is a closed form way to compute `deviation_profiles(game,
    rest).shape[0]`.
    """
    rest = np.asarray(rest, bool)
    utils.check(game.is_restriction(rest), 'restriction must be valid')
    num_role_strats = np.add.reduceat(rest, game.role_starts)
    num_devs = game.num_role_strats - num_role_strats
    dev_players = game.num_role_players - np.eye(game.num_roles, dtype=int)
    return np.sum(utils.game_size(dev_players, num_role_strats).prod(1) *
                  num_devs)
Example #32
    def get_payoffs(self, profiles):
        utils.check(
            self.is_profile(profiles).all(), 'must pass valid profiles')
        dev_profiles = np.repeat(
            profiles[..., None, :] - np.eye(self.num_strats, dtype=int),
            self._sizes, -2)
        vec = ((dev_profiles - self._profiles) /
               self._lengths.repeat(self._sizes, 0))
        rbf = np.einsum('...ij,...ij->...i', vec, vec)
        payoffs = self._offset + self._coefs * np.add.reduceat(
            np.exp(-rbf / 2) * self._alpha, self._size_starts, -1)  # pylint: disable=invalid-unary-operand-type
        payoffs[profiles == 0] = 0
        return payoffs
Example #33
    def get_payoffs(self, profiles):
        utils.check(
            self.is_profile(profiles).all(), 'must pass valid profiles')
        dev_profiles = np.repeat(
            profiles[..., None, :] - np.eye(self.num_strats, dtype=int),
            self._sizes, -2)
        vec = ((dev_profiles - self._profiles) /
               self._lengths.repeat(self._sizes, 0))
        rbf = np.einsum('...ij,...ij->...i', vec, vec)
        payoffs = self._offset + self._coefs * np.add.reduceat(
            np.exp(-rbf / 2) * self._alpha, self._size_starts, -1)  # pylint: disable=invalid-unary-operand-type
        payoffs[profiles == 0] = 0
        return payoffs
Example #34
    def uncompress_profile(self, comp_prof):
        """Uncompress a profile"""
        comp_prof = np.asarray(comp_prof, int)
        utils.check(
            np.all(comp_prof >= 0) and
            np.all(comp_prof < self.num_role_strats),
            'must pass valid compressed profiles')
        profile = np.zeros(comp_prof.shape[:-1] + (self.num_strats,), int)
        inds = (comp_prof.reshape((-1, self.num_roles)) +
                self.role_starts + self.num_strats *
                np.arange(int(np.prod(comp_prof.shape[:-1])))[:, None])
        profile.flat[inds] = 1
        return profile
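
A minimal sketch of the uncompression above for a single profile in a hypothetical two-role matrix game with two and three strategies (role_starts = [0, 2]): each compressed entry is just an offset from its role's first strategy index:

import numpy as np

role_starts = np.array([0, 2])
num_strats = 5
comp_prof = np.array([1, 2])        # strategy 1 of role 0, strategy 2 of role 1
profile = np.zeros(num_strats, int)
profile[role_starts + comp_prof] = 1
print(profile)  # [0 1 0 0 1]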
Example #35
def _dev_profpay(game):
    """Iterate over deviation profiles and payoffs"""
    sgame = paygame.samplegame_copy(game)
    profiles = sgame.flat_profiles()
    payoffs = sgame.flat_payoffs()

    for i, pays in enumerate(payoffs.T):
        mask = (profiles[:, i] > 0) & ~np.isnan(pays)
        utils.check(
            mask.any(), "couldn't find deviation data for a strategy")
        profs = profiles[mask]
        profs[:, i] -= 1
        yield i, profs, pays[mask]
Example #36
def matgame_copy(copy_game):
    """Copy a matrix game from an existing game

    Parameters
    ----------
    copy_game : RsGame
        Game to copy payoff data out of. This game must be complete.
    """
    utils.check(copy_game.is_complete(), 'can only copy complete games')

    if hasattr(copy_game, 'payoff_matrix'):
        return matgame_replace(copy_game, copy_game.payoff_matrix())

    # Get payoff matrix
    num_role_strats = copy_game.num_role_strats.repeat(
        copy_game.num_role_players)
    shape = tuple(num_role_strats) + (num_role_strats.size,)
    payoff_matrix = np.empty(shape, float)
    offset = copy_game.role_starts.repeat(copy_game.num_role_players)
    for profile, payoffs in zip(copy_game.profiles(), copy_game.payoffs()):
        inds = itertools.product(*[
            set(itertools.permutations(np.arange(s.size).repeat(s))) for s
            in np.split(profile, copy_game.role_starts[1:])])
        for nested in inds:
            ind = tuple(itertools.chain.from_iterable(nested))
            payoff_matrix[ind] = payoffs[ind + offset]

    # Get role names
    if np.all(copy_game.num_role_players == 1):
        roles = copy_game.role_names
        strats = copy_game.strat_names
    else:
        # When we expand names, we need to make sure they stay sorted
        if utils.is_sorted(r + 'p' for r in copy_game.role_names):
            # We can naively append player numbers
            role_names = copy_game.role_names
        else:
            # We have to prefix to preserve role order
            maxlen = max(map(len, copy_game.role_names))
            role_names = (
                p + '_' * (maxlen - len(r)) + r for r, p
                in zip(copy_game.role_names,
                       utils.prefix_strings('', copy_game.num_roles)))
        roles = tuple(itertools.chain.from_iterable(
            (r + s for s in utils.prefix_strings('p', p))
            for r, p in zip(role_names, copy_game.num_role_players)))
        strats = tuple(itertools.chain.from_iterable(
            itertools.repeat(s, p) for s, p
            in zip(copy_game.strat_names, copy_game.num_role_players)))
    return _MatrixGame(roles, strats, payoff_matrix)
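
The naming branch above exists because role names must stay sorted after each role is split into per-player copies; appending player suffixes can break that order, while prefixing with an index padded to a common length preserves it. A minimal sketch with hypothetical role names and the prefixes written out by hand (the library uses utils.prefix_strings for them):

roles = ['a', 'ab']                         # sorted role names
suffixed = [r + 'p0' for r in roles]        # ['ap0', 'abp0'] -- order broken
print(suffixed == sorted(suffixed))         # False
maxlen = max(map(len, roles))
prefixed = [p + '_' * (maxlen - len(r)) + r
            for r, p in zip(roles, ['0', '1'])]
print(prefixed == sorted(prefixed))         # True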
Example #37
def num_deviation_payoffs(game, rest):
    """Returns the number of deviation payoffs

    This is a closed form way to compute `np.sum(deviation_profiles(game, rest)
    > 0)`."""
    rest = np.asarray(rest, bool)
    utils.check(game.is_restriction(rest), 'restriction must be valid')
    num_role_strats = np.add.reduceat(rest, game.role_starts)
    num_devs = game.num_role_strats - num_role_strats
    dev_players = (game.num_role_players - np.eye(game.num_roles, dtype=int) -
                   np.eye(game.num_roles, dtype=int)[:, None])
    temp = utils.game_size(dev_players, num_role_strats).prod(2)
    non_deviators = np.sum(np.sum(temp * num_role_strats, 1) * num_devs)
    return non_deviators + num_deviation_profiles(game, rest)
Example #38
    def get_payoffs(self, profiles):
        utils.check(
            self.is_profile(profiles).all(), 'must pass valid profiles')
        payoffs = np.zeros(profiles.shape)
        for i, (off, scale, reg) in enumerate(
                zip(self._offset, self._scale, self._regressors)):
            mask = profiles[..., i] > 0
            profs = profiles[mask]
            profs[:, i] -= 1
            if profs.size:
                payoffs[mask, i] = reg.predict(restrict.translate(
                    profs, self._rest)).ravel() * scale + off
        return payoffs
Example #39
def num_deviation_payoffs(game, rest):
    """Returns the number of deviation payoffs

    This is a closed form way to compute `np.sum(deviation_profiles(game, rest)
    > 0)`."""
    rest = np.asarray(rest, bool)
    utils.check(game.is_restriction(rest), 'restriction must be valid')
    num_role_strats = np.add.reduceat(rest, game.role_starts)
    num_devs = game.num_role_strats - num_role_strats
    dev_players = (game.num_role_players - np.eye(game.num_roles, dtype=int) -
                   np.eye(game.num_roles, dtype=int)[:, None])
    temp = utils.game_size(dev_players, num_role_strats).prod(2)
    non_deviators = np.sum(np.sum(temp * num_role_strats, 1) * num_devs)
    return non_deviators + num_deviation_profiles(game, rest)
Example #40
def expand_profiles(full_game, profiles):
    """Return input profiles

    Parameters
    ----------
    full_game : Game
        Game that all profiles must be valid for.
    profiles : ndarray-like
        The profiles.
    axis : int, optional
        The axis the profiles lie on.
    """
    profiles = np.asarray(profiles, int)
    utils.check(full_game.is_profile(profiles).all(), 'profiles must be valid')
    return profiles.reshape((-1, full_game.num_strats))
Example #41
def reduce_profiles(red_game, profiles):
    """Reduce profiles using twins

    Parameters
    ----------
    red_game : Game
        Game that reduced profiles will be profiles for. This game must
        have the valid twins reduction number of players.
    profiles : ndarray-like
        The profiles to reduce.
    """
    profiles = np.asarray(profiles, int)
    utils.check(np.all(red_game.num_role_players <= 2),
                'red game must be a twins game')
    return dpr.reduce_profiles(red_game, profiles)
Example #42
def reduce_profiles(red_game, profiles):
    """Return original profiles

    Parameters
    ----------
    red_game : Game
        Game that all profiles must be valid for.
    profiles : ndarray-like
        The profiles.
    axis : int, optional
        The axis the profiles are on.
    """
    profiles = np.asarray(profiles, int)
    utils.check(red_game.is_profile(profiles).all(), 'profiles must be valid')
    return profiles.reshape((-1, red_game.num_strats))
Example #43
def reduce_game(full_game, red_players=None):
    """Return original game

    Parameters
    ----------
    full_game : Game
        The game to reduce.
    red_players : ndarray-like, optional
        If specified, this must match the number of players per role in
        full_game.
    """
    utils.check(
        red_players is None or np.all(
            full_game.num_role_players == red_players),
        'identity reduction must have same number of players')
    return paygame.game_copy(full_game)
Example #44
def reduce_profiles(red_game, profiles):
    """Reduce profiles using twins

    Parameters
    ----------
    red_game : Game
        Game that reduced profiles will be profiles for. This game must
        have the valid twins reduction number of players.
    profiles : ndarray-like
        The profiles to reduce.
    """
    profiles = np.asarray(profiles, int)
    utils.check(
        np.all(red_game.num_role_players <= 2),
        'red game must be a twins game')
    return dpr.reduce_profiles(red_game, profiles)
Example #45
def expand_profiles(full_game, profiles):
    """Expand profiles using twins reduction

    Parameters
    ----------
    full_game : Game
        Game that expanded profiles will be valid for.
    profiles : ndarray-like
        The profiles to expand
    """
    red_players = np.minimum(full_game.num_role_players, 2)
    profiles = np.asarray(profiles, int)
    red_game = rsgame.empty(red_players, full_game.num_role_strats)
    utils.check(
        red_game.is_profile(profiles).all(), 'profiles must be valid')
    return dpr.expand_profiles(full_game, profiles)
Example #46
def reduce_game(full_game, red_players=None):
    """Reduce a game using twins reduction

    Parameters
    ----------
    full_game : Game
        The game to reduce.
    red_players : ndarray-like, optional
        The reduced number of players for each role. This must be None or
        the reduced number of players for the twins reductions.
    """
    exp_red_players = np.minimum(full_game.num_role_players, 2)
    utils.check(
        red_players is None or np.all(exp_red_players == red_players),
        "twins reduction didn't get expected reduced players")
    return dpr.reduce_game(full_game, exp_red_players)
Example #47
def reduce_profiles(red_game, profiles):
    """Reduce profiles hierarchically

    Parameters
    ----------
    red_game : Game
        Game that reduced profiles will be profiles for.
    profiles : ndarray-like
        The profiles to reduce.
    """
    profiles = np.asarray(profiles, int)
    utils.check(
        profiles.shape[-1] == red_game.num_strats,
        'profiles must be appropriate shape')
    return _common.reduce_profiles(
        red_game, red_game.num_role_players[None],
        profiles.reshape((-1, red_game.num_strats)))[0]
Example #48
def _reduce_profiles(red_game, profiles, return_contributions):  # pylint: disable=too-many-locals
    """Reduce profiles using dpr

    Parameters
    ----------
    red_game : Game
        Game that reduced profiles will be profiles for.
    profiles : ndarray-like
        The profiles to reduce.
    return_contributions : bool, optional
        If true return ancillary information about where the payoffs come
        from.
    """
    profiles = np.asarray(profiles, int)
    utils.check(
        profiles.shape[-1] == red_game.num_strats,
        'profiles not a valid shape')
    if not profiles.size:
        return np.empty((0, red_game.num_strats), int)

    profiles = profiles.reshape((-1, red_game.num_strats))
    all_full_players = np.add.reduceat(profiles, red_game.role_starts, 1)
    full_players = all_full_players[0]
    utils.check(
        np.all(all_full_players == full_players), 'profiles must be valid')

    num_profs = profiles.shape[0]
    dev_profs = profiles.repeat(np.sum(profiles > 0, 1), 0)
    _, strat_inds = profiles.nonzero()
    dev_profs[np.arange(dev_profs.shape[0]), strat_inds] -= 1
    dev_red_players = _devs(red_game, num_profs)
    mask = (profiles > 0).ravel()

    red_profs, reduced = _common.reduce_profiles(
        red_game, dev_red_players[mask], dev_profs)
    rstrat_inds = strat_inds[reduced]
    red_profs[np.arange(red_profs.shape[0]), rstrat_inds] += 1
    red_profs, red_inds = np.unique(
        utils.axis_to_elem(red_profs), return_inverse=True)
    red_profs = utils.axis_from_elem(red_profs)
    if not return_contributions:
        return red_profs

    full_inds = np.arange(num_profs).repeat(
        red_game.num_strats)[mask][reduced]
    return red_profs, red_inds, full_inds, rstrat_inds
Example #49
def expand_profiles(full_game, profiles):
    """Expand profiles hierarchically

    Parameters
    ----------
    full_game : Game
        Game that expanded profiles will be valid for.
    profiles : ndarray-like
        The profiles to expand
    """
    profiles = np.asarray(profiles, int)
    utils.check(
        profiles.shape[-1] == full_game.num_strats,
        'profiles must be appropriate shape')
    return _common.expand_profiles(
        full_game, full_game.num_role_players[None],
        profiles.reshape((-1, full_game.num_strats)))
Example #50
def additional_strategy_profiles(game, rest, role_strat_ind):
    """Returns all profiles added by strategy at index"""
    # This uses the observation that the added profiles are all of the
    # profiles of the new restricted game with one fewer player in the
    # strategy's role, where that last player always plays the new strategy
    rest = np.asarray(rest, bool)
    utils.check(game.is_restriction(rest), 'restriction must be valid')
    new_players = game.num_role_players.copy()
    new_players[game.role_indices[role_strat_ind]] -= 1
    base = rsgame.empty(new_players, game.num_role_strats)
    new_mask = rest.copy()
    new_mask[role_strat_ind] = True
    profs = base.restrict(new_mask).all_profiles()
    expand_profs = np.zeros((profs.shape[0], game.num_strats), int)
    expand_profs[:, new_mask] = profs
    expand_profs[:, role_strat_ind] += 1
    return expand_profs
Example #51
def expand_profiles(full_game, profiles):
    """Return input profiles

    Parameters
    ----------
    full_game : Game
        Game that all profiles must be valid for.
    profiles : ndarray-like
        The profiles.
    axis : int, optional
        The axis the profiles lie on.
    """
    profiles = np.asarray(profiles, int)
    utils.check(
        full_game.is_profile(profiles).all(),
        'profiles must be valid')
    return profiles.reshape((-1, full_game.num_strats))
Example #52
def reduce_profiles(red_game, profiles):
    """Return original profiles

    Parameters
    ----------
    red_game : Game
        Game that all profiles must be valid for.
    profiles : ndarray-like
        The profiles.
    axis : int, optional
        The axis the profiles are on.
    """
    profiles = np.asarray(profiles, int)
    utils.check(
        red_game.is_profile(profiles).all(),
        'profiles must be valid')
    return profiles.reshape((-1, red_game.num_strats))
Example #53
def nngame_train( # pylint: disable=too-many-arguments,too-many-locals
        game, epochs=100, layer_sizes=(32, 32), dropout=0.2, verbosity=0,
        optimizer='sgd', loss='mean_squared_error'):
    """Train a neural network regression model

    This mostly exists as a proof of concept; individual testing should be done
    to make sure it is working sufficiently. This API will likely change to
    support more general architectures and training.
    """
    utils.check(layer_sizes, 'must have at least one layer')
    utils.check(0 <= dropout < 1, 'dropout must be a valid probability')
    # This is for delayed importing of tensorflow
    from keras import models, layers

    model = models.Sequential()
    lay_iter = iter(layer_sizes)
    model.add(layers.Dense(
        next(lay_iter), input_shape=[game.num_strats], activation='relu'))
    for units in lay_iter:
        model.add(layers.Dense(units, activation='relu'))
        if dropout:
            model.add(layers.Dropout(dropout))
    model.add(layers.Dense(1, activation='sigmoid'))

    regs = []
    offsets = np.empty(game.num_strats)
    scales = np.empty(game.num_strats)
    for i, profs, pays in _dev_profpay(game):
        # XXX Payoff normalization specific to sigmoid. If we accept alternate
        # models, we need a way to compute how to potentially normalize
        # payoffs.
        min_pay = pays.min()
        offsets[i] = min_pay
        max_pay = pays.max()
        scale = 1 if np.isclose(max_pay, min_pay) else max_pay - min_pay
        scales[i] = scale
        reg = models.clone_model(model)
        reg.compile(optimizer=optimizer, loss=loss)
        reg.fit(profs, (pays - min_pay) / scale, epochs=epochs,
                verbose=verbosity)
        regs.append(reg)

    return _DevRegressionGame(
        game, tuple(regs), offsets, scales, game.min_strat_payoffs(),
        game.max_strat_payoffs(), np.ones(game.num_strats, bool))
Example #54
def travellers_dilemma(players=2, max_value=100):
    """Return an instance of travellers dilemma

    Strategies range from 2 to max_value, thus there will be max_value - 1
    strategies."""
    utils.check(players > 1, 'players must be more than one')
    utils.check(max_value > 2, 'max value must be more than 2')
    base = rsgame.empty(players, max_value - 1)
    profiles = base.all_profiles()
    payoffs = np.zeros(profiles.shape)
    mins = np.argmax(profiles, -1)
    mask = profiles > 0
    payoffs[mask] = mins.repeat(mask.sum(-1))
    rows = np.arange(profiles.shape[0])
    ties = profiles[rows, mins] > 1
    lowest_pays = mins + 4
    lowest_pays[ties] -= 2
    payoffs[rows, mins] = lowest_pays
    return paygame.game_replace(base, profiles, payoffs)
Example #55
def expand_profiles(sarr, full_players, profiles): # pylint: disable=too-many-locals
    """Hierarchically expands several role symmetric array profiles

    In the event that `full_players` isn't divisible by `reduced_players`,
    we first assign by rounding error and break ties in favor of
    more-played strategies. The final tie-breaker is index / alphabetical
    order."""
    reduced_players = np.add.reduceat(profiles, sarr.role_starts, 1)
    utils.check(
        np.all(full_players >= reduced_players),
        'full_players must be at least as large as reduced_players')
    utils.check(
        np.all((reduced_players > 0) | ((full_players == 0) &
                                        (reduced_players == 0))),
        'reduced_players must be greater than zero')
    # Maximum prevents divide by zero error; equivalent to + eps
    rep_red_players = np.maximum(
        reduced_players, 1).repeat(sarr.num_role_strats, -1)
    rep_full_players = full_players.repeat(sarr.num_role_strats, -1)
    num_profs = profiles.shape[0]
    expand_profs = profiles * rep_full_players // rep_red_players
    unassigned = full_players - \
        np.add.reduceat(expand_profs, sarr.role_starts, 1)

    # Order all possible strategies to find which to increment
    role_order = np.broadcast_to(sarr.role_indices,
                                 (num_profs, sarr.num_strats))
    error = profiles * rep_full_players / rep_red_players - expand_profs
    alpha_inds = np.arange(sarr.num_strats)
    alpha_ord = np.broadcast_to(alpha_inds, (num_profs, sarr.num_strats))
    inds = np.asarray(np.argsort(np.rec.fromarrays(
        [role_order, -error, -profiles, alpha_ord]), 1))

    # Map them to indices in the expand_profs array, and mask out the first
    # that are necessary to meet unassigned
    rectified_inds = (inds + np.arange(num_profs)[:, None] *
                      sarr.num_strats)
    ind_mask = (
        np.arange(sarr.num_strats) <
        np.repeat(sarr.role_starts + unassigned, sarr.num_role_strats, 1))
    expand_profs.flat[rectified_inds[ind_mask]] += 1
    return expand_profs
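
A minimal single-role sketch of the assignment described in the docstring above: scale the reduced counts up by flooring, then give the leftover players to the strategies with the largest rounding error, breaking ties toward more-played strategies:

import numpy as np

reduced = np.array([2, 1])                                 # 3 reduced players
full_players = 5
expanded = reduced * full_players // reduced.sum()         # [3, 1] by flooring
error = reduced * full_players / reduced.sum() - expanded  # rounding error
unassigned = full_players - expanded.sum()                 # 1 player left over
order = np.argsort(np.rec.fromarrays([-error, -reduced]))  # largest error first
expanded[order[:unassigned]] += 1
print(expanded)  # [3 2]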
Example #56
def local_effect(num_players, num_strategies, *, edge_prob=0.2):
    """Generate a local effect game

    In a local effect game, strategies are connected by a graph, and utilities
    are a function of the number of players playing our strategy and the number
    of players playing a neighboring strategy, hence local effect.

    In this formulation, payoffs for others playing our strategy are negative
    quadratics, and payoffs for playing other strategies are positive cubics.

    Parameters
    ----------
    num_players : int > 1
        The number of players.
    num_strategies : int > 1
        The number of strategies.
    edge_prob : float, optional
        The probability that one strategy affects another.
    """
    utils.check(num_players > 1, "can't generate a single player game")
    utils.check(num_strategies > 1, "can't generate a single strategy game")

    local_effect_graph = np.random.rand(
        num_strategies, num_strategies) < edge_prob
    np.fill_diagonal(local_effect_graph, False)
    num_neighbors = local_effect_graph.sum()
    num_functions = num_neighbors + num_strategies

    action_weights = np.eye(num_functions, num_strategies, dtype=float)
    function_inputs = np.eye(num_strategies, num_functions, dtype=bool)
    in_act, out_act = local_effect_graph.nonzero()
    func_inds = np.arange(num_strategies, num_functions)
    function_inputs[in_act, func_inds] = True
    action_weights[func_inds, out_act] = 1

    function_table = np.empty((num_functions, num_players + 1), float)
    function_table[:num_strategies] = -_random_monotone_polynomial(
        num_strategies, num_players, 2)
    function_table[num_strategies:] = _random_monotone_polynomial(
        num_neighbors, num_players, 3)
    return aggfn.aggfn(num_players, num_strategies, action_weights,
                       function_inputs, function_table)
Example #57
def _read_payoffs(match):
    """Read gambit payoff format"""
    role_names = _string_list(match.group('roles'))
    num_strats = tuple(map(int, match.group('strats')[1:-1].split()))
    num_roles = len(num_strats)
    utils.check(
        len(role_names) == num_roles,
        "player names didn't match number of strategies")
    strats = utils.prefix_strings('s', sum(num_strats))
    strat_names = [list(itertools.islice(strats, n)) for n in num_strats]

    payoffs = list(map(float, match.group('payoffs').split()))
    matrix = np.empty(num_strats + (num_roles,))
    utils.check(
        len(payoffs) == matrix.size,
        'incorrect number of payoffs for strategies')
    inds = tuple(range(num_roles - 1, -1, -1)) + (num_roles,)
    np.transpose(matrix, inds).flat = payoffs

    return _normalize(role_names, strat_names, matrix)