Example #1
    def generate_valid_topsorted_node_dag(
            self,
            start_type=PossiblyRepeatedInputKey,
            end_type=GameEffect,
            predicate=lambda types: len(types) <= MAX_INTERMEDIATE_UNBOUND_VARS
    ):
        goalstates = set()
        for n in range(MAX_GAME_EFFECTS_PER_POWER):
            goalstates.add(FrozenMultiset([end_type] * n))

        @memoize
        def dfs(available_types):
            """Returns a list of """
            def can_add_nodetype(nodetype):
                required_types = FrozenMultiset(nodetype.INTYPES)
                return required_types.issubset(available_types)

            # filter() returns an iterator in Python 3; materialize it so
            # random.shuffle() can permute it in place.
            possible_nodetypes = list(filter(can_add_nodetype, ALL_NODETYPES))
            random.shuffle(possible_nodetypes)
            for nodetype in possible_nodetypes:
                new_available_types = (available_types - FrozenMultiset(
                    nodetype.INTYPES)) + FrozenMultiset(nodetype.OUTTYPES)
                if new_available_types in goalstates:
                    return [nodetype]
                elif predicate(new_available_types):
                    suffix = dfs(new_available_types)
                    if suffix:
                        return [nodetype] + suffix

        return dfs(FrozenMultiset([start_type]))
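The memoize decorator is not part of the snippet; below is a minimal sketch of the behavior it presumably provides, caching dfs results per available_types (which works because FrozenMultiset is hashable):

import functools

def memoize(fn):
    """Cache results keyed by the (hashable) positional arguments."""
    cache = {}

    @functools.wraps(fn)
    def wrapper(*args):
        if args not in cache:
            cache[args] = fn(*args)
        return cache[args]

    return wrapper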
Example #2
def choose_move(self, gamestate):
    # Pick the first move type that has not been performed yet.
    mt = [mt for mt in MoveType if not gamestate.own_state.moves[mt]][0]
    hand = gamestate.own_state.hand
    if mt == MoveType.Move1:
        # Play the single highest card.
        return Move1(cards=FrozenMultiset([max(hand)]))
    elif mt == MoveType.Move2:
        # Play the two lowest cards.
        card1 = min(hand)
        card2 = min(hand - FrozenMultiset([card1]))
        return Move2(cards=FrozenMultiset([card1, card2]))
    elif mt == MoveType.Move3:
        # Play three random cards.
        return Move3(cards=draw_cards(hand, 3))
    elif mt == MoveType.Move4:
        # Split four random cards into two pairs.
        cards_A = draw_cards(hand, 2)
        cards_B = draw_cards(hand - cards_A, 2)
        return Move4(cards_A=cards_A, cards_B=cards_B)
Example #3
def __init__(self, wants):
    self.TYPE = 'single'
    self.MAX_PITY = MAX_PITY
    self.BASE_5 = BASE_5
    self.INC_5 = INC_5
    self.wants = wants
    # The universe holds every wanted unit with its requested multiplicity.
    universe = Multiset()
    for unit, spec in wants.items():
        universe.update([unit] * spec['number'])
    self.universe = FrozenMultiset(universe)
    # Enumerate the proper sub-multisets of the universe, skipping the
    # duplicates that arise from combinations of repeated units.
    self.indices = []
    for i in range(len(self.universe)):
        for j in itertools.combinations(self.universe, i):
            if FrozenMultiset(j) not in self.indices:
                self.indices.append(FrozenMultiset(j))
Example #4
import pickle

from multiset import FrozenMultiset


def test_can_be_pickled():
    fms = FrozenMultiset('aabcd')

    pickled = pickle.dumps(fms)
    unpickled = pickle.loads(pickled)

    assert fms == unpickled
Example #5
        def dfs(available_types):
            """Returns a list of """
            def can_add_nodetype(nodetype):
                required_types = FrozenMultiset(nodetype.INTYPES)
                return required_types.issubset(available_types)

            # filter() returns an iterator in Python 3; materialize it so
            # random.shuffle() can permute it in place.
            possible_nodetypes = list(filter(can_add_nodetype, ALL_NODETYPES))
            random.shuffle(possible_nodetypes)
            for nodetype in possible_nodetypes:
                new_available_types = (available_types - FrozenMultiset(
                    nodetype.INTYPES)) + FrozenMultiset(nodetype.OUTTYPES)
                if new_available_types in goalstates:
                    return [nodetype]
                elif predicate(new_available_types):
                    suffix = dfs(new_available_types)
                    if suffix:
                        return [nodetype] + suffix
Example #6
def mutpairs(seq1: str, seq2: str):
    # add_ns() and h come from the enclosing module: add_ns presumably pads
    # the sequences so the context window below never slices out of range,
    # and h is the window's half-width.
    seq1N = add_ns(seq1)
    seq2N = add_ns(seq2)
    mut_idxs = [
        index for index, (base1, base2) in enumerate(zip(seq1N, seq2N))
        if base1 != base2
    ]
    # Pair each mutated position's context window in seq1 with the new base.
    return FrozenMultiset(
        (seq1N[i - h:i + h + 1], seq2N[i]) for i in mut_idxs)
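add_ns and h are free names resolved at module level in the source project. A plausible stand-in, assuming add_ns pads each sequence with 'N' characters; both definitions here are illustrative, not the project's:

h = 2  # assumed half-width of the mutation context window


def add_ns(seq: str) -> str:
    # Pad both ends so seq[i - h:i + h + 1] is always full-width.
    return "N" * h + seq + "N" * h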
Example #7
def test_newcounters():
    """Make sure the cmcounts found by new CollapsedTree init agree with old"""
    def pseudocount(mset):
        if (0, 1) in mset:
            return mset - {(0, 1)} + {(1, 1)}
        else:
            return mset

    for newforest, newforest_ctrees, oldforest in allforests:
        # To build cached _cm_counts
        newforest.ll(0.5, 0.5)
        newforest_ctrees.ll(0.5, 0.5)

        # Test just a single tree (the first in each forest)
        oldtreecounts = pseudocount(
            FrozenMultiset(oldforest.forest[0]._cm_list))
        newtreecounts = pseudocount(
            first(
                first(newforest._forest.get_trees()).weight_count(
                    **cmcount_dagfuncs)))
        newtreectreecounts = FrozenMultiset(
            dict(first(newforest_ctrees)._cm_counts))

        assert oldtreecounts == newtreecounts
        assert oldtreecounts == newtreectreecounts

        # Test for the whole forests all together
        oldcmcounts = FrozenMultiset([
            pseudocount(FrozenMultiset(ctree._cm_list))
            for ctree in oldforest.forest
        ])

        newcmcounts_ctrees = FrozenMultiset()
        for treecounts, a in newforest_ctrees._cm_countlist:
            newcmcounts_ctrees += FrozenMultiset(
                {FrozenMultiset(dict(treecounts)): a})

        newcmcounts = FrozenMultiset()
        for treecounts, a in newforest._cm_countlist:
            newcmcounts += FrozenMultiset(
                {FrozenMultiset(dict(treecounts)): a})

        assert newcmcounts_ctrees == newcmcounts
        assert oldcmcounts == newcmcounts_ctrees
Example #8
    def test_0_and_1(self):
        base_attack = 5
        atk_range = range(base_attack, base_attack + 1)
        deck = [cards.plus_0] + [cards.plus_1]
        statistics_by_atk = deck_analyzer.derive_statistics(
            FrozenMultiset(deck), atk_range)

        self.assertEqual(Fraction(5 + 6, 2),
                         statistics_by_atk[base_attack].normal.expected_damage)
        self.assertEqual(
            6, statistics_by_atk[base_attack].advantage.expected_damage)
Example #9
    def test_two_0s(self):
        base_attack = 5
        atk_range = range(base_attack, base_attack + 1)
        deck = [cards.plus_0] * 2
        statistics_by_atk = deck_analyzer.derive_statistics(
            FrozenMultiset(deck), atk_range)

        self.assertEqual(5,
                         statistics_by_atk[base_attack].normal.expected_damage)
        self.assertEqual(
            5, statistics_by_atk[base_attack].advantage.expected_damage)
Example #10
def multiply_units(*args):
    this_units = Multiset()
    for each_unit in args:
        if get_unit_type(each_unit) == 'multiply':
            this_units.update(get_complexunit_set(each_unit))
        else:
            this_units.add(each_unit)
    if len(this_units) == 1:
        return list(this_units)[0]
    else:
        return ('multiply', FrozenMultiset(this_units))
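A quick illustration of the flattening behavior, assuming get_unit_type returns 'multiply' for composite units and get_complexunit_set returns their factor multiset (both are defined elsewhere in the source project):

# A single unit passes through unchanged:
multiply_units('m')            # -> 'm'
# Two base units form a composite:
ms = multiply_units('m', 's')  # -> ('multiply', FrozenMultiset({'m': 1, 's': 1}))
# Composites are flattened rather than nested:
multiply_units(ms, 'kg')       # -> ('multiply', FrozenMultiset({'m': 1, 's': 1, 'kg': 1}))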
Example #11
def deal(oldstate=None, keys=None):
    if oldstate is not None:
        favors = [oldstate.own_state.favors, oldstate.opponent_state.favors]
        keys = [oldstate.own_state.key, oldstate.opponent_state.key]
    else:
        favors = [(0,) * 7 for _ in range(2)]
        assert keys is not None
    cards = all_cards
    empty = FrozenMultiset()
    states = []
    for i in range(2):
        hand = draw_cards(cards, 6)
        cards -= hand
        states.append(
            PlayerState(hand=hand,
                        played=empty,
                        hidden=empty,
                        discarded=empty,
                        favors=favors[i],
                        key=keys[i],
                        moves=(0,) * 4,
                        started=(i == 0)))
    return GameState(own_state=states[0], opponent_state=states[1], pile=cards)
Example #12
from multiset import FrozenMultiset


def test_frozen_hash_equal():
    ms1 = FrozenMultiset('ab')
    ms2 = FrozenMultiset('ab')

    assert hash(ms1) == hash(ms2)
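Equal hashes for equal contents are what make FrozenMultiset usable as a dictionary key, which several of the snippets above rely on. A small illustrative use:

from multiset import FrozenMultiset

# Anagrams share letter counts, so they collapse onto the same key.
index = {}
for word in ["listen", "silent", "enlist"]:
    index.setdefault(FrozenMultiset(word), []).append(word)

assert index[FrozenMultiset("listen")] == ["listen", "silent", "enlist"]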
Example #13
import string

from multiset import FrozenMultiset
from unidecode import unidecode


def _doc_normalize(d):
    """Normalizes a document (str) by transliterating accents, lowercasing,
    and stripping punctuation, returning a multiset of its words."""
    return FrozenMultiset(
        unidecode(d).lower().translate(
            str.maketrans("", "", string.punctuation)).split())
Example #14
class Card(IntEnum):
    Red2    = 0
    Yellow2 = 1
    Purple2 = 2
    Blue3   = 3
    Orange3 = 4
    Green4  = 5
    Pink5   = 6
    Unknown = 7

all_cards = FrozenMultiset({
    Card.Red2:    2,
    Card.Yellow2: 2,
    Card.Purple2: 2,
    Card.Blue3:   3,
    Card.Orange3: 3,
    Card.Green4:  4,
    Card.Pink5:   5
})
no_cards = FrozenMultiset()

# 2. Moves
# ========
#
# Moves are just namedtuples. Each move object (that is, object of type Move*)
# has an associated move type MoveType.Move*. This move type will be used as a
# key to look up if you performed this move type already (note that IntEnum
# objects behave like ints).

Move1 = namedtuple("Move1", ["cards"])
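A minimal sketch of the lookup the comment above describes, with an assumed MoveType IntEnum (the member values here are illustrative, not from the source):

from enum import IntEnum

class MoveType(IntEnum):
    Move1 = 0
    Move2 = 1
    Move3 = 2
    Move4 = 3

# IntEnum members behave like ints, so a move type can directly index the
# per-player tuple of "already performed?" flags (cf. moves=(0,) * 4 in
# Example #11 and gamestate.own_state.moves[mt] in Example #2):
moves_done = (1, 0, 0, 0)
assert moves_done[MoveType.Move1] == 1
assert not moves_done[MoveType.Move2]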
Example #15
def react_move3(self, gamestate, move, cards):
    # Pick the highest of the offered cards.
    return FrozenMultiset([max(cards)])
Example #16
    def create_chains(self):
        """Creates markov chains representing single pulls.

        This method constructs a markov chain containing information
        regarding the transitions between each state of possession, for
        a single pull, at a specific pity level. This is done for each
        pity level, and the chains are mapped to that pity level with a
        dictionary. A chain is also generated for the forced pity break
        that occurs at the 101st or 61st pull.
        """

        state_set = self.universe | frozenset('5')
        self.chain_indices = []
        for i in range(0, len(state_set)):
            for j in itertools.combinations(state_set, i):
                if FrozenMultiset(j) not in self.chain_indices:
                    self.chain_indices.append(FrozenMultiset(j))
        self.chain_indices.append(state_set)
        c_ref = self.chain_indices
        self.n_chain_db = {}
        for pity in range(0, MAX_PITY + 2):
            if MODE == 'Accurate':
                n_chain = np.zeros(
                    [len(self.chain_indices),
                     len(self.chain_indices)],
                    dtype=np.dtype(Dec))
            elif MODE == 'Approximate':
                n_chain = np.zeros(
                    [len(self.chain_indices),
                     len(self.chain_indices)])
            for vertical in self.chain_indices:
                available = state_set - vertical
                rate_5 = BASE_5 + pity * INC_5
                cover = set()
                n_rate_none = 1
                for unit in available:
                    if unit not in cover:
                        try:
                            if self.wants[unit]['rarity'] == '5':
                                rate_5 -= self.wants[unit][
                                    'base prob'] + pity * self.wants[unit][
                                        'prob inc']
                                cover.update([unit])
                        except KeyError:
                            pass
                for horizontal in reversed(self.chain_indices):
                    acquisition = horizontal - vertical
                    if len(acquisition) > 1:
                        pass
                    elif horizontal == vertical:
                        n_chain[c_ref.index(vertical)][c_ref.index(
                            horizontal)] = n_rate_none
                    elif acquisition == frozenset():
                        pass
                    elif acquisition <= available and len(
                            horizontal) == len(vertical) + 1:
                        attained = next(iter(acquisition))
                        if attained == '5':
                            n_chain[c_ref.index(vertical)][c_ref.index(
                                horizontal)] = rate_5
                            n_rate_none -= rate_5
                        else:
                            # 5* and non-5* wanted units are handled
                            # identically: use the unit's own pull rate.
                            rate = self.wants[attained][
                                'base prob'] + pity * self.wants[attained][
                                    'prob inc']
                            n_chain[c_ref.index(vertical)][c_ref.index(
                                horizontal)] = rate
                            n_rate_none -= rate
            self.n_chain_db[pity] = n_chain
        if MODE == 'Accurate':
            self.s_chain = np.zeros(
                [len(self.chain_indices),
                 len(self.chain_indices)],
                dtype=np.dtype(Dec))
        elif MODE == 'Approximate':
            self.s_chain = np.zeros(
                [len(self.chain_indices),
                 len(self.chain_indices)])
        for vertical in self.chain_indices:
            available = state_set - vertical
            cover = set()
            s_rate_5 = 1
            s_rate_none = 1
            for unit in available:
                if unit not in cover:
                    try:
                        if self.wants[unit]['rarity'] == '5':
                            s_rate_5 -= self.wants[unit]['spec prob']
                            cover.update([unit])
                    except KeyError:
                        pass
            for horizontal in reversed(self.chain_indices):
                acquisition = horizontal - vertical
                if len(acquisition) > 1:
                    pass
                elif horizontal == vertical:
                    self.s_chain[c_ref.index(vertical)][c_ref.index(
                        horizontal)] = s_rate_none
                elif acquisition == frozenset():
                    pass
                elif acquisition <= available and len(
                        horizontal) == len(vertical) + 1:
                    attained = next(iter(acquisition))
                    if attained == '5':
                        self.s_chain[c_ref.index(vertical)][c_ref.index(
                            horizontal)] = s_rate_5
                        s_rate_none -= s_rate_5
                    elif self.wants[attained]['rarity'] == '5':
                        self.s_chain[c_ref.index(vertical)][c_ref.index(
                            horizontal)] = self.wants[attained]['spec prob']
                        s_rate_none -= self.wants[attained]['spec prob']
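The state enumeration at the top of create_chains follows the same pattern as __init__ in Example #3; a toy run makes the resulting index list concrete (the exact order may vary with multiset iteration order):

import itertools

from multiset import FrozenMultiset

state_set = FrozenMultiset(['A', 'B', '5'])
indices = []
for i in range(len(state_set)):
    for j in itertools.combinations(state_set, i):
        if FrozenMultiset(j) not in indices:
            indices.append(FrozenMultiset(j))
indices.append(state_set)
# indices now runs from the empty state up to full possession, e.g.:
# [{}, {'A'}, {'B'}, {'5'}, {'A','B'}, {'A','5'}, {'B','5'}, {'A','B','5'}]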
Example #17
def get_default_deck() -> FrozenMultiset:
    return FrozenMultiset(_default_deck)
Example #18
def draw_cards(cards, n):
    # random.sample needs a sequence, so expand the multiset into a list.
    return FrozenMultiset(random.sample(list(cards), n))
Example #19
def censor_cards(cards):
    return FrozenMultiset({Card.Unknown: len(cards)})
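Censoring preserves only the card count, as a quick check shows (Card is the IntEnum from Example #14):

censor_cards(FrozenMultiset([Card.Red2, Card.Blue3, Card.Blue3]))
# -> FrozenMultiset({Card.Unknown: 3})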
Example #20
    def __init__(self, glycan_library: Optional[pd.DataFrame],
                 glycoforms: pd.Series, glycation: pd.Series) -> None:
        """
        Assemble the glycoform graph from peptide mapping
        and glycation frequency data.

        :param pd.DataFrame glycan_library: a glycan library
        :param pd.Series glycoforms: list of glycoforms with abundances/errors
        :param pd.Series glycation: list of glycations with abundances/errors
        :raises ValueError: if a glycan with unknown monosaccharide
                            composition is added
        :return: nothing
        :rtype: None
        """

        super().__init__()

        # regex for extracting the first glycoform from a string like
        # "A2G0F/A2G1F or A2G1F/A2G0F"
        re_first_glycoform = re.compile(r"([^\s]*)")

        # series with a monosaccharide set as index
        # and abundances as values
        exp_abundances = glycoforms.reset_index()
        exp_abundances["sugar_set"] = exp_abundances["index_col"].apply(
            lambda v: FrozenMultiset(v.split("/")))
        site_count = exp_abundances["sugar_set"].apply(len).unique()
        if site_count.size != 1:
            raise ValueError(
                translate(
                    "correction",
                    "Glycoforms have unequal number of glycosylation sites."))
        else:
            site_count = int(site_count[0])
            logging.info(
                translate("correction",
                          "Glycoprotein has {} sites.").format(site_count))
        exp_abundances = exp_abundances.set_index("sugar_set")["abundance"]

        # dict mapping hexose differences to abundances
        delta_ptm = {
            PTMComposition({"Hex": count}): abundance / 100
            for count, abundance in glycation.items() if count > 0
        }

        gp = Glycoprotein(sites=site_count, library=glycan_library)
        glycoform_glycans = set()
        for v in exp_abundances.index.values:
            glycoform_glycans |= set(v)

        if glycan_library is None:
            # fill the glycan library from glycans in glycoforms
            logging.info(
                translate(
                    "correction", "No glycan library specified. "
                    "Extracting glycans from glycoforms ..."))
            for g in glycoform_glycans:
                # add_glycan raises ValueError for glycans with unknown
                # monosaccharide composition; let it propagate.
                gp.add_glycan(g)
        else:
            # compare monosaccharide set of glycan library and glycoforms;
            # add glycans that only appear in the list of glycoforms
            # to the library, but this only works if they have a valid name
            library_glycans = {n.name for n in gp.glycan_library}

            glycans_only_in_library = library_glycans - glycoform_glycans
            if glycans_only_in_library:
                logging.warning(
                    translate(
                        "correction", "The following glycans only appear "
                        "in the glycan library, "
                        "but not in the list of glycoforms: {}.").format(
                            ", ".join(glycans_only_in_library)))

            glycans_only_in_glycoforms = glycoform_glycans - library_glycans
            if glycans_only_in_glycoforms:
                logging.warning(
                    translate(
                        "correction",
                        "The following glycans only appear in the list of "
                        "glycoforms, but not in the glycan library: {}. "
                        "They will be added to the library.").format(
                            ", ".join(glycans_only_in_glycoforms)))
                for g in glycans_only_in_glycoforms:
                    gp.add_glycan(g)

        for glycoform in gp.unique_glycoforms():
            # get the experimental abundance of a glycoform
            # use a default value of 0±0 if unavailable
            abundance = ufloat(0, 0)
            for name in glycoform.name.split(" or "):
                try:
                    abundance = exp_abundances[FrozenMultiset(name.split("/"))]
                    break
                except KeyError:
                    continue
            glycoform.abundance = abundance

            # add the current glycoform as a node to the graph;
            # generate an edge to previous nodes if the difference is described
            # in the dict of PTM differences
            self.add_node(glycoform,
                          abundance=abundance,
                          label=re_first_glycoform.match(
                              glycoform.name).group())
            for n in self:
                d = glycoform - n
                try:
                    c = delta_ptm[d]
                    source = n
                    sink = glycoform
                except KeyError:
                    try:
                        d = -d
                        c = delta_ptm[d]
                        source = glycoform
                        sink = n
                    except KeyError:
                        continue
                self.add_edge(source, sink, label=d.composition_str(), c=c)
Example #21
def can_add_nodetype(nodetype):
    # available_types is the FrozenMultiset from the enclosing dfs() scope
    # (see Examples #1 and #5).
    required_types = FrozenMultiset(nodetype.INTYPES)
    return required_types.issubset(available_types)
Example #22
def generate_deck(perks: Iterable[NamedPerk]) -> FrozenMultiset:
    deck = _default_deck.copy()
    for perk in perks:
        perk.deck_modification(deck)
    return FrozenMultiset(deck)
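A sketch of how a perk plugs in, assuming _default_deck is a mutable Multiset (so .copy() yields something deck_modification can mutate in place); the perk class and card names below are hypothetical:

from multiset import Multiset

class AddTwoPlusOnes:
    """Hypothetical perk: add two '+1' cards to the deck."""

    def deck_modification(self, deck: Multiset) -> None:
        deck.update(['plus_1', 'plus_1'])  # mutate the working copy

deck = generate_deck([AddTwoPlusOnes()])  # FrozenMultiset with two extra cards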
Example #23
def derive_statistics(deck: FrozenMultiset,
                      atk_range: range) -> Dict[int, DeckStatistics]:
    statistics = DeckStatistics(DrawSchemeStatistics(), DrawSchemeStatistics())

    # Possible optimization: Pre-calculate denominators involving deck_length and sequence_length.

    # Of all possible unique decks, there's still a lot of non-unique rolling and terminator collections.
    # That means an optimization is possible here, caching the result of this loop for given collections.
    # (This isn't done, but should be done at some point)
    deck_length = len(deck)
    rolling_cards = [card for card in deck if card.rolling]
    terminator_cards = [card for card in deck if not card.rolling]

    counted_terminator_cards = Counter(terminator_cards)

    for terminator_card, count in counted_terminator_cards.items():
        odds = Fraction(count, deck_length)
        terminated_aggregate = AggregatedLine.from_card(terminator_card)
        statistics.normal.add_aggregated_line(terminated_aggregate, odds)

    short_rolling_combinations = Counter()
    short_rolling_combinations.update(
        [FrozenMultiset(c) for c in itertools.combinations(rolling_cards, 1)])

    lengthy_rolling_combinations = Counter()
    for length in range(2, len(rolling_cards) + 1):
        lengthy_rolling_combinations.update([
            FrozenMultiset(c)
            for c in itertools.combinations(rolling_cards, length)
        ])

    # unique_rolling_combinations = short_rolling_combinations + lengthy_rolling_combinations
    # validate_permutation_count(rolling_cards, unique_rolling_combinations)  # Debug assertion

    short_rolling = terminate_rolling_combos(counted_terminator_cards,
                                             deck_length,
                                             short_rolling_combinations)
    for terminated_line in short_rolling:
        # Advantage gets odds times two because either order can happen and count when advantaged.
        statistics.advantage.add_aggregated_line(
            terminated_line.aggregated_line,
            terminated_line.odds * EITHER_ORDER)
        statistics.normal.add_aggregated_line(terminated_line.aggregated_line,
                                              terminated_line.odds)

    lengthy_rolling = terminate_rolling_combos(counted_terminator_cards,
                                               deck_length,
                                               lengthy_rolling_combinations)
    for terminated_line in lengthy_rolling:
        statistics.advantage.add_aggregated_line(
            terminated_line.aggregated_line, terminated_line.odds)
        statistics.normal.add_aggregated_line(terminated_line.aggregated_line,
                                              terminated_line.odds)

    two_card_odds_factor = Fraction(1, deck_length * (deck_length - 1))
    advantaged_terminator_pairs = list(
        itertools.combinations(terminator_cards, 2))
    deck_statistics_by_atk = {}
    for atk in atk_range:
        new_statistics = DeckStatistics(statistics.normal.make_copy(),
                                        statistics.advantage.make_copy())
        for terminator_pair in advantaged_terminator_pairs:
            add_terminal_adv_to_stats(new_statistics.advantage,
                                      terminator_pair, atk,
                                      two_card_odds_factor)

        deck_statistics_by_atk[atk] = new_statistics

        deck_statistics_by_atk[atk].normal.calculate_expected_damage(atk)
        deck_statistics_by_atk[atk].advantage.calculate_expected_damage(atk)

    # for i in atk_range:
    #     assert deck_statistics_by_atk[i].normal.total_odds == 1  # Debug assertion
    #     assert deck_statistics_by_atk[i].advantage.total_odds == 1  # Debug assertion

    return deck_statistics_by_atk
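The caching opportunity mentioned in the comments above is a natural fit for FrozenMultiset, since decks and card collections hash by content. A simple coarser variant caches whole-deck results; the wrapper below is a sketch, not part of the source (the finer-grained cache the comments describe would instead key on the rolling and terminator collections):

import functools

from multiset import FrozenMultiset

@functools.lru_cache(maxsize=None)
def cached_statistics(deck: FrozenMultiset, atk: int):
    # Two decks with identical card counts share one cache entry,
    # no matter how their card lists were originally built or ordered.
    return derive_statistics(deck, range(atk, atk + 1))[atk]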