def generate_valid_topsorted_node_dag(
        self,
        start_type=PossiblyRepeatedInputKey,
        end_type=GameEffect,
        predicate=lambda types: len(types) <= MAX_INTERMEDIATE_UNBOUND_VARS):
    goalstates = set()
    for n in range(MAX_GAME_EFFECTS_PER_POWER):
        goalstates.add(FrozenMultiset([end_type] * n))

    @memoize
    def dfs(available_types):
        """Returns a topologically sorted list of node types leading from
        ``available_types`` to a goal state, or None if none is found."""

        def can_add_nodetype(nodetype):
            required_types = FrozenMultiset(nodetype.INTYPES)
            return required_types.issubset(available_types)

        # filter() returns an iterator in Python 3; materialize it so that
        # random.shuffle can reorder it in place.
        possible_nodetypes = list(filter(can_add_nodetype, ALL_NODETYPES))
        random.shuffle(possible_nodetypes)
        for nodetype in possible_nodetypes:
            new_available_types = (available_types - FrozenMultiset(
                nodetype.INTYPES)) + FrozenMultiset(nodetype.OUTTYPES)
            if new_available_types in goalstates:
                return [nodetype]
            elif predicate(new_available_types):
                suffix = dfs(new_available_types)
                if suffix:
                    return [nodetype] + suffix

    return dfs(FrozenMultiset([start_type]))
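# The search above leans on two FrozenMultiset properties: hashability (so
# @memoize can cache visited states) and multiset arithmetic (consuming
# INTYPES, producing OUTTYPES). A minimal sketch of that state transition,
# using hypothetical type names:
from multiset import FrozenMultiset

state = FrozenMultiset(['Key', 'Key', 'Num'])
intypes = FrozenMultiset(['Key', 'Num'])   # consumed by a node
outtypes = FrozenMultiset(['Effect'])      # produced by a node
assert intypes.issubset(state)
assert (state - intypes) + outtypes == FrozenMultiset(['Key', 'Effect'])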
def choose_move(self, gamestate):
    # Pick the first move type that has not been played yet.
    mt = [mt for mt in MoveType if not gamestate.own_state.moves[mt]][0]
    hand = gamestate.own_state.hand
    if mt == MoveType.Move1:
        return Move1(cards=FrozenMultiset([max(hand)]))
    elif mt == MoveType.Move2:
        card1 = min(hand)
        card2 = min(hand - FrozenMultiset([card1]))
        return Move2(cards=FrozenMultiset([card1, card2]))
    elif mt == MoveType.Move3:
        return Move3(cards=draw_cards(hand, 3))
    elif mt == MoveType.Move4:
        cards_A = draw_cards(hand, 2)
        cards_B = draw_cards(hand - cards_A, 2)
        return Move4(cards_A=cards_A, cards_B=cards_B)
def test_can_be_pickled():
    fms = FrozenMultiset('aabcd')
    pickled = pickle.dumps(fms)
    unpickled = pickle.loads(pickled)
    assert fms == unpickled
def mutpairs(seq1: str, seq2: str):
    seq1N = add_ns(seq1)
    seq2N = add_ns(seq2)
    mut_idxs = [
        index
        for index, (base1, base2) in enumerate(zip(seq1N, seq2N))
        if base1 != base2
    ]
    # h is the context half-width around each mutated site (defined
    # elsewhere in the module).
    return FrozenMultiset(
        (seq1N[i - h:i + h + 1], seq2N[i]) for i in mut_idxs)
def test_newcounters():
    """Make sure the cmcounts found by new CollapsedTree init agree with old"""

    def pseudocount(mset):
        if (0, 1) in mset:
            return mset - {(0, 1)} + {(1, 1)}
        else:
            return mset

    for newforest, newforest_ctrees, oldforest in allforests:
        # To build cached _cm_counts
        newforest.ll(0.5, 0.5)
        newforest_ctrees.ll(0.5, 0.5)

        # Test just a single tree (the first in each forest)
        oldtreecounts = pseudocount(
            FrozenMultiset(oldforest.forest[0]._cm_list))
        newtreecounts = pseudocount(
            first(
                first(newforest._forest.get_trees()).weight_count(
                    **cmcount_dagfuncs)))
        newtreectreecounts = FrozenMultiset(
            dict(first(newforest_ctrees)._cm_counts))
        assert oldtreecounts == newtreecounts
        assert oldtreecounts == newtreectreecounts

        # Test for the whole forests all together
        oldcmcounts = FrozenMultiset([
            pseudocount(FrozenMultiset(ctree._cm_list))
            for ctree in oldforest.forest
        ])
        newcmcounts_ctrees = FrozenMultiset()
        for treecounts, a in newforest_ctrees._cm_countlist:
            newcmcounts_ctrees += FrozenMultiset(
                {FrozenMultiset(dict(treecounts)): a})
        newcmcounts = FrozenMultiset()
        for treecounts, a in newforest._cm_countlist:
            newcmcounts += FrozenMultiset(
                {FrozenMultiset(dict(treecounts)): a})
        assert newcmcounts_ctrees == newcmcounts
        assert oldcmcounts == newcmcounts_ctrees
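# The assertions above nest FrozenMultisets. Because a FrozenMultiset is
# hashable, it can itself be an element of another multiset, with its own
# multiplicity. A minimal sketch:
from multiset import FrozenMultiset

inner_a = FrozenMultiset({(1, 1): 2})
inner_b = FrozenMultiset({(0, 1): 1})
outer = FrozenMultiset({inner_a: 3, inner_b: 1})
assert outer[inner_a] == 3  # multiplicity lookup
assert inner_b in outer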
def test_two_0s(self):
    base_attack = 5
    atk_range = range(base_attack, base_attack + 1)
    deck = [cards.plus_0] * 2
    statistics_by_atk = deck_analyzer.derive_statistics(
        FrozenMultiset(deck), atk_range)
    self.assertEqual(5,
                     statistics_by_atk[base_attack].normal.expected_damage)
    self.assertEqual(
        5, statistics_by_atk[base_attack].advantage.expected_damage)
def test_0_and_1(self):
    base_attack = 5
    atk_range = range(base_attack, base_attack + 1)
    deck = [cards.plus_0] + [cards.plus_1]
    statistics_by_atk = deck_analyzer.derive_statistics(
        FrozenMultiset(deck), atk_range)
    self.assertEqual(Fraction(5 + 6, 2),
                     statistics_by_atk[base_attack].normal.expected_damage)
    self.assertEqual(
        6, statistics_by_atk[base_attack].advantage.expected_damage)
def multiply_units(*args):
    this_units = Multiset()
    for each_unit in args:
        if get_unit_type(each_unit) == 'multiply':
            # Flatten nested products into a single multiset of factors.
            this_units.update(get_complexunit_set(each_unit))
        else:
            this_units.add(each_unit)
    if len(this_units) == 1:
        return list(this_units)[0]
    else:
        return ('multiply', FrozenMultiset(this_units))
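# Storing the factors of a product as a FrozenMultiset makes composite units
# commutative: the same factors in any order compare (and hash) equal. A
# minimal sketch with hypothetical unit atoms:
from multiset import FrozenMultiset

product_a = ('multiply', FrozenMultiset(['kg', 'm']))
product_b = ('multiply', FrozenMultiset(['m', 'kg']))
assert product_a == product_b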
def sum_minus_logp(pairs: FrozenMultiset):
    # I have chosen to multiply substitution rate for central base
    # with rate of new base. Not sure if this is a good choice.
    if pairs:
        # Sort so the floating point summation order is deterministic.
        pairs = sorted(pairs.items())
        p_arr = [
            mult * (np.log(context_model[mer][0]) +
                    np.log(context_model[mer][1][newbase]))
            for (mer, newbase), mult in pairs
        ]
        return -sum(p_arr)
    else:
        return 0.0
def deal(oldstate=None, keys=None):
    if oldstate is not None:
        favors = [oldstate.own_state.favors, oldstate.opponent_state.favors]
        keys = [oldstate.own_state.key, oldstate.opponent_state.key]
    else:
        favors = [(0, ) * 7 for i in range(2)]
        assert keys is not None
    cards = all_cards
    empty = FrozenMultiset()
    states = []
    for i in range(2):
        hand = draw_cards(cards, 6)
        cards -= hand
        states.append(
            PlayerState(hand=hand,
                        played=empty,
                        hidden=empty,
                        discarded=empty,
                        favors=favors[i],
                        key=keys[i],
                        moves=(0, ) * 4,
                        started=(i == 0)))
    return GameState(own_state=states[0], opponent_state=states[1],
                     pile=cards)
def get_default_deck() -> FrozenMultiset:
    return FrozenMultiset(_default_deck)
def generate_deck(perks: Iterable[NamedPerk]) -> FrozenMultiset:
    deck = _default_deck.copy()
    for perk in perks:
        perk.deck_modification(deck)
    return FrozenMultiset(deck)
def test_frozen_hash_equal():
    ms1 = FrozenMultiset('ab')
    ms2 = FrozenMultiset('ab')
    assert hash(ms1) == hash(ms2)
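# Equal hashes are what make FrozenMultiset usable as a dict key or set
# member, independent of construction order. For example:
from multiset import FrozenMultiset

cache = {FrozenMultiset('ab'): 42}
assert cache[FrozenMultiset('ba')] == 42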
class SingleBlock:
    """Constructs the markov chain for single pulls.

    This class contains methods to construct several markov chains
    characterizing the results of singular pulls at each pity level. The
    information from these chains may then be synthesized into a much larger
    chain that characterizes the entire state space. Methods for performing
    certain operations, such as simulating a given number of pulls, or
    finding the time spent in transient states, are also included.

    Attributes
    ----------
    TYPE : str
        Indicates what this chain is meant to handle, singles or tenpulls.
        May be needed for external methods.
    MAX_PITY : int
        Indicates the number of pulls needed to reach maximum pity for the
        forced pity break.
    BASE_5 : Decimal
        Denotes the base 5* rate.
    INC_5 : Decimal
        Denotes the increment to the 5* rate based on pity.
    wants : {dict}
        A nested dictionary of a particular format, listing information on
        the units that the user wants from the gacha.
    universe : FrozenMultiset
        A multiset characterizing the set of units desired from the gacha.
    indices : [FrozenMultiset]
        A list of FrozenMultisets that represents the greater block structure
        of the complete markov chain.
    chain_indices : [FrozenMultiset]
        A list of FrozenMultisets that represents the state space of the
        single pull markov chains.
    n_chain_db : {int : array}
        A dictionary that maps pity level to the correct single pull markov
        chain.
    s_chain : array
        The single pull markov chain for the forced pity break.
    block_struc : array
        A submatrix of the full markov chain containing only the transient
        states.
    full_struc : array
        The full markov chain, containing all of the information on
        transition probabilities to and from each state.
    absorption_p : array
        The probability of absorption for a particular state of the markov
        chain. Appended to block_struc along with another similar vector to
        produce full_struc. This is made an attribute of the class so it can
        be used for diagnostics.

    Parameters
    ----------
    wants : {dict}
        A nested dictionary of a particular format, listing information on
        the units that the user wants from the gacha. This could conceivably
        be done manually, but generally it will be handled by the main
        program.
    """

    def __init__(self, wants):
        self.TYPE = 'single'
        self.MAX_PITY = MAX_PITY
        self.BASE_5 = BASE_5
        self.INC_5 = INC_5
        self.wants = wants
        self.universe = Multiset()
        for unit in wants.keys():
            for i in range(0, wants[unit]['number']):
                self.universe.update([unit])
        self.universe = FrozenMultiset(self.universe)
        self.indices = []
        for i in range(0, len(self.universe)):
            for j in itertools.combinations(self.universe, i):
                if FrozenMultiset(j) not in self.indices:
                    self.indices.append(FrozenMultiset(j))

    def generate(self):
        """Calls the constructor methods."""
        self.create_chains()
        self.construct_block()

    def construct_block(self):
        """Creates and concatenates blocks to form the markov chain.

        The states of the markov chain we wish to construct have a very
        simple structure that lends itself to iterative generation. The
        indices generated in __init__ are used as the axes of a block
        superstructure. The index of the vertical axis denotes your current
        block-set of states, and the index of the horizontal axis denotes
        the block-set of states you are transitioning into. Within each of
        these blocks, there is a common substructure defined by your pity
        rate, and the position of the block in the superstructure (read:
        the units attained in the transition to this state from your
        current state).

        This method handles the formation of the block superstructure, and
        calls get_block for the generation of the substructure within each
        block.
        """
        self.block_struc = np.array([])
        for vertical in self.indices:
            horz = np.array([])
            for horizontal in reversed(self.indices):
                block = self.get_block(horizontal, vertical)
                try:
                    horz = np.hstack((block, horz))
                except ValueError:
                    horz = block
            try:
                self.block_struc = np.vstack((self.block_struc, horz))
            except ValueError:
                self.block_struc = horz
        self.absorption_p, absorption_s = self.get_end()
        self.full_struc = np.hstack((self.block_struc, self.absorption_p))
        self.full_struc = np.vstack((self.full_struc, absorption_s))

    def create_chains(self):
        """Creates markov chains representing single pulls.

        This method constructs a markov chain containing information
        regarding the transitions between each state of possession, for a
        single pull, at a specific pity level. This is done for each pity
        level, and the chains are mapped to that pity level with a
        dictionary. A chain is also generated for the forced pity break
        that occurs at the 101st or 61st pull.
        """
        state_set = self.universe | frozenset('5')
        self.chain_indices = []
        for i in range(0, len(state_set)):
            for j in itertools.combinations(state_set, i):
                if FrozenMultiset(j) not in self.chain_indices:
                    self.chain_indices.append(FrozenMultiset(j))
        self.chain_indices.append(state_set)
        c_ref = self.chain_indices
        self.n_chain_db = {}
        for pity in range(0, MAX_PITY + 2):
            if MODE == 'Accurate':
                n_chain = np.zeros(
                    [len(self.chain_indices), len(self.chain_indices)],
                    dtype=np.dtype(Dec))
            if MODE == 'Approximate':
                n_chain = np.zeros(
                    [len(self.chain_indices), len(self.chain_indices)])
            for vertical in self.chain_indices:
                available = state_set - vertical
                rate_5 = BASE_5 + pity * INC_5
                cover = set()
                n_rate_none = 1
                for unit in available:
                    if unit not in cover:
                        try:
                            if self.wants[unit]['rarity'] == '5':
                                rate_5 -= self.wants[unit][
                                    'base prob'] + pity * self.wants[unit][
                                        'prob inc']
                            cover.update([unit])
                        except KeyError:
                            pass
                for horizontal in reversed(self.chain_indices):
                    acquisition = horizontal - vertical
                    if len(acquisition) > 1:
                        pass
                    elif horizontal == vertical:
                        n_chain[c_ref.index(vertical)][c_ref.index(
                            horizontal)] = n_rate_none
                    elif acquisition == frozenset():
                        pass
                    elif acquisition <= available and len(
                            horizontal) == len(vertical) + 1:
                        attained = next(iter(acquisition))
                        if attained == '5':
                            n_chain[c_ref.index(vertical)][c_ref.index(
                                horizontal)] = rate_5
                            n_rate_none -= rate_5
                        elif self.wants[attained]['rarity'] == '5':
                            rate = self.wants[attained][
                                'base prob'] + pity * self.wants[attained][
                                    'prob inc']
                            n_chain[c_ref.index(vertical)][c_ref.index(
                                horizontal)] = rate
                            n_rate_none -= rate
                        else:
                            rate = self.wants[attained][
                                'base prob'] + pity * self.wants[attained][
                                    'prob inc']
                            n_chain[c_ref.index(vertical)][c_ref.index(
                                horizontal)] = rate
                            n_rate_none -= rate
            self.n_chain_db[pity] = n_chain
        if MODE == 'Accurate':
            self.s_chain = np.zeros(
                [len(self.chain_indices), len(self.chain_indices)],
                dtype=np.dtype(Dec))
        elif MODE == 'Approximate':
            self.s_chain = np.zeros(
                [len(self.chain_indices), len(self.chain_indices)])
        for vertical in self.chain_indices:
            available = state_set - vertical
            cover = set()
            s_rate_5 = 1
            s_rate_none = 1
            for unit in available:
                if unit not in cover:
                    try:
                        if self.wants[unit]['rarity'] == '5':
                            s_rate_5 -= self.wants[unit]['spec prob']
                        cover.update([unit])
                    except KeyError:
                        pass
            for horizontal in reversed(self.chain_indices):
                acquisition = horizontal - vertical
                if len(acquisition) > 1:
                    pass
                elif horizontal == vertical:
                    self.s_chain[c_ref.index(vertical)][c_ref.index(
                        horizontal)] = s_rate_none
                elif acquisition == frozenset():
                    pass
                elif acquisition <= available and len(
                        horizontal) == len(vertical) + 1:
                    attained = next(iter(acquisition))
                    if attained == '5':
                        self.s_chain[c_ref.index(vertical)][c_ref.index(
                            horizontal)] = s_rate_5
                        s_rate_none -= s_rate_5
                    elif self.wants[attained]['rarity'] == '5':
                        self.s_chain[c_ref.index(vertical)][c_ref.index(
                            horizontal)] = self.wants[attained]['spec prob']
                        s_rate_none -= self.wants[attained]['spec prob']

    def get_block(self, horizontal, vertical):
        """Creates blocks used to construct the final markov chain.

        This method generates the substructure mentioned in construct_block.
        It uses the horizontal and vertical indices to determine which units
        are acquired in the transition to this set of states, and uses the
        rarities of the units acquired to determine the form of this
        particular block. The substates here are defined by pity rate, and
        the transition probabilities are retrieved from n_chain_db and
        s_chain. Please note that although the entries of this matrix are
        stochastic, it is merely a small part of a markov chain - not a
        markov chain itself.

        Parameters
        ----------
        horizontal : FrozenMultiset
            A multiset representing the horizontal index of the block
            superstructure under construction.
        vertical : FrozenMultiset
            A multiset representing the vertical index of the block
            superstructure under construction.

        Returns
        -------
        block : numpy array
            The matrix needed to fill the [vertical][horizontal] index of
            the block matrix under construction.
        """
        gained = horizontal - vertical
        block = np.zeros([MAX_PITY * 10 + 2, MAX_PITY * 10 + 2])
        if len(gained) > 1:
            pass
        else:
            flag_5 = False
            for unit in gained:
                if self.wants[unit]['rarity'] == '5':
                    flag_5 = True
            vert_index = self.chain_indices.index(vertical)
            horz_non_index = self.chain_indices.index(horizontal)
            horz_5_index = self.chain_indices.index(horizontal
                                                    | frozenset('5'))
            for pity in range(0, MAX_PITY * 10 + 2):
                if pity == 0:
                    pity_shift = 1
                else:
                    pity_shift = pity
                if flag_5 and pity != MAX_PITY * 10 + 1:
                    block[pity][0] = (
                        self.n_chain_db[(pity_shift - 1) //
                                        10][vert_index][horz_5_index] +
                        self.n_chain_db[(pity_shift - 1) //
                                        10][vert_index][horz_non_index])
                elif flag_5 and pity == MAX_PITY * 10 + 1:
                    block[pity][0] = (
                        self.s_chain[vert_index][horz_5_index] +
                        self.s_chain[vert_index][horz_non_index])
                elif pity < MAX_PITY * 10 + 1:
                    block[pity][0] = self.n_chain_db[
                        (pity_shift - 1) // 10][vert_index][horz_5_index]
                    block[pity][pity + 1] = self.n_chain_db[
                        (pity_shift - 1) // 10][vert_index][horz_non_index]
                elif pity == MAX_PITY * 10 + 1:
                    block[pity][0] = self.s_chain[vert_index][horz_5_index]
        return block

    def get_end(self):  # note: find some way to make this more accurate
        """Creates the vector of escape probabilities for the chain.

        Finds the probability of entering the final state of the markov
        chain from any particular state, and constructs two arrays to store
        that information.

        NOTE: This is achieved very lazily, so the probability of absorption
        for a particular state may end up being larger than it is supposed
        to be. In trial runs, this has not caused any failures in
        computation, but it could conceivably do so. This is intended to be
        fixed in a future update.

        Returns
        -------
        absorption_p : array
            An n x 1 array containing the escape probabilities of the full
            markov chain, where n is the vertical size of said markov chain.
        absorption_s : array
            An array that designates the final state as absorbing in the
            full markov chain.
        """
        absorption_p = np.zeros([len(self.block_struc), 1])
        absorption_s = np.zeros([1, len(self.block_struc) + 1])
        absorption_s[0][-1] = 1
        for row in range(0, len(self.block_struc)):
            absorption_p[row] = 1 - sum(self.block_struc[row])
        return absorption_p, absorption_s

    def hitting_time(self):
        """Finds the expected hitting time by inversion."""
        iden = np.eye(len(self.block_struc))
        subt = iden - self.block_struc
        final = np.linalg.inv(subt)
        t = sum(final[0])
        print(f'On average it will take {t} {self.TYPE}pulls '
              f'to achieve the desired units.')

    def simulate(self, pull_num):
        """Simulates a given number of pulls.

        Parameters
        ----------
        pull_num : int
            The number of pulls to simulate.
        """
        simulated = np.linalg.matrix_power(self.full_struc, pull_num)
        index = self.indices + [self.universe]
        groups = len(index) - 1
        self.output(simulated, groups, index)

    def onebyone(self, mode='manual'):
        """A method to simulate pulls one at a time.

        Determines the state of the markov chain at each step, and if
        desired displays the probability of being in each state at that
        step.

        NOTE: state here does not literally refer to all states of the
        markov chain - referring instead to the states of possession that
        define the block superstructure of the chain.

        This method is also used to determine the number of steps needed to
        achieve a certain probability of being in the final absorbing state.

        Parameters
        ----------
        mode : str(='manual')
            Determines how the method is run, and what output it produces.
            'manual' indicates that the user is to observe the output at
            each step, and 'auto' indicates that the user is not interested
            in the results of each step, and only wishes to know how many
            steps are needed to reach a certain probability of being in the
            final state.
        """
        pull_count = 0
        step = np.copy(self.full_struc)
        initial = np.zeros([len(step), len(step)])
        initial[0][0] = 1
        index = self.indices + [self.universe]
        groups = len(index) - 1
        stop = False
        if mode == 'manual':
            print('Press enter to continue pulling.')
            print('Input "stop" when you wish to stop.')
            stop = self.manual_proceed()
            while not stop:
                if pull_count == 0:
                    self.output(initial, groups, index)
                elif pull_count == 1:
                    self.output(step, groups, index)
                else:
                    step = self.full_struc @ step
                    self.output(step, groups, index)
                print(pull_count)
                pull_count += 1
                stop = self.manual_proceed()
        elif mode == 'auto':
            correct = False
            while not correct:
                b_prob = input(
                    'Please enter your desired probability of success: ')
                checkquit(b_prob)
                try:
                    d_prob = float(b_prob)
                except ValueError:
                    print('You must enter a number.')
                    continue
                if d_prob > 1:
                    print('You can not have a probability greater than 1.')
                elif d_prob == 1:
                    print('A 100 percent success rate is unreasonable.')
                elif d_prob < 0:
                    print('You can not have a probability less than 0.')
                elif 0 <= d_prob < 1:
                    correct = True
                    print('OK.')
                else:
                    print("That input should be valid, but it isn't. "
                          "Try again.")
            while not stop:
                if pull_count == 0:
                    success = 0
                    curr = initial
                elif pull_count == 1:
                    success = step[0][len(step) - 1]
                    curr = step
                else:
                    step = self.full_struc @ step
                    success = step[0][len(step) - 1]
                    curr = step
                if success >= float(b_prob):
                    stop = True
                    print(f'It should take you {pull_count} pulls '
                          f'to achieve the desired success rate.')
                    see_out = input(
                        'Press the enter key to see the breakdown. ')
                    checkquit(see_out)
                    self.output(curr, groups, index)
                pull_count += 1

    def output(self, step, groups, index):
        """Displays the probability of being in each state.

        Parameters
        ----------
        step : array
            The state of the markov chain at the current step.
            Note - does not care about the actual step count.
        groups : int
            The number of 'groups' that the states of the chain can be
            lumped into. Alternatively, the number of states used to
            generate the block superstructure of the chain.
        index : [FrozenMultiset]
            A list of sets describing the states used to generate the block
            superstructure of the markov chain. Each of the actual states is
            assigned to one of these superstates for display purposes.
        """
        probs = step[0]
        parts = len(probs) - 1
        chunk = parts // groups
        n = 0
        for i in range(0, groups):
            if i == 0:
                attained = ['None']
            else:
                attained = self.disp_conv(index[i])
            print(f'P{attained} = {sum(probs[n:n+chunk])*100}%')
            n = n + chunk
        print(f'P{self.disp_conv(index[-1])} = {probs[-1]*100}%')

    def manual_proceed(self):
        """Manual advancement of one-by-one pulls."""
        another_one = input(': ')
        checkquit(another_one)
        if another_one.lower() == 'stop':
            return True
        return False

    def disp_conv(self, index):
        """Provides prettier state displays.

        Parameters
        ----------
        index : FrozenMultiset
            A multiset describing one of the states used to generate the
            block superstructure of the markov chain.

        Returns
        -------
        [str]
            A sorted list containing the simplified strings.
        """
        out = []
        for (element, multiplicity) in index.items():
            cons = element + '(' + str(multiplicity) + ')'
            out.append(cons)
        return sorted(out)
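# The index-building pattern used in __init__ and create_chains above
# enumerates all distinct proper sub-multisets of the universe, relying on
# FrozenMultiset equality to deduplicate combinations. A minimal standalone
# sketch with a hypothetical two-unit universe:
import itertools
from multiset import FrozenMultiset

universe = FrozenMultiset({'unit_a': 2, 'unit_b': 1})
indices = []
for i in range(len(universe)):
    for combo in itertools.combinations(universe, i):
        fms = FrozenMultiset(combo)
        if fms not in indices:
            indices.append(fms)
# indices now holds: {}, {unit_a}, {unit_b}, {unit_a, unit_a},
# {unit_a, unit_b} - the full universe itself is appended separately.
assert len(indices) == 5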
def _doc_normalize(d):
    """Performs various operations on a document (str) to normalize it,
    returning a multiset of its words."""
    return FrozenMultiset(
        unidecode(d).lower().translate(
            str.maketrans("", "", string.punctuation)).split())
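# A normalized document is a bag of words, so multiset intersection gives a
# simple overlap measure between two documents that respects word counts.
# A minimal sketch (assuming the unidecode package is installed):
from multiset import FrozenMultiset

a = _doc_normalize("The cat sat on the mat.")
b = _doc_normalize("The dog sat on the log.")
overlap = a & b  # multiset intersection keeps shared multiplicities
assert overlap == FrozenMultiset(['the', 'the', 'sat', 'on'])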
def __init__(self, glycan_library: Optional[pd.DataFrame],
             glycoforms: pd.Series, glycation: pd.Series) -> None:
    """
    Assemble the glycoform graph from peptide mapping
    and glycation frequency data.

    :param pd.DataFrame glycan_library: a glycan library
    :param pd.Series glycoforms: list of glycoforms with abundances/errors
    :param pd.Series glycation: list of glycations with abundances/errors
    :raises ValueError: if a glycan with unknown monosaccharide
                        composition is added
    :return: nothing
    :rtype: None
    """
    super().__init__()

    # regex for extracting the first glycoform from a string like
    # "A2G0F/A2G1F or A2G1F/A2G0F"
    re_first_glycoform = re.compile(r"([^\s]*)")

    # series with a monosaccharide set as index
    # and abundances as values
    exp_abundances = glycoforms.reset_index()
    exp_abundances["sugar_set"] = exp_abundances["index_col"].apply(
        lambda v: FrozenMultiset(v.split("/")))
    site_count = exp_abundances["sugar_set"].apply(len).unique()
    if site_count.size != 1:
        raise ValueError(
            translate(
                "correction",
                "Glycoforms have unequal number of glycosylation sites."))
    else:
        site_count = int(site_count[0])
        logging.info(
            translate("correction",
                      "Glycoprotein has {} sites.").format(site_count))
    exp_abundances = exp_abundances.set_index("sugar_set")["abundance"]

    # dict mapping hexose differences to abundances
    delta_ptm = {
        PTMComposition({"Hex": count}): abundance / 100
        for count, abundance in glycation.iteritems() if count > 0
    }

    gp = Glycoprotein(sites=site_count, library=glycan_library)
    glycoform_glycans = set()
    for v in exp_abundances.index.values:
        glycoform_glycans |= set(v)

    if glycan_library is None:
        # fill the glycan library from glycans in glycoforms
        logging.info(
            translate(
                "correction", "No glycan library specified. "
                "Extracting glycans from glycoforms ..."))
        for g in glycoform_glycans:
            try:
                gp.add_glycan(g)
            except ValueError as e:
                raise e
    else:
        # compare monosaccharide set of glycan library and glycoforms;
        # add glycans that only appear in the list of glycoforms
        # to the library, but this only works if they have a valid name
        library_glycans = set([n.name for n in gp.glycan_library])

        glycans_only_in_library = library_glycans - glycoform_glycans
        if glycans_only_in_library:
            logging.warning(
                translate(
                    "correction", "The following glycans only appear "
                    "in the glycan library, "
                    "but not in the list of glycoforms: {}.").format(
                        ", ".join(glycans_only_in_library)))

        glycans_only_in_glycoforms = glycoform_glycans - library_glycans
        if glycans_only_in_glycoforms:
            logging.warning(
                translate(
                    "correction",
                    "The following glycans only appear in the list of "
                    "glycoforms, but not in the glycan library: {}. "
                    "They will be added to the library.").format(
                        ", ".join(glycans_only_in_glycoforms)))
            for g in glycans_only_in_glycoforms:
                try:
                    gp.add_glycan(g)
                except ValueError as e:
                    raise e

    for glycoform in gp.unique_glycoforms():
        # get the experimental abundance of a glycoform
        # use a default value of 0±0 if unavailable
        abundance = ufloat(0, 0)
        for name in glycoform.name.split(" or "):
            try:
                abundance = exp_abundances[FrozenMultiset(name.split("/"))]
                break
            except KeyError:
                continue
        glycoform.abundance = abundance

        # add the current glycoform as a node to the graph;
        # generate an edge to previous nodes if the difference is described
        # in the dict of PTM differences
        self.add_node(glycoform,
                      abundance=abundance,
                      label=re_first_glycoform.match(glycoform.name).group())
        for n in self:
            d = glycoform - n
            try:
                c = delta_ptm[d]
                source = n
                sink = glycoform
            except KeyError:
                try:
                    d = -d
                    c = delta_ptm[d]
                    source = glycoform
                    sink = n
                except KeyError:
                    continue
            self.add_edge(source, sink, label=d.composition_str(), c=c)
class Card(IntEnum):
    Red2 = 0
    Yellow2 = 1
    Purple2 = 2
    Blue3 = 3
    Orange3 = 4
    Green4 = 5
    Pink5 = 6
    Unknown = 7


all_cards = FrozenMultiset({
    Card.Red2: 2,
    Card.Yellow2: 2,
    Card.Purple2: 2,
    Card.Blue3: 3,
    Card.Orange3: 3,
    Card.Green4: 4,
    Card.Pink5: 5
})

no_cards = FrozenMultiset()

# 2. Moves
# ========
#
# Moves are just namedtuples. Each move object (that is, object of type Move*)
# has an associated move type MoveType.Move*. This move type will be used as a
# key to look up if you performed this move type already (note that IntEnum
# objects behave like ints).

Move1 = namedtuple("Move1", ["cards"])
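# As the comment above notes, IntEnum members behave like ints, so a
# MoveType can index directly into a per-player tuple of "already played"
# flags. A sketch assuming MoveType.Move1 has value 0 (MoveType itself is
# defined elsewhere in this module):
moves_played = (1, 0, 0, 0)  # hypothetical flags, one per move type
assert moves_played[MoveType.Move1] == 1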
def react_move3(self, gamestate, move, cards):
    return FrozenMultiset([max(cards)])
def censor_cards(cards):
    return FrozenMultiset({Card.Unknown: len(cards)})
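# Censoring replaces every card with Card.Unknown while preserving the
# count, so an opponent sees how many cards are hidden but not which.
# For example:
hidden = censor_cards(FrozenMultiset([Card.Red2, Card.Pink5]))
assert hidden == FrozenMultiset({Card.Unknown: 2})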
def derive_statistics(deck: FrozenMultiset,
                      atk_range: range) -> Dict[int, DeckStatistics]:
    statistics = DeckStatistics(DrawSchemeStatistics(),
                                DrawSchemeStatistics())
    # Possible optimization: Pre-calculate denominators involving
    # deck_length and sequence_length.
    # Of all possible unique decks, there's still a lot of non-unique
    # rolling and terminator collections. That means an optimization is
    # possible here, caching the result of this loop for given collections.
    # (This isn't done, but should be done at some point)
    deck_length = len(deck)
    rolling_cards = [card for card in deck if card.rolling]
    terminator_cards = [card for card in deck if not card.rolling]

    counted_terminator_cards = Counter(terminator_cards)
    for terminator_card, count in counted_terminator_cards.items():
        odds = Fraction(count, deck_length)
        terminated_aggregate = AggregatedLine.from_card(terminator_card)
        statistics.normal.add_aggregated_line(terminated_aggregate, odds)

    short_rolling_combinations = Counter()
    short_rolling_combinations.update(
        [FrozenMultiset(c) for c in itertools.combinations(rolling_cards, 1)])
    lengthy_rolling_combinations = Counter()
    for length in range(2, len(rolling_cards) + 1):
        lengthy_rolling_combinations.update([
            FrozenMultiset(c)
            for c in itertools.combinations(rolling_cards, length)
        ])
    # unique_rolling_combinations = (short_rolling_combinations +
    #                                lengthy_rolling_combinations)
    # validate_permutation_count(rolling_cards, unique_rolling_combinations)  # Debug assertion

    short_rolling = terminate_rolling_combos(counted_terminator_cards,
                                             deck_length,
                                             short_rolling_combinations)
    for terminated_line in short_rolling:
        # Advantage gets odds times two because either order can happen and
        # count when advantaged.
        statistics.advantage.add_aggregated_line(
            terminated_line.aggregated_line,
            terminated_line.odds * EITHER_ORDER)
        statistics.normal.add_aggregated_line(terminated_line.aggregated_line,
                                              terminated_line.odds)

    lengthy_rolling = terminate_rolling_combos(counted_terminator_cards,
                                               deck_length,
                                               lengthy_rolling_combinations)
    for terminated_line in lengthy_rolling:
        statistics.advantage.add_aggregated_line(
            terminated_line.aggregated_line, terminated_line.odds)
        statistics.normal.add_aggregated_line(terminated_line.aggregated_line,
                                              terminated_line.odds)

    two_card_odds_factor = Fraction(1, deck_length * (deck_length - 1))
    advantaged_terminator_pairs = list(
        itertools.combinations(terminator_cards, 2))

    deck_statistics_by_atk = {}
    for atk in atk_range:
        new_statistics = DeckStatistics(statistics.normal.make_copy(),
                                        statistics.advantage.make_copy())
        for terminator_pair in advantaged_terminator_pairs:
            add_terminal_adv_to_stats(new_statistics.advantage,
                                      terminator_pair, atk,
                                      two_card_odds_factor)
        deck_statistics_by_atk[atk] = new_statistics
        deck_statistics_by_atk[atk].normal.calculate_expected_damage(atk)
        deck_statistics_by_atk[atk].advantage.calculate_expected_damage(atk)

    # for i in atk_range:
    #     assert deck_statistics_by_atk[i].normal.total_odds == 1  # Debug assertion
    #     assert deck_statistics_by_atk[i].advantage.total_odds == 1  # Debug assertion
    return deck_statistics_by_atk
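# The caching opportunity noted in the comments above would hinge on
# FrozenMultiset being hashable: identical rolling/terminator collections
# drawn from different decks could share one cached result. A hypothetical
# sketch of that shape, not part of the module:
from functools import lru_cache
from multiset import FrozenMultiset

@lru_cache(maxsize=None)
def _cached_analysis(rolling: FrozenMultiset, terminators: FrozenMultiset):
    # Placeholder for the expensive combination/termination work; the point
    # is only that FrozenMultiset arguments are valid lru_cache keys.
    return len(rolling), len(terminators)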
def draw_cards(cards, n):
    return FrozenMultiset(random.sample(list(cards), n))
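# draw_cards samples without replacement: list(cards) expands each card to
# its multiplicity, so duplicates can be drawn but never more copies than
# the multiset holds. For example:
pile = FrozenMultiset({Card.Pink5: 5, Card.Red2: 2})
hand = draw_cards(pile, 6)
assert len(hand) == 6
assert hand.issubset(pile)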