def save_game(self):
    """Flag seats that folded preflop and now read 'sit out', then persist the game.

    A player whose balance text scrapes as 'sit out' after folding preflop is
    marked with ``sitout=True`` before the hand is saved via ES.
    """
    engine = self.engine
    for seat, seat_data in engine.data.items():
        folded_preflop = any(entry['action'] == 'f' for entry in seat_data['preflop'])
        if not folded_preflop:
            continue
        balance_txt = self.site.parse_balances(self.img, seat, True)
        if balance_txt == 'sit out':
            seat_data['sitout'] = True
    ES.save_game(self.players, engine.data, engine.site_name,
                 engine.vs, engine.board)
def __init__(self, site_name, button, players, sb, bb, ante=0, *args, **kwargs):
    """Initialise engine state for a hand.

    Args:
        site_name: Name of the poker site being played.
        button: Seat number holding the dealer button.
        players: Mapping of seat -> player info dict (must carry 'status').
        sb: Small blind amount.
        bb: Big blind amount.
        ante: Ante amount (default 0).
        **kwargs: Optional resumed state: 'data', 'board', 'pot', 'phase',
            'preflop', 'flop', 'turn', 'river', 'showdown'.
    """
    logger.info(f'Engine site_name: {site_name}')
    logger.info(f'Engine button: {button}')
    logger.info(f'Engine players: {len(players)}')
    logger.info(f'Engine kwargs: {kwargs}')
    self.site_name = site_name
    self.button = button
    self.players = players
    self.sb_amt = sb
    self.bb_amt = bb
    self.ante = ante
    self.go_to_showdown = False
    self.mc = False
    # BUG FIX: kwargs is a plain dict, so `hasattr(kwargs, 'data')` was always
    # False and a caller-supplied 'data' snapshot was silently discarded.
    # Membership test is the correct check.
    if 'data' in kwargs:
        self.data = kwargs['data']
    else:
        self.data = {
            s: {
                'status': 'in' if p.get('status') else 'out',
                'sitout': False,
                'hand': ['__', '__'] if p.get('status') else [' ', ' '],
                'contrib': 0,
                'matched': 0,
                'preflop': [],
                'flop': [],
                'turn': [],
                'river': [],
                'showdown': [],  # for error at stats
                'is_SB': False,
                'is_BB': False,
            }
            for s, p in players.items()
        }
    # number of active opponents/seats still in the hand
    self.vs = sum(1 for d in self.data.values() if d['status'] == 'in')
    self.rivals = self.vs
    self.winner = None
    # leave empty: scraper compares length
    self.board = kwargs.get('board', [])
    self.pot = kwargs.get('pot', 0)
    self.phase = kwargs.get('phase', self.PHASE_PREFLOP)
    self.preflop = kwargs.get('preflop', {})
    self.flop = kwargs.get('flop', {})
    self.turn = kwargs.get('turn', {})
    self.river = kwargs.get('river', {})
    self.showdown = kwargs.get('showdown', {})
    self.q = None
    self.pe_equities = {}
    # hand_strength = PE.hand_strength(['__', '__'], self.board, self.rivals)
    # seed per-seat stats, hand range, and a default strength for live seats
    for s, d in self.data.items():
        if 'in' not in d['status']:
            continue
        self.data[s]['stats'] = ES.player_stats(self, s)
        self.players[s]['hand_range'] = ES.cut_hand_range(self.data[s]['stats'])
        self.data[s]['strength'] = 0.20
class Ingestor:
    """Cleans, parses, and uploads datasets plus their metadocument to Elasticsearch."""

    def __init__(self):
        # Elasticsearch client wrapper used for all uploads.
        self.es = ES()

    def ingest(self, dataset_name, dataset_source, dataset_description,
               dataset_author, dataset_notes, dataset_creation_time,
               dataset_tags, online=True):
        """
        Clean, parse, and upload a dataset and its metadocument to our database.

        :param dataset_name: Name of the dataset.
        :param dataset_source: Source of the dataset (i.e. filename or URL).
        :param dataset_description: Description of the dataset.
        :param dataset_author: Author of the dataset.
        :param dataset_notes: Any notes on the dataset by us.
        :param dataset_creation_time: Time the dataset was created.
        :param dataset_tags: Tags to attach to the dataset's metadocument.
        :param online: boolean of whether the data is a local file (offline)
            or a URL (online).
        """
        if not CSVParser.is_csv(dataset_source):
            # BUG FIX: previously execution fell through to the metadocument
            # build below and crashed with NameError on dataset_attributes /
            # es_documents. Bail out early for unsupported formats.
            print("Unsupported file format.")
            return

        if online:
            raw_documents = CSVParser.convert_csv_url_to_json_list(
                dataset_source)
        else:
            raw_documents = CSVParser.convert_csv_file_to_json_list(
                dataset_source)

        # attribute names are taken from the first row's keys
        dataset_attributes = raw_documents[0].keys()
        es_documents = [
            Document(dataset_name, raw_document).get_es_document()
            for raw_document in raw_documents
        ]
        self.es.bulk_upload(es_documents)

        metadocument = {
            "dataset_name": dataset_name,
            "dataset_description": dataset_description,
            "dataset_notes": dataset_notes,
            "dataset_keywords": None,  # TODO: Add explicit keywords for datasets through ML
            "dataset_tags": dataset_tags,
            "dataset_author": dataset_author,
            "time_ingested": calendar.timegm(time.gmtime()),
            "time_created": dataset_creation_time,
            "dataset_source": dataset_source,
            "dataset_attributes": dataset_attributes,
            "dataset_num_docs": len(es_documents),
        }
        self.es.bulk_upload(
            [Metadocument(metadocument, dataset_name).get_es_document()])
def test_showdown_hs(self):
    """Percentile hand strengths are ordered and the raw query returns 200 scored hits."""
    seats = {n: {'name': 'joe', 'balance': 1000, 'status': 1} for n in (1, 2)}
    e = Engine('CoinPoker', 1, seats, 50, 100, 0)
    # preflop: p1 raises, p2 calls; flop: p2 checks, p1 bets
    e.available_actions()  # p1
    e.do(['r', 100])
    e.available_actions()  # p2
    e.do(['c'])
    e.available_actions()  # p2
    e.do(['k'])
    e.available_actions()  # p1
    e.do(['b', 200])
    e.available_actions()
    # p2 has:
    # preflop_1 = l
    # preflop_2 = c
    # flop_1 = k
    median_hs = ES.showdown_hs(e, e.s, percentile=50)
    assert median_hs is not None
    assert 0 < median_hs < 1
    low_hs = ES.showdown_hs(e, e.s, percentile=10)
    assert low_hs < median_hs
    high_hs = ES.showdown_hs(e, e.s, percentile=90)
    assert high_hs > median_hs
    res = ES.showdown_hs(e, e.s, 200)
    hits = res.hits.hits
    assert len(hits) == 200
    assert hits[0]['_score'] > 4
    assert hits[-1]['_score'] > 0
def test_player_stats(self):
    """player_stats returns an action histogram (>=4 buckets) and hs in (0, 100)."""
    seats = {
        1: {'name': 'joe', 'balance': 1000, 'status': 1},
        2: {'name': 'jane', 'balance': 1000, 'status': 1},
    }
    e = Engine('CoinPoker', 1, seats, 50, 100, 0)
    e.available_actions()
    stats = ES.player_stats(e, e.s)
    assert 'actions' in stats
    assert len(stats['actions']) >= 4
    assert 0 < stats['hs'] < 100
def test_showdown_hs(self):
    """Showdown HS: 10th < 50th < 90th percentile; 200-hit query is fully scored."""
    e = Engine(
        'CoinPoker',
        1,
        {
            1: {'name': 'joe', 'balance': 1000, 'status': 1},
            2: {'name': 'joe', 'balance': 1000, 'status': 1},
        },
        50,
        100,
        0,
    )
    # play out: p1 raise / p2 call preflop, then p2 check / p1 bet on the flop
    for action in (['r', 100], ['c'], ['k'], ['b', 200]):
        e.available_actions()
        e.do(action)
    e.available_actions()
    # p2 has:
    # preflop_1 = l
    # preflop_2 = c
    # flop_1 = k
    hs = ES.showdown_hs(e, e.s, percentile=50)
    assert hs is not None and 0 < hs < 1
    hs_low = ES.showdown_hs(e, e.s, percentile=10)
    assert hs_low < hs
    hs_high = ES.showdown_hs(e, e.s, percentile=90)
    assert hs_high > hs
    res = ES.showdown_hs(e, e.s, 200)
    hits = res.hits.hits
    assert len(hits) == 200
    assert hits[0]['_score'] > 4 and hits[-1]['_score'] > 0
def get_showdown_equities(self, e):
    """Build normalized equities from showdown hand strengths (instead of pokereval).

    Every live seat gets an ES percentile hand strength; the hero's is replaced
    by an exact PE strength since their pocket cards are known.
    """
    hss = {}
    for seat, seat_data in e.data.items():
        if 'in' not in seat_data['status']:
            continue
        hss[seat] = ES.showdown_hs(e, seat, percentile=self.PERCENTILE)
    # hero's pocket is known: use exact hand strength instead of the estimate
    if self.hero in hss:
        hero_data = e.data[self.hero]
        hss[self.hero] = PE.hand_strength(hero_data['hand'], e.board, e.rivals)
    # normalize so equities sum to 1
    total = sum(hss.values())
    return {seat: hs / total for seat, hs in hss.items()}
def test_player_stats(self):
    """Stats for the seat to act include an 'actions' histogram and bounded hs."""
    e = Engine(
        'CoinPoker',
        1,
        {
            seat: {'name': name, 'balance': 1000, 'status': 1}
            for seat, name in ((1, 'joe'), (2, 'jane'))
        },
        50,
        100,
        0,
    )
    e.available_actions()
    result = ES.player_stats(e, e.s)
    assert 'actions' in result
    assert len(result['actions']) >= 4
    assert 0 < result['hs'] < 100
def test_player_stats_on_hand(self):
    """After a multi-way preflop and a flop bet, the actor's stats carry >=4 actions."""
    names = {1: 'joe', 2: 'jane', 3: 'jane', 4: 'jane', 5: 'jane', 6: 'jane'}
    players = {s: {'name': n, 'balance': 1000, 'status': 1} for s, n in names.items()}
    e = Engine('CoinPoker', 1, players, 50, 100, 0)
    # preflop: p4 raises, p5/p6 fold, p1/p2 call; flop: p3 checks, p2 bets
    script = (['r', 100], ['f'], ['f'], ['c'], ['c'], ['k'], ['b', 100])
    for action in script:
        e.available_actions()
        e.do(action)
    e.available_actions()  # p3 to act
    stats = ES.player_stats(e, e.s)
    # hs = res.aggregations['hs']['hs_agg']['values']['50.0']
    assert len(stats['actions']) >= 4
def adjust_strength(self, s, d, a): """Adjust the min/max tuple of strength based on action taken The initialisation is done to help bridge the uknown. Taking all possible hands leads to shit decisions""" # take strength if pocket known # if d['hand'] and d['hand'] != ['__', '__'] and d['hand'] != [' ', ' ']: # strength = 1 - PE.hand_strength(d['hand'], self.board, self.rivals) # d['strength'] = strength # return # # # take hs from # if d['stats']['hs']: # d['strength'] = d['stats']['hs'] # return logger.info('adjusting strength for action {}'.format(a)) if a in ['f', 'k', 'sb', 'bb']: logger.debug('no aggression faced') return stats = d['stats']['actions'] logger.debug('player {} stats actions: {}'.format(s, stats)) dist = ES.dist_player_stats(stats) logger.debug(f'player {s} dist: {dist}') # update strength to fold limit # 1111111111 # fffffccccc # times first call of 50% during preflop # 1.00 * 50% = 0.50 # 0000011111 # ffffffffcc # times first call of 60% during flop # 0.50 * 20% = 0.10 # 0000000001 # from where action is met lower_bound = 0.0001 action_found = False for o in ['c', 'b', 'r', 'a']: if o == a: action_found = True # logger.debug('action {} found'.format(o)) if not action_found: # logger.debug('action {} not found yet'.format(a)) continue dist_vals = [k for k, v in dist.items() if v == o] # logger.debug('dist_vals {}'.format(dist_vals)) if not dist_vals: # logger.debug('no dist_vals...') continue lower_bound = min(dist_vals) # logger.debug('lower bound = {} (with {})'.format(lower_bound, o)) break new_strength = d['strength'] * (1 - lower_bound) # logger.debug('new strength = {} (old {} * {})'.format(new_strength, d['strength'], (1 - lower_bound))) d['strength'] = new_strength
def test_player_stats_on_hand(self):
    """Six-handed hand: the seat facing the flop bet gets stats with >=4 actions."""
    lineup = {}
    for seat in range(1, 7):
        lineup[seat] = {
            'name': 'joe' if seat == 1 else 'jane',
            'balance': 1000,
            'status': 1,
        }
    e = Engine('CoinPoker', 1, lineup, 50, 100, 0)
    e.available_actions()  # p4
    e.do(['r', 100])
    e.available_actions()  # p5
    e.do(['f'])
    e.available_actions()  # p6
    e.do(['f'])
    e.available_actions()  # p1
    e.do(['c'])
    e.available_actions()  # p2
    e.do(['c'])
    e.available_actions()  # p3
    e.do(['k'])
    e.available_actions()  # p2
    e.do(['b', 100])
    e.available_actions()  # p3
    stats = ES.player_stats(e, e.s)
    # hs = res.aggregations['hs']['hs_agg']['values']['50.0']
    assert len(stats['actions']) >= 4
def add_actions(self, e, parent):
    """Add actions available to this node.

    If in GG phase then no actions possible, ever. Removes 'hand'.
    Bets:
        - preflop are 2-4x BB
        - postflop are 40-100% pot
    Raise:
        - always double
    Allin:
        - only on river
        - if out of money then converted to allin
    Scale non-fold probabilities even though it should not have an effect.
    """
    # logger.info('adding actions to {}'.format(parent.tag))
    actions = e.available_actions()
    # seat and player info of the seat currently to act
    s, p = e.q[0]
    d = e.data[s]
    balance_left = p['balance'] - d['contrib']
    if not actions:
        # logger.warn('no actions to add to node')
        return
    if 'gg' in actions:
        # logger.debug('no actions available, got gg')
        return
    # NOTE(review): assumes 'hand' is always present in actions here —
    # list.remove raises ValueError otherwise; confirm against available_actions.
    actions.remove('hand')
    # remove fold if player can check
    if 'check' in actions:
        actions.remove('fold')
        # # logger.debug('removed fold when check available')
    # remove fold for hero
    # if s == self.hero and 'fold' in actions:
    #     actions.remove('fold')
    #     # # logger.debug('removed fold from hero')
    # remove raise if player has already been aggressive
    if 'raise' in actions and any(pa['action'] in 'br' for pa in d[e.phase]):
        actions.remove('raise')
        # # logger.debug('removed raise as player has already been aggressive')
    # remove allin, but add it later with final stats (if increased from bet/raised)
    if 'allin' in actions:
        actions.remove('allin')
        # logger.debug('removed allin by default')
    # load stats (codes with counts)
    stats = ES.player_stats(e, s)
    max_contrib = max(pd['contrib'] for pd in e.data.values())
    # contrib_short = max_contrib - d['contrib']
    # allin needs to be the doc count
    # where bets and raises result in allin, add those prob dists to this
    # that will give proper probability
    go_allin = stats['actions'].get('a', 0)
    # # logger.info('filtered actions: {}'.format(actions))
    # ev 0 instead of none because of root node sum when not all traversed it gives error
    action_nodes = []
    for a in actions:
        node_data = {
            'stats': stats['actions'].get(ACTIONS_TO_ABBR[a], 0.01),
            'divider': 1,
            'action': a,
            'phase': e.phase,
            'seat': s,
            'name': p['name'],
            'traversed': 0,
            'ev': 0,
        }
        if a in ['bet', 'raise']:
            # enumerate candidate sizings as (tag, amount) pairs
            btps_and_amts = []
            total_pot = sum(pd['contrib'] for pd in e.data.values()) + e.pot
            # for preflop only do 2x and 3x
            if e.phase == e.PHASE_PREFLOP:
                btps_and_amts.append(('double', e.bb_amt * 2))
                btps_and_amts.append(('triple', e.bb_amt * 3))
            # else do half and full pots
            else:
                btps_and_amts.append(('half_pot', total_pot * 0.50))
                btps_and_amts.append(('full_pot', total_pot * 1.00))
            # round bets up to a BB
            # btps_and_amts = [(btp, -(amt // -e.bb_amt) * e.bb_amt)
            #                  for btp, amt in btps_and_amts]
            # drop duplicates and sizings below the legal minimum
            betting_info = []
            amts_seen = []
            for btp, amt in btps_and_amts:
                if amt in amts_seen:
                    # logger.debug('already using {}, skipping duplicate'.format(amt))
                    continue
                if a == 'bet' and amt < e.bb_amt:
                    # logger.debug('bet cannot be less than BB {}'.format(e.bb_amt))
                    continue
                if a == 'raise' and amt < (max_contrib * 2):
                    # logger.debug('raise cannot be less than 2x contrib of {}'.format(max_contrib * 2))
                    continue
                betting_info.append((btp, amt))
                amts_seen.append(amt)
            # change raises that cause allin: fold their probability mass into allin
            betting_info_final = []
            for btp, amt in betting_info:
                # if amt is more than player balance, it is an allin
                if amt >= balance_left:
                    go_allin += node_data['stats'] / len(betting_info)
                else:
                    betting_info_final.append((btp, amt))
            # all good, can have this bet as option; stats are split evenly via 'divider'
            for btp, amt in betting_info_final:
                node_data_copy = deepcopy(node_data)
                node_data_copy['divider'] = len(betting_info_final)
                node_data_copy['action'] = f'{a}_{btp}'
                node_data_copy['amount'] = amt
                action_nodes.append(node_data_copy)
        else:
            action_nodes.append(node_data)
    # allin will have doc counts (from stat, maybe from bets, maybe from raise)
    if go_allin:
        node_data = {
            'stats': go_allin,
            'divider': 1,
            'action': 'allin',
            'phase': e.phase,
            'seat': s,
            'name': p['name'],
            'traversed': 0,
            'ev': 0,
            'amount': balance_left,
        }
        action_nodes.append(node_data)
        # logger.debug('added allin to actions with stat {}'.format(node_data['stats']))
    # scale the stats (it is currently term counts aka histogram) and it is required to be
    # a probability distribution (p~1)
    # Also, certain actions like fold can be removed, and the total stats is not 1
    total_stats = sum(an['stats'] / an['divider'] for an in action_nodes)
    for action_node in action_nodes:
        # floor each probability at 0.01 so no branch becomes unreachable
        action_node['stats'] = max(
            0.01, action_node['stats'] / action_node['divider'] / total_stats)
        action_node[
            'cum_stats'] = parent.data['cum_stats'] * action_node['stats']
        node_tag = f'{action_node["action"]}_{s}_{e.phase}'
        identifier = f'{node_tag}_{str(uuid.uuid4())[:8]}'
        self.tree.create_node(identifier=identifier,
                              tag=node_tag,
                              parent=parent.identifier,
                              data=action_node)
        # logger.debug('new {} for {} with data {}'.format(node_tag, s, action_node))
        # queue is ordered by (1 - cum_stats): most likely paths explored first
        item = (1 - action_node['cum_stats'], self.leaf_path + [identifier])
        self.queue.put(item)
def __init__(self):
    # Elasticsearch client wrapper; shared by all subsequent calls.
    self.es = ES()
def main():
    """CLI entry point for searching, ingesting, and downloading datasets."""
    # create parser object
    parser = argparse.ArgumentParser(
        description=
        "nucleai cli for searching, ingesting, and downloading datasets.")

    # defining arguments for parser object
    parser.add_argument(
        "-ots", "--one_time_setup", nargs="*",
        help=
        "Setups up metadocument index in the elasticsearch. This only needs to be done once."
    )
    parser.add_argument(
        "-s", "--search", nargs=1,
        help=
        "Searched metadocument index by keywords. Returns top 10 matches by reverse indexing."
    )
    parser.add_argument(
        "-d", "--download", type=str, nargs=1,
        help="Downloads and pickles data based off dataset id.")
    parser.add_argument(
        "-dt", "--download_and_tensorize", type=str, nargs=1,
        help="Downloads, tensorizes, and pickles data based off dataset id.")
    parser.add_argument(
        "-dttws", "--download_tensorize_two_way_split", type=str, nargs=1,
        help=
        "Downloads, tensorizes, splits across two tensors, and pickles data based off dataset id."
    )
    # The following should probably live in a different package as they help to ingest.
    parser.add_argument(
        "-iq", "--ingest_by_query", type=str, nargs=1,
        help="Ingests data from data.gov into the elasticsearch by keywords.")
    parser.add_argument(
        "-ip", "--ingest_by_package", type=str, nargs=1,
        help=
        "Ingests data from data.gov into the elasticsearch by name of data.gov package."
    )

    # parse the arguments from standard input
    args = parser.parse_args()

    # calling functions depending on type of argument
    if args.one_time_setup is not None:
        print("INFO: Setting up metadocument index.")
        ES().init_metadocument_index()
    elif args.ingest_by_query is not None:
        query = args.ingest_by_query[0]
        print("INFO: Ingesting all datasets related to %s." % query)
        scraper = DataDotGovScraper(query)
        packages = scraper.get_packages()
        for package in packages:
            try:
                scraper.ingest_dataset(package)
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrow to Exception.
            except Exception:
                print("ERROR: Could not load data.")
    elif args.ingest_by_package is not None:
        package = args.ingest_by_package[0]
        print("INFO: Ingesting dataset called to %s." % package)
        DataDotGovScraper(None).ingest_dataset(package)
    elif args.search is not None:
        query = args.search[0]
        print("INFO: Searching our database for %s." % query)
        print(SearchClient().search_by_partial_match(query, 10))
    elif args.download is not None:
        dataset_id = args.download[0]
        cleaned_dataframe = get_cleaned_dataframe(dataset_id)
        print("INFO: Pickling %s dataset." % dataset_id)
        cleaned_dataframe.to_pickle("%s_dataframe.pkl" % dataset_id)
    elif args.download_and_tensorize is not None:
        dataset_id = args.download_and_tensorize[0]
        cleaned_dataframe = get_cleaned_dataframe(dataset_id)
        torch_tensor = one_hot_encode_and_tensorize(dataset_id,
                                                    cleaned_dataframe)
        print("INFO: Pickling %s dataset." % dataset_id)
        torch.save(torch_tensor, "%s.tensor" % dataset_id)
    elif args.download_tensorize_two_way_split is not None:
        # For now, comment these lines out since the elasticsearch is down.
        # dataset_id = args.download[0]
        # cleaned_dataframe = get_cleaned_dataframe(dataset_id)
        # Temporarily, dataset_id will refer to the URL passed in.
        dataset_id = args.download_tensorize_two_way_split[0]
        cleaned_dataframe = get_cleaned_dataframe_by_url(dataset_id)
        num_rows = cleaned_dataframe.shape[0]
        num_columns = len(cleaned_dataframe.columns)
        # BUG FIX: random.randint requires integer bounds; `num_rows / 2 - 1`
        # is a float in Python 3 and raises ValueError. Use floor division.
        first_split_rows_sample_size = randint(num_rows // 2 - 1, num_rows)
        second_split_rows_sample_size = randint(num_rows // 2 - 1, num_rows)
        first_split_columns_sample_size = randint(num_columns // 2 - 1,
                                                  num_columns)
        second_split_columns_sample_size = randint(num_columns // 2 - 1,
                                                   num_columns)
        # sample rows, then columns, then shuffle each column independently
        first_split_dataframe = cleaned_dataframe.sample(first_split_rows_sample_size)\
            .sample(first_split_columns_sample_size, axis=1).apply(np.random.permutation)
        second_split_dataframe = cleaned_dataframe.sample(second_split_rows_sample_size)\
            .sample(second_split_columns_sample_size, axis=1).apply(np.random.permutation)
        first_torch_tensor = one_hot_encode_and_tensorize(
            dataset_id, first_split_dataframe)
        second_torch_tensor = one_hot_encode_and_tensorize(
            dataset_id, second_split_dataframe)
        print("INFO: Pickling %s dataset." % dataset_id)
        torch.save(first_torch_tensor, "tmp-%s-1.tensor" % hash(dataset_id))
        torch.save(second_torch_tensor, "tmp-%s-2.tensor" % hash(dataset_id))
def __init__(self, site_name, button, players, sb, bb, ante=0, *args, **kwargs):
    """Initialise engine state for a hand.

    Args:
        site_name: Name of the poker site being played.
        button: Seat number holding the dealer button.
        players: Mapping of seat -> player info dict (must carry 'status').
        sb: Small blind amount.
        bb: Big blind amount.
        ante: Ante amount, default 0.
        **kwargs: Optional resumed state keys: 'data', 'board', 'pot',
            'phase', 'preflop', 'flop', 'turn', 'river', 'showdown'.
    """
    logger.info(f'Engine site_name: {site_name}')
    logger.info(f'Engine button: {button}')
    logger.info(f'Engine players: {len(players)}')
    logger.info(f'Engine kwargs: {kwargs}')
    self.site_name = site_name
    self.button = button
    self.players = players
    self.sb_amt = sb
    self.bb_amt = bb
    self.ante = ante
    self.go_to_showdown = False
    self.mc = False
    # BUG FIX: `hasattr(kwargs, 'data')` on a dict is always False (dicts
    # have no 'data' attribute), so resumed-state 'data' was never used.
    # A membership test is the correct check.
    if 'data' in kwargs:
        self.data = kwargs['data']
    else:
        self.data = {
            s: {
                'status': 'in' if p.get('status') else 'out',
                'sitout': False,
                'hand': ['__', '__'] if p.get('status') else [' ', ' '],
                'contrib': 0,
                'matched': 0,
                'preflop': [],
                'flop': [],
                'turn': [],
                'river': [],
                'showdown': [],  # for error at stats
                'is_SB': False,
                'is_BB': False,
            }
            for s, p in players.items()
        }
    # count of seats still in the hand
    self.vs = sum(1 for d in self.data.values() if d['status'] == 'in')
    self.rivals = self.vs
    self.winner = None
    # leave empty: scraper compares length
    self.board = kwargs.get('board', [])
    self.pot = kwargs.get('pot', 0)
    self.phase = kwargs.get('phase', self.PHASE_PREFLOP)
    self.preflop = kwargs.get('preflop', {})
    self.flop = kwargs.get('flop', {})
    self.turn = kwargs.get('turn', {})
    self.river = kwargs.get('river', {})
    self.showdown = kwargs.get('showdown', {})
    self.q = None
    self.pe_equities = {}
    # hand_strength = PE.hand_strength(['__', '__'], self.board, self.rivals)
    # seed per-seat stats, hand range, and default strength for live seats
    for s, d in self.data.items():
        if 'in' not in d['status']:
            continue
        self.data[s]['stats'] = ES.player_stats(self, s)
        self.players[s]['hand_range'] = ES.cut_hand_range(
            self.data[s]['stats'])
        self.data[s]['strength'] = 0.20
def es(rm):
    """Delete the given player from ES, or list the most frequent players if none given."""
    if not rm:
        ES.most_frequent_players()
    else:
        ES.delete_player(rm)
def add_actions(self, e, parent):
    """Add actions available to this node.

    If in GG phase then no actions possible, ever. Removes 'hand'.
    Bets:
        - preflop are 2-4x BB
        - postflop are 40-100% pot
    Raise:
        - always double
    Allin:
        - only on river
        - if out of money then converted to allin
    Scale non-fold probabilities even though it should not have an effect.
    """
    # logger.info('adding actions to {}'.format(parent.tag))
    actions = e.available_actions()
    # seat/player currently to act is at the front of the engine queue
    s, p = e.q[0]
    d = e.data[s]
    balance_left = p['balance'] - d['contrib']
    if not actions:
        # logger.warn('no actions to add to node')
        return
    if 'gg' in actions:
        # logger.debug('no actions available, got gg')
        return
    # NOTE(review): list.remove raises ValueError if 'hand' is absent —
    # assumes available_actions always includes it; confirm.
    actions.remove('hand')
    # remove fold if player can check
    if 'check' in actions:
        actions.remove('fold')
        # # logger.debug('removed fold when check available')
    # remove fold for hero
    # if s == self.hero and 'fold' in actions:
    #     actions.remove('fold')
    #     # # logger.debug('removed fold from hero')
    # remove raise if player has already been aggressive
    if 'raise' in actions and any(pa['action'] in 'br' for pa in d[e.phase]):
        actions.remove('raise')
        # # logger.debug('removed raise as player has already been aggressive')
    # remove allin, but add it later with final stats (if increased from bet/raised)
    if 'allin' in actions:
        actions.remove('allin')
        # logger.debug('removed allin by default')
    # load stats (codes with counts)
    stats = ES.player_stats(e, s)
    max_contrib = max(pd['contrib'] for pd in e.data.values())
    # contrib_short = max_contrib - d['contrib']
    # allin needs to be the doc count
    # where bets and raises result in allin, add those prob dists to this
    # that will give proper probability
    go_allin = stats['actions'].get('a', 0)
    # # logger.info('filtered actions: {}'.format(actions))
    # ev 0 instead of none because of root node sum when not all traversed it gives error
    action_nodes = []
    for a in actions:
        node_data = {
            'stats': stats['actions'].get(ACTIONS_TO_ABBR[a], 0.01),
            'divider': 1,
            'action': a,
            'phase': e.phase,
            'seat': s,
            'name': p['name'],
            'traversed': 0,
            'ev': 0,
        }
        if a in ['bet', 'raise']:
            # candidate sizings as (tag, amount) pairs, phase-dependent
            btps_and_amts = []
            total_pot = sum(pd['contrib'] for pd in e.data.values()) + e.pot
            # for preflop only do 2x and 3x
            if e.phase == e.PHASE_PREFLOP:
                btps_and_amts.append(('double', e.bb_amt * 2))
                btps_and_amts.append(('triple', e.bb_amt * 3))
            # else do half and full pots
            else:
                btps_and_amts.append(('half_pot', total_pot * 0.50))
                btps_and_amts.append(('full_pot', total_pot * 1.00))
            # round bets up to a BB
            # btps_and_amts = [(btp, -(amt // -e.bb_amt) * e.bb_amt)
            #                  for btp, amt in btps_and_amts]
            # filter out duplicate amounts and below-minimum sizings
            betting_info = []
            amts_seen = []
            for btp, amt in btps_and_amts:
                if amt in amts_seen:
                    # logger.debug('already using {}, skipping duplicate'.format(amt))
                    continue
                if a == 'bet' and amt < e.bb_amt:
                    # logger.debug('bet cannot be less than BB {}'.format(e.bb_amt))
                    continue
                if a == 'raise' and amt < (max_contrib * 2):
                    # logger.debug('raise cannot be less than 2x contrib of {}'.format(max_contrib * 2))
                    continue
                betting_info.append((btp, amt))
                amts_seen.append(amt)
            # change raises that cause allin: their probability mass moves to allin
            betting_info_final = []
            for btp, amt in betting_info:
                # if amt is more than player balance, it is an allin
                if amt >= balance_left:
                    go_allin += node_data['stats'] / len(betting_info)
                else:
                    betting_info_final.append((btp, amt))
            # all good, can have this bet as option; 'divider' splits stats evenly
            for btp, amt in betting_info_final:
                node_data_copy = deepcopy(node_data)
                node_data_copy['divider'] = len(betting_info_final)
                node_data_copy['action'] = f'{a}_{btp}'
                node_data_copy['amount'] = amt
                action_nodes.append(node_data_copy)
        else:
            action_nodes.append(node_data)
    # allin will have doc counts (from stat, maybe from bets, maybe from raise)
    if go_allin:
        node_data = {
            'stats': go_allin,
            'divider': 1,
            'action': 'allin',
            'phase': e.phase,
            'seat': s,
            'name': p['name'],
            'traversed': 0,
            'ev': 0,
            'amount': balance_left,
        }
        action_nodes.append(node_data)
        # logger.debug('added allin to actions with stat {}'.format(node_data['stats']))
    # scale the stats (it is currently term counts aka histogram) and it is required to be
    # a probability distribution (p~1)
    # Also, certain actions like fold can be removed, and the total stats is not 1
    total_stats = sum(an['stats'] / an['divider'] for an in action_nodes)
    for action_node in action_nodes:
        # floor at 0.01 so no branch gets zero probability
        action_node['stats'] = max(0.01,
                                   action_node['stats'] / action_node['divider'] / total_stats)
        action_node['cum_stats'] = parent.data['cum_stats'] * action_node['stats']
        node_tag = f'{action_node["action"]}_{s}_{e.phase}'
        identifier = f'{node_tag}_{str(uuid.uuid4())[:8]}'
        self.tree.create_node(identifier=identifier,
                              tag=node_tag,
                              parent=parent.identifier,
                              data=action_node)
        # logger.debug('new {} for {} with data {}'.format(node_tag, s, action_node))
        # priority queue explores most-likely (highest cum_stats) paths first
        item = (
            1 - action_node['cum_stats'],
            self.leaf_path + [identifier]
        )
        self.queue.put(item)