def __init__(self, extractors_with_weights):
    """Unpack (extractor, weight) pairs and precompute per-card feature rows.

    Cards that appear in every set get an all-zero feature row.
    """
    extractors, weights = [], []
    for extractor, weight in extractors_with_weights:
        extractors.append(extractor)
        weights.append(weight)
    self.extractors = extractors
    self.weights = numpy.array(weights)
    names = card_info.card_names()
    self.features = numpy.zeros((len(names), len(self.weights)))
    for ind, card in enumerate(names):
        # Every-set cards carry no distinguishing signal; leave zeros.
        if card not in card_info.EVERY_SET_CARDS:
            self.features[ind] = self.extract_features(card)
def __init__(self, extractors_with_weights):
    """Split extractor/weight pairs and build the card-by-feature matrix."""
    self.extractors = [pair[0] for pair in extractors_with_weights]
    self.weights = numpy.array([pair[1] for pair in extractors_with_weights])
    num_cards = len(card_info.card_names())
    self.features = numpy.zeros((num_cards, len(self.weights)))
    for ind, card in enumerate(card_info.card_names()):
        if card in card_info.EVERY_SET_CARDS:
            continue  # leave an all-zero row for cards present in every set
        self.features[ind] = self.extract_features(card)
def GET(self):
    """Serve per-card gain/win stats, optionally conditioned on other cards.

    Query params (all optional):
      targets: comma-separated card names; empty means all cards.
      cond1, cond2: cards to condition the stats on.
      format: output format; only 'json' is implemented.
    """
    web.header("Content-Type", "text/html; charset=utf-8")
    web.header("Access-Control-Allow-Origin", "*")
    query_dict = dict(urlparse.parse_qsl(web.ctx.env['QUERY_STRING']))
    db = utils.get_mongo_database()
    targets = query_dict.get('targets', '').split(',')
    # ''.split(',') yields [''] — treat an all-empty target list as "all cards".
    if sum(len(t) for t in targets) == 0:
        targets = card_info.card_names()
    # print targets
    def str_card_index(card_name):
        # Map a card name to its stringified index; '' when unrecognized.
        title = card_info.sane_title(card_name)
        if title:
            return str(card_info.card_index(title))
        return ''
    target_inds = map(str_card_index, targets)
    # print targets, target_inds
    cond1 = str_card_index(query_dict.get('cond1', ''))
    cond2 = str_card_index(query_dict.get('cond2', ''))
    # DB keys store conditions in a canonical order; swap so cond1 >= cond2
    # (string comparison — presumably matching how keys were written).
    if cond1 < cond2:
        cond1, cond2 = cond2, cond1
    card_stats = {}
    for target_ind in target_inds:
        # Key layout: '<target>;<cond1>[,<cond2>]'.
        key = target_ind + ';'
        if cond1:
            key += cond1
        if cond2:
            key += ',' + cond2
        db_val = db.card_supply.find_one({'_id': key})
        if db_val:
            small_gain_stat = SmallGainStat()
            small_gain_stat.from_primitive_object(db_val['vals'])
            card_name = card_info.card_names()[int(target_ind)]
            card_stats[card_name] = small_gain_stat
    format = query_dict.get('format', 'json')
    if format == 'json':
        readable_card_stats = {}
        for card_name, card_stat in card_stats.iteritems():
            readable_card_stats[card_name] = (
                card_stat.to_readable_primitive_object())
        return json.dumps(readable_card_stats)
    return 'unsupported format ' + format
def main(): all_cards = ','.join(card_info.card_names()) all_data = [] for card in card_info.card_names(): url = ('http://councilroom.com/supply_win_api?' 'targets=%s&interaction=%s' % (card, all_cards)) time.sleep(.1) print card contents = urllib.urlopen(url).read() parsed_contents = json.loads(contents) all_data.extend(parsed_contents) print 'len data', len(all_data) open('card_conditional_data.json', 'w').write(json.dumps(all_data))
def main(): rankings = read_ranks_file('qvist_rankings.txt') ranker = Ranker([ #(win_margin, 1.287), #(prob_win_margin, 1), #(win_given_no_gain, -.05), #(win_weighted_gain, 20), #(frequency_purchased, .1), #(frequency_weighted_win_margin, .01), (win_given_any_gain, 30), (log_odds_any_gained, 1.3), (num_plus_actions, -1), (has_vp, -1.5), (is_reaction, 1.5), ]) # this should really be doing cross validation rank_eval = RankEvaluator(ranker, rankings, ranking_log_loss) learned_weights = [52.926, 1.358, -1.161, -1.712, 1.626] ranker.weights = learned_weights learned_weights = scipy.optimize.fmin_bfgs(rank_eval, rank_eval.ranker.weights, gtol=1e-3) ranker.weights = learned_weights print learned_weights display_large_errors(ranker, rankings) grouped_by_cost = collections.defaultdict(list) for card in card_info.card_names(): grouped_by_cost[card_info.cost(card)].append(card) for cost, card_list in grouped_by_cost.iteritems(): if len(card_list) >= 4: card_list.sort(key=ranker.score)
def main(): rankings = read_ranks_file('qvist_rankings.txt') ranker = Ranker([ #(win_margin, 1.287), #(prob_win_margin, 1), #(win_given_no_gain, -.05), #(win_weighted_gain, 20), #(frequency_purchased, .1), #(frequency_weighted_win_margin, .01), (win_given_any_gain, 30), (log_odds_any_gained, 1.3), (num_plus_actions, -1), (has_vp, -1.5), (is_reaction, 1.5), ]) # this should really be doing cross validation rank_eval = RankEvaluator(ranker, rankings, ranking_log_loss) learned_weights = [ 52.926, 1.358, -1.161, -1.712, 1.626] ranker.weights = learned_weights learned_weights = scipy.optimize.fmin_bfgs( rank_eval, rank_eval.ranker.weights, gtol=1e-3) ranker.weights = learned_weights print learned_weights display_large_errors(ranker, rankings) grouped_by_cost = collections.defaultdict(list) for card in card_info.card_names(): grouped_by_cost[card_info.cost(card)].append(card) for cost, card_list in grouped_by_cost.iteritems(): if len(card_list) >= 4: card_list.sort(key=ranker.score)
def main(): rankings = read_ranks_file('qvist_rankings.txt') ranker = Ranker([ #(win_margin, 1.287), #(prob_win_margin, 1), #(win_given_no_gain, -.05), #(win_weighted_gain, 1.0), #(frequency_purchased, .1), #(frequency_weighted_win_margin, .01), (win_given_any_gain, 2.5), (log_odds_any_gained, .05), (num_plus_actions, -.03), (has_vp, -.1), (is_reaction, .1), ]) rank_eval = RankEvaluator(ranker, rankings, ranking_accuracy) learned_weights = scipy.optimize.fmin(rank_eval, rank_eval.ranker.weights) ranker.weights = learned_weights grouped_by_cost = collections.defaultdict(list) for card in card_info.card_names(): grouped_by_cost[card_info.cost(card)].append(card) for cost, card_list in grouped_by_cost.iteritems(): if len(card_list) >= 4: card_list.sort(key=ranker.score) print cost, ','.join(card_list)
def GET(self):
    """Render the optimal-card-ratio page for a pair of cards.

    Query params: card_x and card_y (defaults 'Minion' and 'Gold').
    Stats are stored under a canonical id with the lexicographically
    smaller card first, so the display may need x/y swapped back.
    """
    web.header("Content-Type", "text/html; charset=utf-8")
    query_dict = dict(urlparse.parse_qsl(web.ctx.env['QUERY_STRING']))
    card_list = sorted(set(card_info.card_names()) -
                       set(card_info.TOURNAMENT_WINNINGS))
    card_x = query_dict.get('card_x', 'Minion')
    card_y = query_dict.get('card_y', 'Gold')
    # DB ids are canonicalized as '<smaller>:<larger>'; remember whether the
    # user's requested orientation was flipped.
    if card_x < card_y:
        db_id = card_x + ':' + card_y
        swap_x_and_y = False
    else:
        db_id = card_y + ':' + card_x
        swap_x_and_y = True
    db = utils.get_mongo_database()
    db_val = db.optimal_card_ratios.find_one({'_id': db_id})
    if not db_val:
        return 'No stats for "' + card_x + '" and "' + card_y + '".'
    tracker = DBCardRatioTracker()
    tracker.from_primitive_object(db_val)
    # Final-state table: total games is the sum over all ratio buckets;
    # buckets seen in fewer than 0.2% of games are thresholded out.
    num_games = sum(meanvarstat.frequency()
                    for meanvarstat in tracker.final.itervalues())
    num_games_threshold = int(round(num_games * .002))
    final_table = self.getHtmlTableForStats(
        tracker.final, swap_x_and_y, num_games, num_games_threshold)
    # Progressive table: uses max (not sum) — presumably because the same
    # game contributes to many progressive buckets; confirm.
    num_games = max(meanvarstat.frequency()
                    for meanvarstat in tracker.progressive.itervalues())
    num_games_threshold = int(round(num_games * .002))
    progressive_table = self.getHtmlTableForStats(
        tracker.progressive, swap_x_and_y, num_games, num_games_threshold)
    render = web.template.render('')
    return render.optimal_card_ratios_template(
        card_list, card_x, card_y, final_table, progressive_table)
def GET(self):
    """Serve conditional card stats as JSON.

    Recognized query params:
      targets: comma-separated card names; empty means all cards.
      interaction: comma-separated cards to condition the target stats on.
      nested: if present, also get second order conditional stats.
      unconditional: if present, also get unconditional stats.
    """
    web.header("Content-Type", "text/html; charset=utf-8")
    web.header("Access-Control-Allow-Origin", "*")
    query_dict = dict(urlparse.parse_qsl(web.ctx.env['QUERY_STRING']))
    requested = query_dict.get('targets', '').split(',')
    # ''.split(',') gives [''] — an all-empty request means "every card".
    if not any(requested):
        requested = card_info.card_names()
    target_inds = map(self.str_card_index, requested)
    interaction_tuples = self.interaction_card_index_tuples(query_dict)
    stats = self.fetch_conditional_stats(target_inds, interaction_tuples)
    return self.readable_json_card_stats(stats)
def main():
    """Hierarchically cluster cards by conditional stats and save a dendrogram."""
    # Archivist is excluded throughout — presumably too little data; confirm.
    ARCH = 'Archivist'
    card_data = json.load(open('card_conditional_data.json'))
    card_names = card_info.card_names()
    card_names.remove(ARCH)
    card_inds = {}
    for ind, card_name in enumerate(card_names):
        card_inds[card_name] = ind
    N = len(card_inds)
    # cluster based on gain prob, win rate given any gained,
    # avg gained per game, and win rate per gain
    M = 4
    grouped_data = np.zeros((N, M, N))
    for card_row in card_data:
        card_name = card_row['card_name']
        condition = card_row['condition'][0]
        if card_name == ARCH or condition == ARCH:
            continue
        assert len(card_row['condition']) == 1
        if card_name == condition:
            continue
        i = card_inds[card_name]
        j = card_inds[condition]
        stats = card_row['stats']
        def parse(key):
            # Rehydrate a MeanVarStat from its primitive (JSON) form.
            ret = MeanVarStat()
            ret.from_primitive_object(stats[key])
            return ret
        wgag = parse('win_given_any_gain')
        wgng = parse('win_given_no_gain')
        wwg = parse('win_weighted_gain')
        total_games = wgag.frequency() + wgng.frequency()
        # Feature 0: probability the card was gained; feature 1: win rate
        # given any gained. Features 2/3 are disabled below.
        grouped_data[i][0][j] = wgag.frequency() / total_games
        grouped_data[i][1][j] = wgag.mean()
        #grouped_data[i][2][j] = wwg.frequency() / total_games
        # grouped_data[i][3][j] = wwg.mean()
    for i in range(N):
        for j in range(M):
            s = sum(grouped_data[i][j])
            # make the self data == avg
            grouped_data[i][j][i] = s / (N - 1)
    for i in range(N):
        for j in range(M):
            grouped_data[i][j] = preprocessing.scale(grouped_data[i][j])
    flattened_normed_data = np.zeros((N, 2 * N * M + len(bonus_feature_funcs)))
    for i in range(N):
        bonus_vec = get_bonus_vec(card_names[i])
        # v1: stats of card i conditioned on others (row view);
        # v2: stats of others conditioned on card i (column view).
        v1, v2 = [], []
        for j in range(M):
            for k in range(N):
                v1.append(grouped_data[i][j][k])
                v2.append(grouped_data[k][j][i])
        v1, v2 = np.array(v1), np.array(v2)
        # NOTE(review): v1 appears twice and v2 is unused — possibly meant
        # to be (v1, v2, bonus_vec); sizes match either way. Confirm before
        # changing.
        catted = np.concatenate((v1, v1, bonus_vec))
        flattened_normed_data[i] = catted
    # Keep only a cost-filtered subset of cards (and drop every-set cards).
    flattened_normed_data, card_names = trim(
        lambda x: not (card_info.cost(x)[0] >= '5' or
                       card_info.cost(x)[0] == '1' or
                       card_info.cost(x)[0] == 'P') and not (
            x in card_info.EVERY_SET_CARDS or
            card_info.cost(x)[0:2] == '*0'),
        flattened_normed_data, card_names)
    z = scipy.cluster.hierarchy.ward(flattened_normed_data)
    scipy.cluster.hierarchy.dendrogram(z, labels=card_names,
                                       orientation='left',
                                       leaf_font_size=4.5,
                                       )
    pylab.savefig('expensive_group_win_prob.png',
                  dpi=len(card_names) * 2.5, bbox_inches='tight')
def main():
    """Build per-card conditional-stat feature vectors and a nearest neighbor table."""
    # Archivist is excluded throughout — presumably too little data; confirm.
    ARCH = "Archivist"
    card_data = json.load(open("card_conditional_data.json"))
    card_names = card_info.card_names()
    card_names.remove(ARCH)
    card_inds = {}
    for ind, card_name in enumerate(card_names):
        card_inds[card_name] = ind
    N = len(card_inds)
    # cluster based on gain prob, win rate given any gained,
    # avg gained per game, and win rate per gain
    M = 4
    grouped_data = np.zeros((N, M, N))
    for card_row in card_data:
        card_name = card_row["card_name"]
        condition = card_row["condition"][0]
        if card_name == ARCH or condition == ARCH:
            continue
        assert len(card_row["condition"]) == 1
        if card_name == condition:
            continue
        i = card_inds[card_name]
        j = card_inds[condition]
        stats = card_row["stats"]
        def parse(key):
            # Rehydrate a MeanVarStat from its primitive (JSON) form.
            ret = MeanVarStat()
            ret.from_primitive_object(stats[key])
            return ret
        wgag = parse("win_given_any_gain")
        wgng = parse("win_given_no_gain")
        wwg = parse("win_weighted_gain")
        total_games = wgag.frequency() + wgng.frequency()
        grouped_data[i][0][j] = wgag.frequency() / total_games
        grouped_data[i][1][j] = wgag.mean()
        # grouped_data[i][2][j] = wwg.frequency() / total_games
        # grouped_data[i][3][j] = wwg.mean()
    for i in range(N):
        for j in range(M):
            s = sum(grouped_data[i][j])
            # make the self data == avg
            grouped_data[i][j][i] = s / (N - 1)
    for i in range(N):
        for j in range(M):
            grouped_data[i][j] = preprocessing.scale(grouped_data[i][j])
    flattened_normed_data = np.zeros((N, 2 * N * M + len(bonus_feature_funcs)))
    for i in range(N):
        bonus_vec = get_bonus_vec(card_names[i])
        # v1: stats of card i conditioned on others (row view);
        # v2: stats of others conditioned on card i (column view).
        v1, v2 = [], []
        for j in range(M):
            for k in range(N):
                v1.append(grouped_data[i][j][k])
                v2.append(grouped_data[k][j][i])
        v1, v2 = np.array(v1), np.array(v2)
        # v2 and bonus_vec are deliberately zeroed: only the row view (v1)
        # contributes to the distance metric in this variant.
        catted = np.concatenate((v1 * 1, v2 * 0, 0 * bonus_vec))
        flattened_normed_data[i] = catted
    # NOTE(review): nn_table and abbrevs are computed but not used in this
    # function — possibly leftovers from an earlier experiment; confirm.
    nn_table = NearestNeighborTable(flattened_normed_data, card_names)
    # flattened_normed_data, card_names = trim(
    #     lambda x: not (card_info.cost(x)[0] >= '5' or
    #                    card_info.cost(x)[0] == '1' or
    #                    card_info.cost(x)[0] == 'P') and not (
    #         x in card_info.EVERY_SET_CARDS or
    #         card_info.cost(x)[0:2] == '*0'),
    #     flattened_normed_data, card_names)
    # n_neighbors = 15
    # n_components = 2
    # iso_data = manifold.Isomap(15, 2).fit_transform(flattened_normed_data)
    abbrevs = map(card_info.abbrev, card_names)
def supply_common_extractor(g, game_state):
    """Return the current supply count for every known card (0 when absent)."""
    return [game_state.supply.get(card, 0) for card in ci.card_names()]
def composition_deck_extractor(deck_comp, game_state, player):
    """Return the count of each known card in the deck composition (0 when absent)."""
    return [deck_comp.get(card, 0) for card in ci.card_names()]
import card_info as ci
import game
import random
import utils


def nice_feature_name(n):
    """Make a card name usable as a feature identifier (strip spaces/apostrophes)."""
    return n.replace(' ', '_').replace("'", '')


def composition_deck_extractor(deck_comp, game_state, player):
    """Return the count of each known card in deck_comp (0 when absent)."""
    ret = []
    for card in ci.card_names():
        ret.append(deck_comp.get(card, 0))
    return ret
composition_deck_extractor.feature_names = map(nice_feature_name,
                                               ci.card_names())


def score_deck_extractor(deck_comp, game_state, player):
    """Single-element feature: the player's current score."""
    return [game_state.player_score(player)]


def deck_size_deck_extractor(deck_comp, game_state, player):
    """Single-element feature: total number of cards in the deck."""
    return [sum(deck_comp.itervalues())]


def action_balance_deck_extractor(deck_comp, game_state, player):
    """Average net +actions contributed per card in the deck.

    Fix: the accumulator must start as a float — with ``ret = 0`` the final
    division truncated to an int under Python 2 integer division (the
    sibling version of this extractor elsewhere in the project already
    uses 0.0).
    """
    ret = 0.0
    for card, quant in deck_comp.iteritems():
        ret += (ci.num_plus_actions(card) - ci.is_action(card)) * quant
    # `or 1` guards against dividing by zero for an empty deck.
    return [ret / (sum(deck_comp.itervalues()) or 1)]


def unique_deck_extractor(deck_comp, game_state, player):
    """Single-element feature: number of distinct card types in the deck."""
    return [len(deck_comp)]
def main():
    """Build card feature vectors, write NN-table HTML pages, and plot groupings."""
    # Archivist is excluded throughout — presumably too little data; confirm.
    ARCH = 'Archivist'
    card_data = json.load(open('card_conditional_data.json'))
    card_names = card_info.card_names()
    card_names.remove(ARCH)
    card_inds = {}
    for ind, card_name in enumerate(card_names):
        card_inds[card_name] = ind
    N = len(card_inds)
    # cluster based on gain prob, win rate given any gained,
    # avg gained per game, and win rate per gain
    M = 4
    grouped_data = np.zeros((N, M, N))
    for card_row in card_data:
        card_name = card_row['card_name']
        condition = card_row['condition'][0]
        if card_name == ARCH or condition == ARCH:
            continue
        assert len(card_row['condition']) == 1
        if card_name == condition:
            continue
        i = card_inds[card_name]
        j = card_inds[condition]
        stats = card_row['stats']
        def parse(key):
            # Rehydrate a MeanVarStat from its primitive (JSON) form.
            ret = MeanVarStat()
            ret.from_primitive_object(stats[key])
            return ret
        wgag = parse('win_given_any_gain')
        wgng = parse('win_given_no_gain')
        wwg = parse('win_weighted_gain')
        total_games = wgag.frequency() + wgng.frequency()
        grouped_data[i][0][j] = wgag.frequency() / total_games
        grouped_data[i][1][j] = wgag.mean()
        #grouped_data[i][2][j] = wwg.frequency() / total_games
        # grouped_data[i][3][j] = wwg.mean()
    for i in range(N):
        for j in range(M):
            s = sum(grouped_data[i][j])
            # make the self data == avg
            grouped_data[i][j][i] = s / (N - 1)
    for i in range(N):
        for j in range(M):
            grouped_data[i][j] = preprocessing.scale(grouped_data[i][j])
    flattened_normed_data = np.zeros((N, 2 * N * M + len(bonus_feature_funcs)))
    for i in range(N):
        bonus_vec = get_bonus_vec(card_names[i])
        # v1: stats of card i conditioned on others (row view);
        # v2: stats of others conditioned on card i (column view).
        v1, v2 = [], []
        for j in range(M):
            for k in range(N):
                v1.append(grouped_data[i][j][k])
                v2.append(grouped_data[k][j][i])
        v1, v2 = np.array(v1), np.array(v2)
        # v2 and bonus_vec are deliberately zeroed: only the row view (v1)
        # contributes to the distance metric in this variant.
        catted = np.concatenate((v1 * 1 , v2 * 0 , 0 *bonus_vec))
        flattened_normed_data[i] = catted
    # Fixed-radius neighbor table -> static HTML page.
    fixed_radius_nn_table = NearestNeighborTable(
        flattened_normed_data, card_names, False)
    open('../static/fixed_radius_nn_table.html', 'w').write(
        render_knn_page(
            'Councilroom.com: fixed radius nearest neighbor card groups',
            FIXED_RADIUS_NN_BLURB, fixed_radius_nn_table))
    # K-nearest-neighbor table -> static HTML page.
    knn_table = NearestNeighborTable(
        flattened_normed_data, card_names, True)
    open('../static/knn_table.html', 'w').write(
        render_knn_page(
            'Councilroom.com: K nearest neighbor card groups',
            KNN_BLURB, knn_table))
    # flattened_normed_data, card_names = trim(
    #     lambda x: not (card_info.cost(x)[0] >= '5' or
    #                    card_info.cost(x)[0] == '1' or
    #                    card_info.cost(x)[0] == 'P') and not (
    #         x in card_info.EVERY_SET_CARDS or
    #         card_info.cost(x)[0:2] == '*0'),
    #     flattened_normed_data, card_names)
    # Drop cards that form singleton groups before plotting the dendrogram.
    deleted_singleton_cards = [c for c in card_names
                               if fixed_radius_nn_table.is_singleton(c)]
    flattened_normed_data, card_names = trim(
        lambda x: x not in deleted_singleton_cards,
        flattened_normed_data, card_names)
    dendro_plot(flattened_normed_data, card_names,
                '../static/plot_no_singletons.png')
    open('../static/card_group_main.html', 'w').write(
        MAIN_GROUPING_CARD_PAGE_TEMPLATE % (
            'Councilroom.com: Dominion Card Groupings',
            ', '.join(deleted_singleton_cards)))
import utils


def nice_feature_name(n):
    """Sanitize a card name into an identifier-friendly feature name."""
    return n.replace(' ', '_').replace("'", '')


def composition_deck_extractor(deck_comp, game_state, player):
    """Per-card counts from the deck composition; zero when a card is absent."""
    return [deck_comp.get(card, 0) for card in ci.card_names()]
composition_deck_extractor.feature_names = map(nice_feature_name,
                                               ci.card_names())


def score_deck_extractor(deck_comp, game_state, player):
    """Single-element feature: the player's current score."""
    return [game_state.player_score(player)]


def deck_size_deck_extractor(deck_comp, game_state, player):
    """Single-element feature: total cards in the deck."""
    return [sum(deck_comp.itervalues())]


def action_balance_deck_extractor(deck_comp, game_state, player):
    """Average net +actions contributed per card in the deck."""
    total = 0.0
    for card, quantity in deck_comp.iteritems():
        total += (ci.num_plus_actions(card) - ci.is_action(card)) * quantity
    deck_size = sum(deck_comp.itervalues()) or 1  # guard empty deck
    return [total / deck_size]
def name_getter(ind_str):
    """Map a stringified card index back to its card name."""
    index = int(ind_str)
    return card_info.card_names()[index]