def plot_from_folder(folder, prior, noise_mag=0.2):
    """
    Load results from folder and make scatterplot with normal play and cross play.
    """
    r1s, r2s = load_results(folder, name=prior + '_Pfs_0')
    xr1s, xr2s = load_results(folder, name=prior + '_XPfs_0')
    r1s, r2s = add_noise(r1s, r2s, mag=noise_mag)
    xr1s, xr2s = add_noise(xr1s, xr2s, mag=noise_mag)
    sns.scatterplot(x=r1s, y=r2s, fc='none', ec='orange', linewidth=1.3,
                    label="Jointly Trained")
    sns.scatterplot(x=xr1s, y=xr2s, fc='none', ec='blue', linewidth=1.3,
                    label="Cross Tests")
    config = load_config(folder)
    env = get_game(**config, max_steps=100)
    polygon = env.outcomes_polygon()
    plt.fill(polygon[0], polygon[1], alpha=0.1, color='purple')
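# A minimal usage sketch, not part of the original source: the folder path and
# prior name below are hypothetical placeholders, assuming the run folder
# contains results saved under '<prior>_Pfs_0' and '<prior>_XPfs_0'.
plot_from_folder('runs/example_run', prior='example_prior', noise_mag=0.2)
plt.legend()
plt.show()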
def test_save_results(self):
    results = utils.load_results()
    temp_file_name = utils.get_data_folder() + 'temp.json'
    utils.save_results(results, file_name=temp_file_name)
    temp = utils.load_results(file_name=temp_file_name)
    self.assertEqual(results, temp)
def main():
    parser = argparse.ArgumentParser(description='Return AP Election data')
    parser.add_argument('-d', '--file', action='store')
    parser.add_argument('--races', action='store')
    parser.add_argument('-o', '--output', action='store')
    parser.add_argument('--json', action='store_true')
    parser.add_argument('--csv', action='store_true')
    parser.add_argument('--tsv', action='store_true')
    args = parser.parse_args()
    if args.file:
        race_ids = None
        if args.races:
            race_ids = args.races.split(',')
        electiondate, races = utils.open_file(args.file, race_ids)
        payload = utils.load_results(electiondate, races)
        if args.json:
            utils.output_json(payload)
        elif args.output and args.output == 'json':
            utils.output_json(payload)
        elif args.output and args.output == 'tsv':
            utils.output_tsv(payload)
        else:
            utils.output_csv(payload)
    else:
        print("Please specify a data file with -d '/path/to/json/file.json'")
def get_weighted_scores(ids):
    # sum(log(dollars_per_player_i) / N for i in N)
    scores_weighted_all = {}
    mean = numpy.mean([i for i, _ in enumerate(ids)])
    for i, contest_id in enumerate(ids):
        results = load_results('results/contest-standings-%s.csv' % contest_id)
        numentries = len(results)
        prize_map = get_prize_map(numentries)
        print(contest_id, numentries)
        # Weights: [0, ..., 2]
        weight = 1  # i / mean
        scores_weighted = analyze_results(
            results,
            lambda x: sum([get_weighted_score(y, prize_map) for y in x]) / float(len(x)) * weight
        )
        for player, score in scores_weighted.items():
            if player in scores_weighted_all:
                scores_weighted_all[player].append(score)
            else:
                scores_weighted_all[player] = [score]
    for player, scores in scores_weighted_all.items():
        scores_weighted_all[player] = (
            numpy.median(scores),
            # numpy.mean(scores),
            len(scores)
        )
    # Return { player: (score, num data) }
    return scores_weighted_all
def parse_results(log_dirs: List[str], prefixes: List[str] = None):
    results = {}
    if prefixes is None:
        prefixes = ["" for _ in log_dirs]
    assert len(prefixes) == len(log_dirs)
    for prefix, log_dir in zip(prefixes, log_dirs):
        results.update(load_results(log_dir, prefix))
    return results
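# Hypothetical usage sketch (directory names and prefixes are placeholders,
# not from the original): the prefixes namespace the keys coming from each log
# directory so results from several runs can be merged into one dict.
merged = parse_results(
    log_dirs=["logs/run_a", "logs/run_b"],
    prefixes=["run_a/", "run_b/"],
)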
def run():
    salaries = load_salaries('salaries/dk_nba_salaries_2015_11_07.csv')
    results = load_results('results/contest-standings-13876064.csv')
    numentries = len(results)
    prize_map = get_prize_map(numentries)
    scores_mean = analyze_results(
        results,
        lambda x: numpy.mean(x) / len(results)
    )
    scores_own = analyze_results(
        results,
        lambda x: len(x) / float(len(results))
    )
    scores_weighted = analyze_results(
        results,
        lambda x: sum([get_weighted_score(y, prize_map) for y in x]) / float(len(x))
    )
    sorted_mean = sorted(scores_mean.items(), key=lambda x: x[1], reverse=False)
    sorted_own = sorted(scores_own.items(), key=lambda x: x[1], reverse=True)
    sorted_weighted = sorted(scores_weighted.items(), key=lambda x: x[1], reverse=True)

    print('Sorted by score:')
    print('\tPlayer\t\tScore\tOwn\tWeighted')
    for player, score in sorted_mean[:30]:
        if player in salaries:
            print('\t%s\t%.4f\t%.2f\t%.4f\t%s' % (player[:15], score,
                  scores_own[player] * 100, scores_weighted[player], salaries[player]))
        else:
            print('\t%s\t%.4f\t%.2f\t%.4f' % (player[:15], score,
                  scores_own[player] * 100, scores_weighted[player]))

    print('Sorted by ownership:')
    print('\tPlayer\t\tScore\tOwn\tWeighted')
    for player, own in sorted_own[:30]:
        if player in salaries:
            print('\t%s\t%.4f\t%.2f\t%.4f\t%s' % (player[:15], scores_mean[player],
                  own * 100, scores_weighted[player], salaries[player]))
        else:
            print('\t%s\t%.4f\t%.2f\t%.4f' % (player[:15], scores_mean[player],
                  own * 100, scores_weighted[player]))

    print('Sorted by weighted score:')
    print('\tPlayer\t\tScore\tOwn\tWeighted')
    for player, weighted_score in sorted_weighted[:50]:
        if player in salaries:
            print('\t%s\t%.4f\t%.2f\t%.4f\t%s' % (player[:15], scores_mean[player],
                  scores_own[player] * 100, weighted_score, salaries[player]))
        else:
            print('\t%s\t%.4f\t%.2f\t%.4f' % (player[:15], scores_mean[player],
                  scores_own[player] * 100, weighted_score))
def plot_res_with_pol1(folder, prior, noise_mag=0.2):
    """
    Plot results colour-coded by p1's policy.
    For use with one-shot policies from mem1 games.
    """
    r1s, r2s = load_results(folder, name=prior + '_Pfs_0')
    p1s, _ = load_results(folder, name='Pols_0')
    c1s = oneshot_policies_to_coords(p1s)
    r1s, r2s = add_noise(r1s, r2s, mag=noise_mag)
    sns.scatterplot(x=r1s, y=r2s, hue=coord_to_strat(c1s), linewidth=1.3)
    config = load_config(folder)
    env = get_game(**config, max_steps=100)
    polygon = env.outcomes_polygon()
    plt.fill(polygon[0], polygon[1], alpha=0.1, color='purple')
def __init__(self, built, connections_df, edge_labels, net_name):
    if not built:
        # node names to index the Series values
        self.frm_nodename, self.to_nodename = edge_labels
        self.net = self.build_network(connections_df)
        utils.save_results(self.net, net_name)
    else:
        self.net = utils.load_results(net_name)
def calc_total_linkage_matrix(number_of_docs, k):
    results = load_results()
    all_link_mat = [
        res['results'].linkage_table for res in results
        if (res['params'].get_list()[-1] == number_of_docs)
    ]
    total_link = np.zeros((number_of_docs, number_of_docs))
    for link_mat in all_link_mat:
        total_link = total_link + link_mat
    return total_link
def test_sort_by_followers(self):
    results = utils.load_results()
    app_ids = utils.sort_by_followers(results)
    self.assertGreater(len(app_ids), 0)
    first_amount = results[str(app_ids[0])]['steam_followers']
    last_amount = results[str(app_ids[-1])]['steam_followers']
    self.assertGreaterEqual(int(first_amount), int(last_amount))
def run(results_file, salaries, start=0, end=-1):
    positions = {
        'PG': [], 'SG': [], 'SF': [], 'PF': [],
        'C': [], 'F': [], 'G': [], 'UTIL': []
    }
    position_index_map = {
        'PG': 0, 'SG': 1, 'SF': 2, 'PF': 3,
        'C': 4, 'F': 5, 'G': 6, 'UTIL': 7
    }
    results = load_results(results_file)
    salary_means = {}
    for result in results[start:end]:
        for position in positions.keys():
            positions[position].append(result[position_index_map[position]])
    for position, players in positions.items():
        pos_salaries = [salaries[player][0] for player in players]
        salary_means[position] = numpy.mean(pos_salaries)
    return salary_means
def merge_all_results(self, gpx_results):
    # merge the current gpx_results DataFrame with the all_results history
    all_results = load_results(self.gps_func_description, self.all_results_path)
    if all_results is None:
        all_results = gpx_results
    elif self.author in all_results.index:
        all_results.loc[self.author, :] = gpx_results
    else:
        # merge
        all_results = pd.concat([all_results, gpx_results])
    logger.debug(
        f"\nloaded all results history and merged with {self.author} results:\n"
        f"{all_results.head(30)}\n"
    )
    return all_results
def __init__(self):
    fields_of_interest = ("owner", "fork", "forks_count", "created_at",
                          "pushed_at", "updated_at", "pulls_url")
    # The list of organizations in utils.data is in dept:[org] form;
    # this loop unpacks it into an org:dept dict so the department
    # can be looked up given an org key.
    data = utils.data
    self.org_dept = dict()
    for dept in data.keys():
        orgs = data[dept]
        for org in orgs:
            self.org_dept[org] = dept
    self.org_repos = utils.load_results("dept_repo_df")
def plot_oneshot_policies(folder):
    """
    Makes a heatmap of policies. X-axis is all combinations of p1's policies
    for each type, Y-axis is p2's.
    """
    p1s, p2s = load_results(folder, name='Pols_0')
    c1s = oneshot_policies_to_coords(p1s)
    c2s = oneshot_policies_to_coords(p2s)
    hmap = oneshot_coords_to_heatmap(c1s, c2s)
    labels = ['AA', 'AB', 'BA', 'BB', 'None']
    cmap = sns.light_palette((260, 75, 60), input="husl", as_cmap=True)
    sns.heatmap(data=hmap, xticklabels=labels, yticklabels=list(reversed(labels)),
                cmap=cmap, linewidths=.5, annot=True)
def main(args):
    results = load_results(args["results_file"])
    actions = results["actions"]
    rewards = results["rewards"]
    optimal_arms = results["optimal_arms"]
    num_runs = results["num_runs"]
    num_episodes = results["num_episodes"]

    average_rewards = np.sum(rewards, axis=0) / num_runs
    optimal_actions = np.sum(actions == optimal_arms.reshape(num_runs, 1), axis=0) / num_runs

    steps = np.arange(1, num_episodes + 1)
    cumulative_average_rewards = np.cumsum(average_rewards) / steps
    optimal_action_prop = 100.0 * np.cumsum(optimal_actions) / steps

    fig = plt.figure(1)
    plt.subplot(211)
    plt.plot(steps, cumulative_average_rewards)
    plt.ylabel("Average Reward")
    plt.xlabel("Steps")

    plt.subplot(212)
    plt.plot(steps, optimal_action_prop)
    plt.ylabel("% Optimal Action")
    plt.xlabel("Steps")
    plt.yticks(np.arange(0, 101, 20))
    percentage_fmt = '{x:.0f}%'
    yticks = mtick.StrMethodFormatter(percentage_fmt)
    fig.gca().yaxis.set_major_formatter(yticks)

    vis_dir = None
    if vis_dir is not None:
        vis_file = None  # TODO
        plt.savefig(vis_file, transparent=True, bbox_inches='tight', pad_inches=0)
    else:
        plt.show()
def __init__(self, built):
    fork_fields = [
        "watchers_count",
        "created_at",
        "updated_at",
        "pushed_at"
    ]
    gov_devs = Government_Collaborators(True).get_members()
    go = GovernmentOrganiations()
    self.org_list = go.get_org_list()
    self.org_dict = go.org_dept
    repos_df = go.org_repos
    repos_df = repos_df[repos_df["forks_count"] > 0]
    if not built:
        self.fork_list = self.get_fork_records(repos_df, fork_fields, gov_devs)
    else:
        self.fork_list = utils.load_results("forks_df")
def __init__(self, dag_filename):
    self.dag_filename = os.path.abspath(dag_filename)
    self.nodes = None
    self.components = None
    self.enrichment_dir = None
    self.termdb_file = None
    self.saddlesum_cmd_path = None
    self.component_counter = None
    self.scale = None
    self.p2c_counts = None
    self._nr2complex = None
    data = load_results(self.dag_filename)
    self.__dict__.update(data)
    self._set_enrichment_params()
def main():
    results = load_results(os.path.abspath(__file__))
    plt.figure(figsize=(15, 8))
    plt.suptitle(
        "X_Train: {}, X_Test: {}, Tests Run: {}, Runtime: {}, Max: {}, Min: {}"
        .format(
            results[0]["params"]["n_train"],
            results[0]["params"]["n_test"],
            len(results),
            ft(sum([x["runtime"] if "runtime" in x else 0.0 for x in results])),
            ft(max([x["runtime"] if "runtime" in x else 0.0 for x in results])),
            ft(min([x["runtime"] if "runtime" in x else 0.0 for x in results]))))
    plt.subplots_adjust(bottom=0.08, top=0.9, left=0.08, right=0.95,
                        wspace=0.2, hspace=0.4)

    # Rows and cols to display
    # Note: All graphs are written to file when save_plots() is called
    # regardless of whether they are called here in main()
    global rows
    global cols
    rows = 3
    cols = 3

    # Comment out any graphs you wish to not display and adjust vals of rows & cols
    FCN_acc_v_hl(results)
    FCN_ttrain_v_hl(results)
    Conv_nc_v_acc(results)
    Pool_Comparison(results)
    Activation_Comparison_acc(results)
    Activation_Comparison_f1(results)
    channel_acc(results)
    epoch_acc(results)
    lr_acc(results)

    # Display graphs on screen
    plt.show()
def main():
    results = load_results(os.path.abspath(__file__))
    plt.figure(figsize=(15, 8))
    plt.suptitle(
        "X_Train: {}, X_Test: {}, Tests Run: {}, Runtime: {}, Max: {}, Min: {}"
        .format(
            results[0]["param"]["n_train"],
            results[0]["param"]["n_test"],
            len(results),
            ft(sum([x["run"]["runtime"] for x in results])),
            ft(max([x["run"]["runtime"] for x in results])),
            ft(min([x["run"]["runtime"] for x in results]))))
    plt.subplots_adjust(bottom=0.08, top=0.9, left=0.08, right=0.95,
                        wspace=0.2, hspace=0.4)

    # Rows and cols to display
    # Note: All graphs are written to file when save_plots() is called
    # regardless of whether they are called here in main()
    global rows
    global cols
    rows = 4
    cols = 3

    # Comment out any graphs you wish to not display and adjust vals of rows & cols
    acc_v_pca(results)
    f1_pc_v_dimensionality(results)
    f1_v_pca(results)
    poly_c_v_acc(results)
    poly_c_v_f1(results)
    poly_degree_v_acc(results)
    poly_degree_v_f1(results)
    rbf_c_v_acc(results)
    rbf_c_v_f1(results)
    rbf_gamma_v_acc(results)
    rbf_gamma_v_f1(results)

    # Display graphs on screen
    plt.show()
except subprocess.CalledProcessError as e:
    print(e)
    print('exiting script because there was an error copying updated scores.txt to submissions')
    sys.exit()

# load data
team2results = dict()
for team, folder in team2folder.items():
    if team == 'paramitamirza':
        continue
    path_scores_txt = os.path.join(folder, 'scores.txt')
    team_results = utils.load_results(path_scores_txt)
    team2results[team] = team_results

assert len(team2results) == 5

team2official_name = {
    '*Piek': '*NewsReader',
    'CarlaAbreu': 'CarlaAbreu',
    'superlyc': 'NAI-SEA',
    'baseline1': 'Baseline',
    'IDDE': 'ID-DE'
}

# create official results
utils.create_official_results(team2results, team2official_name)
import tabulate

sns.set(palette='Set2', font_scale=1.3)

# %%
# %load_ext autoreload
# %%
# %autoreload 1
# %aimport utils
# %%
import utils as u

# %%
results, unpivoted, models, datasets = u.load_results()

# %% [markdown]
# # Experiment 6: Midi-norm, single voice (SATB)
#
# 012 (soprano), 006 (alto), 008 (tenor), 010

# %%
experiment6_models = ['012', '006', '008', '010']
experiment6 = results[results.model.isin(experiment6_models)]

# %%
sns.boxplot(data=experiment6, y='sdr', x='source', showfliers=False)
import numpy as np
import pandas as pd
# import sklearn as sk
import time

from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, accuracy_score, roc_auc_score

import dirs
import defines as defs
from utils import load_results
from vis_functions import plot_hyp

modelName = "AdaBoost"
cvResultsDf, predictions = load_results(modelName)
plot_hyp(cvResultsDf, modelName)

modelName = "Decision Tree"
cvResultsDf, predictions = load_results(modelName)
plot_hyp(cvResultsDf, modelName)

modelName = "Nearest Neighbors"
cvResultsDf, predictions = load_results(modelName)
plot_hyp(cvResultsDf, modelName)

modelName = "Random Forest"
cvResultsDf, predictions = load_results(modelName)
plot_hyp(cvResultsDf, modelName)
def setUp(self):
    self.electiondate, races = utils.open_file(self.file_path)
    self.candidate_reporting_units = utils.load_results(
        self.electiondate, races)
import pandas as pd
from itertools import combinations
from scipy.stats import wilcoxon, friedmanchisquare

from utils import load_results

results_df = load_results('data/offline_analysis_results.csv')

lda_logloss = results_df[(results_df["number of sensors"] == 2) &
                         (results_df["Classifier"] == "LDA")].logloss.values
rda_logloss = results_df[(results_df["number of sensors"] == 2) &
                         (results_df["Classifier"] == "RDA")].logloss.values
qda_logloss = results_df[(results_df["number of sensors"] == 2) &
                         (results_df["Classifier"] == "QDA")].logloss.values

st, p = friedmanchisquare(lda_logloss, rda_logloss, qda_logloss)
print("Friedman test outcome")
print("------------------------------------")
print("statistic: {:.3f}, p-value {:.3e}".format(st, p))
print('\n')

# Pair-wise comparisons
alpha = 0.05
comparison_df = pd.DataFrame(columns=["Algorithm 1", "Algorithm 2", "p-value"])
df_idx = 0
algorithm_names = ["LDA", "RDA", "QDA"]
algorithm_accuracies = [lda_logloss, rda_logloss, qda_logloss]
pvals = []
for ((name_ii, name_jj), (ii, jj)) in zip(combinations(algorithm_names, r=2),
                                          combinations(algorithm_accuracies, r=2)):
    _, p = wilcoxon(ii, jj)
    comparison_df.loc[df_idx] = [name_ii, name_jj, p]
    df_idx += 1
pvals = comparison_df["p-value"].values
num_comp = pvals.size
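# A possible continuation, not present in the original snippet: alpha, num_comp
# and comparison_df above suggest a multiple-comparison correction for the
# pairwise Wilcoxon tests. A minimal Bonferroni sketch, assuming that intent:
corrected_alpha = alpha / num_comp
comparison_df["significant"] = comparison_df["p-value"] < corrected_alpha
print("Pairwise Wilcoxon tests (Bonferroni-corrected alpha = {:.4f})".format(corrected_alpha))
print(comparison_df)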
def train(args):
    model = NTMOneShotLearningModel(args)
    data_loader = OmniglotDataLoader(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1, 2, 3"
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    if not os.path.exists(args.save_dir + '/' + args.model + '_' + args.label_type):
        os.makedirs(args.save_dir + '/' + args.model + '_' + args.label_type)

    with tf.Session() as sess:
        if args.restore_training:
            saver = tf.train.Saver()
            ckpt = tf.train.get_checkpoint_state(
                args.save_dir + '/' + args.model + '_' + args.label_type)
            saver.restore(sess, ckpt.model_checkpoint_path)
            last_episode = int(str(ckpt.model_checkpoint_path).split('-')[-1])
            all_acc_train, all_loss_train = load_results(args, last_episode, mode='train')
            all_acc_test, all_loss_test = load_results(args, last_episode, mode='test')
        else:
            saver = tf.train.Saver(tf.global_variables())
            tf.global_variables_initializer().run()
            all_acc_train = all_acc_test = np.zeros(
                (0, args.seq_length // args.n_classes))
            all_loss_train = all_loss_test = np.array([])

        train_writer = tf.summary.FileWriter(
            args.tensorboard_dir + args.model + '_' + args.label_type + '/train/',
            sess.graph)
        test_writer = tf.summary.FileWriter(
            args.tensorboard_dir + args.model + '_' + args.label_type + '/test/')

        print('---------------------------------------------------------------------------------------------')
        print(args)
        print('---------------------------------------------------------------------------------------------')
        print("1st\t2nd\t3rd\t4th\t5th\t6th\t7th\t8th\t9th\t10th\tepisode\tloss")

        for episode in range(args.num_episodes):
            # Train
            x_image, x_label, y = data_loader.fetch_batch(
                args, mode='train', augment=args.augment,
                sample_strategy=args.sample_strategy)
            feed_dict = {model.x_image: x_image, model.x_label: x_label, model.y: y}
            sess.run(model.train_op, feed_dict=feed_dict)
            if episode % args.disp_freq == 0 and episode > 0:
                output, train_loss = sess.run([model.o, model.loss], feed_dict=feed_dict)
                summary_train = sess.run(model.loss_summary, feed_dict=feed_dict)
                train_writer.add_summary(summary_train, episode)
                train_acc = compute_accuracy(args, y, output)
                all_acc_train, all_loss_train = display_and_save(
                    args, all_acc_train, train_acc, all_loss_train, train_loss,
                    episode, mode='train')

            # Test
            if episode % args.test_freq == 0 and episode > 0:
                x_image, x_label, y = data_loader.fetch_batch(
                    args, mode='test', augment=args.augment,
                    sample_strategy=args.sample_strategy)
                feed_dict = {model.x_image: x_image, model.x_label: x_label, model.y: y}
                output, test_loss = sess.run([model.o, model.loss], feed_dict=feed_dict)
                summary_test = sess.run(model.loss_summary, feed_dict=feed_dict)
                test_writer.add_summary(summary_test, episode)
                test_acc = compute_accuracy(args, y, output)
                all_acc_test, all_loss_test = display_and_save(
                    args, all_acc_test, test_acc, all_loss_test, test_loss,
                    episode, mode='test')

            # Save model
            if episode % args.save_freq == 0 and episode > 0:
                saver.save(sess,
                           args.save_dir + '/' + args.model + '_' + args.label_type + '/model.tfmodel',
                           global_step=episode)
def get_members(self):
    return utils.load_results("government_developers")
def test_print_formatted_results(self):
    results = utils.load_results()
    self.assertTrue(utils.print_formatted_results(results))
def setUp(self): benchmark_json = "yeast/benchmark/pre_assembly_stats.json" result_json = "yeast/0-rawreads/report/pre_assembly_stats.json" self.benchmark = load_results(benchmark_json) self.result = load_results(result_json)
def test_load_results(self):
    results = utils.load_results()
    self.assertGreater(len(results), 0)
def setUp(self): benchmark_json = "yeast/benchmark/asm_stats.json" result_json = "yeast/2-asm-falcon/asm_stats.json" self.benchmark = load_results(benchmark_json) self.result = load_results(result_json)