def main(args):
    if args.get('verbose', False):
        print(args)

    recovery_method_name = args["recovery_method"]
    recovery_params = args["recovery_params"]
    RecoveryMethodClass = getattr(recovery, recovery_method_name)

    graph = args.get("graph")
    if graph is None:
        graph = load_graph(args["graph_file"])

    samples = args.get("samples")
    if samples is None:
        samples = load_samples(args["samples_file"])

    recovery_method = RecoveryMethodClass(graph, samples, recovery_params)
    x = [graph.node[idx]['value'] for idx in sorted(graph.node)]
    x_hat = recovery_method.run()

    results = args.copy()
    results.update({"x_hat": x_hat, "nmse": nmse(x, x_hat)})

    results_file = args.get("results_file")
    if results_file is None:
        return results
    else:
        dump_results(results, results_file)

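# The nmse helper called above is not shown in this file.  A minimal sketch,
# assuming the conventional definition ||x - x_hat||^2 / ||x||^2; the
# project's own implementation may normalize differently.
import numpy as np

def nmse(x, x_hat):
    """Normalized mean squared error between the true signal x and the
    recovered signal x_hat (illustrative sketch, not the project's helper)."""
    x = np.asarray(x, dtype=float)
    x_hat = np.asarray(x_hat, dtype=float)
    return float(np.sum((x - x_hat) ** 2) / np.sum(x ** 2))
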
def test(model, loss, loader, xp, args):
    if not len(loader):
        return 0

    model.eval()

    metrics = xp.get_metric(tag=loader.tag, name='parent')
    timer = xp.get_metric(tag=loader.tag, name='timer')
    metrics.reset()

    if args.multiple_crops:
        epoch_test_multiple_crops(model, loader, xp, args.cuda)
    else:
        epoch_test(model, loader, xp, args.cuda)

    # measure elapsed time
    timer.update()

    xp.log_with_tag(loader.tag)

    if loader.tag == 'val':
        xp.Acc1_Val_Best.update(float(xp.acc1_val)).log()
        xp.Acck_Val_Best.update(float(xp.acck_val)).log()

    if args.verbosity:
        print_stats(xp, loader.tag)

    if args.eval:
        dump_results(xp, args)

def main(args):
    if args.get('verbose', False):
        print(args)

    sampling_method_name = args["sampling_method"]
    sampling_params = args["sampling_params"]
    SamplingMethodClass = getattr(sampling, sampling_method_name)

    graph = args.get("graph")
    if graph is not None:
        sampling_method = SamplingMethodClass(graph, sampling_params)
    else:
        graph_file = args["graph_file"]
        sampling_method = SamplingMethodClass(graph_file, sampling_params)

    results = args.copy()
    run_results = sampling_method.run()
    results.update(run_results)

    results_file = args.get("results_file")
    if results_file is None:
        return results
    else:
        dump_results(results, results_file)

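# Usage sketch for the sampling entry point above.  The class name
# "RandomWalkSampling", the parameter keys, and the file path are
# hypothetical placeholders, not names taken from the sampling package.
example_args = {
    "sampling_method": "RandomWalkSampling",   # hypothetical class in `sampling`
    "sampling_params": {"num_samples": 10},    # hypothetical parameters
    "graph_file": "./tmp/graph.json",
    # no "results_file": the results dict is returned instead of being dumped
}
# results = main(example_args)
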
def main(args):
    k = args["num_arms"]
    agent_cls_name = args["agent_class"]
    env = get_bandit(k, args["arms_mean"], args["arms_mean_params"],
                     args["arms_std"], args["arms_std_params"])
    num_runs = args["num_runs"]
    num_episodes = args["num_episodes"]
    epsilon = args["epsilon"]

    actions = np.zeros((num_runs, num_episodes), dtype=np.min_scalar_type(k))
    rewards = np.zeros((num_runs, num_episodes), dtype=np.float32)
    optimal_arms = np.zeros(num_runs, dtype=np.min_scalar_type(k))

    for run in range(num_runs):
        env.reset()
        agent = get_agent(env, agent_cls_name, num_episodes, epsilon)
        path = agent.learn()
        tuple_path = [(p["action"], p["reward"]) for p in path]
        actions[run, :], rewards[run, :] = list(zip(*tuple_path))
        optimal_arms[run] = env.get_optimal_arm()

    results = args.copy()
    results["timestamp"] = datetime.now().isoformat()
    results["actions"] = actions
    results["rewards"] = rewards
    results["optimal_arms"] = optimal_arms
    results["epsilon"] = epsilon

    results_file = args.get("results_file")
    if results_file is not None:
        dump_results(results_file, results, file_format="pickle")

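# The agent referenced by agent_cls_name is built elsewhere via get_agent.  A
# generic epsilon-greedy action selection, shown only to illustrate how the
# epsilon argument is typically used; this is not the repository's agent class.
import numpy as np

def epsilon_greedy_action(q_values, epsilon, rng=np.random):
    # Explore a random arm with probability epsilon, otherwise exploit the
    # arm with the highest estimated value.
    if rng.random() < epsilon:
        return int(rng.randint(len(q_values)))
    return int(np.argmax(q_values))

# With epsilon = 0.1 the greedy arm (index 1 here) is chosen ~90% of the time.
print(epsilon_greedy_action(np.array([0.2, 1.5, 0.7]), epsilon=0.1))
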
def test_dump_results():
    expected = {"a": 1, "b": 2}
    utils.dump_results(expected, "./tmp/test_results.json")
    with open('./tmp/test_results.json', 'r') as f:
        result = json.load(f)
    assert_dict_equal(result, expected)
    os.remove("./tmp/test_results.json")

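# The utils.dump_results helper exercised by this test is not shown here.  A
# minimal sketch consistent with the test (plain JSON serialization), assuming
# the target directory already exists; the real helper may support other
# formats (e.g. pickle) and create missing directories.
import json

def dump_results_sketch(results, results_file):
    # Write the results dict as JSON so json.load can read it back,
    # as the test above expects.
    with open(results_file, 'w') as f:
        json.dump(results, f)
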
def test_create_with_graph_file_sample_file(self):
    graph = nx.Graph([(0, 1), (1, 2)])
    samples = [0, 1]
    graph_path = "./tmp/graph1.json"
    samples_path = "./tmp/samples1.json"
    dump_graph(graph, graph_path)
    dump_results({'sampling_set': samples}, samples_path)

    graph_recovery_algorithm = GraphRecoveryAlgorithm(graph_path, samples_path)

    # TODO: use nx.is_isomorphic?
    expected = {
        'graph': json_graph.node_link_data(graph),
        'samples': samples
    }
    result = {
        'graph': json_graph.node_link_data(graph_recovery_algorithm.graph),
        'samples': graph_recovery_algorithm.samples
    }
    assert_dict_equal(result, expected)

    os.remove(graph_path)
    os.remove(samples_path)

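# dump_graph and the loading done inside GraphRecoveryAlgorithm are not shown
# in this test.  A plausible round trip through networkx's node-link JSON
# format, which the assertions above compare against, could look like this
# (the file path is illustrative only).
import json
import networkx as nx
from networkx.readwrite import json_graph

graph = nx.Graph([(0, 1), (1, 2)])
data = json_graph.node_link_data(graph)            # graph -> serializable dict
with open('./tmp/graph_roundtrip.json', 'w') as f:
    json.dump(data, f)
with open('./tmp/graph_roundtrip.json') as f:
    restored = json_graph.node_link_graph(json.load(f))   # dict -> graph
assert sorted(restored.edges()) == sorted(graph.edges())
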
# Fragment of the evaluation loop: update hit/success counters, record the
# normalized similarity distance and query time, and periodically print
# running statistics before dumping the accumulated results.
if t_class == x[1]:
    hits += 1
if last_hits < hits:
    succs += 1

if len(query_results) == 0:
    avg_dist = 0
else:
    avg_dist = class_distance / len(query_results)

results[c_type][layer][n_components]['similarity_dist'].append(
    (worst_case - avg_dist) / (worst_case - best_case))
results[c_type][layer][n_components]['avg_time'].append(et - st)

count += batch_size
if count % 500 == 0:
    mean_dist = np.mean(results[c_type][layer][n_components]['similarity_dist'])
    mean_time = np.mean(results[c_type][layer][n_components]['avg_time'])
    print('Evaluate Script :: C Type : ', c_type, ' // Layer : ', layer,
          ' // Dim : ', n_components, ' // Count : ', count)
    print('Evaluate Script :: Similarity Distance : ', mean_dist,
          ' // Avg Time : ', mean_time)
    print('Evaluate Script :: Success: ' + str(succs) + ' Hits: ' + str(hits))

utils.dump_results(results, c_type, distance_matrix_layer)
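
# Worked example of the similarity normalization recorded above, with
# hypothetical values: best_case = 0.0 and worst_case = 2.0.  An average query
# distance of 0.5 maps to (2.0 - 0.5) / (2.0 - 0.0) = 0.75, so 1.0 means a
# perfect match and 0.0 the worst possible retrieval.
best_case, worst_case, avg_dist = 0.0, 2.0, 0.5
print((worst_case - avg_dist) / (worst_case - best_case))  # 0.75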