def one_run(env, n_turns, steepness, noise):
    """Benchmark MCTSv3 against a uniform-random policy on `env` and plot both.

    Configures the environment in place, runs `trials` episodes per policy
    (budget scaled so total steps stay roughly constant across episode
    lengths), prints wall-clock time for each policy, and writes the combined
    plot to the 'temp' directory.

    :param env: environment exposing reset()/step(action) with a gym-style
        (obs, reward, done, info) return — assumed; confirm against caller.
    :param n_turns: episode length to configure on the env.
    :param steepness: env-specific difficulty knob, set on the env.
    :param noise: env noise factor, set on the env.
    """
    # Hoisted out of the timed section: the original imported `random`
    # inside the RND timing window, which skewed that measurement.
    import random

    env.max_turns = n_turns
    env.steepness = steepness
    env.noise_factor = noise
    # Keep total step budget (~20 * 400) constant regardless of episode length.
    trials = int(20 * 400 / n_turns)

    t = time.time()
    metrics_mcts_v3 = []
    for _ in range(trials):
        env.reset()
        m = Metric('step', 'score')
        root = Node(0, 10)
        mcts = Mcts(root)
        done = False
        while not done:
            action = mcts.decide()
            _, r, done, _ = env.step(action)
            mcts.register(r)
        # The tree root accumulates per-step results for the whole episode.
        for j, r in enumerate(root.results):
            m.add_record(j, r)
        metrics_mcts_v3.append(m)
    # sum() merges the per-episode Metric objects into one aggregate curve.
    metrics_mcts_v3 = sum(metrics_mcts_v3)
    print('Time for MCTSv3:', time.time() - t)

    t = time.time()
    metrics_rnd = []
    for _ in range(trials):
        env.reset()
        m = Metric('step', 'score')
        rand_results = []
        done = False
        while not done:
            # Uniform random action in [0, 10), matching Node(0, 10) bounds.
            action = random.random() * 10
            _, r, done, _ = env.step(action)
            rand_results.append(r)
        for j, r in enumerate(rand_results):
            m.add_record(j, r)
        metrics_rnd.append(m)
    print('Time for RND:', time.time() - t)

    plot_group(
        {'mcts_v3': metrics_mcts_v3, 'random': sum(metrics_rnd)},
        'temp',
        name=f'{n_turns}_st{steepness}_n{noise}',
    )
def plot_2_ways(filter: str):
    """Plot metrics whose name contains `filter`, once raw and once smoothed.

    Metrics from `all_metrics` are grouped by the portion of their name
    before the first '$' separator; each group is merged with sum() and
    plotted twice (precise and smoothed variants).

    :param filter: substring that selects which metrics to include.
        (Name shadows the `filter` builtin; kept for caller compatibility.)
    """
    metrics = [m for m in all_metrics if filter in m.name]
    unique_names = {m.name.split('$')[0] for m in metrics}
    grouped_ms = []
    for name in unique_names:
        # Bug fix: compare the '$'-stripped prefix — the same key used to
        # build `unique_names`. The original compared the full `m.name`,
        # so every metric whose name contained '$' was silently dropped.
        ms = [m for m in metrics if m.name.split('$')[0] == name]
        grouped_ms.append(sum(ms))
    plot_group(grouped_ms, smoothen=False, name=f'{filter}_precise')
    plot_group(grouped_ms, name=f'{filter}_smooth')
def main():
    """Render loss and accuracy plots from the first summary file found.

    NOTE(review): os.listdir order is arbitrary — this assumes the summary
    directory holds exactly one relevant file; confirm against the caller.
    """
    summary_file = os.listdir(summary_dir)[0]
    metrics_by_name = to_metrics(os.path.join(summary_dir, summary_file))
    plots_dir = os.path.join(generated_dir, 'plots')

    loss_curves = {name: m for name, m in metrics_by_name.items() if name in losses}
    accuracy_curves = {name: m for name, m in metrics_by_name.items() if name in accuracies}

    plot_group(loss_curves, plots_dir, 'losses')
    plot_group(accuracy_curves, plots_dir, 'accuracies')
def generate_report(problem: Problem, metrics: Dict[SolverFactory, Dict[str, Metric]]):
    """Plot one curve group per metric key, aggregated across solver factories.

    For every metric key found in any factory's metric dict, collects the
    matching Metric objects (after discarding the first 15% as warmup) and
    writes an unsmoothed group plot under reports/<problem>@<timestamp>.

    :param problem: problem instance; its str() names the report directory.
    :param metrics: per-factory dicts mapping metric key -> Metric.
    """
    timestamp = datetime.datetime.now().strftime("%m-%d_%H-%M-%S")
    report_dir = os.path.join(get_rootdir(), "reports", str(problem))

    # Regroup: factory -> {key -> Metric}  becomes  key -> [Metric, ...].
    # The factory itself is only a grouping artifact, so iterate values().
    by_key = defaultdict(list)
    for factory_metrics in metrics.values():
        for key, metric in factory_metrics.items():
            metric.discard_warmup(0.15)  # drop initial 15% of records
            by_key[key].append(metric)

    for key, metric_list in by_key.items():
        plot_group(
            metric_list,
            f"{report_dir}@{timestamp}",
            name=key,
            stdev_factor=0.1,
            smoothen=False,
        )
# --- Tail of an environment-step method; its `def` line is above this chunk ---
        result = self.rwrd(x, y)
        self.ctr += 1
        # Episode ends once the turn counter exceeds max_turns. NOTE(review):
        # `>` (not `>=`) means max_turns + 1 steps actually run — TODO confirm
        # this off-by-one is intended.
        if self.ctr > self.max_turns:
            self.done = True
        return result


if __name__ == "__main__":
    from ilya_ezplot import Metric, plot_group

    ga_1 = FakeGa(max_turns=400, noise_factor=5)

    def one_run(ga):
        # Roll one full episode, recording a (step, score) pair per turn.
        ga.reset()
        m = Metric('steps', 'score')
        while not ga.done:
            r = ga.step()
            m.add_record(ga.ctr, r)
        return m

    # 100 episodes per variant; sum() merges the Metric objects into one
    # aggregate curve per variant.
    metrics = [one_run(ga_1) for _ in range(100)]
    ga_rand = CompareGa(max_turns=400, noise_factor=5)
    metrics_rand = [one_run(ga_rand) for _ in range(100)]
    plot_group({'mcts': sum(metrics), 'random': sum(metrics_rand)}, 'temp')
def maping(env):
    """Evaluate the agent on one env; return its (train, test) metrics or None.

    NOTE(review): the name looks like a typo for `mapping`; kept unchanged
    for compatibility with any external callers.

    :param env: environment handed to `test_env`.
    :return: whatever `test_env` returns (unpacked as (train, test) by the
        consumer below), or None if evaluation raised.
    """
    try:
        return test_env(env, agent_constructor, n_tries=3, n_episodes=4)
    except Exception as e:
        # Best-effort sweep: one failing env must not kill the whole run.
        print(e, 'In mapping')
        return None
    finally:
        time.sleep(1)  # presumably throttles env setup/teardown — TODO confirm


if __name__ == "__main__":
    from concurrent import futures

    # Context manager guarantees worker processes are shut down (the
    # original never called executor.shutdown()).
    with futures.ProcessPoolExecutor() as executor:
        todos = {executor.submit(maping, env): env for env in envs}
        scores = {}
        for future in futures.as_completed(todos):
            scores[str(todos[future])] = future.result()

    for k, v in scores.items():
        # Bug fix: `maping` returns None on failure; the original crashed
        # here on `train, test = v`. Skip failed envs instead.
        if v is None:
            continue
        train, test = v
        plot_group(
            {'train': train, 'test': test},
            'plots',
            name=f"{Ddpg.__name__} vs {k}",
        )