        Qs.extend(logger.load_Qs())
        if has_entropy:
            Es.extend(logger.load_policy_entropies())
        stats = logger.load_stats()
        if stats['best_J'] > best_J:
            # Keep the stats and agent of the run with the highest best_J seen so far
            best_J = stats['best_J']
            best_stats = stats
            best_agent = logger.load_best_agent()
    
    if skip_cnt > 0:
        print('NUMBER OF FAILED RUNS: {}/{}'.format(skip_cnt, len(run_dirs)))
    
    print('Name:', res_id)
    print('Directory:', res_dir)

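    # Logger rooted at the result directory, used to store the aggregated data.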
    logger = BenchmarkLogger(log_dir=res_dir, log_id=res_id, use_timestamp=False)

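    # Persist the metrics concatenated across all successful runs, plus the
    # stats and agent of the overall best run.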
    logger.save_Js(Js)
    logger.save_Rs(Rs)
    logger.save_Qs(Qs)
    if has_entropy:
        logger.save_policy_entropies(Es)
    logger.save_stats(best_stats)
    logger.save_best_agent(best_agent)

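    # Produce the summary report for the aggregated results.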
    visualizer = BenchmarkVisualizer(logger)
    visualizer.save_report()


if __name__ == '__main__':
    res_dir, res_id = read_arguments_aggregate()
    run(res_dir, res_id)
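
# A hypothetical invocation of this aggregation script (the actual script name
# and flag names are defined by read_arguments_aggregate and may differ):
#
#   python aggregate_results.py --res_dir ./logs/ppo --res_id ppo_hopper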


# --- single-run execution script ---

def run(log_dir, log_id, **run_args):  # signature assumed from usage below
    # Load the pickled agent and environment builders for this run.
    agent_builder = BenchmarkLogger._load_pickle(
        os.path.join(log_dir, 'agent_builder.pkl'))
    env_builder = BenchmarkLogger._load_pickle(
        os.path.join(log_dir, 'environment_builder.pkl'))

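    # Logger for this individual run; reuse the existing directory and id
    # so the results land in the run's own folder.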
    logger = BenchmarkLogger(log_dir=log_dir,
                             log_id=log_id,
                             use_timestamp=False)

    logger.info('Starting experiment.')

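    # Execute the experiment itself; run_args carries the remaining settings
    # expected by exec_run.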
    result = exec_run(agent_builder, env_builder, **run_args)

    logger.info('Saving result.')

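    # Entropy traces are only available if the agent builder computes them.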
    cmp_E = agent_builder.compute_policy_entropy

    logger.save_Js([result['Js']])
    logger.save_Rs([result['Rs']])
    logger.save_Qs([result['Qs']])
    if cmp_E:
        logger.save_policy_entropies([result['Es']])
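    # The score holds the best J, R and Q (and E when entropy is computed).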
    new_score = result['score']
    new_agent = result['builders']
    stats = dict(best_J=new_score[0], best_R=new_score[1], best_Q=new_score[2])
    if cmp_E:
        stats.update(dict(best_E=new_score[3]))
    logger.save_stats(stats=stats)
    logger.save_best_agent(new_agent)

    logger.info('Finished execution.')