def pt_evaluation_for_test(bn: OpenBooleanNetwork, test_params: Iterable):
    '''
    Evaluate *bn* once for every parameter set in *test_params*.

    Each iteration builds a throwaway simulation configuration, makes sure
    the webots world file exists, and runs a single simulation.

    Returns a tuple of lists — one list per metric produced by
    `evaluate_pt_bncontroller` (score, distance, positions, rotation),
    aligned index-by-index across all runs.
    '''
    results = []
    for params in test_params:
        # Ad hoc configuration, used for this simulation run only.
        cfg = GLOBALS.generate_sim_config()
        # Materialize the webots world file from the template when missing.
        if not cfg.webots_world_path.exists():
            logger.info('Generated webots world file from template...')
            stub_utils.generate_webots_worldfile(
                GLOBALS.webots_world_path,
                cfg.webots_world_path,
                cfg.arena_params,
            )
        # NOTE: runs are independent of each other, so this loop could be
        # parallelized in the future.
        results.append(evaluate_pt_bncontroller(cfg, bn, params))
    # Transpose the per-run tuples into per-metric lists.
    return tuple(map(list, zip(*results)))
def evaluate_pt_bncontroller(simconfig: Config, bn: OpenBooleanNetwork, on: tuple):
    '''
    Evaluate the given BN model as a robot controller on the given set of
    points/parameters.

    Returns:
        * function score
        * final distance
        * light initial position
        * agent initial position
        * agent y-axis rotation
    '''
    # `on` carries (light position, agent position, agent y-rotation, ...);
    # any extra entries are ignored.
    lpos, apos, yrot, *_ = on

    # Inject the spawn parameters into the simulation configuration.
    simconfig.sim_light_position = lpos
    simconfig.sim_agent_position = apos
    simconfig.sim_agent_yrot_rad = yrot

    stub_utils.run_simulation(simconfig, bn)

    # The simulator dumps its measurements to a JSON file; score them.
    sim_data = read_json(simconfig.sim_data_path)
    score, dist = simconfig.eval_aggr_function(sim_data, lpos)

    logger.info(
        'iDistance: (m)', lpos.dist(apos), '|',
        'yRot: (deg)', (yrot / math.pi * 180), '|',
        'fDistance: (m)', dist, '|',
        'score: (m2/W)', score, '|',
    )

    return score, dist, lpos, apos, yrot
def test_bncontrollers(bns: dict):
    '''
    Test each BN in the collection on the same set of points.
    Return the collected evaluation data for each test.
    '''
    # One shared set of spawn points so every BN faces identical scenarios.
    spawn_points = GLOBALS.generate_spawn_points()
    results = {}
    for key, network in bns.items():
        logger.info(f"Boolean Network {key}")
        # Parameters are regenerated per BN from the same spawn points.
        results[key] = test_bncontroller(network, generate_test_params(spawn_points))
    return results
def pt_evaluation_for_train(bn: OpenBooleanNetwork, ctx: VNSEvalContext, spawn_points: list):
    '''
    Aggregates test parameters and run a simulation for each set of them.

    Builds the cartesian product of light positions x agent positions x
    agent rotations, evaluates *bn* on every combination, and returns the
    (mean, stdev) of the resulting scores. When the new score beats the
    context's current one (per `ctx.comparator`), the model is persisted.
    '''
    param_grid = itertools.product(
        spawn_points['light_spawn_points'],
        spawn_points['agent_spawn_points'],
        spawn_points['agent_yrots'],
    )

    # Only the first metric (scores) matters for training.
    fscores, *_ = pt_evaluation_for_test(bn, param_grid)

    # NOTE(review): statistics.stdev requires at least two samples —
    # assumes the spawn-point product always yields >= 2 runs; confirm.
    new_score = statistics.mean(fscores), statistics.stdev(fscores)

    logger.info(
        'it:', ctx.it,
        'flips:', ctx.n_flips,
        'stalls:', ctx.n_stalls,
        'stagnation: ', ctx.stagnation,
        'dist --', 'old:', ctx.score, 'new:', new_score,
    )

    if ctx.comparator(new_score, ctx.score):
        # Persist the improved model; sub-optimal snapshots are kept only
        # when the score also clears the configured threshold.
        stub_utils.save_subopt_model(
            GLOBALS.bn_model_path,
            new_score,
            ctx.it,
            bn.to_json(),
            save_subopt=ctx.comparator(
                new_score,
                GLOBALS.train_save_suboptimal_models,
            ),
        )

    return new_score

################################################################################################
def generate_or_load_bn(params: BNParams, path: Path, save_virgin=False):
    '''
    Build a fresh random BN or load an existing one.

    When `check_path` reports *path* as a (created) directory, a new BN is
    generated from the template behaviour generator; otherwise *path* is
    treated as a model file and deserialized.

    :param params: BN generation parameters, unpacked into the generator.
    :param path: model directory (generation) or model file (loading).
    :param save_virgin: when generating, also dump the untrained BN to disk.
    :return: the generated or loaded OpenBooleanNetwork.
    '''
    # Load branch: path points at an existing serialized model.
    if not check_path(path, create_if_dir=True):
        bn = OpenBooleanNetwork.from_json(read_json(path))
        logger.info(f'BN loaded from {path}.')
        return bn

    # Generation branch: create a consistent random BN from the template.
    generator = template_behaviour_generator(*params)
    bn = generate_rbn(generator.new_obn, force_consistency=True)
    if save_virgin:
        # Keep a copy of the untrained ("virgin") network for reference.
        p = path / 'virgin_bn_{date}.json'.format(date=FROZEN_DATE)
        write_json(bn.to_json(), p)
        logger.info(f'Virgin BN saved to {p}.')
    return bn
### BN Generation / Loading ####################################################
bn = generate_or_load_bn(params=GLOBALS.bn_params, path=GLOBALS.bn_model_path, save_virgin=True)

### Launch search algorithm ##############################################
if not GLOBALS.train_generate_only:
    t = time.perf_counter()
    bn, ctx = GLOBALS.app_core_function(bn)
    logger.info(f"Search time: {time.perf_counter() - t}s")
    logger.info(ctx)

    # FIX: dropped the stray `mode=` kwarg — the template only contains
    # `{date}`, so `mode` was silently ignored by str.format.
    savepath = GLOBALS.bn_model_path / 'behavioural_bn_{date}.json'.format(date=FROZEN_DATE)
    # FIX: serialize explicitly via to_json(), matching how every other
    # write in this file persists a BN (see generate_or_load_bn).
    write_json(bn.to_json(), savepath)
    logger.info(f'Output model saved to {savepath}.')

logger.info('Closing...')
logger.flush()
# FIX: exit status 0 on the normal path — 1 conventionally signals failure.
exit(0)
# NOTE(review): this first expression is the tail of a call that opens BEFORE
# this chunk (the trailing `)` is unbalanced here) — presumably a logger/file
# setup call receiving the log path; confirm against the full file.
futils.get_dir(GLOBALS.app_output_path, create_if_dir=True) / '{key}_{date}.log'.format(
    key=GLOBALS.app['mode'],
    date=futils.FROZEN_DATE,
))

### Load Test Model(s) from Template paths ####################################
# `files` maps model keys to their source files, `bns` to the loaded networks.
files, bns = collect_bn_models(GLOBALS.bn_model_path)

### Test ######################################################################
t = time.perf_counter()
for i in range(GLOBALS.test_n_instances):
    logger.info(f'Test instance n°{i}')
    # Run the configured test routine over every loaded BN at once.
    instance_data = GLOBALS.app_core_function(bns)
    for k, test_data in instance_data.items():
        # Derive a unique per-model, per-instance output filename from the
        # model's own source filename.
        name = futils.gen_fname(
            futils.get_simple_fname(files[k].name, futils.FNAME_PATTERN, uniqueness=2),
            template='rtest_data_{name}' + f'_in{i}.json',
        )
        # assumes test_data is a pandas-like object exposing to_json with a
        # default_handler — TODO confirm.
        test_data.to_json(GLOBALS.test_data_path / name, default_handler=jsonrepr)
# Normalize a scalar tau into the full per-attractor transition-tau matrix;
# a dict is assumed to already be in the expected nested shape.
if not isinstance(tTau, dict):
    GLOBALS.slct_target_transition_tau = {
        "a0": {"a0": tTau, "a1": tTau},
        "a1": {"a0": tTau, "a1": tTau}
    }
# Push the remaining sweep parameters into the global config.
GLOBALS.slct_noise_rho = nRho
GLOBALS.slct_target_n_attractors = aN
GLOBALS.slct_input_steps_phi = iPhi

# First generation attempt, timed.
t = time.perf_counter()
bn = GLOBALS.app_core_function(mapper)
logger.info(time.perf_counter() - t)

# Retry until a BN with a fully populated attractors->input map is produced.
# NOTE(review): the retry call omits the `mapper` argument the first call
# passes — looks like a bug unless `mapper` has a default; confirm.
while bn is None or not bn.attractors_input_map or None in bn.attractors_input_map:
    logger.info('Failure. Retrying...')
    t = time.perf_counter()
    bn = GLOBALS.app_core_function()
    logger.info(time.perf_counter() - t)

logger.info(dict(**bn.attractors_input_map))
logger.info(dict(**bn.atm.dtableau))
logger.info(dict(**bn.atm.dattractors))

# Serialize the accepted model; the actual write presumably follows past the
# end of this chunk.
path = FOLDER / f'selective_bn_{iso8106(ms=3)}.json'
bnjson = bn.to_json()