def define_jobs_context(self, context):
    """For every configured MDP: instance it, report it, solve it with every
    configured solver, and resample each solution at several point counts,
    attaching resample/tension reports at the innermost level."""
    options = self.get_options()
    config_mdps = get_conftools_tmdp_smdps()
    id_mdps = config_mdps.expand_names(options.mdps)
    config_solvers = get_conftools_tmdp_smdp_solvers()
    id_solvers = config_solvers.expand_names(options.solvers)
    for c_mdp, id_mdp in iterate_context_names(context, id_mdps):
        c_mdp.add_extra_report_keys(id_mdp=id_mdp)
        mdp = c_mdp.comp_config(instance_mdp, id_mdp)
        # Local import, kept as in the original code.
        from tmdp.programs.value_iteration import report_mdp_display
        display_report = c_mdp.comp_config(report_mdp_display, mdp)
        c_mdp.add_report(display_report, 'report_mdp_display')
        for c_solver, id_solver in iterate_context_names(c_mdp, id_solvers):
            c_solver.add_extra_report_keys(id_solver=id_solver)
            solve_result = jobs_solve(c_solver, mdp, id_solver)
            points = [100, 200, 500]
            for c_pts, num_points in iterate_context_names(c_solver, points):
                c_pts.add_extra_report_keys(num_points=num_points)
                resampled = c_pts.comp(resample, mdp, solve_result,
                                       num_points=num_points)
                c_pts.add_report(c_pts.comp(report_resample, mdp, resampled),
                                 'report_resample')
                c_pts.add_report(c_pts.comp(report_tension, mdp, resampled),
                                 'report_tension')
def instance_reports(context):
    """Create example reports for every (param1, param2) combination,
    tagging each child context with its parameter values."""
    param1s = ['a', 'b']
    param2s = [1, 2]
    for c1, param1 in iterate_context_names(context, param1s):
        c1.add_extra_report_keys(param1=param1)
        for c2, param2 in iterate_context_names(c1, param2s):
            c2.add_extra_report_keys(param2=param2)
            report1 = c2.comp(report_example1, param1=param1, param2=param2)
            c2.add_report(report1, 'report_example1')
            report2 = c2.comp(report_example2, param1=param1, param2=param2)
            c2.add_report(report2, 'report_example2')
def jobs_registrar(context, cm, create_reports=False):
    """Define test jobs for every objspec registered in the ConfigMaster `cm`,
    then register the 'simple' tests."""
    assert isinstance(cm, ConfigMaster)
    # Sep 15: remove name
    # context = context.child(cm.name)
    context = context.child("")
    names = sorted(cm.specs.keys())
    names2test_objects = context.comp_config_dynamic(get_testobjects_promises, cm)
    for c, name in iterate_context_names(context, names):
        c.comp_config_dynamic(
            define_tests_for,
            cm=cm,
            name=name,
            names2test_objects=names2test_objects,
            pairs=ComptestsRegistrar.objspec2pairs[name],
            functions=ComptestsRegistrar.objspec2tests[name],
            some=ComptestsRegistrar.objspec2testsome[name],
            some_pairs=ComptestsRegistrar.objspec2testsomepairs[name],
            create_reports=create_reports)
    jobs_registrar_simple(context)
def define_jobs_context(self, context):
    """Instance every configured batch and create its benchmark jobs."""
    batches_library = get_conftools_batches()
    batches = batches_library.expand_names(self.options.batches)
    for c, id_batch in iterate_context_names(context, batches):
        # NOTE(review): writes extra_report_keys directly rather than
        # calling add_extra_report_keys() as sibling code does — confirm
        # this is intentional before unifying.
        c.extra_report_keys['batch'] = id_batch
        batch = batches_library.instance(id_batch)
        create_bench_jobs(c, batch, alltestcases=self.options.alltestcases)
def jobs_learnp_and_servo(context, data_central, explogs_learn, explogs_test,
                          agents, robots):
    """ Learn parallel, create servo field """
    jobs_learn_parallel(context, data_central, explogs_learn, agents, robots)
    for c, id_robot in iterate_context_names(context, robots):
        jobs_servo_field_agents(c,
                                data_central=data_central,
                                id_robot=id_robot,
                                agents=agents,
                                episodes=explogs_test)
def jobs_servo(context, data_central, combinations, explogs_test):
    """Schedule servo-field jobs for every (robot, agent) combination and
    every test episode."""
    recipe_agent_servo(context, create_report=True)
    for c, id_robot, id_agent in iterate_context_combinations(context,
                                                              combinations):
        for cc, episode in iterate_context_names(c, explogs_test):
            jobs_servo_field(cc,
                             data_central=data_central,
                             id_robot=id_robot,
                             id_agent=id_agent,
                             id_episode=episode.id_episode,
                             **episode.params)
def jobs_parallel_learning(context, data_central, id_agent, id_robot, episodes,
                           intermediate_reports=True, final_report=True,
                           episodes_per_tranche=1):
    """ In this way, the agent learns separately on each log, and then
        the instances are merged using the merge() function.

        Needs: "episode-ready", id_robot, id_episode

        Returns the promise for the agent with learning complete
        (tuple agent, state)
    """
    tranches = get_tranches(episodes, episodes_per_tranche=episodes_per_tranche)
    ntranches = len(tranches)
    tranches_names = ['tranche%02d' % i for i in range(ntranches)]
    agents = []
    for i, (c, _) in enumerate(iterate_context_names(context, tranches_names)):
        tranche_episodes = tranches[i]
        # One "episode ready" resource per episode in this tranche.
        extra_dep = []
        for id_episode in tranche_episodes:
            ready = c.get_resource(RM_EPISODE_READY, id_robot=id_robot,
                                   id_episode=id_episode)
            extra_dep.append(ready)
        agent_i = c.subtask(LearnLogNoSave,
                            boot_root=data_central.get_boot_root(),
                            agent=id_agent, robot=id_robot,
                            episodes=tranche_episodes,
                            add_job_prefix='',
                            extra_dep=extra_dep)
        agents.append(agent_i)
        if intermediate_reports and ntranches > 1:
            # NOTE(review): id_episode is the loop variable left over from
            # the dependency loop above, i.e. the *last* episode of the
            # tranche — confirm this is the intended progress label.
            progress = '%s' % id_episode
            report = c.comp_config(get_agentstate_report, agent_i, progress,
                                   job_id='report')
            c.add_report(report, 'agent_report_partial',
                         id_agent=id_agent, id_robot=id_robot,
                         progress=progress)
    agent_state = jobs_merging_recursive(context, agents)
    save = context.comp_config(save_state, data_central, id_agent, id_robot,
                               agent_state)
    if final_report:
        report = context.comp(get_agentstate_report, agent_state, 'all',
                              job_id='report')
        context.add_report(report, 'agent_report',
                           id_agent=id_agent, id_robot=id_robot)
    return save
def define_tests_some(context, objspec, names2test_objects, some,
                      create_reports):
    """Define jobs for 'some' tests: each test entry selects a subset of the
    test objects (via its 'which' pattern) and applies its function to each."""
    test_objects = names2test_objects[objspec.name]
    if not test_objects:
        print('No test_objects for objects of kind %r.' % objspec.name)
        return
    if not some:
        print('No mcdp_lang_tests specified for objects of kind %r.'
              % objspec.name)
        return
    db = context.cc.get_compmake_db()
    for entry in some:
        f = entry['function']
        which = entry['which']
        dynamic = entry['dynamic']
        results = {}
        c = context.child(f.__name__)
        c.add_extra_report_keys(objspec=objspec.name, function=f.__name__)
        objects = expand_string(which, list(test_objects))
        if not objects:
            raise ValueError('Which = %r did not give anything in %r.'
                             % (which, test_objects))
        print('Testing %s for %s' % (f, objects))
        for cc, id_object in iterate_context_names(c, objects,
                                                   key=objspec.name):
            ob_job_id = test_objects[id_object]
            assert_job_exists(ob_job_id, db)
            ob = Promise(ob_job_id)
            # bjob_id = 'f'  # XXX
            job_id = '%s-%s' % (f.__name__, id_object)
            params = dict(job_id=job_id, command_name=f.__name__)
            if dynamic:
                res = cc.comp_config_dynamic(wrap_func_dyn, f, id_object, ob,
                                             **params)
            else:
                res = cc.comp_config(wrap_func, f, id_object, ob, **params)
            results[id_object] = res
        if create_reports:
            r = c.comp(report_results_single, f, objspec.name, results)
            c.add_report(r, 'some')
def define_jobs_context(self, context):
    """Instance each configured MDP, report it, and solve it with each
    configured solver."""
    options = self.get_options()
    config_mdps = get_conftools_tmdp_smdps()
    id_mdps = config_mdps.expand_names(options.mdps)
    config_solvers = get_conftools_tmdp_smdp_solvers()
    id_solvers = config_solvers.expand_names(options.solvers)
    for c_mdp, id_mdp in iterate_context_names(context, id_mdps):
        c_mdp.add_extra_report_keys(id_mdp=id_mdp)
        mdp = c_mdp.comp_config(instance_mdp, id_mdp)
        # Local import, kept as in the original code.
        from tmdp.programs.value_iteration import report_mdp_display
        display_report = c_mdp.comp_config(report_mdp_display, mdp)
        c_mdp.add_report(display_report, 'report_mdp_display')
        for c_solver, id_solver in iterate_context_names(c_mdp, id_solvers):
            c_solver.add_extra_report_keys(id_solver=id_solver)
            jobs_solve(c_solver, mdp, id_solver)
def define_jobs_context(self, context):
    """Run value-iteration solve jobs for every configured MDP and
    create the dynamic index job."""
    options = self.get_options()
    config_mdps = get_conftools_tmdp_smdps()
    id_mdps = config_mdps.expand_names(options.mdps)
    for cc, id_mdp in iterate_context_names(context, id_mdps):
        cc.add_extra_report_keys(id_mdp=id_mdp)
        # jobs_vit_display(cc, id_mdp)
        jobs_vit_solve(cc, id_mdp)
    context.create_dynamic_index_job()
def define_jobs_context(self, context):
    """For every configured MDP, report its start distribution and actions,
    then create the dynamic index job."""
    options = self.get_options()
    config_mdps = get_conftools_tmdp_smdps()
    id_mdps = config_mdps.expand_names(options.mdps)
    for cc, id_mdp in iterate_context_names(context, id_mdps):
        cc.add_extra_report_keys(id_mdp=id_mdp)
        mdp = cc.comp_config(instance_mdp, id_mdp)
        cc.add_report(cc.comp(report_start_dist, mdp), 'start_dist')
        cc.add_report(cc.comp(report_actions, mdp), 'actions')
    context.create_dynamic_index_job()
def define_jobs_context(self, context):
    """Benchmark every configured distance against all configured testcases."""
    # Separate names for the two libraries (the original reused one local).
    distances_library = get_conftools_uncertain_image_distances()
    distances = distances_library.expand_names(self.options.distances)
    testcases_library = get_conftools_testcases()
    testcases = testcases_library.expand_names(self.options.testcases)
    self.info('Using distances: %s' % distances)
    self.info('Using testcases: %s' % testcases)
    for c, id_distance in iterate_context_names(context, distances):
        c.comp_config(benchmark_distance, id_distance, testcases,
                      repeat=self.options.repeat)
def define_jobs_context(self, context):
    """Create prediction-statistics jobs for each configured DDS, over all
    configured distances and streams."""
    distances_library = get_conftools_uncertain_image_distances()
    distances = distances_library.expand_names(self.options.distances)
    streams_library = get_conftools_streams()
    streams = streams_library.expand_names(self.options.streams)
    discdds_library = get_conftools_discdds()
    discdds = discdds_library.expand_names(self.options.dds)
    for c, id_discdds in iterate_context_names(context, discdds):
        create_predstats_jobs(context=c,
                              distances=distances,
                              id_discdds=id_discdds,
                              streams=streams,
                              maxd=10)
def jobs_vit_solve(context, id_mdp):
    """Solve one MDP by value iteration, then by free-energy iteration at a
    series of inverse temperatures, reporting each result."""
    config_smdps = get_conftools_tmdp_smdps()
    mdp = config_smdps.instance(id_mdp)
    vit_res = context.comp(vit_solve, mdp, gamma=1.0)
    context.add_report(context.comp(report_vit, mdp, vit_res), 'vit')
    context.add_report(context.comp(report_policy, mdp, vit_res), 'policy')
    # Parallel lists: each beta gets its own iteration budget.
    betas = [0.001, 0.05, 0.051, 0.052, 0.15, 0.5, 5]
    its = [10, 75, 75, 100, 75, 75, 50]
    for (c, beta), iterations in zip(iterate_context_names(context, betas),
                                     its):
        c.add_extra_report_keys(beta=beta)
        fe_res = c.comp(free_energy_iteration, mdp,
                        min_iterations=iterations,
                        max_iterations=iterations,
                        beta=beta)
        c.add_report(c.comp(report_free_energy, mdp, fe_res),
                     'report_free_energy')
def jobs_parallel_learning_concurrent(context, data_central, id_agent,
                                      id_robot, episodes, n,
                                      intermediate_reports=True,
                                      final_report=True):
    """ In this way, there are "n" instances of each agent.
        Each instance sees all logs, but is given a hint using the
        parallell_hint(i, n) function.
        The instances are merged using the merge() function.

        Needs: RM_EPISODE_READY, id_robot, id_episode
    """
    contexts = ['learn%02dof%02d' % (i + 1, n) for i in range(n)]
    agents = []
    for i, (c, progress) in enumerate(iterate_context_names(context,
                                                            contexts)):
        # Every instance depends on all episodes being ready.
        for id_episode in episodes:
            c.needs(RM_EPISODE_READY, id_robot=id_robot,
                    id_episode=id_episode)
        agent_i = c.subtask(LearnLogNoSaveHint,
                            boot_root=data_central.get_boot_root(),
                            agent=id_agent, robot=id_robot,
                            episodes=episodes,
                            parallel_hint=[i, n],
                            add_job_prefix='')
        if intermediate_reports:
            report = c.comp(get_agentstate_report, agent_i, progress,
                            job_id='report')
            c.add_report(report, 'agent_report_partial',
                         id_agent=id_agent, id_robot=id_robot,
                         progress=progress)
        agents.append(agent_i)
    agent_state = jobs_merging_recursive(context, agents)
    if final_report:
        report = context.comp(get_agentstate_report, agent_state, 'all',
                              job_id='report')
        context.add_report(report, 'agent_report',
                           id_agent=id_agent, id_robot=id_robot)
    save = context.comp(save_state, data_central, id_agent, id_robot,
                        agent_state)
    return save
def jobs_parallel_learning_concurrent_reps(context, data_central, id_agent,
                                           id_robot, episodes, n, max_reps,
                                           intermediate_reports=True,
                                           final_report=True):
    """ This one cycles multiple times in the data. """
    contexts = ['learn%02dof%02d' % (i + 1, n) for i in range(n)]
    agents = []
    for i, (c, progress) in enumerate(iterate_context_names(context,
                                                            contexts)):
        # Every instance depends on all episodes being ready.
        extra_dep = []
        for id_episode in episodes:
            ready = c.get_resource(RM_EPISODE_READY, id_robot=id_robot,
                                   id_episode=id_episode)
            extra_dep.append(ready)
        agent_i = c.subtask(LearnLogNoSaveHintRepeated,
                            boot_root=data_central.get_boot_root(),
                            agent=id_agent, robot=id_robot,
                            episodes=episodes,
                            parallel_hint=[i, n],
                            max_reps=max_reps,
                            intermediate_reports=True,
                            add_job_prefix='',
                            extra_dep=extra_dep)
        if intermediate_reports:
            report = c.comp(get_agentstate_report, agent_i, progress,
                            job_id='report')
            c.add_report(report, 'agent_report_partial',
                         id_agent=id_agent, id_robot=id_robot,
                         progress=progress)
        agents.append(agent_i)
    agent_state = jobs_merging_recursive(context, agents)
    if final_report:
        report = context.comp(get_agentstate_report, agent_state, 'all',
                              job_id='report')
        context.add_report(report, 'agent_report',
                           id_agent=id_agent, id_robot=id_robot)
    save = context.comp(save_state, data_central, id_agent, id_robot,
                        agent_state)
    return save
def define_tests_single(context, objspec, names2test_objects, functions,
                        create_reports):
    """Define jobs that apply each single-object test function to every
    test object of this objspec.

    Fix: return early when `functions` is empty, matching define_tests_some;
    previously the message was printed but execution fell through and fetched
    the compmake db for a loop that could never run.
    """
    test_objects = names2test_objects[objspec.name]
    if not test_objects:
        msg = 'No test_objects for objects of kind %r.' % objspec.name
        print(msg)
        return
    if not functions:
        msg = 'No mcdp_lang_tests specified for objects of kind %r.' % objspec.name
        print(msg)
        return
    db = context.cc.get_compmake_db()
    for x in functions:
        f = x['function']
        dynamic = x['dynamic']
        results = {}
        c = context.child(f.__name__)
        c.add_extra_report_keys(objspec=objspec.name, function=f.__name__)
        it = iterate_context_names(c, list(test_objects), key=objspec.name)
        for cc, id_object in it:
            ob_job_id = test_objects[id_object]
            assert_job_exists(ob_job_id, db)
            ob = Promise(ob_job_id)
            # NOTE(review): fixed job_id 'f' (the sibling define_tests_some
            # builds '<fname>-<object>') — presumably disambiguated by the
            # child context; confirm before changing.
            job_id = 'f'
            params = dict(job_id=job_id, command_name=f.__name__)
            if dynamic:
                res = cc.comp_config_dynamic(wrap_func_dyn, f, id_object, ob,
                                             **params)
            else:
                res = cc.comp_config(wrap_func, f, id_object, ob, **params)
            results[id_object] = res
        if create_reports:
            r = c.comp(report_results_single, f, objspec.name, results)
            c.add_report(r, 'single')
def cdtest_alt_obsmodel(context, t):
    """ In this test we look for alternative observations models. """
    # id_pomdp = t['id_pomdp']
    pomdp = t['pomdp']
    results = []
    cc = context.child('original')
    res = cc.comp(pomdp_list_states, pomdp)
    res = cc.comp(find_minimal_policy, res, pomdp)
    results.append(('original', res))
    # normal agent
    horizons = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    # horizons = [3, 2, 1, 0]
    # Loop variable renamed from 'cc' to avoid shadowing the child above.
    for ch, horizon in iterate_context_names(context, horizons):
        pomdp2 = ch.comp(get_alternative_pomdp, pomdp, horizon)
        res2 = ch.comp(alternate_observersations_an, res, pomdp, pomdp2)
        results.append(('horizon = %d ' % horizon, res2))
    context.comp(cdtest_alt_obsmodel_check, results)
def define_jobs_context(self, context):
    """Spawn a dynamic job-definition for each of the names 'a' and 'b'."""
    for c1, name1 in iterate_context_names(context, ['a', 'b']):
        c1.comp_dynamic(define_jobs1, name1)
def define_jobs_context(self, context):
    """For each configured MDP: compute the minimal policy, emit trajectory /
    agent reports and videos, and (optionally) repeat the analysis under
    alternative observation models at several horizons."""
    options = self.get_options()
    context.activate_dynamic_reports()
    config_mdps = get_conftools_tmdp_smdps()
    id_mdps = config_mdps.expand_names(options.mdps)
    for cc, id_mdp in iterate_context_names(context, id_mdps):
        # put all videos in the same place
        outdir = os.path.join(context.get_output_dir(), id_mdp)
        cc.add_extra_report_keys(id_mdp=id_mdp)
        pomdp = cc.comp_config(instance_mdp, id_mdp)
        res = cc.comp(pomdp_list_states, pomdp)
        # Returns res['builder'] as a MDPBuilder
        res = cc.comp(find_minimal_policy, res, pomdp)
        cc.add_report(cc.comp(report_trajectories, res, name_obs=False),
                      'report_trajectories', sensing='original', named=False)
        cc.add_report(cc.comp(report_trajectories, res, name_obs=True),
                      'report_trajectories', sensing='original', named=True)
        cc.add_report(cc.comp(report_agent, res, pomdp), 'report_agent')
        # cc.add_report(cc.comp(report_aliasing, res, pomdp),
        #               'report_aliasing')
        # Too long (too many states)
        # cc.add_report(cc.comp(report_sampled_mdp, res, pomdp),
        #               'sampled_mdp')
        # Too long (too many iterations)
        # cc.add_report(cc.comp(report_pictures, res, pomdp),
        #               'report_pictures')
        prefix = '%s-or' % (id_mdp)
        cc.comp_dynamic(jobs_videos, res, pomdp, outdir=outdir, prefix=prefix)
        # See if we can do the same policy with different
        # observation model
        results = [('original', res)]
        if options.horizons:
            horizons = [0, 1, 2, 3, 4][::-1]
            for ch, horizon in iterate_context_names(cc, horizons,
                                                     key='sensing'):
                pomdp2 = ch.comp(get_alternative_pomdp, pomdp, horizon)
                res2 = ch.comp(alternate_observersations_an, res, pomdp,
                               pomdp2, rename_obs=False)
                ch.add_report(ch.comp(report_agent, res2, pomdp,
                                      job_id='report_agent_z'),
                              'report_agent_z')
                prefix = '%s-h%d' % (id_mdp, horizon)
                ch.comp_dynamic(jobs_videos, res2, pomdp2, outdir=outdir,
                                prefix=prefix)
                ch.add_report(ch.comp(report_trajectories, res2,
                                      name_obs=False),
                              'report_trajectories', named=False)
                ch.add_report(ch.comp(report_trajectories, res2,
                                      name_obs=True),
                              'report_trajectories', named=True)
                results.append(('hor-%d' % horizon, res2))
        cc.add_report(cc.comp(report_summary, results), 'summary')
def instance_reports1(context):
    """First level of the report hierarchy: one dynamic child per param1."""
    for c1, param1 in iterate_context_names(context, ['a', 'b'],
                                            key='param1'):
        c1.comp_dynamic(instance_reports2, param1=param1)
def define_jobs_context(self, context):
    """Spawn a dynamic job for each concatenated pair of names."""
    names1 = ['a', 'b']
    names2 = ['m', 'n']
    for c1, name1 in iterate_context_names(context, names1):
        for c2, name2 in iterate_context_names(c1, names2):
            c2.comp_dynamic(define_jobs1, name1 + name2)
def define_jobs_context(self, context):
    """Define jobs for each of the names 'a', 'b', 'c'."""
    for c, id_name in iterate_context_names(context, ['a', 'b', 'c']):
        define_jobs(c, id_name)
def define_jobs_context(self, context):
    """Spawn a dynamic job-definition for each of the names 'a' and 'b'."""
    for c, id_name in iterate_context_names(context, ['a', 'b']):
        c.comp_dynamic(define_jobs1, id_name)
def define_jobs_context(self, context):
    """For each configured MDP: compute the minimal policy, emit trajectory /
    agent reports and videos, and (optionally) repeat the analysis under
    alternative observation models at several horizons.

    NOTE(review): near-duplicate of another define_jobs_context in this
    file — consider extracting a shared helper.
    """
    options = self.get_options()
    context.activate_dynamic_reports()
    config_mdps = get_conftools_tmdp_smdps()
    id_mdps = config_mdps.expand_names(options.mdps)
    for cc, id_mdp in iterate_context_names(context, id_mdps):
        # put all videos in the same place
        outdir = os.path.join(context.get_output_dir(), id_mdp)
        cc.add_extra_report_keys(id_mdp=id_mdp)
        pomdp = cc.comp_config(instance_mdp, id_mdp)
        res = cc.comp(pomdp_list_states, pomdp)
        # Returns res['builder'] as a MDPBuilder
        res = cc.comp(find_minimal_policy, res, pomdp)
        cc.add_report(cc.comp(report_trajectories, res, name_obs=False),
                      'report_trajectories', sensing='original', named=False)
        cc.add_report(cc.comp(report_trajectories, res, name_obs=True),
                      'report_trajectories', sensing='original', named=True)
        cc.add_report(cc.comp(report_agent, res, pomdp), 'report_agent')
        # cc.add_report(cc.comp(report_aliasing, res, pomdp),
        #               'report_aliasing')
        # Too long (too many states)
        # cc.add_report(cc.comp(report_sampled_mdp, res, pomdp),
        #               'sampled_mdp')
        # Too long (too many iterations)
        # cc.add_report(cc.comp(report_pictures, res, pomdp),
        #               'report_pictures')
        prefix = '%s-or' % (id_mdp)
        cc.comp_dynamic(jobs_videos, res, pomdp, outdir=outdir, prefix=prefix)
        # See if we can do the same policy with different
        # observation model
        results = [('original', res)]
        if options.horizons:
            horizons = [0, 1, 2, 3, 4][::-1]
            for ch, horizon in iterate_context_names(cc, horizons,
                                                     key='sensing'):
                pomdp2 = ch.comp(get_alternative_pomdp, pomdp, horizon)
                res2 = ch.comp(alternate_observersations_an, res, pomdp,
                               pomdp2, rename_obs=False)
                ch.add_report(ch.comp(report_agent, res2, pomdp,
                                      job_id='report_agent_z'),
                              'report_agent_z')
                prefix = '%s-h%d' % (id_mdp, horizon)
                ch.comp_dynamic(jobs_videos, res2, pomdp2, outdir=outdir,
                                prefix=prefix)
                ch.add_report(ch.comp(report_trajectories, res2,
                                      name_obs=False),
                              'report_trajectories', named=False)
                ch.add_report(ch.comp(report_trajectories, res2,
                                      name_obs=True),
                              'report_trajectories', named=True)
                results.append(('hor-%d' % horizon, res2))
        cc.add_report(cc.comp(report_summary, results), 'summary')
def define_jobs1(context, id_name):
    """Second level: for each suffix, spawn a dynamic third-level job."""
    names2 = ['m', 'n']
    for c2, name2 in iterate_context_names(context, names2):
        # assert c2.currently_executing == context.currently_executing
        c2.comp_dynamic(define_jobs2, id_name + name2)
def instance_reports2(context, param1):
    """Second level of the report hierarchy: one dynamic child per param2."""
    for c2, param2 in iterate_context_names(context, [1, 2], key='param2'):
        c2.comp_dynamic(instance_reports3, param1=param1, param2=param2)