def write_stream_statistics(externals, verbose):
    """Merge each external's statistics with previously saved data and persist them.

    Does nothing when *externals* is empty; skips the pickle write entirely
    when SAVE_STATISTICS is disabled.
    """
    if not externals:
        return
    if verbose:
        dump_total_statistics(externals)
    # Assumes every external shares the same pddl_name — TODO confirm (as in original).
    pddl_name = externals[0].pddl_name
    previous_data = load_data(pddl_name)
    # Objects without an 'instances' attribute (e.g. synthesizer streams) are skipped.
    data = {
        external.name: merge_data(external, previous_data.get(external.name, {}))
        for external in externals
        if hasattr(external, 'instances')
    }
    if not SAVE_STATISTICS:
        return
    filename = get_data_path(pddl_name)
    ensure_dir(filename)
    write_pickle(filename, data)
    if verbose:
        print('Wrote:', filename)
def display_plan(tamp_problem, plan, display=True):
    """Step through *plan* in the continuous TAMP viewer, saving one frame per state.

    When *display* is truthy, pauses for user confirmation between frames.
    A ``None`` plan draws only the initial state.
    """
    from examples.continuous_tamp.viewer import ContinuousTMPViewer
    from examples.discrete_tamp.viewer import COLORS
    example_name = os.path.basename(os.path.dirname(__file__))
    directory = os.path.join(VISUALIZATIONS_DIR, example_name + '/')
    ensure_dir(directory)
    # Stable block -> color assignment via sorted block names.
    block_names = sorted(tamp_problem.initial.block_poses.keys())
    colors = dict(zip(block_names, COLORS))
    viewer = ContinuousTMPViewer(SUCTION_HEIGHT, tamp_problem.regions, title='Continuous TAMP')
    state = tamp_problem.initial
    print()
    print(state)
    draw_state(viewer, state, colors)
    if display:
        user_input('Continue?')
    if plan is not None:
        for step, action in enumerate(plan):
            print(step, *action)
            # apply_action yields the successive intermediate states of the action.
            for substep, state in enumerate(apply_action(state, action)):
                print(step, substep, state)
                draw_state(viewer, state, colors)
                frame_path = os.path.join(directory, '{}_{}'.format(step, substep))
                viewer.save(frame_path)
                if display:
                    user_input('Continue?')
    if display:
        user_input('Finish?')
def write_sas_task(sas_task, temp_dir):
    """Serialize *sas_task* to the translator output file under *temp_dir*.

    Removes any stale output from a previous run before writing.
    Returns the path of the written file.
    """
    translate_path = os.path.join(temp_dir, TRANSLATE_OUTPUT)
    #clear_dir(temp_dir)
    safe_remove(translate_path)
    # NOTE(review): presumably ensure_dir creates the parent directory of a
    # file path — confirm against its definition.
    ensure_dir(translate_path)
    # Fixed: reuse translate_path instead of re-joining the same components.
    with open(translate_path, "w") as output_file:
        sas_task.output(output_file)
    return translate_path
def write_pddl(domain_pddl, problem_pddl):
    """Write the domain and problem PDDL strings into a fresh TEMP_DIR.

    Returns the (domain_path, problem_path) pair.
    """
    # TODO: already in downward.py
    # Wipe the directory first so a stale plan cannot be reused.
    safe_rm_dir(TEMP_DIR)
    ensure_dir(TEMP_DIR)
    paths = (TEMP_DIR + DOMAIN_INPUT, TEMP_DIR + PROBLEM_INPUT)
    for path, pddl in zip(paths, (domain_pddl, problem_pddl)):
        write(path, pddl)
    return paths
def display_plan(tamp_problem, plan, display=True, time_step=0.01, sec_per_step=0.002):
    """Animate a temporal TAMP plan in the continuous TAMP viewer.

    Samples time from 0 to the plan's duration at *time_step* intervals,
    updating the state from every action active at each sample and saving a
    frame per sample. With *display*, either waits for user input between
    frames (``sec_per_step is None``) or sleeps *sec_per_step* seconds.
    """
    from examples.continuous_tamp.viewer import ContinuousTMPViewer
    from examples.discrete_tamp.viewer import COLORS
    example_name = os.path.basename(os.path.dirname(__file__))
    directory = os.path.join(VISUALIZATIONS_DIR, example_name + '/')
    safe_rm_dir(directory)  # start from a clean frame directory
    ensure_dir(directory)
    colors = dict(zip(sorted(tamp_problem.initial.block_poses.keys()), COLORS))
    viewer = ContinuousTMPViewer(SUCTION_HEIGHT, tamp_problem.regions, title='Continuous TAMP')
    state = tamp_problem.initial
    print()
    print(state)
    # Fixed: compute_duration was called unconditionally, crashing on a failed
    # search (plan is None) before anything was reported; report duration 0 instead.
    duration = 0 if plan is None else compute_duration(plan)
    real_time = None if sec_per_step is None else (duration * sec_per_step / time_step)
    print('Duration: {} | Step size: {} | Real time: {}'.format(
        duration, time_step, real_time))
    draw_state(viewer, state, colors)
    if display:
        user_input('Start?')
    if plan is not None:
        for t in inclusive_range(0, duration, time_step):
            # Apply every action whose interval [start, end] contains t,
            # evaluated at its local time t - start.
            for action in plan:
                if action.start <= t <= get_end(action):
                    update_state(state, action, t - action.start)
            print('t={} | {}'.format(t, state))
            draw_state(viewer, state, colors)
            viewer.save(os.path.join(directory, 't={}'.format(t)))
            if display:
                if sec_per_step is None:
                    user_input('Continue?')
                else:
                    time.sleep(sec_per_step)
    if display:
        user_input('Finish?')
def write_stream_statistics(externals, verbose):
    """Persist per-stream statistics, including the distribution of gaps
    between successful calls, merged with any previously saved history.
    """
    if not externals:
        return
    if verbose:
        dump_total_statistics(externals)
    pddl_name = externals[0].pddl_name  # presumably identical across externals — TODO confirm
    previous_data = load_data(pddl_name)
    data = {}
    for external in externals:
        if not hasattr(external, 'instances'):
            # e.g. synthesizer streams carry no per-instance history
            continue
        prior = previous_data.get(external.name, {})
        # Gaps (in attempts) between consecutive successes, pooled over instances.
        distribution = []
        for instance in external.instances.values():
            last_success = -1
            for attempt, results in enumerate(instance.results_history):
                if results:
                    distribution.append(attempt - last_success)
                    last_success = attempt
        # TODO: count num failures as well; could model the tail beyond the
        # observed gaps with some other distribution.
        data[external.name] = {
            'calls': external.total_calls,
            'overhead': external.total_overhead,
            'successes': external.total_successes,
            'distribution': prior.get('distribution', []) + distribution,
        }
    filename = get_data_path(pddl_name)
    ensure_dir(filename)
    write_pickle(filename, data)
    if verbose:
        print('Wrote:', filename)
def main():
    """Aggregate trial result files under DATA_DIR and print summary statistics.

    Scans trial directories matching --prefix, keeps trial files whose number
    of results lies in [--lower, --upper], prints per-category frequency
    summaries, removes empty trial directories, and finally prints the
    selected trial paths.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--prefix', default=None, help='The prefix of trials')
    # Fixed: without type=, argparse yields strings and the comparison
    # len(data) < args.lower raised TypeError in Python 3 whenever the flag was used.
    parser.add_argument('-l', '--lower', type=int, default=0, help='The minimum number of trials')
    # Fixed copy-pasted help text: this bound is the maximum, not the minimum.
    parser.add_argument('-u', '--upper', type=float, default=INF, help='The maximum number of trials')
    # TODO: select the training policy
    # TODO: date range
    args = parser.parse_args()
    #data_path = os.path.dirname(__file__)
    all_dirname = os.path.join(DATA_DIR, '{}_all/'.format(args.prefix))
    selected_trials = []
    all_data = []
    for trial_dirname in sorted(os.listdir(DATA_DIR)):
        if (args.prefix is not None) and not trial_dirname.startswith(args.prefix):
            continue
        trial_directory = os.path.join(DATA_DIR, trial_dirname)
        # Skip non-directories and the aggregate output directory itself.
        if not os.path.isdir(trial_directory) or (trial_directory == all_dirname[:-1]): # TODO: sloppy
            continue
        for trial_filename in sorted(os.listdir(trial_directory)):
            if trial_filename.startswith(TRIAL_PREFIX):
                trial_path = os.path.join(trial_directory, trial_filename)
                data = list(read_data(trial_path))
                if (len(data) < args.lower) or (args.upper < len(data)):
                    continue
                print('\n{}'.format(os.path.join(DATA_DIR, trial_dirname, trial_filename)))
                # TODO: record the type of failure
                num_scored = sum(result[SCORE] is not None for result in data)
                print('Trials: {} | Scored: {}'.format(len(data), num_scored))
                # Frequency of each field within every category across results.
                category_frequencies = {category: Counter(field for result in data
                                                          if result.get(category, {})
                                                          for field in result.get(category, {}))
                                        for category in CATEGORIES}
                category_frequencies.update({category: Counter(result[category] for result in data
                                                               if category in result)
                                             for category in SPECIAL_CATEGORIES})
                #if list(category_frequencies.get('policy', {})) != [TRAINING]:
                #    continue
                for category in CATEGORIES + SPECIAL_CATEGORIES:
                    frequencies = category_frequencies.get(category, {})
                    if frequencies:
                        print('{} ({}): {}'.format(category, len(frequencies), sorted(frequencies.keys())))
                category_values = get_category_values(data, include_score=False)
                context_values = category_values[FEATURE]
                for name in sorted(DISCRETE_FEATURES):
                    if name in context_values:
                        print('{}: {}'.format(name, Counter(context_values[name])))
                selected_trials.append(trial_path)
                all_data.extend(data)
                # TODO: print num of successful trials
                #if len(data) < MIN_TRIALS:
                #    response = user_input('Remove {}?'.format(trial_path))
                #    safe_remove(trial_path)
        # Get most recent of each skill
        if not os.listdir(trial_directory):
            print('Removed {}'.format(trial_directory))
            safe_rm_dir(trial_directory)
    print()
    print(len(all_data))
    ensure_dir(all_dirname)
    #write_results(os.path.join(all_dirname, TRIAL_PREFIX), all_data)
    #write_json(path, all_data)
    print(' '.join(selected_trials))
def reset_visualizations():
    """Empty the visualization directory and recreate its output subdirectories."""
    clear_dir(VISUALIZATIONS_DIR)
    for subdirectory in (CONSTRAINT_NETWORK_DIR, STREAM_PLAN_DIR):
        ensure_dir(subdirectory)