def print_tower_constructability(args):
    logger = ActiveExperimentLogger(args.exp_path)

    for tx in range(14):
        towers = logger.get_towers_data(tx)
        for ti, tower_data in enumerate(towers):
            #if tx == 12 and ti == 9:
            #    import pdb; pdb.set_trace()
            tower, block_ids, label = tower_data
            print(
                f"Acquisition index: {tx}, Tower index: {ti}, Tower size: {len(tower)}, stable: {label}"
            )
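A minimal driver sketch for snippets like the one above (not part of the scraped example): the --exp-path flag mirrors the parsers used in the later examples, and it assumes print_tower_constructability and its ActiveExperimentLogger dependency are importable from the surrounding module.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--exp-path', type=str, required=True,
                        help='path to an existing experiment directory')
    args = parser.parse_args()
    print_tower_constructability(args)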
Example 2
def generate_multi_table(args):
    logger = ActiveExperimentLogger(args.exp_path)

    n_metrics = len(args.metrics)
    n_tasks = len(args.problems)
    table_latex = '\\begin{tabular}{|c|'
    # Add the appropriate number of columns: n_tasks*n_metrics
    for _ in range(n_tasks):
        single_task = '|' + '|'.join(['c']*n_metrics) + '|'
        table_latex += single_task
    table_latex += '}\n\\hline\n'

    # Add a title for each task.
    for tx, task in enumerate(args.problems):
        if tx == len(args.problems) - 1:
            task_name = ' & \\multicolumn{' + str(n_metrics) + '}{c|}{\\textbf{' + task_headings[task] + '}}'
        else:
            task_name = ' & \\multicolumn{' + str(n_metrics) + '}{c||}{\\textbf{' + task_headings[task] + '}}'
        table_latex += task_name
    table_latex += '\\\\ \n\\hline\n'

    # For each task, add metric columns.
    table_latex += '\\textbf{APF Model} '
    for _ in range(n_tasks):
        table_latex += '& ' +  ' & '.join(['\\textbf{' + metric_headings[m] + '}' for m in args.metrics]) 
        
    table_latex += '\\\\ \\hline\n'

    for method in args.planning_models:
        table_latex += method_headings[method] 

        for task in args.problems:
            results = logger.get_evaluation_labels(task, method, args.acquisition_step)
            if len(results) > 0:
                results = list(results.values())[0]
            else:
                results = None
        
            for metric in args.metrics:
                if results is None:
                    table_latex += ' & '
                else:
                    table_latex += ' & ' + metric_callbacks[metric](results, task) 
                
        table_latex += '\\\\\n'
        table_latex += '\\hline\n'

    table_latex += '\\end{tabular}\n'
    return table_latex 
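The header layout above is easiest to see in isolation. Below is a self-contained sketch with dummy task and metric names (no project dependencies; _sketch_multi_header is illustrative only) that reproduces the same \multicolumn-per-task pattern.

def _sketch_multi_header(tasks, metrics):
    latex = '\\begin{tabular}{|c|'
    for _ in tasks:
        latex += '|' + '|'.join(['c'] * len(metrics)) + '|'
    latex += '}\n\\hline\n'
    for tx, task in enumerate(tasks):
        # The last task group ends with a single bar; interior groups use a double bar.
        col_spec = 'c|' if tx == len(tasks) - 1 else 'c||'
        latex += ' & \\multicolumn{%d}{%s}{\\textbf{%s}}' % (len(metrics), col_spec, task)
    latex += '\\\\ \n\\hline\n'
    return latex

# Example: print(_sketch_multi_header(['Tallest Tower', 'Max Overhang'], ['Regret', 'Success']))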
Example 3
def run_active_toy2d(args):
    logger = ActiveExperimentLogger.setup_experiment_directory(args)

    # Initialize ensemble.
    ensemble = Ensemble(base_model=MLP,
                        base_args={
                            'n_hidden': args.n_hidden,
                            'dropout': args.dropout
                        },
                        n_models=args.n_models)

    # Sample initial dataset.
    gen = ToyDataGenerator()
    xs, ys = gen.generate_uniform_dataset(N=args.n_train_init)
    dataset = ToyDataset(xs, ys)
    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)

    active_train(ensemble=ensemble,
                 dataset=dataset,
                 dataloader=dataloader,
                 data_sampler_fn=sample_unlabeled_data,
                 data_label_fn=get_labels,
                 data_pred_fn=get_predictions,
                 data_subset_fn=get_subset,
                 logger=logger,
                 args=args)
def visualize_train_constructability(args):
    logger = ActiveExperimentLogger(args.exp_path)

    # tx == -1 loads the initial dataset; later passes step through acquisition files until none remain.
    tx = -1
    constructable_towers = []
    while True:
        if tx == -1:
            tower_dataset = logger.load_dataset(0)
            labels = tower_dataset.tower_labels
            acquired_data = {
                key: {
                    'labels': labels[key]
                }
                for key in labels.keys()
            }
        else:
            acquired_data, _ = logger.load_acquisition_data(tx)
        tx += 1

        if not acquired_data:
            break

        n_constructable = 0
        n_total = 0
        for th in acquired_data.keys():
            labels = acquired_data[th]['labels']
            for label in labels:
                n_total += 1
                if label:
                    n_constructable += 1
        if tx == 0:
            print('Initial dataset has %d/%d stable towers' %
                  (n_constructable, n_total))
        else:
            constructable_towers.append(n_constructable)

    fig, ax = plt.subplots()

    ax.bar(list(range(len(constructable_towers))), constructable_towers)
    ax.set_xlabel('Acquisition Step')
    ax.set_ylabel('Constructable Towers')
    ax.set_title(
        'Constructable Towers per Acquisition Step\n(out of %d Acquired Towers)'
        % len(labels))
    plt.tight_layout()
    plt.savefig(logger.get_figure_path('training_constructable_towers.png'))
Example 5
def train_ensemble(args):
    logger = ActiveExperimentLogger.setup_experiment_directory(args)

    # Initialize ensemble.
    ensemble = Ensemble(base_model=FCGN,
                        base_args={
                            'n_hidden': args.n_hidden,
                            'n_in': 14
                        },
                        n_models=args.n_models)

    # Load two copies of the same dataset; they are split into train/val below.
    with open(args.data_fname, 'rb') as handle:
        dataset = pickle.load(handle)
    with open(args.data_fname, 'rb') as handle:
        val_dataset = pickle.load(handle)

    for k in dataset.tower_keys:
        shape = dataset.tower_tensors[k].shape
        # Perturb feature columns 7:9 (presumably the block xy positions) with
        # Gaussian noise of std 0.0025 * 100 = 0.25, drawn independently for
        # the train and validation copies.
        dataset.tower_tensors[k][:, :, 7:9] += np.random.randn(
            shape[0] * shape[1] * 2).reshape(
                (shape[0], shape[1], 2)) * 0.0025 * 100
        val_dataset.tower_tensors[k][:, :, 7:9] += np.random.randn(
            shape[0] * shape[1] * 2).reshape(
                (shape[0], shape[1], 2)) * 0.0025 * 100

        # Hold out every fifth tower for validation (an 80/20 split via boolean masks).
        train_mask = np.ones(dataset.tower_tensors[k].shape[0], dtype=bool)
        train_mask[::5] = False
        val_mask = ~train_mask

        dataset.tower_tensors[k] = dataset.tower_tensors[k][train_mask, ...]
        dataset.tower_labels[k] = dataset.tower_labels[k][train_mask, ...]

        val_dataset.tower_tensors[k] = val_dataset.tower_tensors[k][val_mask,
                                                                    ...]
        val_dataset.tower_labels[k] = val_dataset.tower_labels[k][val_mask,
                                                                  ...]

    dataset.get_indices()
    val_dataset.get_indices()

    sampler = TowerSampler(dataset=dataset,
                           batch_size=args.batch_size,
                           shuffle=True)
    dataloader = DataLoader(dataset, batch_sampler=sampler)

    val_sampler = TowerSampler(dataset=val_dataset,
                               batch_size=args.batch_size,
                               shuffle=False)
    val_dataloader = DataLoader(val_dataset, batch_sampler=val_sampler)

    logger.save_dataset(dataset, 0)

    # Initialize and train models.
    ensemble.reset()
    for model in ensemble.models:
        train(dataloader, val_dataloader, model, args.n_epochs)

    logger.save_ensemble(ensemble, 0)
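The every-fifth-element masking above yields the 80/20 train/validation split. A tiny standalone illustration of the same boolean-mask pattern:

import numpy as np

data = np.arange(10)
train_mask = np.ones(len(data), dtype=bool)
train_mask[::5] = False          # indices 0 and 5 become validation rows
print(data[train_mask])          # [1 2 3 4 6 7 8 9]
print(data[~train_mask])         # [0 5]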
Example 6
def generate_single_table(args):
    logger = ActiveExperimentLogger(args.exp_path)

    n_metrics = len(args.metrics)
    table_latex = '\\begin{tabular}{|c|' + '|'.join(['c']*n_metrics) + '|}\n\\hline\n'
    table_latex += '\\textbf{APF Model} & ' + ' & '.join(['\\textbf{' + metric_headings[m] + '}' for m in args.metrics]) + ' \\\\\n'
    table_latex += '\\hline\n'

    for method in args.planning_models:
        table_latex += method_headings[method] 
        results = logger.get_evaluation_labels(args.problems[0], method, args.acquisition_step)
        results = list(results.values())[0]
    
        for metric in args.metrics:
            table_latex += ' & ' + metric_callbacks[metric](results, args.problems[0]) 
        table_latex += '\\\\\n'
        table_latex += '\\hline\n'

    table_latex += '\\end{tabular}\n'

    return table_latex 
Example 7
def run_active_towers(args):
    logger = ActiveExperimentLogger.setup_experiment_directory(args)
    
    # Initialize agent with supplied blocks (only works with args.block_set_fname set)
    if len(args.pool_fname) > 0:
        raise NotImplementedError() 
    elif args.block_set_fname != '':
        with open(args.block_set_fname, 'rb') as f: 
            block_set = pickle.load(f)
    else:
        raise NotImplementedError() 
    
    if args.exec_mode == 'simple-model' or args.exec_mode == 'noisy-model':
        agent = None
    elif args.exec_mode == 'sim' or args.exec_mode == 'real':
        if args.use_panda_server:
            agent = PandaClientAgent()
        else:
            block_set = load_blocks(fname=args.block_set_fname,
                                    num_blocks=10,
                                    remove_ixs=[1])
            agent = PandaAgent(block_set)
    
    # Initialize ensemble. 
    if args.model == 'fcgn':
        base_model = FCGN
        base_args = {'n_hidden': args.n_hidden, 'n_in': 14}
    elif args.model == 'fcgn-fc':
        base_model = FCGNFC
        base_args = {'n_hidden': args.n_hidden, 'n_in': 14}
    elif args.model == 'fcgn-con':
        base_model = ConstructableFCGN
        base_args = {'n_hidden': args.n_hidden, 'n_in': 14}
    elif args.model == 'lstm':
        base_model = TowerLSTM
        base_args = {'n_hidden': args.n_hidden, 'n_in': 14}
    elif args.model == 'bottomup-shared':
        base_model = BottomUpNet
        base_args = {'n_hidden': args.n_hidden, 'n_in': 14, 'share_weights': True, 'max_blocks': 5}
    elif args.model == 'bottomup-unshared':
        base_model = BottomUpNet
        base_args = {'n_hidden': args.n_hidden, 'n_in': 14, 'share_weights': False, 'max_blocks': 5}

    else:
        raise NotImplementedError()

    ensemble = Ensemble(base_model=base_model,
                        base_args=base_args,
                        n_models=args.n_models)

    # Choose a sampler and check if we are limiting the blocks to work with.
    block_set = None
    if len(args.pool_fname) > 0:
        pool_sampler = PoolSampler(args.pool_fname)
        data_subset_fn = pool_sampler.get_subset
        data_sampler_fn = pool_sampler.sample_unlabeled_data
    elif args.block_set_fname != '':
        data_subset_fn = get_subset
        with open(args.block_set_fname, 'rb') as f: 
            # TODO: Unify block loading
            block_set = pickle.load(f)
            if args.exec_mode == "sim" or args.exec_mode == "real":
                block_set = load_blocks(fname=args.block_set_fname,
                                        num_blocks=10)
        data_sampler_fn = lambda n: sample_unlabeled_data(n, block_set=block_set)
    else:
        data_subset_fn = get_subset
        data_sampler_fn = sample_unlabeled_data

    # Sample initial dataset.
    if len(args.init_data_fname) > 0:
        print(f'Loading an initial dataset from {args.init_data_fname}')
        # A good dataset to use is learning/data/random_blocks_(x40000)_5blocks_uniform_mass.pkl
        with open(args.init_data_fname, 'rb') as handle:
            towers_dict = pickle.load(handle)
        dataset = TowerDataset(towers_dict,
                               augment=True,
                               K_skip=4) # From this dataset, this means we start with 10 towers/size (before augmentation).
        with open('learning/data/random_blocks_(x1000.0)_constructable_val.pkl', 'rb') as handle:
            val_dict = pickle.load(handle)
        val_dataset = TowerDataset(val_dict, 
                                   augment=True,
                                   K_skip=10)
    elif args.sampler == 'sequential':
        print('Sampling initial dataset sequentially. Dataset NOT sampled on real robot.')
        towers_dict = sample_sequential_data(block_set, None, 40)
        towers_dict = get_labels(towers_dict, 'noisy-model', agent, logger, args.xy_noise)
        dataset = TowerDataset(towers_dict, augment=True, K_skip=1)

        val_towers_dict = sample_sequential_data(block_set, None, 40)
        val_towers_dict = get_labels(val_towers_dict, 'noisy-model', agent, logger, args.xy_noise)
        val_dataset = TowerDataset(val_towers_dict, augment=False, K_skip=1)
        
        if block_set is None:
            raise NotImplementedError()

        data_sampler_fn = lambda n_samples: sample_sequential_data(block_set, dataset, n_samples)
    else:
        print('Sampling initial dataset randomly.')
        towers_dict = sample_unlabeled_data(40, block_set=block_set)
        towers_dict = get_labels(towers_dict, args.exec_mode, agent, logger, args.xy_noise)
        dataset = TowerDataset(towers_dict, augment=True, K_skip=1)

        val_towers_dict = sample_unlabeled_data(40, block_set=block_set)
        val_towers_dict = get_labels(val_towers_dict, args.exec_mode, agent, logger, args.xy_noise)
        val_dataset = TowerDataset(val_towers_dict, augment=False, K_skip=1)


    if args.strategy == 'subtower-greedy':
        data_sampler_fn = lambda n_samples, bases: sample_next_block(n_samples, bases, block_set)
    if args.strategy == 'subtower':
        data_sampler_fn = lambda n: sample_unlabeled_data(n, block_set=block_set, range_n_blocks=(5, 5))

    #print(len(dataset), len(val_dataset)) 
    sampler = TowerSampler(dataset=dataset,
                           batch_size=args.batch_size,
                           shuffle=True,
                           oversample=False)
    dataloader = DataLoader(dataset,
                            batch_sampler=sampler)
    
    val_sampler = TowerSampler(dataset=val_dataset,
                               batch_size=args.batch_size,
                               shuffle=False)
    val_dataloader = DataLoader(val_dataset,
                                batch_sampler=val_sampler)

    print('Starting training from scratch.')
    if args.exec_mode == 'real':
        input('Press enter to confirm you want to start training from scratch.')
    active_train(ensemble=ensemble, 
                 dataset=dataset, 
                 val_dataset=val_dataset,
                 dataloader=dataloader, 
                 val_dataloader=val_dataloader,
                 data_sampler_fn=data_sampler_fn, 
                 data_label_fn=get_labels, 
                 data_pred_fn=get_predictions,
                 data_subset_fn=data_subset_fn,
                 logger=logger, 
                 agent=agent,
                 args=args)
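The various data_sampler_fn assignments above all follow one pattern: bind fixed arguments (block_set, tower-size ranges, the growing dataset) so that active_train sees a sampler with the narrower signature it expects. A minimal sketch of that pattern with stand-in names (sampler_stub and blocks are illustrative, not project code):

from functools import partial

def sampler_stub(n_samples, block_set=None, range_n_blocks=(2, 5)):
    # Stand-in for sample_unlabeled_data: report what was requested.
    return {'n': n_samples, 'blocks': block_set, 'range': range_n_blocks}

blocks = ['block_a', 'block_b']
data_sampler_fn = partial(sampler_stub, block_set=blocks, range_n_blocks=(5, 5))
print(data_sampler_fn(40))  # {'n': 40, 'blocks': ['block_a', 'block_b'], 'range': (5, 5)}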
Example 8
                        type=int,
                        help='number of blocks in goal tower')

    args = parser.parse_args()

    if args.debug:
        import pdb
        pdb.set_trace()

    if args.block_set_fname != '':
        with open(args.block_set_fname, 'rb') as f:
            block_set = pickle.load(f)
    else:
        block_set = [Object.random(f'obj_{ix}') for ix in range(args.n_blocks)]

    logger = ActiveExperimentLogger(args.exp_path)
    pre = 'discrete_' if args.discrete else ''

    ## RUN SEQUENTIAL PLANNER
    if args.method == 'sequential' or args.method == 'both':
        tower_sizes = [2, 3, 4, 5]
        tower_keys = [str(ts) + 'block' for ts in tower_sizes]
        max_height = args.max_height

        # Store regret for towers of each size.
        regrets = {k: [] for k in tower_keys}
        rewards = {k: [] for k in tower_keys}
        num_nodes = {k: [] for k in tower_keys}
        trees = []
        node_values = []
        highest_exp_values = []
Example 9
            accs = pickle.load(handle)
        print(name)
        tower_keys = ['2block', '3block', '4block', '5block']

        #ref_acc = {'2block': .955, '3block': .925, '4block': .912, '5block': .913}

        for ix, ax in enumerate(axes):
            k = tower_keys[ix]
            max_x = 40 + 10 * len(accs[k])
            print(max_x)
            xs = np.arange(40, max_x, 10)

            ax.plot(xs, accs[k], label=name)
            #ax.plot([400, 4500], [ref_acc[k]]*2)
            #ax.axvline(x=4375)
            ax.set_xlabel('Number of Towers', fontsize=20)
            ax.set_ylabel('Accuracy', fontsize=20)
            ax.legend(prop={'size': 20})
            ax.tick_params(axis='both', which='major', labelsize=16)
    plt.savefig('learning/evaluate/plots/accuracy_comparison_seq.png')


if __name__ == '__main__':
    random_logger = ActiveExperimentLogger(RANDOM_PATH)
    bald_logger = ActiveExperimentLogger(BALD_PATH)

    plot_regret(random_logger, bald_logger, fname='height_regret_blocks')
    plot_regret(random_logger, bald_logger, fname='contact_regret')
    plot_regret(random_logger, bald_logger, fname='longest_overhang')
    plot_val_accuracy(random_logger, bald_logger)
Example 10
        type=str,
        required=True,
        help=
        'evaluate only this acquisition step (use either this or --max-acquisitions)'
    )
    parser.add_argument('--debug',
                        action='store_true',
                        help='set to run in debug mode')

    # TODO: cannot do false positive and negative rates with multiple exp paths
    args = parser.parse_args()

    if args.debug:
        import pdb
        pdb.set_trace()

    with open(args.test_set_fname, 'rb') as f:
        dataset = pickle.load(f)

    all_accuracies = []
    for exp_path in args.exp_paths:
        logger = ActiveExperimentLogger(exp_path)
        model_accuracies = calc_model_accuracy(logger, dataset, args, exp_path)
        all_accuracies.append(model_accuracies)

    if args.single_acquisition_step is None:
        plot_all_model_accuracies(all_accuracies)
    else:
        print('Accuracy per model: ')
        for i, acc in enumerate(all_accuracies):
            print('    Model ' + str(i) + ':', acc)
Example 11
    parser.add_argument('--xy-noise',
                        type=float,
                        required=True,
                        help='noise to add to xy position of blocks')

    args = parser.parse_args()
    args.discrete = False
    args.tower_sizes = [5]
    args.max_acquisitions = None
    args.n_towers = 1
    acquisition_steps = [0, 5, 10, 15, 20, 25, 30, 35, 40]

    with open(args.block_set_fname, 'rb') as f:
        block_set = pickle.load(f)[:5]

    logger = ActiveExperimentLogger(args.exp_path)

    for tx in acquisition_steps:
        args.acquisition_step = tx
        if not os.path.exists(logger.get_figure_path(f'height_{tx}_0.png')):
            tallest_tower_regret_evaluation(logger,
                                            block_set,
                                            '',
                                            args,
                                            save_imgs=True)
        if not os.path.exists(logger.get_figure_path(f'overhang_{tx}_0.png')):
            longest_overhang_regret_evaluation(logger,
                                               block_set,
                                               '',
                                               args,
                                               save_imgs=True)
Example 12
    else:
        try:
            args.n_blocks
        except AttributeError:
            print('If no block set is provided, the number of blocks to '
                  'generate in a random set must be given.')
        block_set = [Object.random(f'obj_{ix}') for ix in range(args.n_blocks)]

    if args.problem == 'tallest':
        problem = Tallest(args.max_height)
    elif args.problem == 'overhang':
        problem = Overhang()
    elif args.problem == 'deconstruct':
        problem = Deconstruct()

    logger = ActiveExperimentLogger(args.exp_path)
    ensemble = logger.get_ensemble(tx)

    c_vals = [1.0]  #[0, 0.1, 0.2, 0.5, 1., np.sqrt(2), 7, 10, 15]
    num_c_vals = len(c_vals)
    runs = 1

    all_tallest_towers = np.zeros((args.timeout + 1, num_c_vals, runs))
    all_highest_exp_heights = np.zeros((args.timeout + 1, num_c_vals, runs))
    all_highest_values = np.zeros((args.timeout + 1, num_c_vals, runs))

    for ci, c in enumerate(c_vals):
        for ri in range(runs):
            tree, tallest_tower, highest_exp_height, highest_value, tower_stats, node_values = \
                plan_mcts(logger, args.timeout, block_set, problem, ensemble, c=c, discrete=args.discrete)
Example 13
    parser.add_argument('--acquisition-step', 
                        type=int, 
                        required=True)
    parser.add_argument('--tower-number',
                        type=int,
                        required=True)
    parser.add_argument('--label',
                        type=float,
                        required=True)
    parser.add_argument('--debug', action='store_true')
    args = parser.parse_args()

    if args.debug:
        import pdb; pdb.set_trace()
        
    logger = ActiveExperimentLogger(args.exp_path)
    
    # Locate the pickled tower file whose label needs to be corrected.
    for tower_file in os.listdir(os.path.join(args.exp_path, 'towers')):
        tower_path_str = r'labeled_tower_(.*)_(.*)_%d_%d.pkl' % (args.tower_number, args.acquisition_step)
        matches = re.match(tower_path_str, tower_file)
        if matches:  # sometimes other system files get saved here (e.g., .DS_Store on a Mac); don't parse those
            tower_path = matches.group(0)
    
    input('Press enter to edit file: %s' % tower_path)
    with open(os.path.join(args.exp_path, 'towers', tower_path), 'rb') as handle:
        tower_data = pickle.load(handle)
        
    print('original label: ', tower_data[2])
    tower_data[2] = args.label
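    # The scraped example ends before the edited label is persisted; a minimal
    # write-back sketch, assuming the tower file keeps the same pickled
    # (tower, block_ids, label) layout it was read with above:
    with open(os.path.join(args.exp_path, 'towers', tower_path), 'wb') as handle:
        pickle.dump(tower_data, handle)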
    
def restart_active_towers(exp_path, args):
    logger = ActiveExperimentLogger.get_experiments_logger(exp_path, args)
    
    # starting dataset (must be at least one in the exp_path)
    dataset = logger.load_dataset(logger.acquisition_step)
    val_dataset = logger.load_val_dataset(logger.acquisition_step)
    
    sampler = TowerSampler(dataset=dataset,
                           batch_size=args.batch_size,
                           shuffle=True,
                           oversample=False)
    dataloader = DataLoader(dataset,
                            batch_sampler=sampler)
    
    val_sampler = TowerSampler(dataset=val_dataset,
                               batch_size=args.batch_size,
                               shuffle=False)
    val_dataloader = DataLoader(val_dataset,
                                batch_sampler=val_sampler)
    
    # only works with args.block_set_fname set
    if args.block_set_fname == '':
        raise NotImplementedError() 
    
    if args.exec_mode == 'simple-model' or args.exec_mode == 'noisy-model':
        agent = None
    elif args.exec_mode == 'sim' or args.exec_mode == 'real':
        if args.use_panda_server:
            agent = PandaClientAgent()
        else:
            agent = PandaAgent(block_set)

    # Choose a sampler and check if we are limiting the blocks to work with.
    block_set = None
    if len(args.pool_fname) > 0:
        pool_sampler = PoolSampler(args.pool_fname)
        data_subset_fn = pool_sampler.get_subset
        data_sampler_fn = pool_sampler.sample_unlabeled_data
    elif args.block_set_fname != '':
        data_subset_fn = get_subset
        with open(args.block_set_fname, 'rb') as f: 
            block_set = pickle.load(f)
            if args.exec_mode == "sim" or args.exec_mode == "real":
                block_set = load_blocks(fname=args.block_set_fname,
                                        num_blocks=10)
        data_sampler_fn = lambda n: sample_unlabeled_data(n, block_set=block_set)
    else:
        data_subset_fn = get_subset
        data_sampler_fn = sample_unlabeled_data

    if args.sampler == 'sequential':
        data_sampler_fn = lambda n_samples: sample_sequential_data(block_set, dataset, n_samples)
    if args.strategy == 'subtower-greedy':
        data_sampler_fn = lambda n_samples, bases: sample_next_block(n_samples, bases, block_set)
    if args.strategy == 'subtower':
        data_sampler_fn = lambda n: sample_unlabeled_data(n, block_set=block_set, range_n_blocks=(5, 5))


    print("Setting up dataset")
    ensemble = setup_active_train(dataset,
                                  val_dataset,
                                  dataloader=dataloader,
                                  val_dataloader=val_dataloader,
                                  logger=logger,
                                  data_sampler_fn=data_sampler_fn,
                                  data_label_fn=get_labels,
                                  data_pred_fn=get_predictions,
                                  data_subset_fn=data_subset_fn,
                                  agent=agent,
                                  args=args)

    print("Restarting active learning")
    active_train(ensemble=ensemble, 
                 dataset=dataset, 
                 val_dataset=val_dataset,
                 dataloader=dataloader, 
                 val_dataloader=val_dataloader,
                 data_sampler_fn=data_sampler_fn, 
                 data_label_fn=get_labels, 
                 data_pred_fn=get_predictions,
                 data_subset_fn=data_subset_fn,
                 logger=logger, 
                 agent=agent,
                 args=args)
Example 15

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug',
                        action='store_true',
                        help='set to run in debug mode')
    parser.add_argument('--exp-path', type=str, required=True)
    parser.add_argument('--discrete', action='store_true')
    args = parser.parse_args()

    if args.debug:
        import pdb
        pdb.set_trace()

    logger = ActiveExperimentLogger(args.exp_path)

    # plot median height of towers found with sequential method
    plot_comp('sequential', logger, args)
    # plot median height of towers found with total method
    #plot_comp('total', logger)

    # plot number of towers with max_heights blocks found during search
    plot_tower_stats(
        logger, args,
        mode='full towers')  # only works for sequential files and params
    # plot number of nodes in search tree
    plot_tower_stats(
        logger, args,
        mode='nodes expanded')  # only works for sequential files and params
    # plot median value of nodes throughout search
Example 16
    if args.debug:
        import pdb
        pdb.set_trace()

    # find exp_paths with the given prefix
    exp_path_roots = ['%s-%d' % (args.exp_path_prefix, r) for r in args.runs]
    all_results = os.listdir(args.exp_path_root)
    relevant_exp_paths = []
    for result in all_results:
        for exp_path_root in exp_path_roots:
            if exp_path_root in result:
                relevant_exp_paths.append(
                    os.path.join(args.exp_path_root, result))
    print(relevant_exp_paths)

    loggers = []
    for exp_path in relevant_exp_paths:
        loggers.append(ActiveExperimentLogger(exp_path))

    y_axis = 'Regret'  # TODO: detect from file name?
    label = args.exp_path_prefix
    fnames = []
    for tower_size in args.tower_sizes:
        for problem in args.problems:
            fnames += [
                'random_planner_%s_%d_block_towers_regrets.pkl' %
                (problem, tower_size)
            ]
    for fname in fnames:
        plot_planner_performance(loggers, args, y_axis, label, fname)
Example 17
    max_overhang_plot_data = {}
    all_plot_data = [tallest_tower_plot_data, min_contact_plot_data, max_overhang_plot_data]
    y_axis = 'Regret'  # TODO: detect from file name?
    for exp_path_root in args.exp_path_prefixes:
        # find exp_paths with the given root
        exp_path_full_roots = [exp_path_root + '-' + str(r) for r in args.runs]
        all_paper_results = os.listdir(args.exp_path_root)
        exp_paths = []
        for result in all_paper_results:
            for exp_path_full_root in exp_path_full_roots:
                if exp_path_full_root in result:
                    exp_paths.append(result)

        loggers = []
        for exp_path in exp_paths:
            loggers.append(ActiveExperimentLogger(os.path.join(args.exp_path_root, exp_path)))

        label = exp_path_root
        fnames = []
        for tower_size in args.tower_sizes:
            for problem in args.problems:
                fnames += ['random_planner_%s_%d_block_towers_regrets.pkl' % (problem, tower_size)]

        for fname, task_plot_data in zip(fnames, all_plot_data):
            xs, plot_data = plot_planner_performance(loggers, args, y_axis, label, fname)
            task_plot_data[label] = plot_data

    tasks = []
    if 'tallest' in args.problems: tasks.append('Tallest Tower')
    if 'min_contact' in args.problems: tasks.append('Minimum Contact')
    if 'max_overhang' in args.problems: tasks.append('Maximum Overhang')