Example No. 1
def generatePlot(exp_paths):
    exp = ExperimentModel.load(exp_paths[0])
    results = loadResults(exp, 'step_return.csv')

    results_dict = splitOverParameter(results, 'tile-coder')

    # two new streams over results
    mine = results_dict['mine']
    tiles3 = results_dict['tiles3']

    # figure out what values of "tiles" we swept
    tiles = splitOverParameter(mine, 'tiles').keys()
    # figure out what values of "tilings" we swept
    tilings = splitOverParameter(mine, 'tilings').keys()

    f, axes = plt.subplots(len(tiles), len(tilings))

    # create an empty (2, tiles, tilings) shape array
    data = createEmptyList([2, len(tiles), len(tilings)])
    for tiles_idx, num_tiles in enumerate(tiles):
        for tilings_idx, num_tilings in enumerate(tilings):
            mine_result = whereParametersEqual(mine, { 'tiles': num_tiles, 'tilings': num_tilings })
            mine_result = getBest(mine_result, prefer='big')

            tiles3_result = whereParametersEqual(tiles3, { 'tiles': num_tiles, 'tilings': num_tilings })
            tiles3_result = getBest(tiles3_result, prefer='big')

            mine_data = [np.mean(curve) for curve in mine_result.load()]
            tiles3_data = [np.mean(curve) for curve in tiles3_result.load()]

            data[0][tiles_idx][tilings_idx] = mine_data
            data[1][tiles_idx][tilings_idx] = tiles3_data

    min_perf = np.min(data)
    max_perf = np.max(data)
    for rep, rep_name in enumerate(['Mine', 'Tiles3']):
        for tiles_idx, num_tiles in enumerate(tiles):
            for tilings_idx, num_tilings in enumerate(tilings):
                performance = data[rep][tiles_idx][tilings_idx]
                color = 'blue' if rep == 0 else 'red'

                kde = gaussian_kde(performance)
                lo = 0.95 * min_perf
                hi = 1.05 * max_perf
                dist_space = np.linspace(lo, hi, FIDELITY)
                dist = kde(dist_space)
                # dist = minMaxScale(kde(dist_space)) * DIST_HEIGHT
                axes[tiles_idx][tilings_idx].plot(dist_space, dist, label=rep_name, linewidth=2.0, color=color)
                axes[tiles_idx][tilings_idx].fill_between(dist_space, np.zeros(FIDELITY), dist, color=color, alpha=0.2)

                axes[tiles_idx][tilings_idx].set_xlim((0.95 * min_perf, 1.05 * max_perf))
                # axes[tiles_idx][tilings_idx].set_xlabel('Reward')

                title = f'({num_tiles} tiles, {num_tilings} tilings)'
                axes[tiles_idx][tilings_idx].set_title(title)

    plt.legend()

    return f, axes
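For reference, the density curves above come from `scipy.stats.gaussian_kde` evaluated on a shared x-grid. Below is a self-contained sketch of that plotting technique with made-up sample data; the FIDELITY value and the blue/red colors are assumptions that simply mirror the code above.

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde

FIDELITY = 200  # resolution of the density curve (assumed value)

# made-up per-run mean returns for the two tile-coder implementations
rng = np.random.default_rng(0)
samples = {'Mine': rng.normal(-120, 10, size=30), 'Tiles3': rng.normal(-135, 12, size=30)}

fig, ax = plt.subplots()
lo = min(s.min() for s in samples.values())
hi = max(s.max() for s in samples.values())
dist_space = np.linspace(lo, hi, FIDELITY)

for (name, data), color in zip(samples.items(), ['blue', 'red']):
    # estimate a smooth density over the per-run means and shade under it
    dist = gaussian_kde(data)(dist_space)
    ax.plot(dist_space, dist, label=name, linewidth=2.0, color=color)
    ax.fill_between(dist_space, np.zeros(FIDELITY), dist, color=color, alpha=0.2)

ax.legend()
plt.show()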
Example No. 2
def generatePlot(ax, exp_paths):
    for alg in ALG_ORDER:
        exp_path = findExpPath(exp_paths, alg)

        exp = ExperimentModel.load(exp_path)
        results = loadResults(exp, 'step_return.h5')

        plotBest(results, ax, {
            'color': basicControlColors.get(alg),
            'label': alg,
            'prefer': 'big',
        })
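A hedged sketch of how a per-axis entry point like this might be driven; reading the experiment description paths from the command line and the output filename are assumptions, not part of the source.

import sys
import matplotlib.pyplot as plt

if __name__ == '__main__':
    fig, ax = plt.subplots()
    generatePlot(ax, sys.argv[1:])  # experiment description paths passed on the CLI
    ax.legend()
    fig.savefig('step_return.png')  # hypothetical output path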
Example No. 3
def generatePlot(ax, exp_paths, bestBy):
    reducer = getCurveReducer(bestBy)
    for alg in ALG_ORDER:
        exp_path = findExpPath(exp_paths, alg)
        exp = ExperimentModel.load(exp_path)
        results = loadResults(exp, 'returns.csv')

        best = getBest(results, reducer)
        plot(best, ax, {
            'label': alg,
            'color': colors[alg],
            'width': 0.75,
        })
Example No. 4
def generatePlot(ax, exp_paths):
    exp = ExperimentModel.load(exp_paths[0])
    results = loadResults(exp, 'step_return.csv')

    results_dict = splitOverParameter(results, 'tile-coder')

    # two new streams over results
    mine = results_dict['mine']
    tiles3 = results_dict['tiles3']

    # figure out what values of "tiles" we swept
    tiles = splitOverParameter(mine, 'tiles').keys()
    # figure out what values of "tilings" we swept
    tilings = splitOverParameter(mine, 'tilings').keys()

    for num_tiles in tiles:
        for num_tilings in tilings:
            mine_result = whereParametersEqual(mine, {
                'tiles': num_tiles,
                'tilings': num_tilings
            })
            mine_result = getBest(mine_result, prefer='big')

            tiles3_result = whereParametersEqual(tiles3, {
                'tiles': num_tiles,
                'tilings': num_tilings
            })
            tiles3_result = getBest(tiles3_result, prefer='big')

            d_curves = []
            mine_curves = mine_result.load()
            tiles3_curves = tiles3_result.load()

            min_len = min(len(mine_curves), len(tiles3_curves))
            print(min_len)
            for i in range(min_len):
                d = mine_curves[i] - tiles3_curves[i]
                d_curves.append(d)

            mean = np.mean(d_curves, axis=0)
            stderr = np.std(d_curves, axis=0, ddof=1) / np.sqrt(len(d_curves))

            lineplot(ax,
                     mean,
                     stderr=stderr,
                     label=f'({num_tiles}, {num_tilings})')

    ax.set_xlim(0, 500)
    ax.axhline(0, color='black', linestyle='--', alpha=0.4)
    ax.set_ylabel('d = Mine - Tiles3\nBigger is better')
    plt.legend()
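`lineplot` above is a project helper; the snippet below is a rough stand-in for the mean plus-or-minus standard-error shading it presumably draws, written with plain matplotlib. The helper's exact signature and styling are assumptions, and the difference curves here are synthetic.

import numpy as np
import matplotlib.pyplot as plt

def lineplot_sketch(ax, mean, stderr, label=None):
    # plot the mean learning curve with a shaded standard-error band
    steps = np.arange(len(mean))
    line, = ax.plot(steps, mean, label=label)
    ax.fill_between(steps, mean - stderr, mean + stderr, color=line.get_color(), alpha=0.2)

# synthetic difference curves: 30 runs, 500 steps each
rng = np.random.default_rng(1)
d_curves = rng.normal(loc=0.5, scale=1.0, size=(30, 500))
mean = d_curves.mean(axis=0)
stderr = d_curves.std(axis=0, ddof=1) / np.sqrt(d_curves.shape[0])

fig, ax = plt.subplots()
lineplot_sketch(ax, mean, stderr, label='(4, 8)')
ax.axhline(0, color='black', linestyle='--', alpha=0.4)
ax.legend()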
Example No. 5
def printStats(exp_paths, metric):
    print(f'-------------{metric}-------------')
    exp = ExperimentModel.load(exp_paths[0])
    results = loadResults(exp, f'{metric}.csv')

    results_dict = splitOverParameter(results, 'tile-coder')

    # two new streams over results
    mine = results_dict['mine']
    tiles3 = results_dict['tiles3']

    # figure out what values of "tiles" we swept
    tiles = splitOverParameter(mine, 'tiles').keys()
    # figure out what values of "tilings" we swept
    tilings = splitOverParameter(mine, 'tilings').keys()

    for num_tiles in tiles:
        for num_tilings in tilings:
            mine_results = list(
                whereParametersEqual(mine, {
                    'tiles': num_tiles,
                    'tilings': num_tilings
                }))
            tiles3_results = list(
                whereParametersEqual(tiles3, {
                    'tiles': num_tiles,
                    'tilings': num_tilings
                }))

            mine_means = []
            tiles3_means = []

            # loop over each value of alpha
            # this way we just get 3x as many samples of timing
            for i in range(len(mine_results)):
                mine_mean = mine_results[i].mean()[0]
                tiles3_mean = tiles3_results[i].mean()[0]

                mine_means.append(mine_mean)
                tiles3_means.append(tiles3_mean)

            mine_mean = np.mean(mine_means)
            tiles3_mean = np.mean(tiles3_means)

            # TODO: this is covering up a bug in results. Rerun results
            if metric == 'feature_utilization':
                mine_mean = mine_mean / (num_tilings * num_tiles**2)
                tiles3_mean = tiles3_mean / (num_tilings * num_tiles**2)

            print(
                f'({num_tiles}, {num_tilings}) -- {mine_mean}, {tiles3_mean}')
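The feature_utilization normalization divides by the total number of tile-coder features; the `tiles ** 2` term implies a two-dimensional state space, which is an assumption here. A one-line check of the arithmetic:

num_tiles, num_tilings = 4, 8
total_features = num_tilings * num_tiles ** 2  # 8 * 16 = 128 features in total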
Example No. 6
def generatePlot(exp_paths):
    exp = ExperimentModel.load(exp_paths[0])
    results = loadResults(exp, 'step_return.csv')

    results_dict = splitOverParameter(results, 'tile-coder')

    # two new streams over results
    mine = results_dict['mine']
    tiles3 = results_dict['tiles3']

    # figure out what values of "tiles" we swept
    tiles = splitOverParameter(mine, 'tiles').keys()
    # figure out what values of "tilings" we swept
    tilings = splitOverParameter(mine, 'tilings').keys()

    f, axes = plt.subplots(len(tiles), len(tilings))

    for tiles_idx, num_tiles in enumerate(tiles):
        for tilings_idx, num_tilings in enumerate(tilings):
            mine_result = whereParametersEqual(mine, {
                'tiles': num_tiles,
                'tilings': num_tilings
            })
            mine_result = getBest(mine_result, prefer='big')

            tiles3_result = whereParametersEqual(tiles3, {
                'tiles': num_tiles,
                'tilings': num_tilings
            })
            tiles3_result = getBest(tiles3_result, prefer='big')

            plotBest(mine_result,
                     axes[tiles_idx][tilings_idx],
                     color='blue',
                     label='Mine')
            plotBest(tiles3_result,
                     axes[tiles_idx][tilings_idx],
                     color='red',
                     label='Tiles3')

            axes[tiles_idx][tilings_idx].set_title(
                f'Tiles: {num_tiles} Tilings: {num_tilings}')

    axes[0][0].legend()

    return f, axes
Example No. 7
def gatherMissing(experiment_paths, runs, groupSize, cores, total_hours):
    out = {}

    approximate_cost = np.zeros(2)

    for path in experiment_paths:
        exp = Experiment.load(path)

        indices = detectMissingIndices(exp, runs, 'step_return.h5')
        indices = sorted(indices)
        out[path] = indices

        approximate_cost += estimateUsage(indices, groupSize, cores,
                                          total_hours)

        # figure out how many indices to expect
        size = exp.numPermutations() * runs

        # log how many are missing
        print(path, f'{len(indices)} / {size}')

    return out, approximate_cost
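A hedged sketch of how this helper might be invoked from a scheduling script; the paths, run count, and resource numbers below are placeholders, not values from the source.

import sys

missing, cost = gatherMissing(sys.argv[1:], runs=30, groupSize=8, cores=16, total_hours=12.0)
for path, indices in missing.items():
    print(path, 'first missing indices:', indices[:5])
print('estimated usage:', cost)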
Example No. 8
# prints an in-place progress counter while passing items through
def printProgress(size, it):
    for i, item in enumerate(it):
        print(f'{i + 1}/{size}', end='\r')
        # move to a fresh line once the final item has been reached
        if i + 1 == size:
            print()
        yield item
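
# usage sketch (an assumption, not part of the original script): printProgress is a
# pass-through generator, so it can wrap any iterable of known length
example_items = list(range(100))
for example_item in printProgress(len(example_items), example_items):
    pass  # stand-in for the real per-item work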


# ----------------
# Scheduling logic
# ----------------
for path in experiment_paths:
    print(path)
    # load the experiment json file
    exp = Experiment.load(path)
    # load the slurm config file
    slurm = Slurm.fromFile(slurm_path)

    if exp.agent in SLOW_ALGS:
        slurm.sequential = 1

    # figure out how many indices to use
    size = exp.numPermutations() * runs

    paths = listResultsPaths(exp, runs)
    res_path = first(paths)

    data = []
    data_path = f'{res_path}/returns.csv'
    if os.path.exists(data_path):
Example No. 9
if len(sys.argv) < 3:
    print('run again with:')
    print('python3 src/main.py <path/to/description.json> <idx>')
    exit(1)

# try to detect if we are on a Cedar server
prod = 'cdr' in socket.gethostname()
# if we are local, then turn on info level logging
# otherwise, keep the console clear to improve performance and decrease clutter
if not prod:
    logging.basicConfig(level=logging.INFO)

torch.set_num_threads(1)

exp = ExperimentModel.load(sys.argv[1])
idx = int(sys.argv[2])

max_steps = exp.max_steps
run = exp.getRun(idx)

collector = Collector()

# set random seeds accordingly
np.random.seed(run)
torch.manual_seed(run)

Problem = getProblem(exp.problem)
problem = Problem(exp, idx)

agent = problem.getAgent(collector)
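Seeding both numpy and torch from the run number, as the script above does, is what makes a given run index reproducible. A small self-contained check of that idea; the helper name is made up.

import numpy as np
import torch

def seed_everything(run: int):
    # derive all randomness from the run index so the same run always reproduces
    np.random.seed(run)
    torch.manual_seed(run)

seed_everything(3)
first = np.random.rand(2)
seed_everything(3)
second = np.random.rand(2)
assert np.allclose(first, second)  # identical seed, identical random stream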