Example 1
def default_config():
    """Default configuration values."""
    data_root = serialize.get_output_dir()  # where models are read from
    log_root = serialize.get_output_dir()  # where results are written to
    n_bootstrap = 1000  # number of bootstrap samples
    alpha = 95  # percentile confidence interval
    aggregate_kinds = ("bootstrap", "studentt", "sample")

    _ = locals()
    del _
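These Sacred-style config functions turn each local variable into a configuration entry; the trailing `_ = locals(); del _` exists only to quieten unused-variable lint warnings. A minimal sketch of how such a function is typically registered, assuming the `sacred` package is installed (the experiment name is hypothetical):

import sacred

ex = sacred.Experiment("example_experiment")  # hypothetical name

@ex.config
def sketch_config():
    """Default configuration values."""
    n_bootstrap = 1000  # Sacred records each local as a config entry
    alpha = 95
    _ = locals()  # quieten flake8 unused variable warning
    del _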
Example 2
def default_config():
    """Default configuration values."""
    data_root = serialize.get_output_dir()  # where values are read from
    log_root = serialize.get_output_dir()  # where results are written to
    vals_path = None  # path to aggregated data values

    # Reward configurations: models to compare
    x_reward_cfgs = None
    y_reward_cfgs = None
    heatmap_kwargs = {}

    _ = locals()
    del _
Example 3
def default_config():
    """Default configuration values."""
    # Reward parameters
    exp_name = "default"
    discount = 0.99

    # Figure parameters
    vmin = None
    vmax = None
    log_root = os.path.join(serialize.get_output_dir(), "plot_gridworld_reward")
    fmt = "pdf"  # file type

    styles = ["paper", "gridworld-heatmap", "gridworld-heatmap-2in1"]
    ncols = 2
    # We can't use actual LaTeX since it draws too-thick hatched lines :(
    # This means we can't use our macros from `figsymbols.sty`, so we manually
    # transcribe them as closely as possible using matplotlib's native features.
    rewards = [
        (r"$\mathtt{Sparse}$", "sparse_goal"),
        (r"$\mathtt{Dense}$", "transformed_goal"),
        (r"$\mathtt{Penalty}$", "sparse_penalty"),
        (r"$\mathtt{Center}$", "center_goal"),
        (r"$\mathtt{Path}$", "dirt_path"),
        (r"$\mathtt{Cliff}$", "cliff_walk"),
    ]

    _ = locals()  # quieten flake8 unused variable warning
    del _
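The `$\mathtt{...}$` labels above rely on matplotlib's built-in mathtext, which renders a TeX-like subset without a LaTeX installation (avoiding the thick-hatching problem noted in the comments). A minimal self-contained sketch; the title and output file name are illustrative:

import matplotlib
matplotlib.use("Agg")  # headless backend, safe in scripts
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.set_title(r"$\mathtt{Sparse}$")  # rendered by mathtext, not LaTeX
fig.savefig("mathtt_demo.pdf")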
Example 4
def default_config():
    """Default configuration."""
    log_root = serialize.get_output_dir()  # where results are written to
    configs = {}
    run_tag = "default"
    _ = locals()
    del _
Example 5
def _canonicalize_data_root(path: str) -> str:
    """Rewrites known data root prefixes in `path` to the local output directory."""
    if path.endswith("dummy"):
        path = "dummy"
    for root_prefix in DATA_ROOT_PREFIXES:
        if path.startswith(root_prefix):
            path = path.replace(root_prefix, serialize.get_output_dir())
            break
    return path
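A standalone sketch of the same prefix-rewriting logic, with hypothetical prefixes and output directory standing in for `DATA_ROOT_PREFIXES` and `serialize.get_output_dir()`:

from typing import Sequence

def canonicalize(path: str, prefixes: Sequence[str], output_dir: str) -> str:
    # Special-case "dummy" paths, then rewrite the first matching prefix.
    if path.endswith("dummy"):
        path = "dummy"
    for prefix in prefixes:
        if path.startswith(prefix):
            path = path.replace(prefix, output_dir)
            break
    return path

assert canonicalize("/mnt/cluster/data/run1", ["/mnt/cluster/data"], "/tmp/out") == "/tmp/out/run1"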
Example 6
def default_config():
    """Default configuration values."""
    env_name = "evaluating_rewards/PointMassLine-v0"
    kinds = POINT_MASS_KINDS
    data_root = serialize.get_output_dir()  # where models are read from
    log_root = serialize.get_output_dir()  # where results are written to
    n_bootstrap = 1000  # number of bootstrap samples
    alpha = 95  # percentile confidence interval
    aggregate_kinds = ("bootstrap", "studentt", "sample")
    vals_path = None

    # Reward configurations: models to compare
    x_reward_cfgs = None
    y_reward_cfgs = None

    _ = locals()
    del _
Example 7
def point_maze():
    """IRL config for PointMaze environment."""
    env_name = "imitation/PointMazeLeftVel-v0"
    rollout_path = os.path.join(
        serialize.get_output_dir(),
        "train_experts/ground_truth/20201203_105631_297835/imitation_PointMazeLeftVel-v0",
        "evaluating_rewards_PointMazeGroundTruthWithCtrl-v0/best/rollouts/final.pkl",
    )
    total_timesteps = 1e6
    _ = locals()
    del _
Example 8
def experiment_main(experiment: sacred.Experiment,
                    name: str,
                    sacred_symlink: bool = True) -> None:
    """Runs `experiment` from the command line, recording results to a Sacred
    FileStorageObserver under the output directory."""
    sacred_dir = os.path.join(serialize.get_output_dir(), "sacred", name)
    observer = observers.FileStorageObserver.create(sacred_dir)
    if sacred_symlink:
        experiment.pre_run_hook(add_sacred_symlink(observer))
    experiment.observers.append(observer)
    experiment.run_commandline()
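Hypothetical usage, assuming a `sacred.Experiment` named `ex` is defined elsewhere in the module:

if __name__ == "__main__":
    experiment_main(ex, "my_analysis")  # "my_analysis" is an illustrative name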
Example 9
def point_maze_learned_checkpoint_cfgs(
    prefix: str = "transfer_point_maze",
    target_num: Optional[int] = None,
    shard: Optional[Tuple[int, int]] = None,
) -> List[RewardCfg]:
    """Configurations for learned rewards in PointMaze for each checkpoint.

    Args:
        prefix: The directory to locate results under.
        target_num: The target number of checkpoints to return for each algorithm.
            May return fewer than this if there are fewer checkpoints than `target_num`.
            May return up to twice this number.
        shard: Optional tuple of `(shard_num, total_shards)`. If specified, partitions
            checkpoints into `total_shards` of (approximately) equally-spaced checkpoints,
            returning only the `shard_num`'th partition. This gives a simple way to parallelize,
            or just run sequentially with increasing levels of resolution but still full coverage.

    Returns:
        Reward configurations for checkpoints for each reward model.
    """
    res = {}
    for kind, fstr_path in _POINT_MAZE_CFG:
        glob_path = os.path.join(
            serialize.get_output_dir(), prefix, "reward", fstr_path.format("[0-9]*")
        )
        paths = sorted(glob.glob(glob_path))
        cfgs = [(kind, path) for path in paths]

        if target_num and len(cfgs) > target_num:
            subsample = math.floor(len(cfgs) / target_num)
            cfgs = cfgs[::subsample]
            assert target_num <= len(cfgs) <= 2 * target_num

        if shard:
            shard_num, total_shards = shard
            cfgs = cfgs[shard_num::total_shards]

        res[(kind, fstr_path)] = cfgs

    non_empty = {k: bool(v) for k, v in res.items()}  # True where checkpoints were found
    if any(non_empty.values()) and not all(non_empty.values()):
        raise ValueError(f"No checkpoints found for some algorithms: {non_empty}")

    return list(itertools.chain(*res.values()))
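A toy illustration of the subsampling and sharding arithmetic above; all values are hypothetical:

cfgs = list(range(10))        # stand-ins for 10 checkpoint configs
target_num = 4
subsample = 10 // target_num  # math.floor(10 / 4) == 2
cfgs = cfgs[::subsample]      # [0, 2, 4, 6, 8]; length 5 lies in [4, 8]
shard_num, total_shards = 1, 2
print(cfgs[shard_num::total_shards])  # [2, 6]: shard 1 of 2 interleaved shards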
Example 10
def default_config():
    """Default configuration for combined_distances."""
    vals_paths = []
    log_root = serialize.get_output_dir()  # where results are read from/written to
    experiment_kinds = {}
    distance_kinds_order = ("epic", "npec", "erc", "rl")
    config_updates = {}  # config updates applied to all subcommands
    named_configs = {}
    skip = {}
    target_reward_type = None
    target_reward_path = None
    pretty_models = {}
    pretty_algorithms = {}
    # Output formats
    output_fn = latex_table
    styles = ["paper", "tex", "training-curve", "training-curve-1col"]
    tag = "default"
    _ = locals()
    del _
Example 11
def default_config():
    """Default configuration values."""
    normalize = False

    # Dataset parameters
    log_root = serialize.get_output_dir()  # where results are read from/written to
    discount = 0.99
    reward_subset = None

    # Figure parameters
    kind = "npec"
    styles = ["paper", "heatmap", "heatmap-1col", "heatmap-1col-fatlabels", "tex"]
    save_kwargs = {
        "fmt": "pdf",
    }

    _ = locals()
    del _
Example 12
def add_logging_config(experiment: sacred.Experiment, name: str) -> None:
    """Sets `log_root` under the output directory and attaches `logging_config`."""
    experiment.add_config(
        {"log_root": os.path.join(serialize.get_output_dir(), name)})
    experiment.config(logging_config)
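Hypothetical usage, following the same pattern as the earlier examples (the experiment name is illustrative):

ex = sacred.Experiment("plot_heatmap")
add_logging_config(ex, "plot_heatmap")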