Example No. 1
def cpp_export(
    save_dir: pyrado.PathLike,
    policy: Policy,
    env: Optional[SimEnv] = None,
    policy_export_name: str = "policy_export",
    write_policy_node: bool = True,
    policy_node_name: str = "policy",
):
    """
    Convenience function to export the policy using PyTorch's scripting or tracing, and the experiment's XML
    configuration if the environment is from RcsPySim.

    :param save_dir: directory to save in
    :param policy: (trained) policy
    :param env: environment the policy was trained in
    :param policy_export_name: name of the exported policy file without the file type ending
    :param write_policy_node: if `True`, write the PyTorch-based control policy into the experiment's XML configuration.
                              This requires the experiment's XML configuration to be exported beforehand.
    :param policy_node_name: name of the control policies node in the XML file, e.g. 'policy' or 'preStrikePolicy'
    """
    if not osp.isdir(save_dir):
        raise pyrado.PathErr(given=save_dir)
    if not isinstance(policy, Policy):
        raise pyrado.TypeErr(given=policy, expected_type=Policy)
    if not isinstance(policy_export_name, str):
        raise pyrado.TypeErr(given=policy_export_name, expected_type=str)

    # Use torch.jit.trace / torch.jit.script (the latter if recurrent) to generate a torch.jit.ScriptModule
    ts_module = policy.double().script()  # can be evaluated like a regular PyTorch module

    # Serialize the script module to a file and save it in the same directory we loaded the policy from
    policy_export_file = osp.join(save_dir, f"{policy_export_name}.pt")
    ts_module.save(policy_export_file)  # former .zip, and before that .pth
    print_cbt(f"Exported the loaded policy to {policy_export_file}",
              "g",
              bright=True)

    # Export the experiment config for C++
    exp_export_file = osp.join(save_dir, "ex_config_export.xml")
    if env is not None and isinstance(inner_env(env), RcsSim):
        inner_env(env).save_config_xml(exp_export_file)
        print_cbt(f"Exported experiment configuration to {exp_export_file}",
                  "g",
                  bright=True)

    # Open the XML file again to add the policy node
    if write_policy_node and osp.isfile(exp_export_file):
        tree = et.parse(exp_export_file)
        root = tree.getroot()
        policy_node = et.Element(policy_node_name)
        policy_node.set("type", "torch")
        policy_node.set("file", f"{policy_export_name}.pt")
        root.append(policy_node)
        tree.write(exp_export_file)
        print_cbt(f"Added {policy_export_name}.pt to the experiment configuration.", "g")
Example No. 2
    def load_snapshot(load_dir: pyrado.PathLike, load_name: str = "algo"):
        """
        Load an algorithm from file, i.e. unpickle it.

        :param load_dir: experiment directory to load from
        :param load_name: name of the algorithm's pickle file without the ending
        """
        if not osp.isdir(load_dir):
            raise pyrado.PathErr(given=load_dir)

        file = osp.join(load_dir, f"{load_name}.pkl")
        if not osp.isfile(file):
            raise pyrado.PathErr(given=file)

        algo = joblib.load(file)

        if not isinstance(algo, Algorithm):
            raise pyrado.TypeErr(given=algo, expected_type=Algorithm)

        return algo
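A hedged usage sketch (the path is a placeholder); the same call appears again in Example No. 16 below:

algo = Algorithm.load_snapshot("/path/to/experiment")  # unpickles <load_dir>/algo.pkl
print(type(algo).__name__)  # name of the loaded Algorithm subclass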
Example No. 3
def load_dict_from_yaml(yaml_file: str) -> dict:
    """
    Load a list of dicts (e.g. hyper-parameters) of an experiment from a YAML-file.

    :param yaml_file: path to the YAML-file that should be loaded
    :return: a dict containing the names as keys and dicts of parameter values as values
    """
    if not osp.isfile(yaml_file):
        raise pyrado.PathErr(given=yaml_file)

    with open(yaml_file, 'r') as f:
        data = yaml.load(f, Loader=AugmentedSafeLoader)
    return data
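Usage as seen in Example No. 11 further down, sketched here with a placeholder directory:

ex_dir = "/path/to/experiment"  # placeholder
hparam = load_dict_from_yaml(osp.join(ex_dir, "hyperparams.yaml"))  # file name as used in Example No. 11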
Example No. 4
def split_path_custom_common(path: Union[str, Experiment]) -> Tuple[str, str]:
    """
    Split a path at the point where the machine-dependent and the machine-independent part can be separated.

    :param path: (complete) experiment path to be split
    :return: name of the base directory ('experiments' for `pyrado.EXP_DIR` or 'temp' for `pyrado.TEMP_DIR`) where the
             experiment was located, and machine-independent part of the path
    """
    def _split_path_at(path, keyword):
        """
        Split a path at the point where the machine-dependent and the machine-independent part can be separated.
        In general, the paths look like this
        `/CUSTOM_FOR_EVERY_MACHINE/SimuRLacra/Pyrado/pyrado/../data/CUSTOM_FOR_EVERY_EXPERIMENT`
        Thus, we look for the first occurrence of the given keyword.

        :param path: (complete) experiment path to be split
        :param keyword: keyword to split the path after
        :return: part of the path up to and including the keyword, and the machine-independent remainder of the path
        """
        if isinstance(path, (Experiment, os.PathLike)):
            path = os.fspath(path)  # convert Experiment to PathLike a.k.a. string
        # Convert the PathLike a.k.a. string into a pathlib Path object
        path = Path(path)
        # Search for the keyword in the individual parts of the path
        idx = path.parts.index(keyword) if keyword in path.parts else -1
        if idx == -1:
            # The keyword was not found in the path
            return None, None
        else:
            idx += 1  # +1 to include the keyword itself
            return osp.join(*path.parts[:idx]), osp.join(*path.parts[idx:])

    # First try to split at pyrado.EXP_DIR
    custom, common = _split_path_at(path, keyword='experiments')
    if custom is None or common is None:
        # If that did not work, try to split at pyrado.TEMP_DIR
        custom, common = _split_path_at(path, keyword='temp')
    if custom is None or common is None:
        # If that did not work, try to split at the pytest's temporary path
        custom, common = _split_path_at(path, keyword='tmp')  # actually they are reversed, but we don't care for tests
    if custom is None or common is None:
        # If that also did not work, something is wrong
        raise pyrado.PathErr(msg='Failed to split the path between the machine-dependent and machine-independent part.')

    return custom, common
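A minimal sketch of a call (the concrete path is made up; it only has to contain one of the keywords 'experiments', 'temp', or 'tmp'):

custom, common = split_path_custom_common("/home/user/SimuRLacra/Pyrado/data/temp/env_name/algo_name/2021-01-01_12-00-00")
# custom == "/home/user/SimuRLacra/Pyrado/data/temp"
# common == "env_name/algo_name/2021-01-01_12-00-00"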
Example No. 5
    def __setstate__(self, state):
        # Assemble the directory on unpickling
        self.__dict__ = state
        common_part = state["_save_dir_common"]

        # First, try if it has been split at pyrado.EXP_DIR
        self._save_dir = osp.join(pyrado.EXP_DIR, common_part)
        if not osp.isdir(self._save_dir):
            # If that did not work, try if it has been split at pyrado.TEMP_DIR
            self._save_dir = osp.join(pyrado.TEMP_DIR, common_part)
            if not osp.isdir(self._save_dir):
                # If that did not work, try if it has been split at the pytest's temporary path
                self._save_dir = osp.join("/tmp", common_part)
                if not osp.isdir(self._save_dir):
                    raise pyrado.PathErr(given=self._save_dir)
Example No. 6
    def __setstate__(self, state):
        common_part = state["dir_common"]

        # First, try if it has been split at pyrado.EXP_DIR
        self.dir = osp.join(pyrado.EXP_DIR, common_part)
        if not osp.isdir(self.dir):
            # If that did not work, try if it has been split at pyrado.TEMP_DIR
            self.dir = osp.join(pyrado.TEMP_DIR, common_part)
            if not osp.isdir(self.dir):
                # If that did not work, try if it has been split at the pytest's temporary path
                self.dir = osp.join("/tmp", common_part)
                if not osp.isdir(self.dir):
                    raise pyrado.PathErr(given=self.dir)

        self.step = state["step"]
        self.writer = SummaryWriter(log_dir=self.dir)
Example No. 7
    def __setstate__(self, state):
        common_part = state["file_common"]

        # First, try if it has been split at pyrado.EXP_DIR
        self.file = osp.join(pyrado.EXP_DIR, common_part)
        if not osp.isfile(self.file):
            # If that did not work, try if it has been split at pyrado.TEMP_DIR
            self.file = osp.join(pyrado.TEMP_DIR, common_part)
            if not osp.isfile(self.file):
                # If that did not work, try if it has been split at the pytest's temporary path
                self.file = osp.join("/tmp", common_part)
                if not osp.isfile(self.file):
                    raise pyrado.PathErr(given=self.file)

        self._fd = open(self.file, "a")
        self._writer = csv.writer(self._fd)
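The __setstate__ methods in Examples No. 5 to 7 presumably pair with a __getstate__ that strips the machine-dependent prefix, e.g. via split_path_custom_common() from Example No. 4. A hedged sketch for the CSV-printer case (an assumption, not the original code):

    def __getstate__(self):
        # Assumption: store only the machine-independent part of the file path
        state = self.__dict__.copy()
        _, state["file_common"] = split_path_custom_common(self.file)
        state.pop("_fd", None)  # open file handles cannot be pickled
        state.pop("_writer", None)  # the csv writer is re-created on unpickling
        return state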
Example No. 8
def load_rollouts_from_dir(
    ex_dir: str,
    key: Optional[str] = "rollout",
    file_exts: Tuple[str, ...] = ("pt", "pkl")
) -> Tuple[List[StepSequence], List[str]]:
    """
    Crawl through the given directory, sort the files, and load all rollouts, i.e. all files that include the key.

    :param ex_dir: directory, e.g. an experiment folder
    :param key: word or part of a word that needs to be in the name of a file for it to be loaded
    :param file_exts: file extensions to be considered for loading
    :return: list of loaded rollouts, and list of file names without extension
    """
    if not osp.isdir(ex_dir):
        raise pyrado.PathErr(given=ex_dir)
    if not isinstance(key, str):
        raise pyrado.TypeErr(given=key, expected_type=str)
    if not is_iterable(file_exts):
        raise pyrado.TypeErr(given=file_exts, expected_type=Iterable)

    rollouts = []
    names = []
    for root, dirs, files in os.walk(ex_dir):
        dirs.clear()  # prevents walk() from going into subdirectories
        natural_sort(files)
        for f in files:
            f_ext = f[f.rfind(".") + 1:]
            if key in f and f_ext in file_exts:
                name = f[:f.rfind(".")]
                names.append(name)
                rollouts.append(pyrado.load(f"{name}.{f_ext}", load_dir=root))

    if not rollouts:
        raise pyrado.ValueErr(msg="No rollouts have been found!")

    if isinstance(rollouts[0], list):
        if not check_all_types_equal(rollouts):
            raise pyrado.TypeErr(msg="Some rollout files contain lists of rollouts, others don't!")
        # The rollout files contain lists of rollouts, flatten them
        rollouts = list(itertools.chain(*rollouts))

    return rollouts, names
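A usage sketch, assuming the experiment directory contains files such as rollout_0.pkl, rollout_1.pkl, ... (the path and file names are illustrative):

rollouts, names = load_rollouts_from_dir("/path/to/experiment")
print(f"Loaded {len(rollouts)} rollouts: {names}")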
Example No. 9
    def __init__(
        self,
        rollouts_dir: str,
        embedding: Embedding,
        num_segments: int = None,
        len_segments: int = None,
        rand_init_rollout: bool = True,
    ):
        """
        Constructor

        :param rollouts_dir: directory containing the pre-recorded rollouts
        :param num_segments: number of segments into which the rollouts are split. For every segment, the initial
                             state of the simulation is reset, and thus the features of the trajectories are computed
                             separately for every segment. Either specify `num_segments` or `len_segments`.
        :param embedding: embedding used for pre-processing the data before (later) passing it to the posterior
        :param len_segments: length of the segments into which the rollouts are split. For every segment, the initial
                             state of the simulation is reset, and thus the features of the trajectories are computed
                             separately for every segment. Either specify `num_segments` or `len_segments`.
        :param rand_init_rollout: if `True`, choose the first rollout at random, and then cycle through the list
        """
        if not os.path.isdir(rollouts_dir):
            raise pyrado.PathErr(given=rollouts_dir)

        Serializable._init(self, locals())

        super().__init__(None, None, embedding, num_segments, len_segments)

        # Crawl through the directory and load every file that starts with the word rollout
        rollouts_rec = []
        for root, dirs, files in os.walk(rollouts_dir):
            dirs.clear()  # prevents walk() from going into subdirectories
            rollouts_rec = [pyrado.load(name=f, load_dir=root) for f in files if f.startswith("rollout")]
            check_all_lengths_equal(rollouts_rec)
        if not rollouts_rec:
            raise pyrado.ValueErr(msg="No rollouts have been found!")

        self.rollouts_dir = rollouts_dir
        self.rollouts_rec = rollouts_rec
        self._ring_idx = np.random.randint(0, len(rollouts_rec)) if rand_init_rollout else 0
        self._set_action_field(self.rollouts_rec)
Example No. 10
def plot_policy(args, ex_dir):
    plt.rc("text", usetex=args.use_tex)

    # Get the experiment's directory to load from
    eval_parent_dir = osp.join(ex_dir, "eval_domain_grid")
    if not osp.isdir(eval_parent_dir):
        raise pyrado.PathErr(given=eval_parent_dir)

    if args.load_all:
        list_eval_dirs = [tmp[0] for tmp in os.walk(eval_parent_dir)][1:]
    else:
        list_eval_dirs = [
            osp.join(eval_parent_dir, "ENV_NAME", "ALGO_NAME"),
        ]

    # Loop over all evaluations
    for eval_dir in list_eval_dirs:
        assert osp.isdir(eval_dir)

        # Load the data
        pickle_file = osp.join(eval_dir, "df_sp_grid_1d.pkl")
        if not osp.isfile(pickle_file):
            print(f"{pickle_file} is not a file! Skipping...")
            continue
        df = pd.read_pickle(pickle_file)

        # Remove constant rows
        df = df.loc[:, df.apply(pd.Series.nunique) != 1]

        _plot_and_save(
            df,
            "gravity_const",
            r"$gravity_const$",
            nom_dp_value=9.81,
            save_figure=args.save,
            save_dir=eval_dir,
        )

    plt.show()
Example No. 11
from pyrado.environment_wrappers.domain_randomization import MetaDomainRandWrapper
from pyrado.domain_randomization.utils import print_domain_params
from pyrado.logger.experiment import ask_for_experiment, load_dict_from_yaml
from pyrado.sampling.rollout import rollout, after_rollout_query
from pyrado.utils.argparser import get_argparser
from pyrado.utils.input_output import print_cbt
from pyrado.utils.data_types import RenderMode

if __name__ == '__main__':
    # Parse command line arguments
    args = get_argparser().parse_args()

    # Get the experiment's directory to load from
    ex_dir = ask_for_experiment()
    if not osp.isdir(ex_dir):
        raise pyrado.PathErr(given=ex_dir)

    # Load the environment randomizer
    env_sim = joblib.load(osp.join(ex_dir, 'env_sim.pkl'))
    hparam = load_dict_from_yaml(osp.join(ex_dir, 'hyperparams.yaml'))

    # Override the time step size if specified
    if args.dt is not None:
        env_sim.dt = args.dt

    # Crawl through the given directory and check how many init policies and candidates there are
    for root, dirs, files in os.walk(ex_dir):
        if args.load_all:
            found_policies = [p for p in files if p.endswith('_policy.pt')]
            found_cands = [c for c in files if c.endswith('_candidate.pt')]
        else:
Example No. 12
    # Get the experiment's directory to load from
    ex_dir = ask_for_experiment(hparam_list=args.show_hparams) if args.dir is None else args.dir

    # Find and load the Optuna data base
    study, study_name = None, None
    for file in os.listdir(ex_dir):
        if file.endswith(".db"):
            study_name = file[:-3]  # the file is named like the study, just need to cut the ending
            storage = f"sqlite:////{osp.join(ex_dir, file)}"
            study = optuna.load_study(study_name, storage)
            break  # assuming there is only one database

    if study is None:
        raise pyrado.PathErr(msg=f"No Optuna study found in {ex_dir}!")

    # Extract the values of all trials (Optuna was set to solve a minimization problem)
    trials = [t for t in study.trials if t.value is not None]  # broken trials return None
    values = np.array([t.value for t in trials])
    if study.direction == StudyDirection.MINIMIZE:
        idcs_best = values.argsort()
    else:
        idcs_best = values.argsort()[::-1]

    # Print the best parameter configurations
    print_cbt(
        f"The best parameter set of study {study_name} was found in trial_{study.best_trial.number} with value "
        f"{study.best_value} (average return on independent test rollouts).",
        "g",
Example No. 13
def _main():
    # Parse command line arguments
    argparser = get_argparser()
    argparser.add_argument(
        "--average",
        action="store_true",
        help="average over all loaded policies (default: False); create only a single heatmap",
    )
    argparser.add_argument(
        "--save_dir",
        help="if --average is set, the directory to save the plot to")
    args = argparser.parse_args()

    # Get the experiment's directory to load from
    if args.dir is None:
        ex_dirs = []
        while True:
            ex_dirs.append(ask_for_experiment(show_hyper_parameters=args.show_hyperparameters, max_display=50))
            if input("Ask for more (Y/n)? ") == "n":
                break
    else:
        ex_dirs = [d.strip() for d in args.dir.split(",")]
    eval_parent_dirs = []
    for ex_dir in ex_dirs:
        eval_parent_dir = osp.join(ex_dir, "eval_domain_grid")
        if not osp.isdir(eval_parent_dir):
            raise pyrado.PathErr(given=eval_parent_dir)
        eval_parent_dirs.append(eval_parent_dir)

    if args.load_all:
        list_eval_dirs = []
        for eval_parent_dir in eval_parent_dirs:
            list_eval_dirs += [tmp[0] for tmp in os.walk(eval_parent_dir)][1:]
    else:
        list_eval_dirs = [
            osp.join(eval_parent_dir, "ENV_NAME", "ALGO_NAME")
            for eval_parent_dir in eval_parent_dirs
        ]

    dataframes, eval_dirs = [], []
    for eval_dir in list_eval_dirs:
        assert osp.isdir(eval_dir)

        # Load the data
        pickle_file = osp.join(eval_dir, "df_sp_grid_2d.pkl")
        if not osp.isfile(pickle_file):
            print(f"{pickle_file} is not a file! Skipping...")
            continue
        df = pd.read_pickle(pickle_file)

        dataframes.append(df)
        eval_dirs.append(eval_dir)

    if args.average:
        _plot([sum(dataframes) / len(dataframes)], [args.save_dir], True)
    else:
        _plot(dataframes, eval_dirs, args.save)
Example No. 14
"""
Script to load the data from real-world rollouts, written to a file by the RcsPySim DataLogger class.
"""
import os.path as osp

import pandas as pd

import pyrado
from pyrado.environments.rcspysim.mini_golf import MiniGolfIKSim, MiniGolfJointCtrlSim
from pyrado.sampling.step_sequence import StepSequence
from pyrado.utils.argparser import get_argparser

if __name__ == "__main__":
    # Parse command line arguments
    args = get_argparser().parse_args()
    if not osp.isfile(args.file):
        raise pyrado.PathErr(given=args.file)
    if args.dir is None:
        # Use the file's directory by default
        args.dir = osp.dirname(args.file)
    elif not osp.isdir(args.dir):
        raise pyrado.PathErr(given=args.dir)

    df = pd.read_csv(args.file)

    if args.env_name == MiniGolfIKSim.name:
        env = MiniGolfIKSim()
    elif args.env_name == MiniGolfJointCtrlSim.name:
        env = MiniGolfJointCtrlSim()
    else:
        raise NotImplementedError
Example No. 15
    def save_dir(self, save_dir: pyrado.PathLike):
        """Set the directory where the data is saved to."""
        if not osp.isdir(save_dir):
            raise pyrado.PathErr(given=save_dir)
        self._save_dir = save_dir
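The missing decorator and the method-level indentation suggest that this snippet is the setter half of a property; a sketch of the presumed surrounding pattern (the class name is a placeholder, not taken from the original source):

class WithSaveDir:
    @property
    def save_dir(self) -> pyrado.PathLike:
        """Get the directory where the data is saved to."""
        return self._save_dir

    @save_dir.setter
    def save_dir(self, save_dir: pyrado.PathLike):
        """Set the directory where the data is saved to."""
        if not osp.isdir(save_dir):
            raise pyrado.PathErr(given=save_dir)
        self._save_dir = save_dir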
Example No. 16
import pyrado
from pyrado.algorithms.base import Algorithm
from pyrado.logger.step import CSVPrinter, TensorBoardPrinter
from pyrado.utils.argparser import get_argparser


if __name__ == '__main__':
    # Parse command line arguments
    parser = get_argparser()
    parser.add_argument('--new_ex_dir', type=str, nargs='?',
                        help="path to the directory where the experiment should be saved/moved to")
    args = parser.parse_args()

    if not osp.isdir(args.ex_dir):
        raise pyrado.PathErr(given=args.ex_dir)
    if args.new_ex_dir is None:
        raise pyrado.ValueErr(msg='Provide the path to the new experiment directory using --new_ex_dir')

    # Create the new directory and test it
    os.makedirs(args.new_ex_dir, exist_ok=True)
    if not osp.isdir(args.new_ex_dir):
        raise pyrado.PathErr(given=args.new_ex_dir)

    # Load the old algorithm including the loggers
    algo = Algorithm.load_snapshot(args.ex_dir)

    # Update all entries that contain information about where the experiment is stored
    algo.save_dir = args.new_ex_dir
    for printer in algo.logger.printers:
        if isinstance(printer, CSVPrinter):
Example No. 17
    def __init__(
        self,
        name: str,
        parent_dir: str,
        incl_pattern: str = None,
        excl_pattern: str = None,
        latest_evals_only: bool = False,
        eval_subdir_name: str = "evaluation",
        sort: bool = False,
    ):
        """
        Constructor

        :param name: label for the data, e.g. name of the algorithm
        :param parent_dir: path to the algorithm's directory
        :param incl_pattern: only include experiments if their names partially contain the include pattern
        :param excl_pattern: exclude experiments if their names (even partially) contain the exclude pattern
        :param latest_evals_only: if `True`, only the very latest evaluation file is loaded to estimate the returns
        :param sort: sort the found experiments by name, i.e. by date
        """
        if not osp.isdir(parent_dir):
            raise pyrado.PathErr(given=parent_dir)
        if incl_pattern is not None and not isinstance(incl_pattern, str):
            raise pyrado.TypeErr(given=incl_pattern, expected_type=str)
        if excl_pattern is not None and not isinstance(excl_pattern, str):
            raise pyrado.TypeErr(given=excl_pattern, expected_type=str)

        self.name = name
        self.parent_dir = parent_dir
        self.incl_pattern = incl_pattern
        self.excl_pattern = excl_pattern
        self.latest_evals_only = latest_evals_only
        self.eval_subdir_name = eval_subdir_name

        # Include experiments
        self.matches = get_immediate_subdirs(parent_dir)
        if sort:
            self.matches = natural_sort(self.matches)

        if self.incl_pattern is not None:
            # Only include experiments if their names partially contain the include pattern
            self.matches = list(filter(lambda d: self.incl_pattern in d, self.matches))

        if self.excl_pattern is not None:
            # Exclude experiments if their names (even partially) contain the exclude pattern
            self.matches = list(filter(lambda d: self.excl_pattern not in d, self.matches))

        self._returns_est_per_ex = []
        self.returns_est = []
        cnt_nonexist_dirs = 0
        for match in self.matches:
            # Get the evaluation subdirectory
            eval_dir = osp.join(match, self.eval_subdir_name)

            if osp.exists(eval_dir):
                # Crawl through the experiment's evaluation directory
                rets = []  # empirical returns from the experiments
                num_samples = []  # number of samples per return estimate
                for root, dirs, files in os.walk(eval_dir):
                    files.sort(reverse=True)  # in case there are multiple evaluations
                    # Only include the latest evaluation found in the folder if flag is set
                    for f in files if not self.latest_evals_only else files[:1]:
                        if f.endswith(".npy"):
                            rets.append(np.load(osp.join(eval_dir, f)))
                            num_samples.append(len(rets))
                        elif f.endswith(".pt"):
                            rets.append(to.load(osp.join(eval_dir, f)).cpu().numpy())
                        else:
                            raise FileNotFoundError

                # Store the estimated return per evaluation run (averaged over individual evaluations)
                self._returns_est_per_ex.append(np.mean(np.asarray(rets), axis=1))
                self.returns_est.extend(np.mean(np.asarray(rets), axis=1))
            else:
                cnt_nonexist_dirs += 1

        # Print what has been loaded
        ex_names = ["..." + m[m.rfind("/") :] for m in self.matches]  # cut off everything until the experiment's name
        print(
            tabulate(
                [[ex_name, ret] for ex_name, ret in zip(ex_names, self._returns_est_per_ex)],
                headers=["Loaded directory", "Returns averaged per experiment"],
            )
        )

        if cnt_nonexist_dirs == 0:
            print_cbt("All evaluation sub-directories have been found.", "g")
        else:
            print_cbt(f"{cnt_nonexist_dirs} evaluation sub-directories have been missed.", "y")
Example No. 18
from pyrado.utils.order import get_immediate_subdirs, natural_sort

if __name__ == '__main__':
    # Parse command line arguments
    args = get_argparser().parse_args()
    plt.rc('text', usetex=args.use_tex)

    # Get the experiments' directories to load from
    if args.ex_dir is None:
        parent_dir = input('Please enter the parent directory for the experiments to compare:\n')
    else:
        parent_dir = args.ex_dir
    if not osp.isdir(parent_dir):
        raise pyrado.PathErr(given=parent_dir)
    dirs = get_immediate_subdirs(parent_dir)
    dirs = natural_sort(dirs)

    # Collect average and best returns per iteration
    df = pd.DataFrame()
    best_returns = []

    # Plot progress of each experiment
    fig, axs = plt.subplots(2, figsize=pyrado.figsize_IEEE_1col_18to10)
    for idx, d in enumerate(dirs):
        # Load an experiment's data
        file = os.path.join(d, 'progress.csv')
        data = read_csv_w_replace(file)

        # Append one column per experiment
Example No. 19
    def load_posterior(
        load_dir: pyrado.PathLike,
        idx_iter: int = -1,
        idx_round: int = -1,
        obj: Optional[Any] = None,
        verbose: bool = False,
    ) -> Optional[DirectPosterior]:
        """
        Load the posterior of a given iteration (and round).

        :param load_dir: experiment's directory to crawl through
        :param idx_iter: iteration to load, to load the latest pass -1
        :param idx_round: round to load, to load the latest pass -1, ignored if the experiment was not multi-round
        :param obj: object for state dict loading, forwarded to `pyrado.load()`, by default no state dict loading
        :param verbose: if `True`, print the path of what has been loaded, forwarded to `pyrado.load()`
        :return: loaded sbi posterior, or `None` if there is no posterior with the given iteration / round index
        """
        if not os.path.isdir(load_dir):
            raise pyrado.PathErr(given=load_dir)
        if not isinstance(idx_iter, int):
            raise pyrado.TypeErr(given=idx_iter, expected_type=int)
        if not isinstance(idx_round, int):
            raise pyrado.TypeErr(given=idx_round, expected_type=int)

        if idx_iter == -1:
            # Check what is the latest iteration
            cnt_iter_max = -1
            for root, dirs, files in os.walk(load_dir):
                dirs.clear()  # prevents walk() from going into subdirectories
                for f in files:
                    if f.startswith("iter_") and f.endswith("_posterior.pt"):
                        cnt_iter = int(f[f.find("iter_") + len("iter_")])
                        cnt_iter_max = cnt_iter if cnt_iter > cnt_iter_max else cnt_iter_max
            idx_iter = cnt_iter_max

        # Check if the experiment was run in a multi-round setting
        multi_round_setting = False
        for root, dirs, files in os.walk(load_dir):
            dirs.clear()  # prevents walk() from going into subdirectories
            for f in files:
                if f.startswith(f"iter_") and "round" in f:
                    multi_round_setting = True
                    break

        if multi_round_setting:
            if idx_round == -1:
                # Check what is the latest round
                cnt_round_max = -1
                for root, dirs, files in os.walk(load_dir):
                    dirs.clear()  # prevents walk() from going into subdirectories
                    for f in files:
                        if "round" in f and f.endswith("_posterior.pt"):
                            cnt_round = int(f[f.find("round_") + len("round_")])
                            cnt_round_max = cnt_round if cnt_round > cnt_round_max else cnt_round_max
                idx_round = cnt_round_max

        # Check before loading, and print a warning message if there can not be a posterior with the obtained indices
        if idx_iter == -1:
            print_cbt(f"Invalid iteration index {idx_iter}! Check if there is a posterior in {load_dir}.", "r")
        if idx_round == -1 and multi_round_setting:
            print_cbt(f"Invalid round index {idx_round}! Check if there is a posterior in {load_dir}.", "r")

        # Load the current posterior
        str_round = f"_round_{idx_round}" if multi_round_setting else ""
        try:
            posterior = pyrado.load(
                name=f"iter_{idx_iter}{str_round}_posterior.pt", load_dir=load_dir, obj=obj, verbose=verbose
            )
        except FileNotFoundError:
            print_cbt("No posterior was loaded.", "y")
            posterior = None

        return posterior
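A hedged usage sketch: the method is defined inside a class, so it is presumably called on the experiment's sbi-based algorithm class; the class name below is a placeholder, not taken from the original source.

posterior = SbiBasedAlgo.load_posterior("/path/to/experiment", idx_iter=-1, idx_round=-1, verbose=True)
if posterior is not None:
    print("Loaded the latest posterior.")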
Example No. 20
        {"varied_params": list(param_spec.keys())},
        {"num_rpp": args.num_rollouts_per_config, "seed": args.seed},
        {"metrics": dict_arraylike_to_float(metrics)},
        save_dir=save_dir,
        file_name="summary",
    )
    pyrado.save(df, f"df_sp_grid_{len(param_spec) if param_spec_dim is None else param_spec_dim}d.pkl", save_dir)


if __name__ == "__main__":
    # Parse command line arguments
    g_args = get_argparser().parse_args()

    if g_args.load_all:
        if not g_args.dir:
            raise pyrado.PathErr(msg="load_all was set but no dir was given")
        if not os.path.isdir(g_args.dir):
            raise pyrado.PathErr(given=g_args.dir)

        g_ex_dirs = [tmp[0] for tmp in os.walk(g_args.dir, followlinks=True) if "policy.pt" in tmp[2]]

    elif g_args.dir is None:
        g_ex_dirs = [ask_for_experiment(hparam_list=g_args.show_hyperparameters, max_display=50)]

    else:
        g_ex_dirs = [g_args.dir]

    print(f"Evaluating all of {g_ex_dirs}.")
    for g_ex_dir in g_ex_dirs:
        print(f"Evaluating {g_ex_dir}.")
        evaluate_policy(g_args, g_ex_dir)
Example No. 21
            "gravity_const",
            r"$gravity_const$",
            nom_dp_value=9.81,
            save_figure=args.save,
            save_dir=eval_dir,
        )

    plt.show()


if __name__ == "__main__":
    # Parse command line arguments
    g_args = get_argparser().parse_args()
    if g_args.load_all and g_args.dir:
        if not os.path.isdir(g_args.dir):
            raise pyrado.PathErr(given=g_args.dir)

        g_ex_dirs = [
            tmp[0] for tmp in os.walk(g_args.dir) if "policy.pt" in tmp[2]
        ]
    elif g_args.dir is None:
        g_ex_dirs = [ask_for_experiment(show_hyper_parameters=g_args.show_hyperparameters, max_display=50)]
    else:
        g_ex_dirs = [g_args.dir]

    print(f"Plotting all of {g_ex_dirs}.")
    for g_ex_dir in g_ex_dirs:
Example No. 22
    return loss_trn


if __name__ == "__main__":
    # Parse command line arguments
    args = get_argparser().parse_args()

    if args.dir is None:
        ex_dir = setup_experiment("hyperparams", TSPred.name, f"{TSPred.name}_{ADNPolicy.name}")
        study_dir = osp.join(pyrado.TEMP_DIR, ex_dir)
        print_cbt(f"Starting a new Optuna study.", "c", bright=True)
    else:
        study_dir = args.dir
        if not osp.isdir(study_dir):
            raise pyrado.PathErr(given=study_dir)
        print_cbt(f"Continuing an existing Optuna study.", "c", bright=True)

    name = f"{TSPred.name}_{TSPred.name}_{ADNPolicy.name}"
    study = optuna.create_study(
        study_name=name,
        storage=f"sqlite:////{osp.join(study_dir, f'{name}.db')}",
        direction="maximize",
        load_if_exists=True,
    )

    # Start optimizing
    study.optimize(functools.partial(train_and_eval, study_dir=study_dir, seed=args.seed), n_trials=100, n_jobs=16)

    # Save the best hyper-parameters
    save_dicts_to_yaml(
Example No. 23
                fig_cb.savefig(osp.join(save_dir, f'cb-{name}.pdf'))


if __name__ == '__main__':
    # Parse command line arguments
    args = get_argparser().parse_args()
    plt.rc('text', usetex=args.use_tex)

    # Commonly scale the colorbars of all plots
    accnorm = AccNorm()

    # Get the experiment's directory to load from
    ex_dir = ask_for_experiment() if args.ex_dir is None else args.ex_dir
    eval_parent_dir = osp.join(ex_dir, 'eval_domain_grid')
    if not osp.isdir(eval_parent_dir):
        raise pyrado.PathErr(given=eval_parent_dir)

    if args.load_all:
        list_eval_dirs = [tmp[0] for tmp in os.walk(eval_parent_dir)][1:]
    else:
        list_eval_dirs = [
            osp.join(eval_parent_dir, 'ENV_NAME', 'ALGO_NAME'),
        ]

    # Loop over all evaluations
    for eval_dir in list_eval_dirs:
        assert osp.isdir(eval_dir)

        # Load the data
        df = pd.read_pickle(osp.join(eval_dir, 'df_sp_grid_nd.pkl'))
Example No. 24
from pyrado.utils.experiments import read_csv_w_replace
from pyrado.utils.ordering import get_immediate_subdirs, natural_sort

if __name__ == "__main__":
    # Parse command line arguments
    args = get_argparser().parse_args()
    plt.rc("text", usetex=args.use_tex)

    # Get the experiments' directories to load from
    if args.dir is None:
        parent_dir = input("Please enter the directory for the experiments to compare:\n")
    else:
        parent_dir = args.dir
    if not osp.isdir(parent_dir):
        raise pyrado.PathErr(given=parent_dir)
    dirs = get_immediate_subdirs(parent_dir)
    dirs = natural_sort(dirs)

    # Collect average and best returns per iteration
    df = pd.DataFrame()
    best_returns = []

    # Plot progress of each experiment
    fig, axs = plt.subplots(2, figsize=(12, 8))
    for idx, d in enumerate(dirs):
        # Load an experiment's data
        file = os.path.join(d, "progress.csv")
        data = read_csv_w_replace(file)

        # Append one column per experiment