from collections import namedtuple

from fannypack.utils import pdb_safety_net

from dynamics_learning.custom.lr_functions import lr5
from dynamics_learning.data.datasets import MITPushDatasetConfig
from dynamics_learning.networks.baseline.planet_baseline import (
    OverShoot,
    PlaNetBaselineFilterConfig,
)
from dynamics_learning.networks.estimator import EstimatorConfig
from dynamics_learning.training.configs import ExpConfig
from dynamics_learning.training.experiments import train

model_config: EstimatorConfig
exp_config: ExpConfig

pdb_safety_net()

# default hyperparameters, wrapped in an immutable namedtuple config
hyperparameter_defaults = dict(
    batch_size=16,
    learning_rate=1e-3,
    epochs=400,
    latent_dim=16,
)
HyperParameterConfig = namedtuple(
    "HyperParameterConfig", list(hyperparameter_defaults.keys())
)
hy_config = HyperParameterConfig(**hyperparameter_defaults)

dataset_config = MITPushDatasetConfig(
    traj_len=50,  # 10 for cylinder, 20 for mit push
    num_viz_trajectories=20,
    pixel_res=256,
    raw=True,
    cond=False,
)
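# What follows is a minimal sketch of how these configs might be wired
# together and handed to `train`. The `latent_dim` keyword on
# `PlaNetBaselineFilterConfig`, the `ExpConfig` constructor arguments, and the
# `train` call signature are assumptions: only `exp_config.name` and
# `exp_config.model` are confirmed elsewhere in this repo, and the experiment
# name below is a placeholder.
model_config = PlaNetBaselineFilterConfig(  # assumed constructor field
    latent_dim=hy_config.latent_dim,
)
exp_config = ExpConfig(  # assumed constructor fields; name is a placeholder
    name="planet_baseline_mit_push",
    model=model_config,
)
train(exp_config)  # assumed entry point for launching the experiment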
import tempfile
import time
from dataclasses import replace
from typing import Optional

import numpy as np
import torch
from fannypack.utils import Buddy, pdb_safety_net

from dynamics_learning.data.datasets import DatasetConfig, MITPushDatasetConfig
from dynamics_learning.training.configs import ExpConfig

# NOTE: `check_valid` and `count_parameters` are project helpers whose home
# module is not shown in this snippet.


def evaluate(
    exp_config: ExpConfig,
    dataset_config: DatasetConfig,
    experiment_name: Optional[str] = None,
    save_summary: bool = False,
    debug: bool = False,
) -> None:
    """Evaluates a network.

    Parameters
    ----------
    exp_config : ExpConfig
        Specifies all information regarding the experiment.
    dataset_config : DatasetConfig
        Specifies the dataset used for evaluation.
    experiment_name : str, optional
        Name of experiment to load. If None, loads the most recent checkpoint.
    save_summary : bool, default=False
        Flag indicating whether to visualize a summary of the evaluation.
    debug : bool, default=False
        Flag indicating whether to evaluate in debug mode.
    """
    pdb_safety_net()

    # set random seed for repeatability
    np.random.seed(0)
    torch.manual_seed(0)

    check_valid(exp_config)

    # build model
    estimator = exp_config.model.create()

    # set up buddy
    # some hacks to get around serialization
    # TODO: figure out a better way to do this
    metadata_dir = "metadata"
    tmp_dir = tempfile.gettempdir()  # renamed to avoid shadowing the `dir` builtin
    assert exp_config.name is not None
    buddy = Buddy(exp_config.name, estimator, optimizer_type="adam", metadata_dir=tmp_dir)
    buddy._metadata_dir = metadata_dir

    # load the checkpoint
    if experiment_name is None:
        buddy.load_checkpoint()
    else:
        buddy.load_checkpoint(label="final", experiment_name=experiment_name)

    # provide network diagnostics
    print()
    print("Model Architecture:")
    print(estimator)
    print(f"Total parameters: {count_parameters(estimator)}")
    print(f"Total trainable parameters: {count_parameters(estimator, trainable=True)}")
    print(f"Latent dim size: {exp_config.model.latent_dim}")
    print()

    # model performance
    if debug:
        dataset_config = replace(dataset_config, num_viz_trajectories=3)
    dataset = dataset_config.create()
    vis_data = dataset.get_viz_data(buddy.device)

    # reporting eval losses: (vis_data, filter_times, predict_times)
    start_time = time.time()
    if isinstance(dataset_config, MITPushDatasetConfig):
        prediction_points = 25
    else:
        prediction_points = 50
    estimator.eval_loss(vis_data, 5, prediction_points)
    estimator.eval_loss(vis_data, 25, prediction_points)
    end_time = time.time()
    print(f"Total evaluation time: {end_time - start_time}")

    # summary plots
    if save_summary:
        assert exp_config.name is not None
        estimator.summary_plot(vis_data, exp_config.name, debug=debug)
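# A minimal usage sketch, assuming `exp_config` and `dataset_config` were
# built as in the training script above; "planet_baseline_final" is a
# hypothetical experiment name, not one shipped with the repo.
def example_evaluation(exp_config: ExpConfig, dataset_config: DatasetConfig) -> None:
    evaluate(
        exp_config,
        dataset_config,
        experiment_name="planet_baseline_final",  # hypothetical saved run
        save_summary=True,  # also produce summary plots via `summary_plot`
    )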