def train_dynamics(config,
                   train_dir,  # str: directory to save output
                   ):
    # set random seed for reproducibility
    set_seed(config['train']['random_seed'])

    st_epoch = config['train']['resume_epoch'] if config['train']['resume_epoch'] > 0 else 0
    tee = Tee(os.path.join(train_dir, 'train_st_epoch_%d.log' % st_epoch), 'w')

    tensorboard_dir = os.path.join(train_dir, "tensorboard")
    if not os.path.exists(tensorboard_dir):
        os.makedirs(tensorboard_dir)
    writer = SummaryWriter(log_dir=tensorboard_dir)

    # save the config
    save_yaml(config, os.path.join(train_dir, "config.yaml"))
    print(config)

    # load the data
    episodes = load_episodes_from_config(config)
    action_function = ActionFunctionFactory.function_from_config(config)
    observation_function = ObservationFunctionFactory.function_from_config(config)

    datasets = {}
    dataloaders = {}
    data_n_batches = {}
    for phase in ['train', 'valid']:
        print("Loading data for %s" % phase)
        datasets[phase] = MultiEpisodeDataset(
            config,
            action_function=action_function,
            observation_function=observation_function,
            episodes=episodes,
            phase=phase)

        dataloaders[phase] = DataLoader(
            datasets[phase],
            batch_size=config['train']['batch_size'],
            shuffle=(phase == 'train'),
            num_workers=config['train']['num_workers'])

        data_n_batches[phase] = len(dataloaders[phase])

    use_gpu = torch.cuda.is_available()

    # compute normalization parameters if not starting from pre-trained network . . .

    '''
    define model for dynamics prediction
    '''
    model_dy = None

    if config['train']['resume_epoch'] >= 0:
        # resume from a pretrained checkpoint
        model_dy = DynaNetMLP(config)
        state_dict_path = os.path.join(
            train_dir, 'net_dy_epoch_%d_iter_%d_state_dict.pth' %
            (config['train']['resume_epoch'], config['train']['resume_iter']))
        print("Loading saved ckp from %s" % state_dict_path)

        # the network must be constructed before load_state_dict can restore
        # its weights; note that the optimizer state is NOT restored here,
        # so Adam's moment estimates restart from scratch on resume
        model_dy.load_state_dict(torch.load(state_dict_path))
    else:
        # not starting from pre-trained: create the network and compute the
        # normalization parameters
        model_dy = DynaNetMLP(config)

        # compute normalization params
        stats = datasets["train"].compute_dataset_statistics()

        obs_mean = stats['observations']['mean']
        obs_std = stats['observations']['std']
        observations_normalizer = DataNormalizer(obs_mean, obs_std)

        action_mean = stats['actions']['mean']
        action_std = stats['actions']['std']
        actions_normalizer = DataNormalizer(action_mean, action_std)

        model_dy.action_normalizer = actions_normalizer
        model_dy.state_normalizer = observations_normalizer

    print("model_dy #params: %d" % count_trainable_parameters(model_dy))

    # criterion
    criterionMSE = nn.MSELoss()

    # optimizer
    params = model_dy.parameters()
    optimizer = optim.Adam(params,
                           lr=config['train']['lr'],
                           betas=(config['train']['adam_beta1'], 0.999))
    scheduler = ReduceLROnPlateau(optimizer, 'min', factor=0.9, patience=10, verbose=True)

    if use_gpu:
        model_dy = model_dy.cuda()

    best_valid_loss = np.inf
    global_iteration = 0
    epoch_counter_external = 0

    try:
        for epoch in range(st_epoch, config['train']['n_epoch']):
            phases = ['train', 'valid']
            epoch_counter_external = epoch

            writer.add_scalar("Training Params/epoch", epoch, global_iteration)
            for phase in phases:
                model_dy.train(phase == 'train')

                meter_loss_rmse = AverageMeter()

                # bar = ProgressBar(max_value=data_n_batches[phase])
                loader = dataloaders[phase]

                for i, data in enumerate(loader):
                    global_iteration += 1

                    with torch.set_grad_enabled(phase == 'train'):
                        n_his, n_roll = config['train']['n_history'], config['train']['n_rollout']
                        n_samples = n_his + n_roll

                        if config['env']['type'] in ['PusherSlider']:
                            states = data['observations']
                            actions = data['actions']

                            if use_gpu:
                                states = states.cuda()
                                actions = actions.cuda()

                            # states, actions = data
                            assert states.size(1) == n_samples

                            # normalize states and actions once for the entire rollout
                            states = model_dy.state_normalizer.normalize(states)
                            actions = model_dy.action_normalizer.normalize(actions)

                            B = states.size(0)

                            loss_mse = 0.
                            # state_cur: B x n_his x state_dim
                            state_cur = states[:, :n_his]

                            for j in range(n_roll):
                                state_des = states[:, n_his + j]

                                # action_cur: B x n_his x action_dim
                                action_cur = actions[:, j:j + n_his] if actions is not None else None

                                # state_cur: B x n_his x state_dim
                                # state_pred: B x state_dim
                                state_pred = model_dy(state_cur, action_cur)

                                loss_mse_cur = criterionMSE(state_pred, state_des)
                                loss_mse += loss_mse_cur / n_roll

                                # update state_cur
                                # state_pred.unsqueeze(1): B x 1 x state_dim
                                state_cur = torch.cat([state_cur[:, 1:], state_pred.unsqueeze(1)], 1)

                            meter_loss_rmse.update(np.sqrt(loss_mse.item()), B)

                    if phase == 'train':
                        optimizer.zero_grad()
                        loss_mse.backward()
                        optimizer.step()

                    if i % config['train']['log_per_iter'] == 0:
                        log = '%s [%d/%d][%d/%d] LR: %.6f' % (
                            phase, epoch, config['train']['n_epoch'], i,
                            data_n_batches[phase], get_lr(optimizer))
                        log += ', rmse: %.6f (%.6f)' % (
                            np.sqrt(loss_mse.item()), meter_loss_rmse.avg)
                        print(log)

                        # log data to tensorboard
                        # only do it once we have reached 500 iterations
                        if global_iteration > 500:
                            writer.add_scalar("Params/learning rate", get_lr(optimizer), global_iteration)
                            writer.add_scalar("Loss/train", loss_mse.item(), global_iteration)
                            writer.add_scalar("RMSE average loss/train", meter_loss_rmse.avg, global_iteration)

                    if phase == 'train' and i % config['train']['ckp_per_iter'] == 0:
                        save_model(model_dy, '%s/net_dy_epoch_%d_iter_%d' % (train_dir, epoch, i))

                log = '%s [%d/%d] Loss: %.6f, Best valid: %.6f' % (
                    phase, epoch, config['train']['n_epoch'],
                    meter_loss_rmse.avg, best_valid_loss)
                print(log)

                if phase == 'valid':
                    scheduler.step(meter_loss_rmse.avg)
                    writer.add_scalar("RMSE average loss/valid", meter_loss_rmse.avg, global_iteration)
                    if meter_loss_rmse.avg < best_valid_loss:
                        best_valid_loss = meter_loss_rmse.avg
                        save_model(model_dy, '%s/net_best_dy' % (train_dir))

                writer.flush()  # flush SummaryWriter events to disk

    except KeyboardInterrupt:
        # save the network if we get a keyboard interrupt
        save_model(model_dy, '%s/net_dy_epoch_%d_keyboard_interrupt' % (train_dir, epoch_counter_external))
        writer.flush()  # flush SummaryWriter events to disk
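# DataNormalizer is used above to z-score states and actions with statistics
# from compute_dataset_statistics(), but its definition is not part of this
# section. A minimal sketch, assuming elementwise (x - mean) / std with
# broadcasting over batch/time dimensions; the eps guard is an assumption:

import torch


class DataNormalizer(object):
    """Minimal sketch: elementwise z-scoring with training-set mean/std."""

    def __init__(self, mean, std, eps=1e-6):
        self._mean = torch.as_tensor(mean).float()
        self._std = torch.as_tensor(std).float()
        self._eps = eps  # guards against zero std in constant dimensions

    def normalize(self, x):
        # broadcasts over leading [B, T] dimensions
        return (x - self._mean.to(x.device)) / (self._std.to(x.device) + self._eps)

    def denormalize(self, x):
        return x * (self._std.to(x.device) + self._eps) + self._mean.to(x.device)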
loader = pil_loader

trans_to_tensor = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

'''
store results
'''
os.system('mkdir -p ' + args.evalf)

log_path = os.path.join(args.evalf, 'log.txt')
tee = Tee(log_path, 'w')


def evaluate(roll_idx, video=True, image=True):
    eval_path = os.path.join(args.evalf, str(roll_idx))

    n_split = 3
    split = 4

    if image:
        os.system('mkdir -p ' + eval_path)
        print('Save images to %s' % eval_path)

    if video:
        video_path = eval_path + '.avi'
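# Tee is used throughout for logging but is not defined in this section.
# A minimal sketch, assuming it follows the classic pattern of duplicating
# everything written to stdout into a log file:

import sys


class Tee(object):
    """Minimal sketch: mirror stdout into a file so training logs persist."""

    def __init__(self, name, mode):
        self.file = open(name, mode)
        self.stdout = sys.stdout
        sys.stdout = self  # route all subsequent print() calls through us

    def write(self, data):
        self.file.write(data)
        self.stdout.write(data)

    def flush(self):
        self.file.flush()
        self.stdout.flush()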
def train_dynamics(config,
                   train_dir,  # str: directory to save output
                   multi_episode_dict,  # multi_episode_dict
                   ):
    use_precomputed_keypoints = (config['dataset']['visual_observation']['enabled']
                                 and config['dataset']['visual_observation']['descriptor_keypoints'])

    # set random seed for reproducibility
    set_seed(config['train']['random_seed'])

    st_epoch = config['train']['resume_epoch'] if config['train']['resume_epoch'] > 0 else 0
    tee = Tee(os.path.join(train_dir, 'train_st_epoch_%d.log' % st_epoch), 'w')

    tensorboard_dir = os.path.join(train_dir, "tensorboard")
    if not os.path.exists(tensorboard_dir):
        os.makedirs(tensorboard_dir)
    writer = SummaryWriter(log_dir=tensorboard_dir)

    # save the config
    save_yaml(config, os.path.join(train_dir, "config.yaml"))

    action_function = ActionFunctionFactory.function_from_config(config)
    observation_function = ObservationFunctionFactory.function_from_config(config)

    datasets = {}
    dataloaders = {}
    data_n_batches = {}
    for phase in ['train', 'valid']:
        print("Loading data for %s" % phase)
        datasets[phase] = MultiEpisodeDataset(
            config,
            action_function=action_function,
            observation_function=observation_function,
            episodes=multi_episode_dict,
            phase=phase)

        dataloaders[phase] = DataLoader(
            datasets[phase],
            batch_size=config['train']['batch_size'],
            shuffle=(phase == 'train'),
            num_workers=config['train']['num_workers'],
            drop_last=True)

        data_n_batches[phase] = len(dataloaders[phase])

    use_gpu = torch.cuda.is_available()

    # compute normalization parameters if not starting from pre-trained network . . .

    '''
    define model for dynamics prediction
    '''
    model_dy = build_visual_dynamics_model(config)
    K = config['vision_net']['num_ref_descriptors']

    print("model_dy.vision_net._reference_descriptors.shape",
          model_dy.vision_net._ref_descriptors.shape)
    print("model_dy.vision_net.descriptor_dim", model_dy.vision_net.descriptor_dim)
    print("model_dy #params: %d" % count_trainable_parameters(model_dy))

    camera_name = config['vision_net']['camera_name']
    W = config['env']['rgbd_sensors']['sensor_list'][camera_name]['width']
    H = config['env']['rgbd_sensors']['sensor_list'][camera_name]['height']
    diag = np.sqrt(W**2 + H**2)  # use this to scale the loss

    # sample reference descriptors unless using precomputed keypoints
    if not use_precomputed_keypoints:
        # sample reference descriptors from the first training episode
        episode_names = list(datasets["train"].episode_dict.keys())
        episode_names.sort()

        episode_name = episode_names[0]
        episode = datasets["train"].episode_dict[episode_name]
        episode_idx = 0
        camera_name = config["vision_net"]["camera_name"]
        image_data = episode.get_image_data(camera_name, episode_idx)
        des_img = torch.Tensor(image_data['descriptor'])
        mask_img = torch.Tensor(image_data['mask'])
        ref_descriptor_dict = sample_descriptors(des_img,
                                                 mask_img,
                                                 config['vision_net']['num_ref_descriptors'])

        model_dy.vision_net._ref_descriptors.data = ref_descriptor_dict['descriptors']
        model_dy.vision_net.reference_image = image_data['rgb']
        model_dy.vision_net.reference_indices = ref_descriptor_dict['indices']
    else:
        metadata_file = os.path.join(get_data_root(),
                                     config['dataset']['descriptor_keypoints_dir'],
                                     'metadata.p')
        descriptor_metadata = load_pickle(metadata_file)

        # [32, 2]
        ref_descriptors = torch.Tensor(descriptor_metadata['ref_descriptors'])

        # [K, 2]
        ref_descriptors = ref_descriptors[:K]
        model_dy.vision_net._ref_descriptors.data = ref_descriptors
        model_dy.vision_net._ref_descriptors_metadata = descriptor_metadata

        # this is just a sanity check
        assert model_dy.vision_net.num_ref_descriptors == K

    print("reference_descriptors",
          model_dy.vision_net._ref_descriptors)

    # criterion
    criterionMSE = nn.MSELoss()
    l1Loss = nn.L1Loss()

    # optimizer
    params = model_dy.parameters()
    lr = float(config['train']['lr'])
    optimizer = optim.Adam(params, lr=lr, betas=(config['train']['adam_beta1'], 0.999))

    # setup scheduler
    sc = config['train']['lr_scheduler']
    scheduler = ReduceLROnPlateau(optimizer,
                                  mode='min',
                                  factor=sc['factor'],
                                  patience=sc['patience'],
                                  threshold_mode=sc['threshold_mode'],
                                  cooldown=sc['cooldown'],
                                  verbose=True)

    if use_gpu:
        print("using gpu")
        model_dy = model_dy.cuda()

    print("model_dy.vision_net._ref_descriptors.device",
          model_dy.vision_net._ref_descriptors.device)
    print("model_dy.vision_net #params: %d" % (count_trainable_parameters(model_dy.vision_net)))

    best_valid_loss = np.inf
    global_iteration = 0
    epoch_counter_external = 0

    try:
        for epoch in range(st_epoch, config['train']['n_epoch']):
            phases = ['train', 'valid']
            epoch_counter_external = epoch

            writer.add_scalar("Training Params/epoch", epoch, global_iteration)
            for phase in phases:
                model_dy.train(phase == 'train')

                meter_loss_rmse = AverageMeter()
                step_duration_meter = AverageMeter()

                # bar = ProgressBar(max_value=data_n_batches[phase])
                loader = dataloaders[phase]

                for i, data in enumerate(loader):
                    step_start_time = time.time()
                    global_iteration += 1

                    with torch.set_grad_enabled(phase == 'train'):
                        n_his, n_roll = config['train']['n_history'], config['train']['n_rollout']
                        n_samples = n_his + n_roll

                        if DEBUG:
                            print("global iteration: %d" % (global_iteration))

                        # visual_observations = data['visual_observations']
                        visual_observations_list = data['visual_observations_list']
                        observations = data['observations']
                        actions = data['actions']

                        if use_gpu:
                            observations = observations.cuda()
                            actions = actions.cuda()

                        # states, actions = data
                        assert actions.size(1) == n_samples

                        B = actions.size(0)

                        loss_mse = 0.

                        # compute the output of the visual model for all timesteps
                        visual_model_output_list = []
                        for visual_obs in visual_observations_list:
                            # visual_obs is a dict containing the observation for a single
                            # time step (across the batch, however)
                            # visual_obs[<camera_name>]['rgb_tensor'] has shape [B, 3, H, W]

                            # probably need to cast input to cuda
                            dynamics_net_input = None
                            if use_precomputed_keypoints:
                                # note: precomputed descriptors stored on disk are of size
                                # K = 32.
                                # We need to trim them down to the appropriate size.
                                # [B, K_disk, 2] where K_disk is the number of keypoints on disk
                                keypoints = visual_obs[camera_name]['descriptor_keypoints']

                                # [B, K, 2]
                                keypoints = keypoints[:, :K]

                                if DEBUG:
                                    print("keypoints.shape", keypoints.shape)

                                dynamics_net_input = keypoints.flatten(start_dim=1)
                            else:
                                out_dict = model_dy.vision_net.forward(visual_obs)

                                # [B, vision_model_out_dim]
                                dynamics_net_input = out_dict['dynamics_net_input']

                            visual_model_output_list.append(dynamics_net_input)

                        # concatenate this into a tensor
                        # [B, n_samples, vision_model_out_dim]
                        visual_model_output = torch.stack(visual_model_output_list, dim=1)

                        # cast this to float so it can be concatenated below
                        visual_model_output = visual_model_output.type_as(observations)

                        if DEBUG:
                            print('visual_model_output.shape', visual_model_output.shape)
                            print("observations.shape", observations.shape)
                            print("actions.shape", actions.shape)

                        # states are obtained by concatenating visual_model_output and observations
                        # [B, n_samples, vision_model_out_dim + obs_dim]
                        states = torch.cat((visual_model_output, observations), dim=-1)

                        # state_cur: B x n_his x state_dim
                        state_cur = states[:, :n_his]

                        if DEBUG:
                            print("states.shape", states.shape)

                        for j in range(n_roll):
                            if DEBUG:
                                print("n_roll j: %d" % (j))

                            state_des = states[:, n_his + j]

                            # action_cur: B x n_his x action_dim
                            action_cur = actions[:, j:j + n_his] if actions is not None else None

                            # state_pred: B x state_dim
                            input = {'observation': state_cur,
                                     'action': action_cur,
                                     }

                            if DEBUG:
                                print("state_cur.shape", state_cur.shape)
                                print("action_cur.shape", action_cur.shape)

                            state_pred = model_dy.dynamics_net(input)

                            # normalize by diag to keep the loss in the [0, 1] range
                            loss_mse_cur = criterionMSE(state_pred / diag, state_des / diag)
                            loss_mse += loss_mse_cur / n_roll

                            # l1Loss
                            loss_l1 = l1Loss(state_pred, state_des)

                            # update state_cur
                            # state_pred.unsqueeze(1): B x 1 x state_dim
                            # state_cur: B x n_his x state_dim
                            state_cur = torch.cat([state_cur[:, 1:], state_pred.unsqueeze(1)], 1)

                        meter_loss_rmse.update(np.sqrt(loss_mse.item()), B)

                    step_duration_meter.update(time.time() - step_start_time)

                    if phase == 'train':
                        optimizer.zero_grad()
                        loss_mse.backward()
                        optimizer.step()

                    if (i % config['train']['log_per_iter'] == 0) or (global_iteration % config['train']['log_per_iter'] == 0):
                        log = '%s [%d/%d][%d/%d] LR: %.6f' % (
                            phase, epoch, config['train']['n_epoch'], i,
                            data_n_batches[phase], get_lr(optimizer))
                        log += ', rmse: %.6f (%.6f)' % (
                            np.sqrt(loss_mse.item()), meter_loss_rmse.avg)
                        log += ', step time %.6f' % (step_duration_meter.avg)
                        step_duration_meter.reset()
                        print(log)

                        # log data to tensorboard
                        # only do it once we have reached 100 iterations
                        if global_iteration > 100:
                            writer.add_scalar("Params/learning rate", get_lr(optimizer), global_iteration)
                            writer.add_scalar("Loss_MSE/%s" % (phase), loss_mse.item(), global_iteration)
                            writer.add_scalar("L1/%s" % (phase), loss_l1.item(), global_iteration)
                            writer.add_scalar("L1_fraction/%s" % (phase), loss_l1.item() / diag, global_iteration)
                            writer.add_scalar("RMSE average loss/%s" % (phase), meter_loss_rmse.avg, global_iteration)

                    if phase == 'train' and i % config['train']['ckp_per_iter'] == 0:
                        save_model(model_dy, '%s/net_dy_epoch_%d_iter_%d' % (train_dir, epoch, i))

                log = '%s [%d/%d] Loss: %.6f, Best valid: %.6f' % (
                    phase, epoch, config['train']['n_epoch'],
                    meter_loss_rmse.avg, best_valid_loss)
                print(log)

                if phase == 'valid':
                    if config['train']['lr_scheduler']['enabled']:
                        scheduler.step(meter_loss_rmse.avg)
print("\nPhase == valid") # print("meter_loss_rmse.avg", meter_loss_rmse.avg) # print("best_valid_loss", best_valid_loss) if meter_loss_rmse.avg < best_valid_loss: best_valid_loss = meter_loss_rmse.avg save_model(model_dy, '%s/net_best_dy' % (train_dir)) writer.flush() # flush SummaryWriter events to disk except KeyboardInterrupt: # save network if we have a keyboard interrupt save_model(model_dy, '%s/net_dy_epoch_%d_keyboard_interrupt' % (train_dir, epoch_counter_external)) writer.flush() # flush SummaryWriter events to disk
def train_dynamics(config,
                   train_dir,  # str: directory to save output
                   multi_episode_dict=None,
                   spatial_descriptors_idx=None,
                   metadata=None,
                   spatial_descriptors_data=None,
                   ):
    assert multi_episode_dict is not None
    # assert spatial_descriptors_idx is not None

    # set random seed for reproducibility
    set_seed(config['train']['random_seed'])

    st_epoch = config['train']['resume_epoch'] if config['train']['resume_epoch'] > 0 else 0
    tee = Tee(os.path.join(train_dir, 'train_st_epoch_%d.log' % st_epoch), 'w')

    tensorboard_dir = os.path.join(train_dir, "tensorboard")
    if not os.path.exists(tensorboard_dir):
        os.makedirs(tensorboard_dir)
    writer = SummaryWriter(log_dir=tensorboard_dir)

    # save the config
    save_yaml(config, os.path.join(train_dir, "config.yaml"))

    if metadata is not None:
        save_pickle(metadata, os.path.join(train_dir, 'metadata.p'))

    if spatial_descriptors_data is not None:
        save_pickle(spatial_descriptors_data, os.path.join(train_dir, 'spatial_descriptors.p'))

    training_stats = dict()
    training_stats_file = os.path.join(train_dir, 'training_stats.yaml')

    # load the data
    action_function = ActionFunctionFactory.function_from_config(config)
    observation_function = ObservationFunctionFactory.function_from_config(config)

    datasets = {}
    dataloaders = {}
    data_n_batches = {}
    for phase in ['train', 'valid']:
        print("Loading data for %s" % phase)
        datasets[phase] = MultiEpisodeDataset(
            config,
            action_function=action_function,
            observation_function=observation_function,
            episodes=multi_episode_dict,
            phase=phase)

        dataloaders[phase] = DataLoader(
            datasets[phase],
            batch_size=config['train']['batch_size'],
            shuffle=(phase == 'train'),
            num_workers=config['train']['num_workers'],
            drop_last=True)

        data_n_batches[phase] = len(dataloaders[phase])

    use_gpu = torch.cuda.is_available()

    # compute normalization parameters if not starting from pre-trained network . . .
    '''
    Build model for dynamics prediction
    '''
    model_dy = build_dynamics_model(config)
    camera_name = config['vision_net']['camera_name']

    # criterion
    criterionMSE = nn.MSELoss()
    l1Loss = nn.L1Loss()
    smoothL1 = nn.SmoothL1Loss()

    # optimizer
    params = model_dy.parameters()
    lr = float(config['train']['lr'])
    optimizer = optim.Adam(params, lr=lr, betas=(config['train']['adam_beta1'], 0.999))

    # setup scheduler
    sc = config['train']['lr_scheduler']
    scheduler = None

    if config['train']['lr_scheduler']['enabled']:
        if config['train']['lr_scheduler']['type'] == "ReduceLROnPlateau":
            scheduler = ReduceLROnPlateau(optimizer,
                                          mode='min',
                                          factor=sc['factor'],
                                          patience=sc['patience'],
                                          threshold_mode=sc['threshold_mode'],
                                          cooldown=sc['cooldown'],
                                          verbose=True)
        elif config['train']['lr_scheduler']['type'] == "StepLR":
            step_size = config['train']['lr_scheduler']['step_size']
            gamma = config['train']['lr_scheduler']['gamma']
            scheduler = StepLR(optimizer, step_size=step_size, gamma=gamma)
        else:
            raise ValueError("unknown scheduler type: %s" % (config['train']['lr_scheduler']['type']))

    if use_gpu:
        print("using gpu")
        model_dy = model_dy.cuda()

    # print("model_dy.vision_net._ref_descriptors.device", model_dy.vision_net._ref_descriptors.device)
    # print("model_dy.vision_net #params: %d" % (count_trainable_parameters(model_dy.vision_net)))

    best_valid_loss = np.inf
    valid_loss_type = config['train']['valid_loss_type']
    global_iteration = 0
    counters = {'train': 0, 'valid': 0}
    epoch_counter_external = 0
    loss = 0

    index_map = get_object_and_robot_state_indices(config)
    object_state_indices = torch.LongTensor(index_map['object_indices'])
    robot_state_indices = torch.LongTensor(index_map['robot_indices'])

    object_state_shape = config['dataset']['object_state_shape']

    try:
        for epoch in range(st_epoch, config['train']['n_epoch']):
            phases = ['train', 'valid']
            epoch_counter_external = epoch

            writer.add_scalar("Training Params/epoch", epoch, global_iteration)
            for phase in phases:

                # only validate at a certain frequency
                if (phase == "valid") and ((epoch % config['train']['valid_frequency']) != 0):
                    continue

                model_dy.train(phase == 'train')

                average_meter_container = dict()
                step_duration_meter = AverageMeter()

                # bar = ProgressBar(max_value=data_n_batches[phase])
                loader = dataloaders[phase]

                for i, data in enumerate(loader):
                    loss_container = dict()  # store the losses for this step

                    step_start_time = time.time()
                    global_iteration += 1
                    counters[phase] += 1

                    with torch.set_grad_enabled(phase == 'train'):
                        n_his, n_roll = config['train']['n_history'], config['train']['n_rollout']
                        n_samples = n_his + n_roll

                        if DEBUG:
                            print("global iteration: %d" % (global_iteration))
                            print("n_samples", n_samples)

                        # [B, n_samples, obs_dim]
                        observations = data['observations']
                        visual_observations_list = data['visual_observations_list']

                        # [B, n_samples, action_dim]
                        actions = data['actions']

                        B = actions.shape[0]

                        if use_gpu:
                            observations = observations.cuda()
                            actions = actions.cuda()

                        # compile the visual observations
                        # compute the output of the visual model for all timesteps
                        visual_model_output_list = []
                        for visual_obs in visual_observations_list:
                            # visual_obs is a dict containing the observation for a single
                            # time step (across the batch, however)
                            # visual_obs[<camera_name>]['rgb_tensor'] has shape [B, 3, H, W]

                            # probably need to cast input to cuda

                            # [B, -1, 3]
                            keypoints = visual_obs[camera_name]['descriptor_keypoints_3d_world_frame']

                            # [B, K, 3] where K = len(spatial_descriptors_idx)
                            keypoints = keypoints[:, spatial_descriptors_idx]

                            B, K, _ = keypoints.shape

                            # [B, K*3]
                            keypoints_reshape = keypoints.reshape([B, K * 3])

                            if DEBUG:
                                print("keypoints.shape", keypoints.shape)
                                print("keypoints_reshape.shape", keypoints_reshape.shape)

                            visual_model_output_list.append(keypoints_reshape)

                        visual_model_output = None
                        if len(visual_model_output_list) > 0:
                            # concatenate this into a tensor
                            # [B, n_samples, vision_model_out_dim]
                            visual_model_output = torch.stack(visual_model_output_list, dim=1)
                        else:
                            visual_model_output = torch.Tensor()  # empty tensor

                        # states, actions = data
                        assert actions.shape[1] == n_samples

                        # cast this to float so it can be concatenated below
                        visual_model_output = visual_model_output.type_as(observations)

                        # states are obtained by concatenating visual_model_output and observations;
                        # if we have no visual observations, states are just the observations
                        # [B, n_samples, vision_model_out_dim + obs_dim]
                        states = torch.cat((visual_model_output, observations), dim=-1)

                        # state_cur: B x n_his x state_dim
                        # state_cur = states[:, :n_his]

                        # [B, n_his, state_dim]
                        state_init = states[:, :n_his]

                        # We want to rollout n_roll steps
                        # actions = [B, n_his + n_roll, -1]
                        # so we want action_seq.shape = [B, n_roll, -1]
                        action_start_idx = 0
                        action_end_idx = n_his + n_roll - 1
                        action_seq = actions[:, action_start_idx:action_end_idx, :]

                        if DEBUG:
                            print("states.shape", states.shape)
                            print("state_init.shape", state_init.shape)
                            print("actions.shape", actions.shape)
                            print("action_seq.shape", action_seq.shape)

                        # try using models_dy.rollout_model instead of doing this manually
                        rollout_data = rollout_model(state_init=state_init,
                                                     action_seq=action_seq,
                                                     dynamics_net=model_dy,
                                                     compute_debug_data=False)

                        # [B, n_roll, state_dim]
                        state_rollout_pred = rollout_data['state_pred']

                        # [B, n_roll, state_dim]
                        state_rollout_gt = states[:, n_his:]

                        if DEBUG:
                            print("state_rollout_gt.shape", state_rollout_gt.shape)
                            print("state_rollout_pred.shape", state_rollout_pred.shape)

                        # the loss function is between
                        # [B, n_roll, state_dim]
                        state_pred_err = state_rollout_pred - state_rollout_gt

                        # [B, n_roll, object_state_dim]
                        object_state_err = state_pred_err[:, :, object_state_indices]
                        B, n_roll, object_state_dim = object_state_err.shape

                        # [B, n_roll, *object_state_shape]
                        object_state_err_reshape = object_state_err.reshape([B, n_roll, *object_state_shape])

                        # num weights
                        J = object_state_err_reshape.shape[2]
                        weights = model_dy.weight_matrix

                        assert len(weights) == J, "len(weights) = %d, but J = %d" % (len(weights), J)

                        # loss mse object, note the use of broadcasting semantics
                        # [B, n_roll]
                        object_state_loss_mse = weights * torch.pow(object_state_err_reshape, 2).sum(dim=-1)
                        object_state_loss_mse = object_state_loss_mse.mean()

                        l2_object = (weights * torch.norm(object_state_err_reshape, dim=-1)).mean()

                        l2_object_final_step = (weights * torch.norm(object_state_err_reshape[:, -1], dim=-1)).mean()

                        # [B, n_roll, robot_state_dim]
                        robot_state_err = state_pred_err[:, :, robot_state_indices]
                        robot_state_loss_mse = torch.pow(robot_state_err, 2).sum(dim=-1).mean()

                        loss_container['object_state_loss_mse'] = object_state_loss_mse
                        loss_container['robot_state_loss_mse'] = robot_state_loss_mse
                        loss_container['l2_object'] = l2_object
                        loss_container['l2_object_final_step'] = l2_object_final_step

                        # total loss
                        loss = object_state_loss_mse + robot_state_loss_mse
                        loss_container['loss'] = loss

                        for key, val in loss_container.items():
                            if key not in average_meter_container:
                                average_meter_container[key] = AverageMeter()
                            average_meter_container[key].update(val.item(), B)

                    step_duration_meter.update(time.time() - step_start_time)

                    if phase == 'train':
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()

                    if (i % config['train']['log_per_iter'] == 0) or (global_iteration % config['train']['log_per_iter'] == 0):
                        log = '%s [%d/%d][%d/%d] LR: %.6f' % (
                            phase, epoch, config['train']['n_epoch'], i,
                            data_n_batches[phase], get_lr(optimizer))

                        # log += ', l2: %.6f' % (loss_container['l2'].item())
                        # log += ', l2_final_step: %.6f' % (loss_container['l2_final_step'].item())

                        log += ', step time %.6f' % (step_duration_meter.avg)
                        step_duration_meter.reset()
                        print(log)

                        # log data to tensorboard
                        # only do it once we have reached 100 iterations
                        if global_iteration > 100:
                            writer.add_scalar("Params/learning rate", get_lr(optimizer), global_iteration)
                            writer.add_scalar("Loss_train/%s" % (phase), loss.item(), global_iteration)

                            for loss_type, loss_obj in loss_container.items():
                                plot_name = "Loss/%s/%s" % (loss_type, phase)
                                writer.add_scalar(plot_name, loss_obj.item(), counters[phase])

                            # only plot the weights if we are in the train phase . . . .
                            if phase == "train":
                                # use a separate index so we don't shadow the
                                # dataloader index `i` used for checkpointing below
                                for w_idx in range(len(weights)):
                                    plot_name = "Weights/%d" % (w_idx)
                                    writer.add_scalar(plot_name, weights[w_idx].item(), counters[phase])

                    if phase == 'train' and global_iteration % config['train']['ckp_per_iter'] == 0:
                        save_model(model_dy, '%s/net_dy_epoch_%d_iter_%d' % (train_dir, epoch, i))

                log = '%s [%d/%d] Loss: %.6f, Best valid: %.6f' % (
                    phase, epoch, config['train']['n_epoch'],
                    average_meter_container[valid_loss_type].avg, best_valid_loss)
                print(log)

                # record all average_meter losses
                for key, meter in average_meter_container.items():
                    writer.add_scalar("AvgMeter/%s/%s" % (key, phase), meter.avg, epoch)

                if phase == "train":
                    if (scheduler is not None) and (config['train']['lr_scheduler']['type'] == "StepLR"):
                        scheduler.step()

                if phase == 'valid':
                    if (scheduler is not None) and (config['train']['lr_scheduler']['type'] == "ReduceLROnPlateau"):
                        scheduler.step(average_meter_container[valid_loss_type].avg)

                    if average_meter_container[valid_loss_type].avg < best_valid_loss:
                        best_valid_loss = average_meter_container[valid_loss_type].avg
                        training_stats['epoch'] = epoch
                        training_stats['global_iteration'] = counters['valid']
                        save_yaml(training_stats, training_stats_file)
                        save_model(model_dy, '%s/net_best_dy' % (train_dir))

                writer.flush()  # flush SummaryWriter events to disk

    except KeyboardInterrupt:
        # save the network if we get a keyboard interrupt
        save_model(model_dy, '%s/net_dy_epoch_%d_keyboard_interrupt' % (train_dir, epoch_counter_external))
        writer.flush()  # flush SummaryWriter events to disk
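# rollout_model is called above but not defined in this section. A minimal
# sketch of what it is assumed to do, based on the manual rollout loops in
# the earlier variants: slide the n_his history window forward, feeding
# {'observation', 'action'} dicts through the dynamics net. The dict-input
# convention and return keys are assumptions matching the usage above:

import torch


def rollout_model(state_init, action_seq, dynamics_net, compute_debug_data=False):
    """Minimal sketch of the rollout helper assumed above.

    state_init: [B, n_his, state_dim] initial state history
    action_seq: [B, n_his + n_roll - 1, action_dim] action sequence
    Returns a dict with 'state_pred': [B, n_roll, state_dim].
    """
    B, n_his, state_dim = state_init.shape
    n_roll = action_seq.shape[1] - n_his + 1

    state_cur = state_init
    state_pred_list = []
    debug_data_list = []

    for j in range(n_roll):
        # action window aligned with the current state history
        action_cur = action_seq[:, j:j + n_his]

        net_input = {'observation': state_cur, 'action': action_cur}
        state_pred = dynamics_net(net_input)  # [B, state_dim]
        state_pred_list.append(state_pred)

        if compute_debug_data:
            debug_data_list.append({'state_cur': state_cur, 'state_pred': state_pred})

        # shift the history window: drop the oldest state, append the prediction
        state_cur = torch.cat([state_cur[:, 1:], state_pred.unsqueeze(1)], dim=1)

    return {'state_pred': torch.stack(state_pred_list, dim=1),
            'debug_data': debug_data_list}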
def train_dynamics(config, data_path, train_dir):

    # access dict values as attributes
    config = edict(config)

    # set random seed for reproducibility
    set_seed(config.train.random_seed)

    st_epoch = config.train.resume_epoch if config.train.resume_epoch > 0 else 0
    tee = Tee(os.path.join(train_dir, 'train_st_epoch_%d.log' % st_epoch), 'w')

    print(config)

    datasets = {}
    dataloaders = {}
    data_n_batches = {}
    for phase in ['train', 'valid']:
        print("Loading data for %s" % phase)
        datasets[phase] = MultiEpisodeDataset(config, data_path, phase=phase)

        dataloaders[phase] = DataLoader(
            datasets[phase],
            batch_size=config.train.batch_size,
            shuffle=(phase == 'train'),
            num_workers=config.train.num_workers)

        data_n_batches[phase] = len(dataloaders[phase])

    use_gpu = torch.cuda.is_available()

    '''
    define model for dynamics prediction
    '''
    model_dy = DynaNetMLP(config)
    print("model_dy #params: %d" % count_trainable_parameters(model_dy))

    if config.train.resume_epoch >= 0:
        # resume from a pretrained checkpoint
        model_dy_path = os.path.join(
            train_dir, 'net_dy_epoch_%d_iter_%d.pth' % (
                config.train.resume_epoch, config.train.resume_iter))
        print("Loading saved ckp from %s" % model_dy_path)
        model_dy.load_state_dict(torch.load(model_dy_path))

    # criterion
    criterionMSE = nn.MSELoss()

    # optimizer
    params = model_dy.parameters()
    optimizer = optim.Adam(params,
                           lr=config.train.lr,
                           betas=(config.train.adam_beta1, 0.999))
    scheduler = ReduceLROnPlateau(optimizer, 'min', factor=0.9, patience=10, verbose=True)

    if use_gpu:
        model_dy = model_dy.cuda()

    best_valid_loss = np.inf

    for epoch in range(st_epoch, config.train.n_epoch):
        phases = ['train', 'valid']

        for phase in phases:
            model_dy.train(phase == 'train')

            meter_loss_rmse = AverageMeter()

            bar = ProgressBar(max_value=data_n_batches[phase])
            loader = dataloaders[phase]

            for i, data in bar(enumerate(loader)):
                if use_gpu:
                    if isinstance(data, list):
                        data = [d.cuda() for d in data]
                    else:
                        data = data.cuda()

                with torch.set_grad_enabled(phase == 'train'):
                    n_his, n_roll = config.train.n_history, config.train.n_rollout
                    n_samples = n_his + n_roll

                    if config.env.type in ['PusherSlider']:
                        states, actions = data
                        assert states.size(1) == n_samples

                        B = states.size(0)

                        loss_mse = 0.
                        # state_cur: B x n_his x state_dim
                        state_cur = states[:, :n_his]

                        for j in range(n_roll):
                            state_des = states[:, n_his + j]

                            # action_cur: B x n_his x action_dim
                            action_cur = actions[:, j:j + n_his] if actions is not None else None

                            # state_pred: B x state_dim
                            state_pred = model_dy(state_cur, action_cur)

                            loss_mse_cur = criterionMSE(state_pred, state_des)
                            loss_mse += loss_mse_cur / config.train.n_rollout

                            # update state_cur
                            state_cur = torch.cat([state_cur[:, 1:], state_pred.unsqueeze(1)], 1)

                        meter_loss_rmse.update(np.sqrt(loss_mse.item()), B)

                if phase == 'train':
                    optimizer.zero_grad()
                    loss_mse.backward()
                    optimizer.step()

                if i % config.train.log_per_iter == 0:
                    log = '%s [%d/%d][%d/%d] LR: %.6f' % (
                        phase, epoch, config.train.n_epoch, i,
                        data_n_batches[phase], get_lr(optimizer))
                    log += ', rmse: %.6f (%.6f)' % (
                        np.sqrt(loss_mse.item()), meter_loss_rmse.avg)
                    print(log)

                if phase == 'train' and i % config.train.ckp_per_iter == 0:
                    torch.save(model_dy.state_dict(),
                               '%s/net_dy_epoch_%d_iter_%d.pth' % (train_dir, epoch, i))

            log = '%s [%d/%d] Loss: %.6f, Best valid: %.6f' % (
                phase, epoch, config.train.n_epoch,
                meter_loss_rmse.avg, best_valid_loss)
            print(log)

            if phase == 'valid':
                scheduler.step(meter_loss_rmse.avg)
                if meter_loss_rmse.avg < best_valid_loss:
                    best_valid_loss = meter_loss_rmse.avg
                    torch.save(model_dy.state_dict(), '%s/net_best_dy.pth' % (train_dir))
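# A sketch of how this variant might be invoked from a training script. The
# config filename and directory layout are illustrative assumptions, not
# paths taken from the source:

import os
import yaml

config_path = 'config/pusher_slider.yaml'          # hypothetical
data_path = 'data/pusher_slider_episodes'          # hypothetical
train_dir = 'trained_models/dynamics/pusher_slider'  # hypothetical

with open(config_path) as f:
    config = yaml.safe_load(f)

os.makedirs(train_dir, exist_ok=True)
train_dynamics(config, data_path, train_dir)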
def train_dynamics(config,
                   train_dir,  # str: directory to save output
                   multi_episode_dict=None,
                   visual_observation_function=None,
                   metadata=None,
                   spatial_descriptors_data=None,
                   ):
    assert multi_episode_dict is not None
    # assert spatial_descriptors_idx is not None

    # set random seed for reproducibility
    set_seed(config['train']['random_seed'])

    st_epoch = config['train']['resume_epoch'] if config['train']['resume_epoch'] > 0 else 0
    tee = Tee(os.path.join(train_dir, 'train_st_epoch_%d.log' % st_epoch), 'w')

    tensorboard_dir = os.path.join(train_dir, "tensorboard")
    if not os.path.exists(tensorboard_dir):
        os.makedirs(tensorboard_dir)
    writer = SummaryWriter(log_dir=tensorboard_dir)

    # save the config
    save_yaml(config, os.path.join(train_dir, "config.yaml"))

    if metadata is not None:
        save_pickle(metadata, os.path.join(train_dir, 'metadata.p'))

    if spatial_descriptors_data is not None:
        save_pickle(spatial_descriptors_data, os.path.join(train_dir, 'spatial_descriptors.p'))

    training_stats = dict()
    training_stats_file = os.path.join(train_dir, 'training_stats.yaml')

    action_function = ActionFunctionFactory.function_from_config(config)
    observation_function = ObservationFunctionFactory.function_from_config(config)

    datasets = {}
    dataloaders = {}
    data_n_batches = {}
    for phase in ['train', 'valid']:
        print("Loading data for %s" % phase)
        datasets[phase] = MultiEpisodeDataset(
            config,
            action_function=action_function,
            observation_function=observation_function,
            episodes=multi_episode_dict,
            phase=phase,
            visual_observation_function=visual_observation_function)

        print("len(datasets[phase])", len(datasets[phase]))
        dataloaders[phase] = DataLoader(
            datasets[phase],
            batch_size=config['train']['batch_size'],
            shuffle=(phase == 'train'),
            num_workers=config['train']['num_workers'],
            drop_last=True)

        data_n_batches[phase] = len(dataloaders[phase])

    use_gpu = torch.cuda.is_available()

    # compute normalization parameters if not starting from pre-trained network . . .
    if False:  # debugging: inspect a single training sample
        dataset = datasets["train"]
        data = dataset[0]
        print("data['observations_combined'].shape", data['observations_combined'].shape)
        print("data.keys()", data.keys())
        print("data['observations_combined']", data['observations_combined'][0])
        print("data['observations_combined'].shape", data['observations_combined'].shape)
        print("data['actions'].shape", data['actions'].shape)
        print("data['actions']\n", data['actions'])
        quit()

    '''
    Build model for dynamics prediction
    '''
    model_dy = build_dynamics_model(config)
    if config['dynamics_net'] == "mlp_weight_matrix":
        raise ValueError("can't use weight matrix with standard setup")

    # criterion
    criterionMSE = nn.MSELoss()
    l1Loss = nn.L1Loss()
    smoothL1 = nn.SmoothL1Loss()

    # optimizer
    params = model_dy.parameters()
    lr = float(config['train']['lr'])
    optimizer = optim.Adam(params, lr=lr, betas=(config['train']['adam_beta1'], 0.999))

    # setup scheduler
    sc = config['train']['lr_scheduler']
    scheduler = None

    if config['train']['lr_scheduler']['enabled']:
        if config['train']['lr_scheduler']['type'] == "ReduceLROnPlateau":
            scheduler = ReduceLROnPlateau(optimizer,
                                          mode='min',
                                          factor=sc['factor'],
                                          patience=sc['patience'],
                                          threshold_mode=sc['threshold_mode'],
                                          cooldown=sc['cooldown'],
                                          verbose=True)
        elif config['train']['lr_scheduler']['type'] == "StepLR":
            step_size = config['train']['lr_scheduler']['step_size']
            gamma = config['train']['lr_scheduler']['gamma']
            scheduler = StepLR(optimizer, step_size=step_size, gamma=gamma)
        else:
            raise ValueError("unknown scheduler type: %s" % (config['train']['lr_scheduler']['type']))

    if use_gpu:
        print("using gpu")
        model_dy = model_dy.cuda()

    # print("model_dy.vision_net._ref_descriptors.device", model_dy.vision_net._ref_descriptors.device)
    # print("model_dy.vision_net #params: %d" % (count_trainable_parameters(model_dy.vision_net)))

    best_valid_loss = np.inf
    valid_loss_type = config['train']['valid_loss_type']
    global_iteration = 0
    counters = {'train': 0, 'valid': 0}
    epoch_counter_external = 0
    loss = 0

    try:
        for epoch in range(st_epoch, config['train']['n_epoch']):
            phases = ['train', 'valid']
            epoch_counter_external = epoch

            writer.add_scalar("Training Params/epoch", epoch, global_iteration)
            for phase in phases:

                # only validate at a certain frequency
                if (phase == "valid") and ((epoch % config['train']['valid_frequency']) != 0):
                    continue

                model_dy.train(phase == 'train')

                average_meter_container = dict()
                step_duration_meter = AverageMeter()

                # bar = ProgressBar(max_value=data_n_batches[phase])
                loader = dataloaders[phase]

                for i, data in enumerate(loader):
                    loss_container = dict()  # store the losses for this step

                    step_start_time = time.time()
                    global_iteration += 1
                    counters[phase] += 1

                    with torch.set_grad_enabled(phase == 'train'):
                        n_his, n_roll = config['train']['n_history'], config['train']['n_rollout']
                        n_samples = n_his + n_roll

                        if DEBUG:
                            print("global iteration: %d" % (global_iteration))
                            print("n_samples", n_samples)

                        # [B, n_samples, obs_dim]
                        states = data['observations_combined']

                        # [B, n_samples, action_dim]
                        actions = data['actions']

                        B = actions.shape[0]

                        if use_gpu:
                            states = states.cuda()
                            actions = actions.cuda()

                        # state_cur: B x n_his x state_dim
                        # state_cur = states[:, :n_his]

                        # [B, n_his, state_dim]
                        state_init = states[:, :n_his]

                        # We want to rollout n_roll steps
                        # actions = [B, n_his + n_roll, -1]
                        # so we want action_seq.shape = [B, n_roll, -1]
                        action_start_idx = 0
                        action_end_idx = n_his + n_roll - 1
                        action_seq = actions[:, action_start_idx:action_end_idx, :]

                        if DEBUG:
                            print("states.shape", states.shape)
                            print("state_init.shape", state_init.shape)
                            print("actions.shape", actions.shape)
                            print("action_seq.shape", action_seq.shape)

                        # try using models_dy.rollout_model instead of doing this manually
                        rollout_data = rollout_model(state_init=state_init,
                                                     action_seq=action_seq,
                                                     dynamics_net=model_dy,
                                                     compute_debug_data=False)

                        # [B, n_roll, state_dim]
                        state_rollout_pred = rollout_data['state_pred']

                        # [B, n_roll, state_dim]
                        state_rollout_gt = states[:, n_his:]

                        if DEBUG:
                            print("state_rollout_gt.shape", state_rollout_gt.shape)
                            print("state_rollout_pred.shape", state_rollout_pred.shape)

                        # the loss function is between
                        # [B, n_roll, state_dim]
                        state_pred_err = state_rollout_pred - state_rollout_gt

                        # everything is in 3D space now so no need to do any scaling
                        # all the losses would be in meters . . . .
                        loss_mse = criterionMSE(state_rollout_pred, state_rollout_gt)
                        loss_l1 = l1Loss(state_rollout_pred, state_rollout_gt)
                        loss_l2 = torch.norm(state_pred_err, dim=-1).mean()
                        loss_smoothl1 = smoothL1(state_rollout_pred, state_rollout_gt)
                        loss_smoothl1_final_step = smoothL1(state_rollout_pred[:, -1],
                                                            state_rollout_gt[:, -1])

                        # compute losses at the final step of the rollout
                        mse_final_step = criterionMSE(state_rollout_pred[:, -1],
                                                      state_rollout_gt[:, -1])
                        l2_final_step = torch.norm(state_pred_err[:, -1], dim=-1).mean()
                        l1_final_step = l1Loss(state_rollout_pred[:, -1],
                                               state_rollout_gt[:, -1])

                        loss_container['mse'] = loss_mse
                        loss_container['l1'] = loss_l1
                        loss_container['mse_final_step'] = mse_final_step
                        loss_container['l1_final_step'] = l1_final_step
                        loss_container['l2_final_step'] = l2_final_step
                        loss_container['l2'] = loss_l2
                        loss_container['smooth_l1'] = loss_smoothl1
                        loss_container['smooth_l1_final_step'] = loss_smoothl1_final_step

                        # compute the loss as a config-weighted sum of the enabled terms
                        loss = 0
                        for key, val in config['loss_function'].items():
                            if val['enabled']:
                                loss += loss_container[key] * val['weight']

                        loss_container['loss'] = loss

                        for key, val in loss_container.items():
                            if key not in average_meter_container:
                                average_meter_container[key] = AverageMeter()
                            average_meter_container[key].update(val.item(), B)

                    step_duration_meter.update(time.time() - step_start_time)

                    if phase == 'train':
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()

                    if (i % config['train']['log_per_iter'] == 0) or (global_iteration % config['train']['log_per_iter'] == 0):
                        log = '%s [%d/%d][%d/%d] LR: %.6f' % (
                            phase, epoch, config['train']['n_epoch'], i,
                            data_n_batches[phase], get_lr(optimizer))
                        log += ', l2: %.6f' % (loss_container['l2'].item())
                        log += ', l2_final_step: %.6f' % (loss_container['l2_final_step'].item())
                        log += ', step time %.6f' % (step_duration_meter.avg)
                        step_duration_meter.reset()
                        print(log)

                        # log data to tensorboard
                        # only do it once we have reached 100 iterations
                        if global_iteration > 100:
                            writer.add_scalar("Params/learning rate", get_lr(optimizer), global_iteration)
                            writer.add_scalar("Loss_train/%s" % (phase), loss.item(), global_iteration)

                            for loss_type, loss_obj in loss_container.items():
                                plot_name = "Loss/%s/%s" % (loss_type, phase)
                                writer.add_scalar(plot_name, loss_obj.item(), counters[phase])

                    if phase == 'train' and global_iteration % config['train']['ckp_per_iter'] == 0:
                        save_model(model_dy, '%s/net_dy_epoch_%d_iter_%d' % (train_dir, epoch, i))

                log = '%s [%d/%d] Loss: %.6f, Best valid: %.6f' % (
                    phase, epoch, config['train']['n_epoch'],
                    average_meter_container[valid_loss_type].avg, best_valid_loss)
                print(log)

                # record all average_meter losses
                for key, meter in average_meter_container.items():
                    writer.add_scalar("AvgMeter/%s/%s" % (key, phase), meter.avg, epoch)

                if phase == "train":
"train": if (scheduler is not None) and ( config['train']['lr_scheduler']['type'] == "StepLR"): scheduler.step() if phase == 'valid': if (scheduler is not None) and ( config['train']['lr_scheduler']['type'] == "ReduceLROnPlateau"): scheduler.step( average_meter_container[valid_loss_type].avg) if average_meter_container[ valid_loss_type].avg < best_valid_loss: best_valid_loss = average_meter_container[ valid_loss_type].avg training_stats['epoch'] = epoch training_stats['global_iteration'] = counters['valid'] save_yaml(training_stats, training_stats_file) save_model(model_dy, '%s/net_best_dy' % (train_dir)) writer.flush() # flush SummaryWriter events to disk except KeyboardInterrupt: # save network if we have a keyboard interrupt save_model( model_dy, '%s/net_dy_epoch_%d_keyboard_interrupt' % (train_dir, epoch_counter_external)) writer.flush() # flush SummaryWriter events to disk
from torch.distributions.multivariate_normal import MultivariateNormal

from key_dynam.dynamics.config import gen_args
from key_dynam.dynamics.data import PhysicsDataset, load_data
from key_dynam.dynamics.models_dy import DynaNetGNN
from key_dynam.dynamics.utils import rand_int, count_trainable_parameters, Tee, AverageMeter, get_lr, to_np, set_seed

args = gen_args()
set_seed(args.random_seed)
torch.manual_seed(args.random_seed)
np.random.seed(args.random_seed)

os.system('mkdir -p ' + args.dataf)
os.system('mkdir -p ' + args.outf_dy)

tee = Tee(os.path.join(args.outf_dy, 'train.log'), 'w')
print(args)

# generate data
trans_to_tensor = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

datasets = {}
dataloaders = {}
data_n_batches = {}
for phase in ['train', 'valid']:
    datasets[phase] = PhysicsDataset(args, phase=phase, trans_to_tensor=trans_to_tensor)
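# AverageMeter and get_lr are imported from key_dynam.dynamics.utils above
# but not shown in this section. A minimal sketch, assuming they follow the
# usual PyTorch recipe (count-weighted running average; read the learning
# rate off the first parameter group):

class AverageMeter(object):
    """Track a running (count-weighted) average of a scalar, e.g. RMSE."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def get_lr(optimizer):
    # current learning rate of the first parameter group
    return optimizer.param_groups[0]['lr']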
def mpc_w_learned_dynamics(config, train_dir, mpc_dir,
                           state_dict_path=None,
                           keypoint_observation=False):

    # set random seed for reproducibility
    set_seed(config['train']['random_seed'])
    tee = Tee(os.path.join(mpc_dir, 'mpc.log'), 'w')
    print(config)

    use_gpu = torch.cuda.is_available()

    '''
    model
    '''
    if config['dynamics']['model_type'] == 'mlp':
        model_dy = DynaNetMLP(config)
    else:
        raise AssertionError("Unknown model type %s" % config['dynamics']['model_type'])

    # print model #params
    print("model #params: %d" % count_trainable_parameters(model_dy))

    if state_dict_path is None:
        if config['mpc']['mpc_dy_epoch'] == -1:
            state_dict_path = os.path.join(train_dir, 'net_best_dy.pth')
        else:
            state_dict_path = os.path.join(
                train_dir, 'net_dy_epoch_%d_iter_%d.pth' %
                (config['mpc']['mpc_dy_epoch'], config['mpc']['mpc_dy_iter']))

    print("Loading saved ckp from %s" % state_dict_path)
    model_dy.load_state_dict(torch.load(state_dict_path))
    model_dy.eval()

    if use_gpu:
        model_dy.cuda()

    criterionMSE = nn.MSELoss()

    # generate action/observation functions
    action_function = ActionFunctionFactory.function_from_config(config)
    observation_function = ObservationFunctionFactory.function_from_config(config)

    # planner
    planner = planner_from_config(config)

    '''
    env
    '''
    # set up goals: one goal keypoint configuration per MPC episode
    obs_goals = np.array([
        [262.9843, 267.3102, 318.9369, 351.1229, 360.2048, 323.5128,
         305.6385, 240.4460, 515.4230, 347.8708],
        [381.8694, 273.6327, 299.6685, 331.0925, 328.7724, 372.0096,
         411.0972, 314.7053, 517.7299, 268.4953],
        [284.8728, 275.7985, 374.0677, 320.4990, 395.4019, 275.4633,
         306.2896, 231.4310, 507.0849, 312.4057],
        [313.1638, 271.4258, 405.0255, 312.2325, 424.7874, 266.3525,
         333.6973, 225.7708, 510.1232, 305.3802],
        [308.6859, 270.9629, 394.2789, 323.2781, 419.7905, 280.1602,
         333.8901, 228.1624, 519.1964, 321.5318],
        [386.8067, 284.8947, 294.2467, 323.2223, 313.3221, 368.9970,
         405.9415, 330.9298, 495.9970, 268.9920],
        [432.0219, 299.6021, 340.8581, 339.4676, 360.2354, 384.5515,
         451.4394, 345.2190, 514.6357, 291.2043],
        [351.3389, 264.5325, 267.5279, 318.2321, 293.7460, 360.0423,
         378.4428, 306.9586, 516.4390, 259.7810],
        [521.1902, 254.0693, 492.7884, 349.7861, 539.6320, 364.5190,
         569.2258, 268.8824, 506.9431, 286.9752],
        [264.8554, 275.9547, 338.1317, 345.3435, 372.7012, 308.4648,
         299.3454, 239.9245, 506.2117, 373.8413]])

    for mpc_idx in range(config['mpc']['num_episodes']):
        if keypoint_observation:
            mpc_episode_keypoint_observation(config, mpc_idx, model_dy, mpc_dir,
                                             planner, obs_goals[mpc_idx],
                                             action_function, observation_function,
                                             use_gpu=use_gpu)
        else:
            # not supported for now
            raise AssertionError("currently only support keypoint observation")
def eval_dynamics(config, train_dir, eval_dir,
                  state_dict_path=None,
                  keypoint_observation=False,
                  debug=False,
                  render_human=False):

    # set random seed for reproducibility
    set_seed(config['train']['random_seed'])
    tee = Tee(os.path.join(eval_dir, 'eval.log'), 'w')
    print(config)

    use_gpu = torch.cuda.is_available()

    '''
    model
    '''
    model_dy = DynaNetMLP(config)

    # print model #params
    print("model #params: %d" % count_trainable_parameters(model_dy))

    if state_dict_path is None:
        if config['eval']['eval_dy_epoch'] == -1:
            state_dict_path = os.path.join(train_dir, 'net_best_dy.pth')
        else:
            state_dict_path = os.path.join(
                train_dir, 'net_dy_epoch_%d_iter_%d.pth' %
                (config['eval']['eval_dy_epoch'], config['eval']['eval_dy_iter']))

    print("Loading saved ckp from %s" % state_dict_path)
    model_dy.load_state_dict(torch.load(state_dict_path))
    model_dy.eval()

    if use_gpu:
        model_dy.cuda()

    criterionMSE = nn.MSELoss()
    bar = ProgressBar()

    st_idx = config['eval']['eval_st_idx']
    ed_idx = config['eval']['eval_ed_idx']

    # load the data
    episodes = load_episodes_from_config(config)

    # generate action/observation functions
    action_function = ActionFunctionFactory.function_from_config(config)
    observation_function = ObservationFunctionFactory.function_from_config(config)

    dataset = MultiEpisodeDataset(config,
                                  action_function=action_function,
                                  observation_function=observation_function,
                                  episodes=episodes,
                                  phase="valid")

    episode_names = dataset.get_episode_names()
    episode_names.sort()

    num_episodes = None
    # for backwards compatibility
    if "num_episodes" in config["eval"]:
        num_episodes = config["eval"]["num_episodes"]
    else:
        num_episodes = 10

    episode_list = []
    if debug:
        episode_list = [episode_names[0]]
    else:
        episode_list = episode_names[:num_episodes]

    for roll_idx, episode_name in enumerate(episode_list):
        print("episode_name", episode_name)
        if keypoint_observation:
            eval_episode_keypoint_observations(config, dataset, episode_name,
                                               roll_idx, model_dy, eval_dir,
                                               start_idx=9, n_prediction=30,
                                               render_human=render_human)
        else:
            eval_episode(config, dataset, episode_name, roll_idx, model_dy,
                         eval_dir, start_idx=9, n_prediction=30,
                         render_human=render_human)
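# A sketch of how eval_dynamics might be invoked against a finished training
# run; the directory names are illustrative assumptions, and the config is
# reloaded from the copy that train_dynamics saved alongside the checkpoints:

import os
import yaml

train_dir = 'trained_models/dynamics/pusher_slider'  # hypothetical
eval_dir = os.path.join(train_dir, 'eval')
os.makedirs(eval_dir, exist_ok=True)

with open(os.path.join(train_dir, 'config.yaml')) as f:
    config = yaml.safe_load(f)

# state_dict_path defaults to net_best_dy.pth when eval_dy_epoch == -1
eval_dynamics(config, train_dir, eval_dir,
              keypoint_observation=True,
              debug=False)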