def clone_from_trajectory(self, expert_evaluation, expert_trajectory: Trajectory, streaming_enviroment,
                          trace_list, video_csv_list, log_steps=False):
    logging_iteration = 0
    # Select the training/validation traces
    self.policy_history = None
    trace_list = np.array(trace_list)
    video_csv_list = np.array(video_csv_list)
    expert_evaluation = np.array(expert_evaluation)
    train_idx, test_idx = train_test_split(np.arange(len(expert_evaluation)),
                                           test_size=self.validation_split,
                                           random_state=RANDOM_SEED)
    trace_video_pair_list = [f.name for f in expert_evaluation[train_idx]]
    expert_trajectory_train = expert_trajectory.extract_trajectory(trace_video_pair_list=trace_video_pair_list)
    expert_trajectory_train.convert_list()
    trace_video_pair_list = [f.name for f in expert_evaluation[test_idx]]
    self.fit_clustering_scorer(expert_trajectory)
    if self.weight_samples:
        # Estimate the advantage on the training samples
        self.fit_value_function(to_imitate_evaluation=expert_evaluation[train_idx],
                                to_imitate_trajectory=expert_trajectory_train)
        advantage = []
        for index in train_idx:
            advantage += list(self.estimate_advantage_frame(expert_evaluation[index], trace_list[index],
                                                            video_csv_list[index], streaming_enviroment))
        advantage = np.array(advantage).flatten()
        # Smooth the estimate by adding the minimum so that low advantages are a bit bolstered
        advantage = advantage + np.min(advantage)
        assert (advantage < 0).sum() == 0, 'advantage should be non-negative everywhere'
    expert_trajectory_test = expert_trajectory.extract_trajectory(trace_video_pair_list=trace_video_pair_list)
    state_t = np.array([self.classifier.extract_features_observation(obs)
                        for obs, _, _ in tqdm(expert_trajectory_train.trajectory_list, desc='transforming')])
    state_t = pd.DataFrame(state_t, columns=self.classifier.extract_features_names())
    self.impute_NaN_inplace(state_t)
    expert_action = expert_trajectory_train.trajectory_action_t_arr
    if self.weight_samples:
        self.classifier.fit(state_t, expert_action.ravel(), sample_weight=advantage)
    else:
        self.classifier.fit(state_t, expert_action.ravel())
    if self.policy_history is None:
        self.policy_history, behavioural_cloning_evaluation = self.score(expert_evaluation[test_idx],
                                                                         expert_trajectory_test,
                                                                         streaming_enviroment,
                                                                         trace_list[test_idx],
                                                                         video_csv_list[test_idx],
                                                                         add_data=False)
    weight_filepaths = []
    for cloning_iteration in range(self.iterations):
        behavioural_cloning_trace_generator_testing = TrajectoryVideoStreaming(self, streaming_enviroment,
                                                                               trace_list=trace_list,
                                                                               video_csv_list=video_csv_list)
        behavioural_cloning_evaluation, behavioural_cloning_evaluation_trajectory = \
            behavioural_cloning_trace_generator_testing.create_trajectories(random_action_probability=0,
                                                                            cores_avail=1)
        behavioural_cloning_evaluation_trajectory.convert_list()
        transformed_observations = self.transform_trajectory(behavioural_cloning_evaluation_trajectory)
        sample_weights_new = self.clustering_scorer.predict(transformed_observations)
        state_t_new = np.array([self.classifier.extract_features_observation(obs)
                                for obs, _, _ in tqdm(behavioural_cloning_evaluation_trajectory.trajectory_list,
                                                      desc='transforming')])
        state_t_new = np.array(state_t_new[sample_weights_new == 1.])
        state_t_new = pd.DataFrame(state_t_new, columns=self.classifier.extract_features_names())
        # DataFrame.append was removed in pandas 2.x; pd.concat is the supported equivalent
        state_t = pd.concat([state_t, state_t_new], ignore_index=True)
        action_new = behavioural_cloning_evaluation_trajectory.trajectory_action_t_arr[sample_weights_new == 1.]
        expert_action = np.array(list(expert_action) + list(action_new))
        self.classifier.fit(state_t, expert_action.ravel())
        weight_filepath = self.rnd_id + '_policy_network_iteration_%d.h5' % cloning_iteration
        with open(weight_filepath, 'wb') as output_file:
            dill.dump(self.classifier, output_file)
        weight_filepaths.append(weight_filepath)
    best_iteration = self.opt_policy_opt_operator(self.policy_history[self.opt_policy_value_name])
    with open(weight_filepaths[best_iteration], 'rb') as input_file:
        self.classifier = dill.load(input_file)
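# Hedged sketch (illustrative only, not part of the original code base): the
# advantage-weighted fit above in isolation. X, y, and advantage are toy
# stand-ins for the extracted state features, the expert actions, and the
# output of estimate_advantage_frame.
import numpy as np
from sklearn.tree import DecisionTreeClassifier

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 4))            # stand-in for extracted state features
y = (X[:, 0] > 0).astype(int)            # stand-in for expert actions
advantage = rng.normal(size=200)         # stand-in for raw advantage estimates
advantage = advantage - advantage.min()  # shift so no sample weight is negative
assert (advantage < 0).sum() == 0

clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=advantage)   # high-advantage decisions count more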
def clone_from_trajectory(self, expert_evaluation, expert_trajectory: Trajectory, streaming_enviroment,
                          trace_list, video_csv_list, log_steps=False):
    self.reset_learning()
    self.policy_history = None
    self.fit_clustering_scorer(expert_trajectory)
    # Select the training/validation traces
    trace_list = np.array(trace_list)
    video_csv_list = np.array(video_csv_list)
    expert_evaluation = np.array(expert_evaluation)
    train_idx, test_idx = train_test_split(np.arange(len(expert_evaluation)),
                                           test_size=self.validation_split * 2.)
    test_idx, validation_idx = train_test_split(test_idx, test_size=0.5)
    trace_video_pair_list = [f.name for f in expert_evaluation[train_idx]]
    expert_trajectory_train = expert_trajectory.extract_trajectory(trace_video_pair_list=trace_video_pair_list)
    expert_trajectory_train.convert_list()
    trace_video_pair_list = [f.name for f in expert_evaluation[test_idx]]
    expert_trajectory_test = expert_trajectory.extract_trajectory(trace_video_pair_list=trace_video_pair_list)
    expert_trajectory_test.convert_list()
    trace_video_pair_list = [f.name for f in expert_evaluation[validation_idx]]
    expert_trajectory_validation = expert_trajectory.extract_trajectory(trace_video_pair_list=trace_video_pair_list)
    expert_trajectory_validation.convert_list()
    state_t_training = expert_trajectory_train.trajectory_state_t_arr
    state_t_future_training = expert_trajectory_train.trajectory_state_t_future
    action_training = to_categorical(expert_trajectory_train.trajectory_action_t_arr, self.n_actions)
    state_t_testing = expert_trajectory_test.trajectory_state_t_arr
    state_t_future_testing = expert_trajectory_test.trajectory_state_t_future
    action_testing = to_categorical(expert_trajectory_test.trajectory_action_t_arr, self.n_actions)
    validation_data = ([state_t_testing, state_t_future_testing], action_testing)
    weight_filepaths = []
    keras_class_weighting = None
    if self.balanced:
        keras_class_weighting = class_weight.compute_class_weight('balanced',
                                                                  np.unique(action_training.argmax(1)),
                                                                  action_training.argmax(1))
    for cloning_iteration in tqdm(range(self.cloning_epochs), desc='Cloning Epochs'):
        history = self.policy_network.model.fit([state_t_training, state_t_future_training], action_training,
                                                validation_data=validation_data, epochs=1, verbose=0,
                                                class_weight=keras_class_weighting).history
        if self.policy_history is None:
            self.policy_history = history
        else:
            for k, v in history.items():
                self.policy_history[k] += v
        scoring_history, behavioural_cloning_evaluation = self.score(expert_evaluation[validation_idx],
                                                                     expert_trajectory_validation,
                                                                     streaming_enviroment,
                                                                     trace_list[validation_idx],
                                                                     video_csv_list[validation_idx])
        if log_steps:
            logging_folder = 'logging_%s' % self.abr_name
            if not os.path.exists(logging_folder):
                os.makedirs(logging_folder)
            with open(os.path.join(logging_folder, 'logging_iteration_%d' % cloning_iteration), 'wb') as output_file:
                dill.dump(behavioural_cloning_evaluation, output_file)
        for k, v in scoring_history.items():
            if k in self.policy_history:
                self.policy_history[k] += v
            else:
                self.policy_history[k] = v
        weight_filepath = self.rnd_id + '_policy_network_iteration_%d.h5' % cloning_iteration
        self.policy_network.model.save_weights(filepath=weight_filepath)
        weight_filepaths.append(weight_filepath)
    best_iteration = self.opt_policy_opt_operator(self.policy_history[self.opt_policy_value_name])
    self.policy_network.model.load_weights(weight_filepaths[best_iteration])
    logger.info('Restoring best iteration %d' % best_iteration)
    for path in weight_filepaths:
        os.remove(path)
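# Hedged sketch (illustrative only): the 'balanced' class weighting used above.
# Note that tf.keras expects class_weight as a {class_index: weight} dict, so the
# array returned by sklearn is mapped to a dict before being passed to fit().
import numpy as np
from sklearn.utils import class_weight

actions = np.array([0, 0, 0, 0, 1, 2, 2])  # toy expert action indices
classes = np.unique(actions)
weights = class_weight.compute_class_weight(class_weight='balanced',
                                            classes=classes, y=actions)
keras_class_weighting = {int(c): w for c, w in zip(classes, weights)}
# model.fit(x, y_onehot, class_weight=keras_class_weighting, ...)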
def clone_from_trajectory(self, expert_evaluation, expert_trajectory: Trajectory, streaming_enviroment,
                          trace_list, video_csv_list, log_steps=False):
    self.reset_learning()
    self.fit_clustering_scorer(expert_trajectory)
    # Select the training/validation traces
    trace_list = np.array(trace_list)
    video_csv_list = np.array(video_csv_list)
    expert_evaluation = np.array(expert_evaluation)
    train_idx, test_idx = train_test_split(np.arange(len(expert_evaluation)),
                                           test_size=self.validation_split * 2.)
    test_idx, validation_idx = train_test_split(test_idx, test_size=0.5)
    trace_video_pair_list = [f.name for f in expert_evaluation[train_idx]]
    expert_trajectory_train = expert_trajectory.extract_trajectory(trace_video_pair_list=trace_video_pair_list)
    expert_trajectory_train.convert_list()
    trace_video_pair_list = [f.name for f in expert_evaluation[test_idx]]
    expert_trajectory_test = expert_trajectory.extract_trajectory(trace_video_pair_list=trace_video_pair_list)
    expert_trajectory_test.convert_list()
    trace_video_pair_list = [f.name for f in expert_evaluation[validation_idx]]
    expert_trajectory_validation = expert_trajectory.extract_trajectory(trace_video_pair_list=trace_video_pair_list)
    expert_trajectory_validation.convert_list()
    state_t_training = expert_trajectory_train.trajectory_state_t_arr
    state_t_future_training = expert_trajectory_train.trajectory_state_t_future
    action_training = to_categorical(expert_trajectory_train.trajectory_action_t_arr, self.n_actions)
    state_t_testing = expert_trajectory_test.trajectory_state_t_arr
    state_t_future_testing = expert_trajectory_test.trajectory_state_t_future
    action_testing = to_categorical(expert_trajectory_test.trajectory_action_t_arr, self.n_actions)
    weight_filepaths = []
    behavioural_cloning_trace_generator_training = TrajectoryVideoStreaming(self, streaming_enviroment,
                                                                            trace_list=trace_list[train_idx],
                                                                            video_csv_list=video_csv_list[train_idx])
    keras_class_weighting = None
    if self.balanced:
        keras_class_weighting = class_weight.compute_class_weight('balanced',
                                                                  np.unique(action_training.argmax(1)),
                                                                  action_training.argmax(1))
    if self.pretrain:
        history = self.gail_model.policy_model.model.fit(
            [state_t_training, state_t_future_training], action_training,
            validation_data=([state_t_testing, state_t_future_testing], action_testing),
            epochs=self.pretrain_max_epochs, verbose=0, callbacks=self.early_stopping,
            class_weight=keras_class_weighting).history
        self.pretrain_history_last = history.copy()
        self.pretrain_history = self.keep_last_entry(history)
    for cloning_iteration in tqdm(range(self.cloning_epochs), desc='Cloning Epochs'):
        # --------------------------------------------------------------------------------------------------
        # Train the discriminator
        behavioural_cloning_training_evaluation, behavioural_cloning_training_trajectory = \
            behavioural_cloning_trace_generator_training.create_trajectories(random_action_probability=0)
        behavioural_cloning_training_trajectory.convert_list()
        training_trajectory_state_t = behavioural_cloning_training_trajectory.trajectory_state_t_arr
        training_trajectory_state_t_future = behavioural_cloning_training_trajectory.trajectory_state_t_future
        behavioural_action = behavioural_cloning_training_trajectory.trajectory_action_t_arr
        behavioural_action_likelihood = behavioural_cloning_training_trajectory.trajectory_likelihood
        train_idx_clone, test_idx_clone = train_test_split(np.arange(len(training_trajectory_state_t)),
                                                           test_size=self.validation_split)
        behavioral_action = to_categorical(behavioural_action, num_classes=self.n_actions)
        state_t_train = np.vstack([training_trajectory_state_t[train_idx_clone], state_t_training])
        state_t_future_train = np.vstack(
            [training_trajectory_state_t_future[train_idx_clone], state_t_future_training])
        action_train = np.vstack([behavioral_action[train_idx_clone], action_training])
        # Label 0 marks policy samples, label 1 marks expert samples
        target_label_train = to_categorical(np.vstack([0] * len(train_idx_clone) + [1] * len(action_training)),
                                            num_classes=2)
        state_t_validation = np.vstack([training_trajectory_state_t[test_idx_clone], state_t_testing])
        state_t_future_validation = np.vstack(
            [training_trajectory_state_t_future[test_idx_clone], state_t_future_testing])
        action_validation = np.vstack([behavioral_action[test_idx_clone], action_testing])
        target_label_validation = to_categorical(np.vstack([0] * len(test_idx_clone) + [1] * len(action_testing)),
                                                 num_classes=2)
        validation_data_discriminator = (
            [state_t_validation, state_t_future_validation, action_validation], target_label_validation)
        data_train = [state_t_train, state_t_future_train, action_train]
        # Repeated early stopping callbacks introduce errors, so none are used here
        history = self.discriminator.model.fit(data_train, target_label_train,
                                               validation_data=validation_data_discriminator,
                                               epochs=self.adverserial_max_epochs, verbose=0).history
        self.discriminator_history_last = history.copy()
        history = self.keep_last_entry(history)
        if self.discriminator_history is None:
            self.discriminator_history = history
        else:
            for k, v in history.items():
                self.discriminator_history[k] += v
        data_predict_discriminator = [training_trajectory_state_t, training_trajectory_state_t_future,
                                      behavioral_action]
        discriminator_prediction = self.discriminator.model.predict(data_predict_discriminator)[:, 1]
        reward = np.log(discriminator_prediction)  # Scales to 1.0 as recommended
        # Train the value net
        future_reward_obtained = []
        i_start = 0
        i_end = 0
        for evaluation_dataframe in behavioural_cloning_training_evaluation:
            i_end += len(evaluation_dataframe.streaming_session_evaluation)
            reward_transform = list(reward[i_start:i_end])
            # We ignore the last reward obtained as we don't have a corresponding state
            for i in range(1, len(reward_transform))[::-1]:
                exponent = len(reward_transform) - i
                reward_transform[i - 1] += reward_transform[i] * self.future_reward_discount ** exponent
            future_reward_obtained += reward_transform
            i_start = i_end
        future_reward_obtained = np.array(future_reward_obtained).reshape((-1, 1))
        future_reward_predicted = self.value_model.model.predict(
            [training_trajectory_state_t, training_trajectory_state_t_future])
        history = self.value_model.model.fit([training_trajectory_state_t, training_trajectory_state_t_future],
                                             future_reward_obtained, validation_split=0.2,
                                             epochs=self.adverserial_max_epochs, verbose=0).history
        self.value_history_last = history.copy()
        history = self.keep_last_entry(history)
        if self.value_history is None:
            self.value_history = history
        else:
            for k, v in history.items():
                self.value_history[k] += v
        estimated_advantage = future_reward_obtained - future_reward_predicted
        # --------------------------------------------------------------------------------------------------
        # Fit the policy with the PPO loss
        history = self.gail_model.gail_training_model.fit(
            [training_trajectory_state_t, training_trajectory_state_t_future, estimated_advantage,
             behavioural_action_likelihood], behavioral_action,
            validation_split=self.validation_split, epochs=self.adverserial_max_epochs,
            verbose=0, shuffle=True).history
        self.policy_history_last = history.copy()
        history = self.keep_last_entry(history)
        if self.policy_history is None:
            self.policy_history = history
        else:
            for k, v in history.items():
                self.policy_history[k] += v
        scoring_history, behavioural_cloning_evaluation = self.score(expert_evaluation[validation_idx],
                                                                     expert_trajectory_validation,
                                                                     streaming_enviroment,
                                                                     trace_list[validation_idx],
                                                                     video_csv_list[validation_idx])
        if log_steps:
            logging_folder = 'logging_%s' % self.abr_name
            if not os.path.exists(logging_folder):
                os.makedirs(logging_folder)
            with open(os.path.join(logging_folder, 'logging_iteration_%d' % cloning_iteration), 'wb') as output_file:
                dill.dump(behavioural_cloning_evaluation, output_file)
        for k, v in scoring_history.items():
            if k in self.policy_history:
                self.policy_history[k] += v
            else:
                self.policy_history[k] = v
        weight_filepath = self.rnd_id + '_policy_network_iteration_%d.h5' % cloning_iteration
        self.gail_model.policy_model.model.save_weights(filepath=weight_filepath)
        weight_filepaths.append(weight_filepath)
    best_iteration = self.opt_policy_opt_operator(self.policy_history[self.opt_policy_value_name])
    self.gail_model.policy_model.model.load_weights(weight_filepaths[best_iteration])
    logger.info('Restoring best iteration %d' % best_iteration)
    for path in weight_filepaths:
        os.remove(path)
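# Hedged sketch (illustrative only): the per-episode reward folding used above,
# written as the standard discounted-return recursion G_t = r_t + gamma * G_{t+1}.
# Note the loop above applies a slightly different exponent schedule
# (gamma ** (T - i) per step); this is the textbook variant for comparison.
import numpy as np

def discounted_returns(rewards, gamma):
    """G_t = r_t + gamma * G_{t+1}, computed right to left over one episode."""
    returns = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    return returns

# GAIL-style per-step reward from a discriminator output D(s_t, a_t):
d = np.array([0.9, 0.6, 0.2])
print(discounted_returns(np.log(d), gamma=0.99))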
def clone_from_trajectory(self, expert_evaluation, expert_trajectory: Trajectory, streaming_enviroment,
                          trace_list, video_csv_list, log_steps=False):
    """
    Main function which tries to imitate the expert actions.
    Simply imitates the actions of an expert in a given situation.
    :param expert_evaluation:
    :param expert_trajectory:
    :param streaming_enviroment:
    :param trace_list:
    :param video_csv_list:
    :param log_steps:
    :return:
    """
    logging_iteration = 0
    # Select the training/validation traces
    self.policy_history = None
    trace_list = np.array(trace_list)
    video_csv_list = np.array(video_csv_list)
    expert_evaluation = np.array(expert_evaluation)
    train_idx, test_idx = train_test_split(np.arange(len(expert_evaluation)),
                                           test_size=self.validation_split,
                                           random_state=RANDOM_SEED)
    trace_video_pair_list = [f.name for f in expert_evaluation[train_idx]]
    expert_trajectory_train = expert_trajectory.extract_trajectory(trace_video_pair_list=trace_video_pair_list)
    expert_trajectory_train.convert_list()
    trace_video_pair_list = [f.name for f in expert_evaluation[test_idx]]
    self.fit_clustering_scorer(expert_trajectory)
    # Create the logging folder up front so every log_steps branch below can use it
    logging_folder = 'logging_%s' % self.abr_name
    if log_steps and not os.path.exists(logging_folder):
        os.makedirs(logging_folder)
    if self.weight_samples:
        # Estimate the advantage on the training samples and add it to the training data
        self.fit_value_function(to_imitate_evaluation=expert_evaluation[train_idx],
                                to_imitate_trajectory=expert_trajectory_train)
        advantage = []
        for index in train_idx:
            advantage += list(self.estimate_advantage_frame(expert_evaluation[index], trace_list[index],
                                                            video_csv_list[index], streaming_enviroment))
        advantage = np.array(advantage).flatten()
        # Smooth the estimate by adding the minimum so that low advantages are a bit bolstered
        advantage = advantage + np.min(advantage)
        # No negative sample weights allowed!
        assert (advantage < 0).sum() == 0, 'advantage should be non-negative everywhere'
    expert_trajectory_test = expert_trajectory.extract_trajectory(trace_video_pair_list=trace_video_pair_list)
    state_t = np.array([self.classifier.extract_features_observation(obs)
                        for obs, _, _ in tqdm(expert_trajectory_train.trajectory_list, desc='transforming')])
    state_t = pd.DataFrame(state_t, columns=self.classifier.extract_features_names())
    self.impute_NaN_inplace(state_t)
    expert_action = expert_trajectory_train.trajectory_action_t_arr
    if self.weight_samples:
        self.classifier.fit(state_t, expert_action.ravel(), sample_weight=advantage)
        if log_steps:
            with open(os.path.join(logging_folder, 'advantage_distribution'), 'wb') as output_file:
                dill.dump(advantage, output_file)
    else:
        self.classifier.fit(state_t, expert_action.ravel())
    if self.policy_history is None:
        self.policy_history, behavioural_cloning_evaluation = self.score(expert_evaluation[test_idx],
                                                                         expert_trajectory_test,
                                                                         streaming_enviroment,
                                                                         trace_list[test_idx],
                                                                         video_csv_list[test_idx],
                                                                         add_data=False)
        if log_steps:
            with open(os.path.join(logging_folder, 'logging_iteration_%d' % logging_iteration), 'wb') as output_file:
                dill.dump(behavioural_cloning_evaluation, output_file)
def clone_from_trajectory(self, expert_evaluation, expert_trajectory: Trajectory, streaming_enviroment,
                          trace_list, video_csv_list, log_steps=False):
    self.reset_learning()
    self.fit_clustering_scorer(expert_trajectory)
    trace_list = np.array(trace_list)
    video_csv_list = np.array(video_csv_list)
    expert_evaluation = np.array(expert_evaluation)
    train_idx, test_idx = train_test_split(np.arange(len(expert_evaluation)),
                                           test_size=self.validation_split * 2.)
    test_idx, validation_idx = train_test_split(test_idx, test_size=0.5)
    trace_video_pair_list = [f.name for f in expert_evaluation[train_idx]]
    expert_trajectory_train = expert_trajectory.extract_trajectory(trace_video_pair_list=trace_video_pair_list)
    expert_trajectory_train.convert_list()
    trace_video_pair_list = [f.name for f in expert_evaluation[test_idx]]
    expert_trajectory_test = expert_trajectory.extract_trajectory(trace_video_pair_list=trace_video_pair_list)
    expert_trajectory_test.convert_list()
    trace_video_pair_list = [f.name for f in expert_evaluation[validation_idx]]
    expert_trajectory_validation = expert_trajectory.extract_trajectory(trace_video_pair_list=trace_video_pair_list)
    expert_trajectory_validation.convert_list()
    state_t_training = expert_trajectory_train.trajectory_state_t_arr
    state_t_future_training = expert_trajectory_train.trajectory_state_t_future
    action_training = to_categorical(expert_trajectory_train.trajectory_action_t_arr, self.n_actions)
    state_t_testing = expert_trajectory_test.trajectory_state_t_arr
    state_t_future_testing = expert_trajectory_test.trajectory_state_t_future
    action_testing = to_categorical(expert_trajectory_test.trajectory_action_t_arr, self.n_actions)
    ###############################################################################################################
    # Distill the frozen random network into the predictor network
    random_prediction_training = self.rnd_cloning_network.model.predict([state_t_training, state_t_future_training])
    random_prediction_testing = self.rnd_cloning_network.model.predict([state_t_testing, state_t_future_testing])
    testing_data = ([state_t_testing, state_t_future_testing], random_prediction_testing)
    self.pretrain_distill_history = self.bc_cloning_network.model.fit(
        [state_t_training, state_t_future_training], random_prediction_training,
        validation_data=testing_data, epochs=self.rde_distill_epochs, verbose=0,
        shuffle=True, callbacks=self.early_stopping).history
    trained_prediction_training = self.bc_cloning_network.model.predict([state_t_training, state_t_future_training])
    scaling_factor = np.random.random(size=100) * 100  # Pick the hyperparameter randomly
    rewards = [np.exp(-fact * np.square(trained_prediction_training - random_prediction_training).mean(
        axis=-1)).flatten().mean() for fact in scaling_factor]
    # Keep the sigma whose mean expert reward is closest to 1.0
    self.scaling_factor_sigma = scaling_factor[np.argmin(np.abs(np.array(rewards) - 1.0))]
    print('Chosen scaling factor %.2f' % self.scaling_factor_sigma)
    red_trajectory_generator_training = TrajectoryVideoStreaming(self, streaming_enviroment,
                                                                 trace_list=trace_list[train_idx],
                                                                 video_csv_list=video_csv_list[train_idx])
    keras_class_weighting = None
    if self.balanced:
        keras_class_weighting = class_weight.compute_class_weight('balanced',
                                                                  np.unique(action_training.argmax(1)),
                                                                  action_training.argmax(1))
    weight_filepaths = []
    if self.pretrain:
        self.pretrain_bc_history = self.policy_network.policy_model.model.fit(
            [state_t_training, state_t_future_training], action_training,
            validation_data=([state_t_testing, state_t_future_testing], action_testing),
            epochs=self.rde_distill_epochs, verbose=0, callbacks=self.early_stopping,
            class_weight=keras_class_weighting).history
    for cloning_iteration in tqdm(range(self.cloning_epochs), desc='Cloning Epochs'):
        # Iterations of the RED algorithm
        training_evaluation, training_trajectories = red_trajectory_generator_training.create_trajectories(
            random_action_probability=0)
        training_trajectories.convert_list()
        state_t_training_sampled = training_trajectories.trajectory_state_t_arr
        state_t_future_training_sampled = training_trajectories.trajectory_state_t_future
        action_sampled = to_categorical(training_trajectories.trajectory_action_t_arr, num_classes=self.n_actions)
        action_likelihood_sampled = training_trajectories.trajectory_likelihood
        bc_clone_prediction = self.bc_cloning_network.model.predict(
            [state_t_training_sampled, state_t_future_training_sampled])
        random_prediction = self.rnd_cloning_network.model.predict(
            [state_t_training_sampled, state_t_future_training_sampled])
        # RED reward: r(s, a) = exp(-sigma_1 * ||f_hat_theta(s, a) - f_theta(s, a)||_2^2),
        # which scales to 1.0 as recommended
        reward = np.exp(-self.scaling_factor_sigma * np.square(bc_clone_prediction - random_prediction).mean(
            axis=-1)).flatten()
        # Train the value net
        future_reward_obtained = []
        i_start = 0
        i_end = 0
        for evaluation_dataframe in training_evaluation:
            i_end += len(evaluation_dataframe.streaming_session_evaluation)
            reward_transform = list(reward[i_start:i_end])
            # We ignore the last reward obtained as we don't have a corresponding state
            for i in range(1, len(reward_transform))[::-1]:
                exponent = len(reward_transform) - i
                reward_transform[i - 1] += reward_transform[i] * self.future_reward_discount ** exponent
            future_reward_obtained += reward_transform
            i_start = i_end
        future_reward_obtained = np.array(future_reward_obtained).reshape((-1, 1))
        future_reward_predicted = self.value_model.model.predict(
            [state_t_training_sampled, state_t_future_training_sampled])
        # Repeated early stopping callbacks introduce errors, so none are used here
        history = self.value_model.model.fit([state_t_training_sampled, state_t_future_training_sampled],
                                             future_reward_obtained, validation_split=0.2,
                                             epochs=self.model_iterations, verbose=0, shuffle=True).history
        self.value_history_last = history.copy()
        history = self.keep_last_entry(history)
        if self.value_history is None:
            self.value_history = history
        else:
            for k, v in history.items():
                self.value_history[k] += v
        estimated_advantage = future_reward_obtained - future_reward_predicted
        # Fit the policy with the PPO loss
        history = self.policy_network.gail_training_model.fit(
            [state_t_training_sampled, state_t_future_training_sampled, estimated_advantage,
             action_likelihood_sampled], action_sampled,
            validation_split=self.validation_split, epochs=self.model_iterations,
            verbose=0, shuffle=True).history
        self.policy_history_last = history.copy()
        history = self.keep_last_entry(history)
        if self.policy_history is None:
            self.policy_history = history
        else:
            for k, v in history.items():
                self.policy_history[k] += v
        scoring_history, behavioural_cloning_evaluation = self.score(expert_evaluation[validation_idx],
                                                                     expert_trajectory_validation,
                                                                     streaming_enviroment,
                                                                     trace_list[validation_idx],
                                                                     video_csv_list[validation_idx])
        if log_steps:
            logging_folder = 'logging_%s' % self.abr_name
            if not os.path.exists(logging_folder):
                os.makedirs(logging_folder)
            with open(os.path.join(logging_folder, 'logging_iteration_%d' % cloning_iteration), 'wb') as output_file:
                dill.dump(behavioural_cloning_evaluation, output_file)
        for k, v in scoring_history.items():
            if k in self.policy_history:
                self.policy_history[k] += v
            else:
                self.policy_history[k] = v
        weight_filepath = self.rnd_id + '_policy_network_iteration_%d.h5' % cloning_iteration
        self.policy_network.policy_model.model.save_weights(filepath=weight_filepath)
        weight_filepaths.append(weight_filepath)
    best_iteration = self.opt_policy_opt_operator(self.policy_history[self.opt_policy_value_name])
    self.policy_network.policy_model.model.load_weights(weight_filepaths[best_iteration])
    for path in weight_filepaths:
        os.remove(path)
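# Hedged sketch (illustrative only): the RED reward and the random sigma search
# used above. f_random stands in for the frozen random network f_theta and
# f_distilled for the trained predictor f_hat_theta; on expert-like states the
# squared error is small, so the reward approaches 1.
import numpy as np

rng = np.random.default_rng(0)
f_random = rng.normal(size=(500, 16))                                 # frozen target embeddings
f_distilled = f_random + rng.normal(scale=0.05, size=f_random.shape)  # distilled predictions

mse = np.square(f_distilled - f_random).mean(axis=-1)
candidates = rng.random(100) * 100                                    # random sigma candidates
mean_rewards = np.array([np.exp(-s * mse).mean() for s in candidates])
sigma = candidates[np.argmin(np.abs(mean_rewards - 1.0))]             # mean reward closest to 1

reward = np.exp(-sigma * mse)  # r(s) = exp(-sigma * ||f_hat(s) - f(s)||^2)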