def Predict(self, test_generator):
    """Run inference over *test_generator*, plot estimation vs. ground
    truth, log the test metrics, and return the prediction tensor.

    NOTE(review): a second ``Predict`` is defined later in this file and
    shadows this one at class-creation time — confirm which definition
    is the intended public one.
    """
    metrics = Metrics()
    (valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi,
     y_pred, gt_labels) = self.ValidateSingleEpoch(test_generator)

    # Convert to float32 tensors so Metrics can accumulate them.
    gt_labels = torch.tensor(gt_labels, dtype=torch.float32)
    y_pred = torch.tensor(y_pred, dtype=torch.float32)

    # Training losses are irrelevant at test time, hence the zero vector.
    MSE, MAE, r2_score = metrics.Update(
        y_pred,
        gt_labels,
        [0, 0, 0, 0],
        [valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi])

    # Plot estimation vs. ground truth under the "Test_" prefix.
    y_pred_viz = metrics.GetPred()
    gt_labels_viz = metrics.GetLabels()
    DataVisualization.desc = "Test_"
    DataVisualization.PlotGTandEstimationVsTime(gt_labels_viz, y_pred_viz)
    DataVisualization.PlotGTVsEstimation(gt_labels_viz, y_pred_viz)
    DataVisualization.DisplayPlots()

    logging.info('Test MSE: {}'.format(MSE))
    logging.info('Test MAE: {}'.format(MAE))
    logging.info('Test r2_score: {}'.format(r2_score))
    return y_pred
def Predict(self, test_generator):
    """Run inference over *test_generator*, plot estimation vs. ground
    truth, log per-component test metrics, and return the predictions.

    Args:
        test_generator: iterable yielding test batches, consumed by
            ``self.ValidateSingleEpoch``.

    Returns:
        ``torch.Tensor`` of predictions (float32).

    Fix: this definition shadows an earlier ``Predict`` that returns
    ``y_pred``; this one previously fell off the end and returned
    ``None``, silently breaking callers that use the return value.
    The ``return`` is restored for interface consistency.
    """
    metrics = Metrics()
    (valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi,
     y_pred, gt_labels) = self.ValidateSingleEpoch(test_generator)

    # Convert to float32 tensors so Metrics can accumulate them.
    gt_labels = torch.tensor(gt_labels, dtype=torch.float32)
    y_pred = torch.tensor(y_pred, dtype=torch.float32)

    # No training losses at test time, hence the zero vector.
    MSE, MAE, r2_score = metrics.Update(
        y_pred,
        gt_labels,
        [0, 0, 0, 0],
        [valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi])

    # Plot estimation vs. ground truth under the "Test_" prefix.
    y_pred_viz = metrics.GetPred()
    gt_labels_viz = metrics.GetLabels()
    DataVisualization.desc = "Test_"
    DataVisualization.PlotGTandEstimationVsTime(gt_labels_viz, y_pred_viz)
    DataVisualization.PlotGTVsEstimation(gt_labels_viz, y_pred_viz)
    DataVisualization.DisplayPlots()

    # One metric value per output component (x, y, z, phi).
    logging.info('[ModelTrainer] Test MSE: [{0:.4f}, {1:.4f}, {2:.4f}, {3:.4f}]'.format(
        MSE[0], MSE[1], MSE[2], MSE[3]))
    logging.info('[ModelTrainer] Test MAE: [{0:.4f}, {1:.4f}, {2:.4f}, {3:.4f}]'.format(
        MAE[0], MAE[1], MAE[2], MAE[3]))
    logging.info('[ModelTrainer] Test r2_score: [{0:.4f}, {1:.4f}, {2:.4f}, {3:.4f}]'.format(
        r2_score[0], r2_score[1], r2_score[2], r2_score[3]))
    return y_pred
def Test(self, test_generator):
    """Evaluate the model on *test_generator* and log per-component metrics.

    Returns:
        ``(MSE, MAE, r2_score, outputs, labels)`` where ``outputs`` is
        the prediction array reshaped to ``(-1, 4)`` and ``labels`` is
        the raw ground truth as returned by ``ValidateSingleEpoch``.
        NOTE(review): ``labels`` is intentionally NOT reshaped here,
        unlike ``outputs`` — confirm callers expect that asymmetry.
    """
    metrics = Metrics()
    (valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi,
     y_pred, gt_labels) = self.ValidateSingleEpoch(test_generator)

    # Un-tensored copies handed back to the caller.
    outputs = np.reshape(y_pred, (-1, 4))
    labels = gt_labels

    # Tensor views for the Metrics accumulator; predictions are grouped
    # into rows of four (x, y, z, phi).
    pred_tensor = torch.tensor(np.reshape(y_pred, (-1, 4)), dtype=torch.float32)
    label_tensor = torch.tensor(gt_labels, dtype=torch.float32)

    MSE, MAE, r2_score = metrics.Update(
        pred_tensor,
        label_tensor,
        [0, 0, 0, 0],
        [valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi])

    logging.info(
        '[ModelTrainer] Test MSE: [{0:.4f}, {1:.4f}, {2:.4f}, {3:.4f}]'.
        format(MSE[0], MSE[1], MSE[2], MSE[3]))
    logging.info(
        '[ModelTrainer] Test MAE: [{0:.4f}, {1:.4f}, {2:.4f}, {3:.4f}]'.
        format(MAE[0], MAE[1], MAE[2], MAE[3]))
    logging.info(
        '[ModelTrainer] Test r2_score: [{0:.4f}, {1:.4f}, {2:.4f}, {3:.4f}]'
        .format(r2_score[0], r2_score[1], r2_score[2], r2_score[3]))
    return MSE, MAE, r2_score, outputs, labels
def Train(self, training_generator, validation_generator):
    """Train ``self.model`` with plateau LR scheduling and early stopping,
    then plot the accumulated training curves.

    Args:
        training_generator: iterable of training batches, consumed by
            ``self.TrainSingleEpoch``.
        validation_generator: iterable of validation batches, consumed by
            ``self.ValidateSingleEpoch``.

    Side effects:
        Saves per-epoch checkpoints via ``EarlyStopping`` under
        ``self.folderPath`` and displays plots via ``DataVisualization``.

    Fix: removed dead code — ``loss_epoch_m1``, ``change_prec`` and
    ``ended`` were only referenced by a commented-out quantization
    (``self.relax``) block, which is deleted along with them.
    """
    metrics = Metrics()
    early_stopping = EarlyStopping(patience=10, verbose=True)
    # Shrink the LR by sqrt(0.1) whenever the summed validation loss
    # plateaus for 5 epochs; never below 1e-7.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        self.optimizer, mode='min', factor=np.sqrt(0.1), patience=5,
        verbose=False, threshold=0.0001, threshold_mode='rel',
        cooldown=0, min_lr=0.1e-6, eps=1e-08)

    for epoch in range(1, self.args.epochs + 1):
        logging.info("[ModelTrainer] Starting Epoch {}".format(epoch))

        train_loss_x, train_loss_y, train_loss_z, train_loss_phi = \
            self.TrainSingleEpoch(training_generator)
        (valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi,
         y_pred, gt_labels) = self.ValidateSingleEpoch(validation_generator)

        # Scheduler tracks the total validation loss across all outputs.
        valid_loss = valid_loss_x + valid_loss_y + valid_loss_z + valid_loss_phi
        scheduler.step(valid_loss)

        gt_labels = torch.tensor(gt_labels, dtype=torch.float32)
        y_pred = torch.tensor(y_pred, dtype=torch.float32)
        MSE, MAE, r_score = metrics.Update(
            y_pred, gt_labels,
            [train_loss_x, train_loss_y, train_loss_z, train_loss_phi],
            [valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi])
        logging.info('[ModelTrainer] Validation MSE: {}'.format(MSE))
        logging.info('[ModelTrainer] Validation MAE: {}'.format(MAE))
        logging.info('[ModelTrainer] Validation r_score: {}'.format(r_score))

        # EarlyStopping checkpoints the model and trips after 10 epochs
        # without improvement in the total validation loss.
        checkpoint_filename = self.folderPath + self.model.name + '-{:03d}.pt'.format(epoch)
        early_stopping(valid_loss, self.model, epoch, checkpoint_filename)
        if early_stopping.early_stop:
            logging.info("[ModelTrainer] Early stopping")
            break

    # Collect accumulated history and render the training plots.
    MSEs = metrics.GetMSE()
    MAEs = metrics.GetMAE()
    r_score = metrics.Getr2_score()
    y_pred_viz = metrics.GetPred()
    gt_labels_viz = metrics.GetLabels()
    (train_losses_x, train_losses_y, train_losses_z, train_losses_phi,
     valid_losses_x, valid_losses_y, valid_losses_z,
     valid_losses_phi) = metrics.GetLosses()

    DataVisualization.desc = "Train_"
    DataVisualization.PlotLoss(train_losses_x, train_losses_y,
                               train_losses_z, train_losses_phi,
                               valid_losses_x, valid_losses_y,
                               valid_losses_z, valid_losses_phi)
    DataVisualization.PlotMSE(MSEs)
    DataVisualization.PlotMAE(MAEs)
    DataVisualization.PlotR2Score(r_score)
    DataVisualization.PlotGTandEstimationVsTime(gt_labels_viz, y_pred_viz)
    DataVisualization.PlotGTVsEstimation(gt_labels_viz, y_pred_viz)
    DataVisualization.DisplayPlots()