def Predict(self, test_generator):
    metrics = Metrics()
    valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi, y_pred, gt_labels = self.ValidateSingleEpoch(
        test_generator)

    gt_labels = torch.tensor(gt_labels, dtype=torch.float32)
    y_pred = torch.tensor(y_pred, dtype=torch.float32)
    MSE, MAE, r2_score = metrics.Update(y_pred, gt_labels,
                                        [0, 0, 0, 0],
                                        [valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi])

    y_pred_viz = metrics.GetPred()
    gt_labels_viz = metrics.GetLabels()

    DataVisualization.desc = "Test_"
    DataVisualization.PlotGTandEstimationVsTime(gt_labels_viz, y_pred_viz)
    DataVisualization.PlotGTVsEstimation(gt_labels_viz, y_pred_viz)
    DataVisualization.DisplayPlots()

    logging.info('Test MSE: {}'.format(MSE))
    logging.info('Test MAE: {}'.format(MAE))
    logging.info('Test r2_score: {}'.format(r2_score))
    return y_pred
def Predict(self, test_generator):
    metrics = Metrics()
    valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi, y_pred, gt_labels = self.ValidateSingleEpoch(
        test_generator)

    gt_labels = torch.tensor(gt_labels, dtype=torch.float32)
    y_pred = torch.tensor(y_pred, dtype=torch.float32)
    MSE, MAE, r2_score = metrics.Update(y_pred, gt_labels,
                                        [0, 0, 0, 0],
                                        [valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi])

    y_pred_viz = metrics.GetPred()
    gt_labels_viz = metrics.GetLabels()

    DataVisualization.desc = "Test_"
    DataVisualization.PlotGTandEstimationVsTime(gt_labels_viz, y_pred_viz)
    DataVisualization.PlotGTVsEstimation(gt_labels_viz, y_pred_viz)
    DataVisualization.DisplayPlots()

    logging.info('[ModelTrainer] Test MSE: [{0:.4f}, {1:.4f}, {2:.4f}, {3:.4f}]'.format(MSE[0], MSE[1], MSE[2], MSE[3]))
    logging.info('[ModelTrainer] Test MAE: [{0:.4f}, {1:.4f}, {2:.4f}, {3:.4f}]'.format(MAE[0], MAE[1], MAE[2], MAE[3]))
    logging.info('[ModelTrainer] Test r2_score: [{0:.4f}, {1:.4f}, {2:.4f}, {3:.4f}]'.format(r2_score[0], r2_score[1], r2_score[2], r2_score[3]))
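# --- Hypothetical usage sketch (not from the original sources) ---
# Shows how the Predict() methods above could be driven from a held-out test
# set. The ModelTrainer constructor signature, `model`, `args`, and the sample
# shapes are assumptions for illustration; only the DataLoader wiring and the
# Predict() call follow the code above (the first Predict variant also returns
# the predictions).
import torch
from torch.utils.data import DataLoader, TensorDataset

# placeholder test set: 100 samples with 4 regression targets (x, y, z, phi)
test_dataset = TensorDataset(torch.randn(100, 1, 96, 160), torch.randn(100, 4))
test_generator = DataLoader(test_dataset, batch_size=64, shuffle=False)

trainer = ModelTrainer(model, args)        # assumed constructor, defined elsewhere
y_pred = trainer.Predict(test_generator)   # plots and logs test MSE / MAE / R2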
def __init__(self, inputFile=None, reference=None):
    """
    Inputs
    • inputFile: Name of the input file. Default value is None.
    • reference: Name of the reference column. Default value is None.

    Local variables set:
    • dataDict
    • header
    • reference
    • stats
    • visualize

    Outputs: None
    """
    # Check if inputFile is None
    if inputFile is None:
        # Initialize all the variables to None
        self.dataDict = None
        self.reference = None
        self.header = None
        self.numColumns = None
        self.numRows = None
        self.inputFile = None
    else:
        # else call the load function
        self.load(inputFile, reference)
    # initialize DataStatistics and DataVisualization objects
    self.visualize = DataVisualization(self)
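# --- Hypothetical usage sketch (not from the original sources) ---
# The two construction paths of the __init__ above: deferred loading (every
# field left as None until load() is called) versus loading at construction
# time. The class name `DataAnalysis`, the file name, and the column name are
# placeholders; only the argument order follows the signature above.
deferred = DataAnalysis()                                    # all fields None, call load() later
loaded = DataAnalysis('measurements.csv', reference='time')  # load() fills dataDict, header, ...
plots = loaded.visualize                                     # the bound DataVisualization instance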
def Train(self, training_generator, validation_generator):
    metrics = Metrics()
    early_stopping = EarlyStopping(patience=10, verbose=True)

    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=np.sqrt(0.1),
                                                           patience=5, verbose=False, threshold=0.0001,
                                                           threshold_mode='rel', cooldown=0, min_lr=0.1e-6,
                                                           eps=1e-08)
    loss_epoch_m1 = 1e3

    for epoch in range(1, self.args.epochs + 1):
        logging.info("[ModelTrainer] Starting Epoch {}".format(epoch))
        change_prec = False
        ended = False
        # if self.args.quantize:
        #     change_prec, ended = self.relax.step(loss_epoch_m1, epoch, None)
        #     if ended:
        #         break

        train_loss_x, train_loss_y, train_loss_z, train_loss_phi = self.TrainSingleEpoch(training_generator)

        valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi, y_pred, gt_labels = self.ValidateSingleEpoch(
            validation_generator)

        valid_loss = valid_loss_x + valid_loss_y + valid_loss_z + valid_loss_phi
        scheduler.step(valid_loss)

        gt_labels = torch.tensor(gt_labels, dtype=torch.float32)
        y_pred = torch.tensor(y_pred, dtype=torch.float32)
        MSE, MAE, r_score = metrics.Update(y_pred, gt_labels,
                                           [train_loss_x, train_loss_y, train_loss_z, train_loss_phi],
                                           [valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi])

        logging.info('[ModelTrainer] Validation MSE: {}'.format(MSE))
        logging.info('[ModelTrainer] Validation MAE: {}'.format(MAE))
        logging.info('[ModelTrainer] Validation r_score: {}'.format(r_score))

        checkpoint_filename = self.folderPath + self.model.name + '-{:03d}.pt'.format(epoch)
        early_stopping(valid_loss, self.model, epoch, checkpoint_filename)
        if early_stopping.early_stop:
            logging.info("[ModelTrainer] Early stopping")
            break

    MSEs = metrics.GetMSE()
    MAEs = metrics.GetMAE()
    r_score = metrics.Getr2_score()
    y_pred_viz = metrics.GetPred()
    gt_labels_viz = metrics.GetLabels()
    train_losses_x, train_losses_y, train_losses_z, train_losses_phi, valid_losses_x, valid_losses_y, valid_losses_z, valid_losses_phi = metrics.GetLosses()

    DataVisualization.desc = "Train_"
    DataVisualization.PlotLoss(train_losses_x, train_losses_y, train_losses_z, train_losses_phi,
                               valid_losses_x, valid_losses_y, valid_losses_z, valid_losses_phi)
    DataVisualization.PlotMSE(MSEs)
    DataVisualization.PlotMAE(MAEs)
    DataVisualization.PlotR2Score(r_score)
    DataVisualization.PlotGTandEstimationVsTime(gt_labels_viz, y_pred_viz)
    DataVisualization.PlotGTVsEstimation(gt_labels_viz, y_pred_viz)
    DataVisualization.DisplayPlots()
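# --- Illustrative sketch (not from the original sources) ---
# Demonstrates the learning-rate schedule used inside Train() above:
# ReduceLROnPlateau multiplies the learning rate by sqrt(0.1) once the
# monitored validation loss has stopped improving for `patience` epochs.
# The toy model and the fake loss sequence below are placeholders.
import numpy as np
import torch

toy_model = torch.nn.Linear(4, 4)
optimizer = torch.optim.SGD(toy_model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                       factor=np.sqrt(0.1), patience=5)

fake_valid_losses = [1.0] * 10   # a plateau: no improvement after the first epoch
for loss in fake_valid_losses:
    scheduler.step(loss)

print(optimizer.param_groups[0]['lr'])   # reduced from 0.1 to ~0.0316 after the plateau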
new_time_series = fe.execute_feature_engineering(time_series)
# new_time_series = dp.delete_column(time_series, ['Confirmed Cases', 'Deaths', 'Recovered Cases', 'Active Cases'])

# Truncate zero values from the time series
# new_time_series = dp.truncate_time_series(new_time_series, '26/02/2020')

# Preliminary Analysis: Stationarity Check
pa = PreliminaryAnalysis()
pa.execute_preliminary_analysis(new_time_series)

# Data Visualization Phase
log.info(Constants.DATA_VISUALIZATION_MSG)
dv = DataVisualization()

# original time series data
dv.plot_data_structure(time_series)
# new time series data
dv.plot_data_structure(new_time_series)

# plots of all cases
dv.plot_data_structure(new_time_series['New Confirmed Cases'])
dv.plot_data_structure(new_time_series['New Deaths'])
dv.plot_data_structure(new_time_series['New Recovered Cases'])
dv.plot_data_structure(new_time_series['New Active Cases'])

# Decomposition plots for:
# Confirmed Cases
x = new_time_series['New Confirmed Cases'].resample('D').mean()
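# --- Illustrative sketch (not from the original sources) ---
# A plausible next step for the decomposition plots mentioned above: resample
# the series to a daily mean and split it into trend / seasonal / residual
# components. The toy series and the weekly period are placeholders; only the
# resample('D').mean() call mirrors the pipeline code above.
import numpy as np
import pandas as pd
from statsmodels.tsa.seasonal import seasonal_decompose

idx = pd.date_range('2020-02-26', periods=120, freq='D')
toy = pd.Series(np.random.poisson(lam=50, size=120), index=idx, name='New Confirmed Cases')

daily = toy.resample('D').mean()                          # same resampling as in the script
result = seasonal_decompose(daily, model='additive', period=7)
result.plot()                                             # trend / seasonal / residual panels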