Example #1
    def train(self):
        training_history = self.model.fit(
            x=self._data['x_train'],
            y=self._data['y_train'],
            batch_size=self._batch_size,
            epochs=self._epochs,
            callbacks=self.callbacks_list,
            validation_data=(self._data['x_val'], self._data['y_val']),
            shuffle=True,
            verbose=2)
        if training_history is not None:
            self._plot_training_history(training_history)
            self._save_model_history(training_history)
            config = dict(self._kwargs)
            config_filename = 'config_lstm.yaml'
            config['train']['log_dir'] = self._log_dir
            with open(os.path.join(self._log_dir, config_filename), 'w') as f:
                yaml.dump(config, f, default_flow_style=False)

        # evaluate
        scaler = self._data['scaler']
        x_eval = self._data['x_eval']
        y_truth = self._data['y_eval']

        y_pred = self.model.predict(x_eval)
        y_pred = scaler.inverse_transform(y_pred)
        y_truth = scaler.inverse_transform(y_truth)

        mse = metrics.masked_mse_np(preds=y_pred, labels=y_truth, null_val=0)
        mape = metrics.masked_mape_np(preds=y_pred, labels=y_truth, null_val=0)
        rmse = metrics.masked_rmse_np(preds=y_pred, labels=y_truth, null_val=0)
        self._logger.info(
            "Horizon {:02d}, MSE: {:.2f}, MAPE: {:.4f}, RMSE: {:.2f}".format(
                1, mse, mape, rmse))
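All of these examples pull a fitted scaler out of self._data and call inverse_transform to bring predictions and ground truth back to the original units before computing metrics. A minimal sketch of what such a scaler typically looks like (the class name and fields are assumptions, not the project's actual implementation):

class StandardScaler:
    """Z-score scaler; mean and std are assumed to be fitted on the training split."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def transform(self, data):
        # normalize to zero mean and unit variance
        return (data - self.mean) / self.std

    def inverse_transform(self, data):
        # undo the normalization, returning values in the original units
        return data * self.std + self.mean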
Example #2
    def evaluate(self):
        scaler = self._data['scaler']

        y_pred_1, y_pred_2 = self.model.predict(self._data['x_eval'])
        y_pred_1 = scaler.inverse_transform(y_pred_1)
        # apply the same inverse transform to the backward predictions
        y_pred_2 = scaler.inverse_transform(y_pred_2)
        y_truth_1 = scaler.inverse_transform(self._data['y_eval_1'])
        y_truth_2 = scaler.inverse_transform(self._data['y_eval_2'])

        mse = metrics.masked_mse_np(preds=y_pred_1,
                                    labels=y_truth_1,
                                    null_val=0)
        mae = metrics.masked_mae_np(preds=y_pred_1,
                                    labels=y_truth_1,
                                    null_val=0)
        mape = metrics.masked_mape_np(preds=y_pred_1,
                                      labels=y_truth_1,
                                      null_val=0)
        rmse = metrics.masked_rmse_np(preds=y_pred_1,
                                      labels=y_truth_1,
                                      null_val=0)
        self._logger.info(
            " Forward results: MSE: {:.2f}, MAE: {:.2f}, RMSE: {:.2f}, MAPE: {:.4f}"
            .format(mse, mae, rmse, mape))

        mse_2 = metrics.masked_mse_np(preds=y_pred_2,
                                      labels=y_truth_2,
                                      null_val=0)
        mae_2 = metrics.masked_mae_np(preds=y_pred_2,
                                      labels=y_truth_2,
                                      null_val=0)
        mape_2 = metrics.masked_mape_np(preds=y_pred_2,
                                        labels=y_truth_2,
                                        null_val=0)
        rmse_2 = metrics.masked_rmse_np(preds=y_pred_2,
                                        labels=y_truth_2,
                                        null_val=0)
        self._logger.info(
            "Backward results: MSE: {:.2f}, MAE: {:.2f}, RMSE: {:.2f}, MAPE: {:.4f}"
            .format(mse_2, mae_2, rmse_2, mape_2))
Example #3
    def evaluate(self):
        scaler = self._data['scaler']

        y_pred = self.model.predict(self._data['x_eval'])
        y_pred = scaler.inverse_transform(y_pred)
        y_truth = scaler.inverse_transform(self._data['y_eval'])

        mse = metrics.masked_mse_np(preds=y_pred, labels=y_truth, null_val=0)
        mae = metrics.masked_mae_np(preds=y_pred, labels=y_truth, null_val=0)
        mape = metrics.masked_mape_np(preds=y_pred, labels=y_truth, null_val=0)
        rmse = metrics.masked_rmse_np(preds=y_pred, labels=y_truth, null_val=0)
        self._logger.info(
            "Horizon {:02d}, MSE: {:.2f}, MAE: {:.2f}, RMSE: {:.2f}, MAPE: {:.4f}"
            .format(1, mse, mae, rmse, mape))
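The masked_*_np helpers used throughout come from the project's metrics module and skip entries whose label equals null_val (here 0), so missing readings do not distort the error. A minimal sketch of a masked MAE along those lines (details such as NaN handling may differ from the real implementation):

import numpy as np

def masked_mae_np(preds, labels, null_val=np.nan):
    # mask out entries where the label is missing (NaN or equal to null_val)
    if np.isnan(null_val):
        mask = ~np.isnan(labels)
    else:
        mask = (labels != null_val)
    mask = mask.astype(np.float64)
    mask /= mask.mean()  # rescale so masked-out entries do not shrink the average
    mae = np.abs(preds - labels) * mask
    return float(np.nan_to_num(mae).mean())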
Example #4
    def evaluate(self, sess):
        global_step = sess.run(tf.train.get_or_create_global_step())
        test_results = self.run_epoch_generator(
            sess,
            self.model,
            self._data['eval_loader'].get_iterator(),
            return_output=True,
            training=False)

        # y_preds:  a list of (batch_size, horizon, num_nodes, output_dim)
        test_loss, y_preds = test_results['loss'], test_results['outputs']
        utils.add_simple_summary(self._writer, ['loss/test_loss'], [test_loss],
                                 global_step=global_step)

        y_preds = np.concatenate(y_preds, axis=0)
        scaler = self._data['scaler']
        predictions = []
        y_truths = []
        for horizon_i in range(self._data['y_eval'].shape[1]):
            y_truth = scaler.inverse_transform(
                self._data['y_eval'][:, horizon_i, :, 0])
            y_truths.append(y_truth)

            y_pred = scaler.inverse_transform(y_preds[:, horizon_i, :, 0])
            predictions.append(y_pred)

            mse = metrics.masked_mse_np(preds=y_pred,
                                        labels=y_truth,
                                        null_val=0)
            mae = metrics.masked_mae_np(preds=y_pred,
                                        labels=y_truth,
                                        null_val=0)
            mape = metrics.masked_mape_np(preds=y_pred,
                                          labels=y_truth,
                                          null_val=0)
            rmse = metrics.masked_rmse_np(preds=y_pred,
                                          labels=y_truth,
                                          null_val=0)
            self._logger.info(
                "Horizon {:02d}, MSE: {:.2f}, MAE: {:.2f}, RMSE: {:.2f}, MAPE: {:.4f}"
                .format(horizon_i + 1, mse, mae, rmse, mape))
            utils.add_simple_summary(
                self._writer,
                ['%s_%d' % (item, horizon_i + 1)
                 for item in ['metric/rmse', 'metric/mae', 'metric/mse']],
                [rmse, mae, mse],
                global_step=global_step)
        outputs = {'predictions': predictions, 'groundtruth': y_truths}
        return outputs
Example #5
    def _calculate_metrics(self, prediction_results, metrics_summary, scaler, runId, data_norm, n_metrics=4):
        # y_preds:  a list of (batch_size, horizon, num_nodes, output_dim)
        y_preds = prediction_results['y_preds']
        y_preds = np.concatenate(y_preds, axis=0)

        y_truths = prediction_results['y_truths']
        y_truths = np.concatenate(y_truths, axis=0)
        predictions = []

        for horizon_i in range(self._horizon):
            y_truth = scaler.inverse_transform(y_truths[:, horizon_i, :])

            y_pred = scaler.inverse_transform(y_preds[:, horizon_i, :])
            predictions.append(y_pred)

            mse = metrics.masked_mse_np(preds=y_pred, labels=y_truth, null_val=0)
            mae = metrics.masked_mae_np(preds=y_pred, labels=y_truth, null_val=0)
            mape = metrics.masked_mape_np(preds=y_pred, labels=y_truth, null_val=0)
            rmse = metrics.masked_rmse_np(preds=y_pred, labels=y_truth, null_val=0)
            self._logger.info(
                "Horizon {:02d}, MSE: {:.2f}, MAE: {:.2f}, RMSE: {:.2f}, MAPE: {:.4f}".format(
                    horizon_i + 1, mse, mae, rmse, mape
                )
            )
            metrics_summary[runId, horizon_i * n_metrics + 0] = mse
            metrics_summary[runId, horizon_i * n_metrics + 1] = mae
            metrics_summary[runId, horizon_i * n_metrics + 2] = rmse
            metrics_summary[runId, horizon_i * n_metrics + 3] = mape

        tm_pred = scaler.inverse_transform(prediction_results['tm_pred'])
        g_truth = scaler.inverse_transform(data_norm[self._seq_len:-self._horizon])
        m_indicator = prediction_results['m_indicator']
        er = metrics.error_ratio(y_pred=tm_pred,
                                 y_true=g_truth,
                                 measured_matrix=m_indicator)
        metrics_summary[runId, -1] = er
        self._logger.info('ER: {}'.format(er))
        self._save_results(g_truth=g_truth, pred_tm=tm_pred, m_indicator=m_indicator, tag=str(runId))
        return metrics_summary
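Examples #5 through #7 also report an error ratio (ER) over the reconstructed traffic matrix, weighted by a measurement indicator. A plausible sketch, assuming m_indicator is 1 for measured entries and the ratio is taken over the unmeasured ones (the project's error_ratio may be defined differently):

import numpy as np

def error_ratio(y_pred, y_true, measured_matrix):
    # evaluate only the entries that were not directly measured
    unmeasured = (measured_matrix == 0)
    error = np.sum(np.abs(y_pred[unmeasured] - y_true[unmeasured]))
    total = np.sum(np.abs(y_true[unmeasured]))
    return error / total if total > 0 else 0.0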
Example #6
    def _test(self):
        scaler = self._data['scaler']
        results_summary = pd.DataFrame(index=range(self._run_times))
        results_summary['No.'] = range(self._run_times)

        n_metrics = 4
        # Metrics: MSE, MAE, RMSE, MAPE, ER
        metrics_summary = np.zeros(shape=(self._run_times,
                                          self._horizon * n_metrics + 1))

        for i in range(self._run_times):
            print('|--- Running time: {}/{}'.format(i, self._run_times))

            outputs = self._run_tm_prediction()

            tm_pred, m_indicator, y_preds = outputs['tm_pred'], outputs[
                'm_indicator'], outputs['y_preds']

            y_preds = np.concatenate(y_preds, axis=0)
            predictions = []
            y_truths = outputs['y_truths']
            y_truths = np.concatenate(y_truths, axis=0)

            for horizon_i in range(self._horizon):
                y_truth = scaler.inverse_transform(y_truths[:, horizon_i, :])

                y_pred = scaler.inverse_transform(y_preds[:, horizon_i, :])
                predictions.append(y_pred)

                mse = metrics.masked_mse_np(preds=y_pred,
                                            labels=y_truth,
                                            null_val=0)
                mae = metrics.masked_mae_np(preds=y_pred,
                                            labels=y_truth,
                                            null_val=0)
                mape = metrics.masked_mape_np(preds=y_pred,
                                              labels=y_truth,
                                              null_val=0)
                rmse = metrics.masked_rmse_np(preds=y_pred,
                                              labels=y_truth,
                                              null_val=0)
                self._logger.info(
                    "Horizon {:02d}, MSE: {:.2f}, MAE: {:.2f}, RMSE: {:.2f}, MAPE: {:.4f}"
                    .format(horizon_i + 1, mse, mae, rmse, mape))
                metrics_summary[i, horizon_i * n_metrics + 0] = mse
                metrics_summary[i, horizon_i * n_metrics + 1] = mae
                metrics_summary[i, horizon_i * n_metrics + 2] = rmse
                metrics_summary[i, horizon_i * n_metrics + 3] = mape

            tm_pred = scaler.inverse_transform(tm_pred)
            g_truth = scaler.inverse_transform(
                self._data['test_data_norm'][self._seq_len:-self._horizon])
            er = metrics.error_ratio(y_pred=tm_pred,
                                     y_true=g_truth,
                                     measured_matrix=m_indicator)
            metrics_summary[i, -1] = er

            self._save_results(g_truth=g_truth,
                               pred_tm=tm_pred,
                               m_indicator=m_indicator,
                               tag=str(i))

            print('ER: {}'.format(er))

        for horizon_i in range(self._horizon):
            results_summary['mse_{}'.format(
                horizon_i)] = metrics_summary[:, horizon_i * n_metrics + 0]
            results_summary['mae_{}'.format(
                horizon_i)] = metrics_summary[:, horizon_i * n_metrics + 1]
            results_summary['rmse_{}'.format(
                horizon_i)] = metrics_summary[:, horizon_i * n_metrics + 2]
            results_summary['mape_{}'.format(
                horizon_i)] = metrics_summary[:, horizon_i * n_metrics + 3]

        results_summary['er'] = metrics_summary[:, -1]
        results_summary.to_csv(self._log_dir + 'results_summary.csv',
                               index=False)
Example #7
    def _test(self, sess, **kwargs):

        global_step = sess.run(tf.train.get_or_create_global_step())

        results_summary = pd.DataFrame(index=range(self._run_times))
        results_summary['No.'] = range(self._run_times)

        n_metrics = 4
        # Metrics: MSE, MAE, RMSE, MAPE, ER
        metrics_summary = np.zeros(shape=(self._run_times, self._horizon * n_metrics + 1))

        for i in range(self._run_times):
            self._logger.info('|--- Run time: {}'.format(i))
            # y_test = self._prepare_test_set()

            test_results = self._run_tm_prediction(sess, model=self._test_model)

            # y_preds:  a list of (batch_size, horizon, num_nodes, output_dim)
            test_loss, y_preds = test_results['loss'], test_results['y_preds']
            utils.add_simple_summary(self._writer, ['loss/test_loss'], [test_loss], global_step=global_step)

            y_preds = np.concatenate(y_preds, axis=0)

            y_truths = test_results['y_truths']
            y_truths = np.concatenate(y_truths, axis=0)
            scaler = self._data['scaler']
            predictions = []

            for horizon_i in range(self._horizon):
                y_truth = scaler.inverse_transform(y_truths[:, horizon_i, :, 0])

                y_pred = scaler.inverse_transform(y_preds[:, horizon_i, :, 0])
                predictions.append(y_pred)

                mse = metrics.masked_mse_np(preds=y_pred, labels=y_truth, null_val=0)
                mae = metrics.masked_mae_np(preds=y_pred, labels=y_truth, null_val=0)
                mape = metrics.masked_mape_np(preds=y_pred, labels=y_truth, null_val=0)
                rmse = metrics.masked_rmse_np(preds=y_pred, labels=y_truth, null_val=0)
                self._logger.info(
                    "Horizon {:02d}, MSE: {:.2f}, MAE: {:.2f}, RMSE: {:.2f}, MAPE: {:.4f}".format(
                        horizon_i + 1, mse, mae, rmse, mape
                    )
                )
                metrics_summary[i, horizon_i * n_metrics + 0] = mse
                metrics_summary[i, horizon_i * n_metrics + 1] = mae
                metrics_summary[i, horizon_i * n_metrics + 2] = rmse
                metrics_summary[i, horizon_i * n_metrics + 3] = mape

            tm_pred = scaler.inverse_transform(test_results['tm_pred'])
            g_truth = scaler.inverse_transform(self._data['test_data_norm'][self._seq_len:-self._horizon])
            m_indicator = test_results['m_indicator']
            er = error_ratio(y_pred=tm_pred,
                             y_true=g_truth,
                             measured_matrix=m_indicator)
            metrics_summary[i, -1] = er

            self._save_results(g_truth=g_truth, pred_tm=tm_pred, m_indicator=m_indicator, tag=str(i))

            print('ER: {}'.format(er))

        for horizon_i in range(self._horizon):
            results_summary['mse_{}'.format(horizon_i)] = metrics_summary[:, horizon_i * n_metrics + 0]
            results_summary['mae_{}'.format(horizon_i)] = metrics_summary[:, horizon_i * n_metrics + 1]
            results_summary['rmse_{}'.format(horizon_i)] = metrics_summary[:, horizon_i * n_metrics + 2]
            results_summary['mape_{}'.format(horizon_i)] = metrics_summary[:, horizon_i * n_metrics + 3]

        results_summary['er'] = metrics_summary[:, -1]
        results_summary.to_csv(self._log_dir + 'results_summary.csv', index=False)

        return