Beispiel #1
0
 def _compute_loss(self, y_true, y_predicted):
     """Return the masked MAE between prediction and ground truth.

     Both tensors are mapped back to the original data scale with the
     fitted scaler before the loss is computed, so the reported loss is
     in real units rather than normalized ones.

     :param y_true: ground-truth tensor (normalized scale).
     :param y_predicted: model output tensor (normalized scale).
     :return: scalar masked-MAE loss tensor.
     """
     y_true = self.standard_scaler.inverse_transform(y_true)
     y_predicted = self.standard_scaler.inverse_transform(y_predicted)
     return masked_mae_loss(y_predicted, y_true)
Beispiel #2
0
 def _compute_loss(self, y_true, y_predicted):
     """Compute the loss selected by ``self._train_kwargs['loss']``.

     Supported loss types are 'mae' (default), 'rmse', 'mse' and 'mixed'
     (a combined MAE/MSE loss). Inputs are inverse-transformed to the
     original data scale before the loss is evaluated.

     :param y_true: ground-truth tensor (normalized scale).
     :param y_predicted: model output tensor (normalized scale).
     :return: scalar loss tensor.
     :raises NotImplementedError: if an unknown loss type is configured.
     """
     y_true = self.standard_scaler.inverse_transform(y_true)
     y_predicted = self.standard_scaler.inverse_transform(y_predicted)
     loss_type = self._train_kwargs.get('loss', 'mae')
     # Dispatch table instead of an if/elif chain; easy to extend.
     loss_fns = {
         'mae': masked_mae_loss,
         'rmse': masked_rmse_loss,
         'mse': masked_mse_loss,
         'mixed': mixed_mae_mse_loss,
     }
     loss_fn = loss_fns.get(loss_type)
     if loss_fn is None:
         raise NotImplementedError(
             'unsupported loss type: {!r}'.format(loss_type))
     return loss_fn(y_predicted, y_true)
Beispiel #3
0
 def _compute_loss(self, y_true, y_predicted):
     """Masked MAE between prediction and target, in the original scale."""
     scaler = self.standard_scaler
     predicted = scaler.inverse_transform(y_predicted)
     target = scaler.inverse_transform(y_true)
     return masked_mae_loss(predicted, target)
Beispiel #4
0
    def evaluate(self,label, dataset='val', batches_seen=0, gumbel_soft=True):
        """Evaluate the model on a data split and log masked metrics.

        :param label: evaluation mode. 'all' computes only the prediction
            loss and also collects MAPE/RMSE; any other value additionally
            adds a BCE graph-regularization term against ``self.adj_mx``.
        :param dataset: split to evaluate ('val' or 'test'); 'test' also
            logs per-horizon metrics.
        :param batches_seen: global step used for the TensorBoard scalar.
        :param gumbel_soft: forwarded to the model's Gumbel sampling.
        :return: mean loss, or (mean loss, mean MAPE, mean RMSE) when
            ``label == 'all'``.
        """
        with torch.no_grad():
            # eval() disables dropout/batch-norm updates for evaluation.
            self.GTS_model = self.GTS_model.eval()

            val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
            losses = []
            mapes = []
            rmses = []
            # Per-horizon accumulators: mae/mape/rmse at 3, 6 and 12 steps
            # (reported as 15/30/60 minutes below).
            l_3 = []
            m_3 = []
            r_3 = []
            l_6 = []
            m_6 = []
            r_6 = []
            l_12 = []
            m_12 = []
            r_12 = []
            temp = self.temperature

            for batch_idx, (x, y) in enumerate(val_iterator):
                x, y = self._prepare_data(x, y)

                # mid_output holds the model's graph logits; output is the
                # forecast.
                output, mid_output = self.GTS_model(label, x, self._train_feas, temp, gumbel_soft)

                if label == 'all':  # or label == 'predictor':
                    loss = self._compute_loss(y, output)
                    y_true = self.standard_scaler.inverse_transform(y)
                    y_pred = self.standard_scaler.inverse_transform(output)
                    mapes.append(masked_mape_loss(y_pred, y_true).item())
                    rmses.append(masked_rmse_loss(y_pred, y_true).item())
                    losses.append(loss.item())

                    # NOTE(review): [:3] averages over the first 3 elements of
                    # dim 0 — presumably the first 3 horizon steps; a sibling
                    # implementation in this file slices a single step
                    # ([2:3]) instead. Confirm the intended convention and
                    # that dim 0 is the horizon axis.
                    l_3.append(masked_mae_loss(y_pred[:3], y_true[:3]).item())
                    m_3.append(masked_mape_loss(y_pred[:3], y_true[:3]).item())
                    r_3.append(masked_rmse_loss(y_pred[:3], y_true[:3]).item())
                    l_6.append(masked_mae_loss(y_pred[:6], y_true[:6]).item())
                    m_6.append(masked_mape_loss(y_pred[:6], y_true[:6]).item())
                    r_6.append(masked_rmse_loss(y_pred[:6], y_true[:6]).item())
                    l_12.append(masked_mae_loss(y_pred[:12], y_true[:12]).item())
                    m_12.append(masked_mape_loss(y_pred[:12], y_true[:12]).item())
                    r_12.append(masked_rmse_loss(y_pred[:12], y_true[:12]).item())

                else:
                    # Prediction loss plus BCE between the (sigmoided) graph
                    # logits and the ground-truth adjacency matrix.
                    loss_1 = self._compute_loss(y, output)
                    pred = torch.sigmoid(mid_output.view(mid_output.shape[0] * mid_output.shape[1]))
                    true_label = self.adj_mx.view(mid_output.shape[0] * mid_output.shape[1]).to(device)
                    compute_loss = torch.nn.BCELoss()
                    loss_g = compute_loss(pred, true_label)
                    loss = loss_1 + loss_g
                    losses.append((loss_1.item()+loss_g.item()))

                    y_true = self.standard_scaler.inverse_transform(y)
                    y_pred = self.standard_scaler.inverse_transform(output)

                    # Same horizon bookkeeping as the 'all' branch (see the
                    # slicing note above); mapes/rmses are NOT collected here.
                    l_3.append(masked_mae_loss(y_pred[:3], y_true[:3]).item())
                    m_3.append(masked_mape_loss(y_pred[:3], y_true[:3]).item())
                    r_3.append(masked_rmse_loss(y_pred[:3], y_true[:3]).item())

                    l_6.append(masked_mae_loss(y_pred[:6], y_true[:6]).item())
                    m_6.append(masked_mape_loss(y_pred[:6], y_true[:6]).item())
                    r_6.append(masked_rmse_loss(y_pred[:6], y_true[:6]).item())

                    l_12.append(masked_mae_loss(y_pred[:12], y_true[:12]).item())
                    m_12.append(masked_mape_loss(y_pred[:12], y_true[:12]).item())
                    r_12.append(masked_rmse_loss(y_pred[:12], y_true[:12]).item())

            mean_loss = np.mean(losses)
            if dataset == 'test':
                message = 'Horizon 15mins: mae: {:.4f}, mape: {:.4f}, rmse: {:.4f}'.format(np.mean(l_3), np.mean(m_3),
                                                                                           np.mean(r_3))
                self._logger.info(message)
                message = 'Horizon 30mins: mae: {:.4f}, mape: {:.4f}, rmse: {:.4f}'.format(np.mean(l_6), np.mean(m_6),
                                                                                           np.mean(r_6))
                self._logger.info(message)
                message = 'Horizon 60mins: mae: {:.4f}, mape: {:.4f}, rmse: {:.4f}'.format(np.mean(l_12), np.mean(m_12),
                                                                                           np.mean(r_12))
                self._logger.info(message)

            self._writer.add_scalar('{} loss'.format(dataset), mean_loss, batches_seen)
            if label == 'all':
                mean_mape = np.mean(mapes)
                mean_rmse = np.mean(rmses)
                return mean_loss, mean_mape, mean_rmse
            else:
                return mean_loss
Beispiel #5
0
    def evaluate(self, label, dataset='val', batches_seen=0, gumbel_soft=True):
        """Evaluate the model on a data split and log masked metrics.

        Fixes a NameError in the original: ``mean_mape``/``mean_rmse`` were
        used in the ``dataset == 'test'`` logging block before they were
        assigned (they were only computed at the very end, and only on one
        return path).

        :param label: evaluation mode. 'without_regularization' uses only the
            prediction loss; any other value additionally adds a BCE
            graph-regularization term against ``self.adj_mx``.
        :param dataset: split to evaluate ('val' or 'test'); 'test' also
            logs overall and per-horizon metrics.
        :param batches_seen: global step used for the TensorBoard scalar.
        :param gumbel_soft: forwarded to the model's Gumbel sampling.
        :return: mean loss, or (mean loss, mean MAPE, mean RMSE) when
            ``label == 'without_regularization'``.
        """
        with torch.no_grad():
            # eval() disables dropout/batch-norm updates for evaluation.
            self.GTS_model = self.GTS_model.eval()

            val_iterator = self._data['{}_loader'.format(
                dataset)].get_iterator()
            losses = []
            mapes = []
            rmses = []
            temp = self.temperature

            # Per-horizon accumulators: mae/mape/rmse at steps 3, 6 and 12
            # (reported as 15/30/60 minutes below).
            l_3, m_3, r_3 = [], [], []
            l_6, m_6, r_6 = [], [], []
            l_12, m_12, r_12 = [], [], []

            for batch_idx, (x, y) in enumerate(val_iterator):
                x, y = self._prepare_data(x, y)

                # mid_output holds the model's graph logits; output is the
                # forecast.
                output, mid_output = self.GTS_model(label, x, self._train_feas,
                                                    temp, gumbel_soft)

                loss_1 = self._compute_loss(y, output)
                if label == 'without_regularization':
                    losses.append(loss_1.item())
                else:
                    # Graph-structure regularization: BCE between the
                    # sigmoided graph logits and the true adjacency matrix.
                    pred = torch.sigmoid(
                        mid_output.view(mid_output.shape[0] *
                                        mid_output.shape[1]))
                    true_label = self.adj_mx.view(
                        mid_output.shape[0] * mid_output.shape[1]).to(device)
                    compute_loss = torch.nn.BCELoss()
                    loss_g = compute_loss(pred, true_label)
                    losses.append(loss_1.item() + loss_g.item())

                # Metric bookkeeping is identical in both modes (deduplicated
                # from the original's two copy-pasted branches).
                y_true = self.standard_scaler.inverse_transform(y)
                y_pred = self.standard_scaler.inverse_transform(output)
                mapes.append(masked_mape_loss(y_pred, y_true).item())
                rmses.append(masked_rmse_loss(y_pred, y_true).item())

                # Followed the DCRNN TensorFlow Implementation: evaluate a
                # single horizon step (index h-1) for each horizon h.
                for step, (l_h, m_h, r_h) in ((3, (l_3, m_3, r_3)),
                                              (6, (l_6, m_6, r_6)),
                                              (12, (l_12, m_12, r_12))):
                    sl = slice(step - 1, step)
                    l_h.append(masked_mae_loss(y_pred[sl], y_true[sl]).item())
                    m_h.append(masked_mape_loss(y_pred[sl], y_true[sl]).item())
                    r_h.append(masked_rmse_loss(y_pred[sl], y_true[sl]).item())

            mean_loss = np.mean(losses)
            # Compute these BEFORE the test-logging block below — the original
            # referenced them there while they were still unassigned.
            mean_mape = np.mean(mapes)
            mean_rmse = np.mean(rmses)

            if dataset == 'test':
                # Followed the DCRNN PyTorch Implementation
                message = 'Test: mae: {:.4f}, mape: {:.4f}, rmse: {:.4f}'.format(
                    mean_loss, mean_mape, mean_rmse)
                self._logger.info(message)

                # Followed the DCRNN TensorFlow Implementation
                for mins, (l_h, m_h, r_h) in (('15', (l_3, m_3, r_3)),
                                              ('30', (l_6, m_6, r_6)),
                                              ('60', (l_12, m_12, r_12))):
                    message = 'Horizon {}mins: mae: {:.4f}, mape: {:.4f}, rmse: {:.4f}'.format(
                        mins, np.mean(l_h), np.mean(m_h), np.mean(r_h))
                    self._logger.info(message)

            self._writer.add_scalar('{} loss'.format(dataset), mean_loss,
                                    batches_seen)
            if label == 'without_regularization':
                return mean_loss, mean_mape, mean_rmse
            else:
                return mean_loss