def calculate_loss(self, batch, idx=None, batches_seen=None):
     """Compute the masked-MAE loss for one batch, with optional node
     sub-sampling and curriculum learning.

     Args:
         batch (dict): contains 'X' (model input) and 'y' (ground truth).
         idx (sequence of int, optional): node indices to train on; when
             given, only those nodes are sliced out of the batch.
         batches_seen (int, optional): global batch counter driving the
             curriculum-learning schedule during training.

     Returns:
         torch.Tensor: scalar masked-MAE loss (zeros treated as missing).
     """
     if idx is not None:
         idx = torch.tensor(idx).to(self.device)
         # Clone so batch['X'] is not modified in place; otherwise the
         # next idx-based slice would index already-sliced data.
         tx = batch['X'][:, :, idx, :].clone()
         y_true = batch['y'][:, :, idx, :]
         batch_new = {'X': tx}
         y_predicted = self.predict(batch_new, idx)
     else:
         y_true = batch['y']
         y_predicted = self.predict(batch)
     # De-normalize both tensors (output_dim feature slice) before scoring.
     y_true = self._scaler.inverse_transform(y_true[..., :self.output_dim])
     y_predicted = self._scaler.inverse_transform(
         y_predicted[..., :self.output_dim])
     if self.training:
         # Guard against batches_seen=None (its default): the original code
         # raised TypeError on the modulo below in that case.
         if batches_seen is not None \
                 and batches_seen % self.step_size == 0 \
                 and self.task_level < self.output_window:
             self.task_level += 1
             self._logger.info(
                 'Training: task_level increase from {} to {}'.format(
                     self.task_level - 1, self.task_level))
             self._logger.info(
                 'Current batches_seen is {}'.format(batches_seen))
         if self.use_curriculum_learning:
             # Curriculum learning: only the first `task_level` horizon
             # steps contribute to the loss early in training.
             return loss.masked_mae_torch(
                 y_predicted[:, :self.task_level, :, :],
                 y_true[:, :self.task_level, :, :], 0)
         else:
             return loss.masked_mae_torch(y_predicted, y_true, 0)
     else:
         return loss.masked_mae_torch(y_predicted, y_true, 0)
# Example #2
 def calculate_loss(self, batch):
     """Return the MAE between de-normalized prediction and ground truth."""
     truth = batch['y']
     prediction = self.predict(batch)
     # Undo normalization on the output_dim feature slice before scoring.
     truth = self._scaler.inverse_transform(truth[..., :self.output_dim])
     prediction = self._scaler.inverse_transform(
         prediction[..., :self.output_dim])
     return loss.masked_mae_torch(prediction, truth)
# Example #3
 def calculate_loss(self, batch):
     """Masked-MAE loss; zero entries in the target are treated as missing."""
     gt = batch['y']             # ground-truth value
     pred = self.predict(batch)  # prediction results
     # Map both tensors back to the original value range.
     gt = self._scaler.inverse_transform(gt[..., :self.output_dim])
     pred = self._scaler.inverse_transform(pred[..., :self.output_dim])
     # masked_mae_torch with null_val=0 ignores positions where truth == 0
     # (the mask_mae loss function defined in `loss.py`).
     return loss.masked_mae_torch(pred, gt, 0)
# Example #4
 def calculate_loss(self, batch):
     """MAE loss on de-normalized predictions (no null-value masking)."""
     target = batch['y']            # ground-truth value
     output = self.predict(batch)   # prediction results
     target = self._scaler.inverse_transform(target[..., :self.output_dim])
     output = self._scaler.inverse_transform(output[..., :self.output_dim])
     return loss.masked_mae_torch(output, target)
# Example #5
    def calculate_loss(self, batch):
        """Masked-MAE loss; moves the target tensor onto the model device."""
        target = batch['y'].to(self.device)
        predicted = self.predict(batch)
        # De-normalize before computing the error.
        target = self._scaler.inverse_transform(
            target[..., :self.output_dim])
        predicted = self._scaler.inverse_transform(
            predicted[..., :self.output_dim])
        return loss.masked_mae_torch(predicted, target, 0)
 def calculate_loss(self, batch, batches_seen=None):
     """Masked-MAE loss plus an epoch-gated BCE graph-learning regularizer.

     Args:
         batch (dict): contains 'y' (ground truth) and the model input.
         batches_seen (int): global batch counter. NOTE(review): despite the
             None default, this must be an int — `batches_seen // self.num_batches`
             below raises TypeError when it is None; confirm callers always
             pass it during training.

     Returns:
         torch.Tensor: scalar loss (MAE, plus the BCE term during the first
             `self.epoch_use_regularization` epochs).
     """
     y_true = batch['y']
     epoch = batches_seen // self.num_batches
     self._logger.debug(f"EPOCH = {epoch}, bep={batches_seen}")
     # forward() also returns `mid_output` — presumably the learned
     # adjacency logits (same shape as self.adj_mx); verify against forward().
     y_predicted, mid_output = self.forward(batch, batches_seen)
     y_true = self._scaler.inverse_transform(y_true[..., :self.output_dim])
     y_predicted = self._scaler.inverse_transform(y_predicted[..., :self.output_dim])
     # Depending on the training epoch, selectively add a regularization term.
     loss_1 = loss.masked_mae_torch(y_predicted, y_true)
     if epoch < self.epoch_use_regularization:
         # Flatten the logits, squash with sigmoid, and supervise them
         # against the ground-truth adjacency matrix via binary cross-entropy.
         pred = torch.sigmoid(mid_output.view(mid_output.shape[0] * mid_output.shape[1]))
         true_label = self.adj_mx.view(mid_output.shape[0] * mid_output.shape[1]).to(self.device)
         compute_loss = torch.nn.BCELoss()
         loss_g = compute_loss(pred, true_label)
         self._logger.debug(f"loss_g = {loss_g}, loss_1 = {loss_1}")
         loss_t = loss_1 + loss_g
         return loss_t
     else:
         self._logger.debug(f"loss_1 = {loss_1}")
         return loss_1
# Example #7
    def collect(self, batch):
        """Collect one batch of evaluation input and accumulate metrics.

        Replaces the original ~90-line duplicated if/elif dispatch with two
        metric-name -> loss-function tables; behavior is unchanged, including
        silently skipping metric names that match neither table.

        Args:
            batch (dict): evaluation data with two keys:
                batch['y_true']: (num_samples/batch_size, timeslots, ..., feature_dim)
                batch['y_pred']: (num_samples/batch_size, timeslots, ..., feature_dim)

        Raises:
            TypeError: if `batch` is not a dict.
            ValueError: if the two tensors' shapes differ, or `self.mode`
                is neither 'single' nor 'average'.
        """
        if not isinstance(batch, dict):
            raise TypeError('evaluator.collect input is not a dict of user')
        y_true = batch['y_true']  # tensor
        y_pred = batch['y_pred']  # tensor
        if y_true.shape != y_pred.shape:
            raise ValueError("batch['y_true'].shape is not equal to batch['y_pred'].shape")
        self.len_timeslots = y_true.shape[1]
        # Make sure every (metric, timeslot) bucket exists, even for metric
        # names the dispatch below does not recognize (matches original).
        for i in range(1, self.len_timeslots + 1):
            for metric in self.metrics:
                if metric + '@' + str(i) not in self.intermediate_result:
                    self.intermediate_result[metric + '@' + str(i)] = []
        # Metrics that take the extra null_val=0 mask argument.
        masked_fns = {
            'masked_MAE': loss.masked_mae_torch,
            'masked_MSE': loss.masked_mse_torch,
            'masked_RMSE': loss.masked_rmse_torch,
            'masked_MAPE': loss.masked_mape_torch,
        }
        # Metrics called without a mask argument.
        plain_fns = {
            'MAE': loss.masked_mae_torch,
            'MSE': loss.masked_mse_torch,
            'RMSE': loss.masked_rmse_torch,
            'MAPE': loss.masked_mape_torch,
            'R2': loss.r2_score_torch,
            'EVAR': loss.explained_variance_score_torch,
        }
        mode = self.mode.lower()
        if mode not in ('average', 'single'):
            raise ValueError('Error parameter evaluator_mode={}, please set `single` or `average`.'.format(self.mode))
        for i in range(1, self.len_timeslots + 1):
            # 'average': loss over the first i timeslots;
            # 'single': loss at timeslot i only.
            if mode == 'average':
                pred_i, true_i = y_pred[:, :i], y_true[:, :i]
            else:
                pred_i, true_i = y_pred[:, i - 1], y_true[:, i - 1]
            for metric in self.metrics:
                if metric in masked_fns:
                    value = masked_fns[metric](pred_i, true_i, 0)
                elif metric in plain_fns:
                    value = plain_fns[metric](pred_i, true_i)
                else:
                    continue  # unknown metric: silently skipped, as before
                self.intermediate_result[metric + '@' + str(i)].append(value.item())