def collect(self, batch):
    """Collect evaluation inputs for one batch.

    Args:
        batch (dict): must contain two keys:
            batch['y_true']: (batch_size, 1) tensor
            batch['y_pred']: (batch_size, 1) tensor

    Raises:
        TypeError: if ``batch`` is not a dict.
        ValueError: if y_true and y_pred shapes differ.
    """
    if not isinstance(batch, dict):
        raise TypeError('evaluator.collect input is not a dict of user')
    y_true = batch['y_true']  # tensor
    y_pred = batch['y_pred']  # tensor
    if y_true.shape != y_pred.shape:
        raise ValueError(
            "batch['y_true'].shape is not equal to batch['y_pred'].shape")
    # Metric name -> (loss function, extra positional args).
    # The masked_* variants pass 0 as the null value, i.e. zeros in the
    # ground truth are treated as missing and excluded from the metric.
    metric_funcs = {
        'masked_MAE': (loss.masked_mae_torch, (0,)),
        'masked_MSE': (loss.masked_mse_torch, (0,)),
        'masked_RMSE': (loss.masked_rmse_torch, (0,)),
        'masked_MAPE': (loss.masked_mape_torch, (0,)),
        'MAE': (loss.masked_mae_torch, ()),
        'MSE': (loss.masked_mse_torch, ()),
        'RMSE': (loss.masked_rmse_torch, ()),
        'MAPE': (loss.masked_mape_torch, ()),
        'R2': (loss.r2_score_torch, ()),
        'EVAR': (loss.explained_variance_score_torch, ()),
    }
    for metric in self.metrics:
        if metric not in self.intermediate_result:
            self.intermediate_result[metric] = []
    for metric in self.metrics:
        # Unknown metric names are silently skipped, matching the
        # original if/elif chain's behavior.
        if metric in metric_funcs:
            fn, extra = metric_funcs[metric]
            self.intermediate_result[metric].append(
                fn(y_pred, y_true, *extra).item())
def calculate_loss(self, batch):
    """Compute the training loss (masked MSE) for one batch.

    Args:
        batch (dict): batch data; must contain key 'y' (ground truth).

    Returns:
        torch.Tensor: scalar training loss.
    """
    ground_truth = batch['y']
    prediction = self.predict(batch)
    # Undo normalization on the output channels before scoring.
    ground_truth = self._scaler.inverse_transform(
        ground_truth[..., :self.output_dim])
    prediction = self._scaler.inverse_transform(
        prediction[..., :self.output_dim])
    return loss.masked_mse_torch(prediction, ground_truth)
def calculate_loss(self, batch):
    """Compute the training loss for one batch.

    Uses masked MSE with null_val=0: zero entries in the ground truth
    are treated as missing values and excluded from the loss.

    Args:
        batch (dict): batch data; must contain key 'y' (ground truth).

    Returns:
        torch.Tensor: scalar training loss.
    """
    target = batch['y']
    output = self.predict(batch)
    # De-normalize the output channels before computing the loss.
    target = self._scaler.inverse_transform(target[..., :self.output_dim])
    output = self._scaler.inverse_transform(output[..., :self.output_dim])
    return loss.masked_mse_torch(output, target, 0)
def calculate_loss(self, batch):
    """Compute the training loss (masked MSE) for one batch.

    Args:
        batch (dict): batch data; must contain key 'y' (ground truth).

    Returns:
        torch.Tensor: scalar training loss.
    """
    labels = batch['y']
    outputs = self.predict(batch)
    # Reverse the normalization on the output channels before scoring.
    labels = self._scaler.inverse_transform(labels[..., :self.output_dim])
    outputs = self._scaler.inverse_transform(outputs[..., :self.output_dim])
    return loss.masked_mse_torch(outputs, labels)
def calculate_loss(self, batch):
    """Compute the masked-node MSE loss for one batch.

    Args:
        batch (dict): needs keys 'node_features', 'node_labels', 'mask'.

    Returns:
        torch.Tensor: MSE computed only over nodes selected by 'mask'.
    """
    node_mask = batch['mask']
    predictions = self.predict(batch)
    labels = batch['node_labels']
    # Score only the masked subset of nodes.
    return loss.masked_mse_torch(predictions[node_mask], labels[node_mask])
def calculate_loss(self, batch):
    """Compute the masked-node MSE loss for one batch.

    Args:
        batch (dict): needs keys 'node_features', 'node_labels', 'mask'.

    Returns:
        torch.Tensor: MSE over de-normalized values of the masked nodes.
    """
    labels = batch['node_labels']        # N, feature_dim
    predictions = self.predict(batch)    # N, feature_dim
    # Undo normalization before computing the loss.
    labels = self._scaler.inverse_transform(labels)
    predictions = self._scaler.inverse_transform(predictions)
    node_mask = batch['mask']
    return loss.masked_mse_torch(predictions[node_mask], labels[node_mask])
def calculate_loss(self, batch):
    """Weighted sum of MSE losses over the OD matrix, inflow and outflow.

    Args:
        batch (dict): must contain key 'y' with shape (B, TO, N, N, 1).

    Returns:
        torch.Tensor: loss_p0 * od_loss + loss_p1 * in_loss
        + loss_p2 * out_loss.
    """
    od_true = batch['y']  # (B, TO, N, N, 1)
    # Aggregate one OD axis to get per-node totals; transposing the two
    # node axes before summing yields the complementary direction.
    in_true = torch.sum(od_true, dim=-2, keepdim=True)  # (B, TO, N, 1)
    out_true = torch.sum(od_true.permute(0, 1, 3, 2, 4),
                         dim=-2, keepdim=True)  # (B, TO, N, 1)
    od_pred, in_pred, out_pred = self.predict(batch)

    def denorm(tensor):
        # Undo normalization on the output channels only.
        return self._scaler.inverse_transform(tensor[..., :self.output_dim])

    od_true = denorm(od_true)
    in_true = denorm(in_true)
    out_true = denorm(out_true)
    od_pred = denorm(od_pred)
    in_pred = denorm(in_pred)
    out_pred = denorm(out_pred)
    return (self.loss_p0 * loss.masked_mse_torch(od_pred, od_true)
            + self.loss_p1 * loss.masked_mse_torch(in_pred, in_true)
            + self.loss_p2 * loss.masked_mse_torch(out_pred, out_true))
def calculate_loss(self, batch):
    """Return the training loss for one batch of data.

    This defines the loss function used during training. Steps:
    take the ground truth and the model prediction, undo the
    normalization on both (required), then score them with a loss
    function from libcity/model/loss.py (MSE here); a custom loss
    can be substituted if the reference implementation needs one.

    Args:
        batch: dict-like batch object; values are fetched by key.

    Returns:
        torch.Tensor: the training loss.
    """
    ground_truth = batch['y']
    prediction = self.predict(batch)
    # Inverse-transform the normalized values back to original scale.
    ground_truth = self._scaler.inverse_transform(
        ground_truth[..., :self.output_dim])
    prediction = self._scaler.inverse_transform(
        prediction[..., :self.output_dim])
    return loss.masked_mse_torch(prediction, ground_truth)
def calculate_loss(self, batch):
    """Compute the training/evaluation loss for one batch.

    In 'quick' train mode while training, only the first output step
    (t+1) is scored via forward(); in every other case ('quick' but
    not training, or 'full' mode) all output steps are scored via
    predict().

    Args:
        batch (dict): must contain key 'y'.

    Returns:
        torch.Tensor: masked MSE between de-normalized prediction
        and ground truth.
    """
    if self.train_mode.lower() == 'quick' and self.training:
        # Loss on the t+1 step only:
        # y_true (batch_size, 1, num_nodes, feature_dim)
        # y_predicted (batch_size, 1, num_nodes, output_dim)
        y_true = batch['y'][:, 0:1, :, :]
        y_predicted = self.forward(batch)
    else:
        # Loss over all output steps:
        # y_true (batch_size, output_length, num_nodes, feature_dim)
        # y_predicted (batch_size, output_length, num_nodes, output_dim)
        y_true = batch['y']
        y_predicted = self.predict(batch)
    y_true = self._scaler.inverse_transform(y_true[..., :self.output_dim])
    y_predicted = self._scaler.inverse_transform(
        y_predicted[..., :self.output_dim])
    return loss.masked_mse_torch(y_predicted, y_true)
def collect(self, batch):
    """Collect evaluation inputs for one batch.

    Depending on ``self.mode``, records each metric either averaged
    over the first i timeslots ('average') or at the i-th timeslot
    alone ('single'), under the key ``'<metric>@<i>'``.

    Args:
        batch (dict): must contain two keys:
            batch['y_true']: (num_samples/batch_size, timeslots, ..., feature_dim)
            batch['y_pred']: (num_samples/batch_size, timeslots, ..., feature_dim)

    Raises:
        TypeError: if ``batch`` is not a dict.
        ValueError: if shapes differ or ``self.mode`` is invalid.
    """
    if not isinstance(batch, dict):
        raise TypeError('evaluator.collect input is not a dict of user')
    y_true = batch['y_true']  # tensor
    y_pred = batch['y_pred']  # tensor
    if y_true.shape != y_pred.shape:
        raise ValueError(
            "batch['y_true'].shape is not equal to batch['y_pred'].shape")
    self.len_timeslots = y_true.shape[1]
    # Metric name -> (loss function, extra positional args).
    # The masked_* variants pass 0 as the null value, i.e. zeros in the
    # ground truth are treated as missing and excluded from the metric.
    metric_funcs = {
        'masked_MAE': (loss.masked_mae_torch, (0,)),
        'masked_MSE': (loss.masked_mse_torch, (0,)),
        'masked_RMSE': (loss.masked_rmse_torch, (0,)),
        'masked_MAPE': (loss.masked_mape_torch, (0,)),
        'MAE': (loss.masked_mae_torch, ()),
        'MSE': (loss.masked_mse_torch, ()),
        'RMSE': (loss.masked_rmse_torch, ()),
        'MAPE': (loss.masked_mape_torch, ()),
        'R2': (loss.r2_score_torch, ()),
        'EVAR': (loss.explained_variance_score_torch, ()),
    }
    for i in range(1, self.len_timeslots + 1):
        for metric in self.metrics:
            if metric + '@' + str(i) not in self.intermediate_result:
                self.intermediate_result[metric + '@' + str(i)] = []
    if self.mode.lower() == 'average':
        # Average loss over the first i timeslots.
        for i in range(1, self.len_timeslots + 1):
            for metric in self.metrics:
                # Unknown metric names are silently skipped, matching
                # the original if/elif chain's behavior.
                if metric in metric_funcs:
                    fn, extra = metric_funcs[metric]
                    self.intermediate_result[metric + '@' + str(i)].append(
                        fn(y_pred[:, :i], y_true[:, :i], *extra).item())
    elif self.mode.lower() == 'single':
        # Loss at the i-th timeslot only.
        for i in range(1, self.len_timeslots + 1):
            for metric in self.metrics:
                if metric in metric_funcs:
                    fn, extra = metric_funcs[metric]
                    self.intermediate_result[metric + '@' + str(i)].append(
                        fn(y_pred[:, i - 1], y_true[:, i - 1],
                           *extra).item())
    else:
        raise ValueError(
            'Error parameter evaluator_mode={}, please '
            'set `single` or `average`.'.format(self.mode))
def calculate_loss(self, batch):
    """Compute the training loss for one batch.

    Uses masked MSE with null_val=0.0, so zero-valued ground-truth
    entries are treated as missing and excluded from the loss.

    Args:
        batch (dict): must contain key 'y' with shape
            (batch_size, output_length, num_nodes, feature_dim).

    Returns:
        torch.Tensor: scalar training loss.
    """
    # prediction: (batch_size, output_length, num_nodes, output_dim)
    prediction = self.predict(batch)
    ground_truth = batch['y']
    # De-normalize the output channels before scoring.
    ground_truth = self._scaler.inverse_transform(
        ground_truth[..., :self.output_dim])
    prediction = self._scaler.inverse_transform(
        prediction[..., :self.output_dim])
    return loss.masked_mse_torch(prediction, ground_truth, 0.0)