# Imports these loss methods rely on:
from typing import Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor


def loss(self, anchor_objectnesses: Tensor, anchor_transformers: Tensor,
         gt_anchor_objectnesses: Tensor, gt_anchor_transformers: Tensor,
         batch_size: int, batch_indices: Tensor) -> Tuple[Tensor, Tensor]:
    cross_entropies = torch.empty(batch_size, dtype=torch.float,
                                  device=anchor_objectnesses.device)
    # Use a smooth L1 loss so that outliers cannot dominate the loss function.
    # Details: https://zhuanlan.zhihu.com/p/48426076
    smooth_l1_losses = torch.empty(batch_size, dtype=torch.float,
                                   device=anchor_transformers.device)

    for batch_index in range(batch_size):
        # Select the anchors belonging to the current image.
        selected_indices = (batch_indices == batch_index).nonzero().view(-1)

        cross_entropy = F.cross_entropy(
            input=anchor_objectnesses[selected_indices],
            target=gt_anchor_objectnesses[selected_indices])

        # Only foreground anchors (non-zero objectness labels) contribute
        # to the box-regression loss.
        fg_indices = gt_anchor_objectnesses[selected_indices].nonzero().view(-1)
        smooth_l1_loss = beta_smooth_l1_loss(
            input=anchor_transformers[selected_indices][fg_indices],
            target=gt_anchor_transformers[selected_indices][fg_indices],
            beta=self._anchor_smooth_l1_loss_beta)

        cross_entropies[batch_index] = cross_entropy
        smooth_l1_losses[batch_index] = smooth_l1_loss

    return cross_entropies, smooth_l1_losses
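# The helper beta_smooth_l1_loss is called throughout this section but not
# defined here. A minimal sketch of the standard beta-parameterized smooth L1
# (Huber-style) loss, averaged over elements; the project's actual helper may
# differ in its reduction or edge-case handling:
def beta_smooth_l1_loss(input: Tensor, target: Tensor, beta: float) -> Tensor:
    diff = torch.abs(input - target)
    # Quadratic below beta (gentle gradients near zero), linear above beta
    # (so large residuals from outliers cannot dominate the gradient).
    loss = torch.where(diff < beta, 0.5 * diff ** 2 / beta, diff - 0.5 * beta)
    return loss.mean()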
def loss(self, proposal_classes: Tensor, proposal_transformers: Tensor,
         gt_proposal_classes: Tensor, gt_proposal_transformers: Tensor,
         batch_size: int, batch_indices: Tensor) -> Tuple[Tensor, Tensor]:
    # For each proposal, keep only the 4 box deltas predicted for its
    # ground-truth class.
    proposal_transformers = proposal_transformers.view(-1, self.num_classes, 4)[
        torch.arange(end=len(proposal_transformers), dtype=torch.long),
        gt_proposal_classes]

    transformer_normalize_mean = self._transformer_normalize_mean.to(
        device=gt_proposal_transformers.device)
    transformer_normalize_std = self._transformer_normalize_std.to(
        device=gt_proposal_transformers.device)
    # Normalize the regression targets (dividing by a small std scales them
    # up) to make the regressor easier to learn.
    gt_proposal_transformers = (gt_proposal_transformers -
                                transformer_normalize_mean) / transformer_normalize_std

    cross_entropies = torch.empty(batch_size, dtype=torch.float,
                                  device=proposal_classes.device)
    smooth_l1_losses = torch.empty(batch_size, dtype=torch.float,
                                   device=proposal_transformers.device)

    for batch_index in range(batch_size):
        selected_indices = (batch_indices == batch_index).nonzero().view(-1)

        cross_entropy = F.cross_entropy(
            input=proposal_classes[selected_indices],
            target=gt_proposal_classes[selected_indices])

        # Only foreground proposals (non-background classes) contribute
        # to the box-regression loss.
        fg_indices = gt_proposal_classes[selected_indices].nonzero().view(-1)
        smooth_l1_loss = beta_smooth_l1_loss(
            input=proposal_transformers[selected_indices][fg_indices],
            target=gt_proposal_transformers[selected_indices][fg_indices],
            beta=self._proposal_smooth_l1_loss_beta)

        cross_entropies[batch_index] = cross_entropy
        smooth_l1_losses[batch_index] = smooth_l1_loss

    return cross_entropies, smooth_l1_losses
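# A self-contained illustration (with made-up shapes) of the per-class gather
# used above: the head predicts num_classes sets of 4 box deltas per proposal,
# and advanced indexing with (arange, gt_class) keeps only the deltas that
# belong to each proposal's ground-truth class.
def _per_class_gather_demo() -> None:
    num_proposals, num_classes = 3, 5
    transformers = torch.randn(num_proposals, num_classes * 4)
    gt_classes = torch.tensor([2, 0, 4])
    selected = transformers.view(-1, num_classes, 4)[
        torch.arange(num_proposals, dtype=torch.long), gt_classes]
    # One (dy, dx, dh, dw)-style delta vector remains per proposal.
    assert selected.shape == (num_proposals, 4)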
def loss(self, anchor_objectnesses: Tensor, anchor_transformers: Tensor,
         gt_anchor_objectnesses: Tensor, gt_anchor_transformers: Tensor,
         batch_size: int, batch_indices: Tensor) -> Tuple[Tensor, Tensor]:
    cross_entropies = torch.empty(batch_size, dtype=torch.float,
                                  device=anchor_objectnesses.device)
    smooth_l1_losses = torch.empty(batch_size, dtype=torch.float,
                                   device=anchor_transformers.device)

    for batch_index in range(batch_size):
        # Select the anchors belonging to the current image.
        selected_indices = (batch_indices == batch_index).nonzero().view(-1)

        cross_entropy = F.cross_entropy(
            input=anchor_objectnesses[selected_indices],
            target=gt_anchor_objectnesses[selected_indices])

        fg_indices = gt_anchor_objectnesses[selected_indices].nonzero().view(-1)
        smooth_l1_loss = beta_smooth_l1_loss(
            input=anchor_transformers[selected_indices][fg_indices],
            target=gt_anchor_transformers[selected_indices][fg_indices],
            beta=self._anchor_smooth_l1_loss_beta)

        cross_entropies[batch_index] = cross_entropy
        smooth_l1_losses[batch_index] = smooth_l1_loss

    return cross_entropies, smooth_l1_losses
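# Each of these loss methods returns one loss value per image rather than a
# single scalar. A hedged sketch (not code from this repository) of how a
# training loop might reduce them: average each component over the batch and
# sum the components before backpropagation.
def _reduce_losses_demo() -> None:
    cross_entropies = torch.tensor([0.7, 0.4])   # per-image classification losses
    smooth_l1_losses = torch.tensor([0.2, 0.1])  # per-image regression losses
    total_loss = cross_entropies.mean() + smooth_l1_losses.mean()
    assert total_loss.item() > 0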
def loss(self, proposal_vertices: Tensor, proposal_classes: Tensor,
         proposal_transformers: Tensor, gt_proposal_classes: Tensor,
         gt_proposal_transformers: Tensor, gt_vertices: Tensor,
         batch_size: int, batch_indices: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
    transformer_normalize_mean = self._transformer_normalize_mean.to(
        device=gt_proposal_transformers.device)
    transformer_normalize_std = self._transformer_normalize_std.to(
        device=gt_proposal_transformers.device)
    # Normalize the regression targets to make the regressor easier to learn.
    gt_proposal_transformers = (gt_proposal_transformers -
                                transformer_normalize_mean) / transformer_normalize_std

    cross_entropies = torch.empty(batch_size, dtype=torch.float,
                                  device=proposal_classes.device)
    smooth_l1_losses = torch.empty(batch_size, dtype=torch.float,
                                   device=proposal_transformers.device)
    vertex_losses = torch.empty(batch_size, dtype=torch.float,
                                device=proposal_vertices.device)
    bceloss = nn.BCELoss()
    sigmoid = nn.Sigmoid()

    for batch_index in range(batch_size):
        selected_indices = (batch_indices == batch_index).nonzero().view(-1)

        # Binary classification: one logit per proposal, squashed through a
        # sigmoid and compared against the 0/1 ground-truth labels.
        cross_entropy = bceloss(
            sigmoid(proposal_classes[selected_indices]).squeeze(1),
            gt_proposal_classes[selected_indices].float())

        # Only foreground proposals contribute to the regression losses.
        fg_indices = gt_proposal_classes[selected_indices].nonzero().view(-1)

        corner_l1_loss = beta_smooth_l1_loss(
            input=proposal_transformers[selected_indices][fg_indices],
            target=gt_proposal_transformers[selected_indices][fg_indices],
            beta=self._proposal_smooth_l1_loss_beta)

        # Vertex targets are flattened to 16 values per proposal (likely
        # eight (x, y) vertex pairs).
        vertex_loss = beta_smooth_l1_loss(
            input=proposal_vertices[selected_indices][fg_indices],
            target=gt_vertices[selected_indices][fg_indices].view(-1, 16),
            beta=self._proposal_smooth_l1_loss_beta)

        cross_entropies[batch_index] = cross_entropy
        smooth_l1_losses[batch_index] = corner_l1_loss
        vertex_losses[batch_index] = vertex_loss

    return cross_entropies, smooth_l1_losses, vertex_losses
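# Note: nn.Sigmoid followed by nn.BCELoss, as used above, is numerically less
# stable than nn.BCEWithLogitsLoss, which fuses both steps via the
# log-sum-exp trick. A sketch of the drop-in equivalent (assuming, as above,
# one logit per proposal):
def _stable_bce_demo() -> None:
    logits = torch.randn(4, 1)               # stand-in for proposal_classes
    labels = torch.tensor([1., 0., 1., 0.])  # stand-in for gt_proposal_classes
    stable = nn.BCEWithLogitsLoss()(logits.squeeze(1), labels)
    unstable = nn.BCELoss()(torch.sigmoid(logits).squeeze(1), labels)
    # Both compute the same binary cross entropy for well-behaved logits.
    assert torch.allclose(stable, unstable, atol=1e-6)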