def loss_fn(self, out, annot):
    # Build the target vector from the annotations, then evaluate the four
    # YOLO loss components against the network output.
    tar_vector = Losses.get_tar_vector(annot)
    loss_loc = Losses.get_loc_error(out, tar_vector)
    loss_wh = Losses.get_w_h_error(out, tar_vector)
    loss_conf = Losses.get_confidence_error(out, tar_vector)
    loss_cls = Losses.get_class_error(out, tar_vector)
    return loss_loc, loss_wh, loss_conf, loss_cls
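Note that loss_fn returns the four components separately rather than a single scalar. A training loop would typically sum them before backpropagating; the sketch below shows one way to do that (the train_step name, the model/optimizer wiring, and the unweighted sum are assumptions for illustration, not part of the original code):

def train_step(model, images, annotations, optimizer):
    # Hypothetical wrapper: forward pass, then combine the four loss terms
    # returned by loss_fn into one scalar so autograd can backpropagate.
    out = model(images)
    loss_loc, loss_wh, loss_conf, loss_cls = model.loss_fn(out, annotations)
    total_loss = loss_loc + loss_wh + loss_conf + loss_cls

    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()
    return total_loss.item()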
    for x in range(cell_num):
        some_predictions_class_prob_notexist[0, y, x, :] = torch.ones(30)

unittest.TestCase().assertAlmostEqual(
    0.5 * (noobject_coef * 20 * cell_num * cell_num),
    Losses.get_conditional_class_prob_notexist(
        some_predictions_class_prob_notexist,
        some_targets_class_prob_notexist).cpu().detach().numpy()[0],
    2)  # 0.5 * (noobject_coef * 20 objects * 7 * 7) == 490

target_for_wh = {
    (3, 3): [[0, [torch.Tensor([0.36]).cuda(), torch.Tensor([0.64]).cuda()], [0, 0]]]
}
predictions_wh = torch.zeros((1, cell_num, cell_num, 30))
for y in range(cell_num):
    for x in range(cell_num):
        if y == 3 and x == 3:
            # Only cell (3, 3) carries a prediction; w = 0.04, h = 0.09.
            predictions_wh[0, y, x, :] = torch.Tensor([
                0, 0, 0.04, 0.09, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0
            ])
        else:
            predictions_wh[0, y, x, :] = torch.zeros(30)

# 0.5 * ((0.6 - 0.2)^2 + (0.8 - 0.3)^2) = 0.5 * (0.16 + 0.25) = 0.5 * 0.41 = 0.205
unittest.TestCase().assertAlmostEqual(
    0.205,
    Losses.get_w_h_error(predictions_wh, target_for_wh).cpu().detach().numpy()[0],
    3)
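The expected 0.205 comes from YOLO v1's square-root width/height penalty: the loss compares sqrt(w) and sqrt(h) of prediction and target so errors on small boxes are not dominated by large ones. A minimal standalone check of that arithmetic (the helper name and the flat 0.5 factor are assumptions chosen to match the test above, not the actual Losses.get_w_h_error implementation):

import math

def wh_error_single_cell(pred_w, pred_h, tar_w, tar_h, coef=0.5):
    # Square-root trick from YOLO v1: penalize errors on sqrt(w) and sqrt(h).
    return coef * ((math.sqrt(tar_w) - math.sqrt(pred_w)) ** 2
                   + (math.sqrt(tar_h) - math.sqrt(pred_h)) ** 2)

# sqrt(0.36)=0.6 vs sqrt(0.04)=0.2 and sqrt(0.64)=0.8 vs sqrt(0.09)=0.3,
# so the error is 0.5 * (0.16 + 0.25) = 0.205, matching the assertion above.
assert abs(wh_error_single_cell(0.04, 0.09, 0.36, 0.64) - 0.205) < 1e-9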