    def forward_loss(self, out_vars, target_var, mask_var):
        if self.output_strat in ('dsnt', 'fc'):
            total_loss = 0

            # Calculate the loss for each hourglass output, and take the total sum
            for i, out_var in enumerate(out_vars):
                loss = euclidean_loss(out_var, target_var, mask_var)

                reg_loss = self._calculate_reg_loss(
                    target_var, mask_var, self.reg, self.heatmaps_array[i], self.hm_sigma)

                total_loss += loss + self.reg_coeff * reg_loss

            return total_loss
        elif self.output_strat == 'gauss':
            norm_coords = target_var.data.cpu()
            width = out_vars[0].size(-1)
            height = out_vars[0].size(-2)

            # Render the ground truth coordinates as Gaussian target heatmaps
            target_hm = util.encode_heatmaps(norm_coords, width, height, self.hm_sigma)
            target_hm_var = Variable(target_hm.cuda())

            # Calculate and sum up intermediate losses
            loss = sum([nn.functional.mse_loss(hm, target_hm_var) for hm in out_vars])

            return loss

        raise ValueError('invalid output strategy: {}'.format(self.output_strat))
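
The `gauss` branch above delegates the rendering of target heatmaps to `util.encode_heatmaps`, whose implementation is not shown here. Below is a minimal sketch of what such a helper might look like, assuming coordinates normalized to [-1, 1] and a return shape of (batch, joints, height, width); both the normalization convention and the interpretation of `sigma` in normalized units are assumptions, not taken from the snippet.

import torch


def encode_heatmaps(norm_coords, width, height, sigma):
    """Hypothetical sketch: render one 2D Gaussian per joint.

    Assumes norm_coords has shape (batch, joints, 2) with values in [-1, 1],
    and that sigma is expressed in the same normalized units; both are
    assumptions, not confirmed by the snippet above.
    """
    batch_size, n_joints, _ = norm_coords.size()

    # Pixel-centre coordinates in the normalized [-1, 1] space
    xs = torch.linspace(-1 + 1.0 / width, 1 - 1.0 / width, width)
    ys = torch.linspace(-1 + 1.0 / height, 1 - 1.0 / height, height)

    heatmaps = torch.zeros(batch_size, n_joints, height, width)
    for b in range(batch_size):
        for j in range(n_joints):
            cx, cy = norm_coords[b, j, 0], norm_coords[b, j, 1]
            # Squared distances from every pixel centre to the joint location
            dx = (xs - cx).pow(2).view(1, width)
            dy = (ys - cy).pow(2).view(height, 1)
            heatmaps[b, j] = torch.exp(-(dx + dy) / (2 * sigma ** 2))
    return heatmaps
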
Example #2
    def forward_loss(self, out_var, target_var, mask_var):
        if self.output_strat in ('dsnt', 'fc'):
            loss = euclidean_loss(out_var, target_var, mask_var)

            reg_loss = self._calculate_reg_loss(
                target_var, mask_var, self.reg, self.heatmaps, self.hm_sigma)

            return loss + self.reg_coeff * reg_loss
        elif self.output_strat == 'gauss':
            norm_coords = target_var.data.cpu()
            width = out_var.size(-1)
            height = out_var.size(-2)

            # Render the ground truth coordinates as Gaussian target heatmaps
            target_hm = util.encode_heatmaps(norm_coords, width, height, self.hm_sigma)
            target_hm_var = Variable(target_hm.cuda())

            # Pixel-wise MSE between predicted and target heatmaps
            loss = nn.functional.mse_loss(out_var, target_hm_var)
            return loss

        raise ValueError('invalid output strategy: {}'.format(self.output_strat))
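
For context, this is roughly how a loss method like the ones above would be used in a training step. The `model`, `optimizer`, `in_var`, `target_var` and `mask_var` names are placeholders for illustration, not identifiers taken from the snippet.

out_var = model(in_var)                                   # forward pass through the network
loss = model.forward_loss(out_var, target_var, mask_var)  # strategy-dependent loss from above
optimizer.zero_grad()
loss.backward()                                           # backpropagate through coords/heatmaps
optimizer.step()
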
Example #3
    def test_mask(self):
        output = torch.Tensor([
            [[0, 0], [1, 1], [0, 0]],
            [[1, 1], [0, 0], [0, 0]],
        ])

        target = torch.Tensor([
            [[0, 0], [0, 0], [0, 0]],
            [[0, 0], [0, 0], [0, 0]],
        ])

        mask = torch.Tensor([
            [1, 0, 1],
            [0, 1, 1],
        ])

        # Non-zero predictions occur only where the mask is 0, so the masked loss is 0
        expected = torch.Tensor([0])
        actual = euclidean_loss(Variable(output), Variable(target), Variable(mask))

        self.assertEqual(expected, actual.data)
Example #4
    def test_forward_and_backward(self):
        input_tensor = torch.Tensor([
            [[3, 4], [3, 4]],
            [[3, 4], [3, 4]],
        ])

        target = torch.Tensor([
            [[0, 0], [0, 0]],
            [[0, 0], [0, 0]],
        ])

        in_var = Variable(input_tensor, requires_grad=True)

        # Every joint is offset from its target by (3, 4), a Euclidean distance of 5,
        # so the mean distance over the four joints is 5
        expected_loss = torch.Tensor([5])
        actual_loss = euclidean_loss(in_var, Variable(target))
        # Gradient of each distance w.r.t. its coordinates is (3/5, 4/5) = (0.6, 0.8),
        # averaged over the four joints: (0.15, 0.20)
        expected_grad = torch.Tensor([
            [[0.15, 0.20], [0.15, 0.20]],
            [[0.15, 0.20], [0.15, 0.20]],
        ])
        actual_loss.backward()
        actual_grad = in_var.grad.data

        self.assertEqual(expected_loss, actual_loss.data)
        self.assertEqual(expected_grad, actual_grad)
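
The two tests above pin down the behaviour of `euclidean_loss`: without a mask, the loss is the mean per-joint Euclidean distance (four joints each offset by (3, 4) give a mean distance of 5, with gradient (0.6, 0.8) / 4 = (0.15, 0.20) per joint), and with a mask, joints whose mask entry is 0 contribute nothing. A minimal sketch consistent with those tests follows; the real implementation may differ in details such as whether the divisor counts masked-out joints or adds an epsilon before the square root.

import torch


def euclidean_loss(output, target, mask=None):
    """Hypothetical sketch of a mean Euclidean distance loss.

    output, target: (batch, joints, 2) coordinate tensors.
    mask: optional (batch, joints) tensor of 0/1 weights.
    The divisor counts every joint, which the masked test above cannot
    distinguish from dividing by mask.sum().
    """
    # Per-joint Euclidean distance, shape (batch, joints)
    dist = (output - target).pow(2).sum(-1).sqrt()
    if mask is not None:
        # Zero out the contribution of masked joints
        dist = dist * mask
    # Average over all batch * joints entries
    return dist.sum() / dist.numel()
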