Example 1
    def forward(self, input, target, mask=None):

        # Number of channels the Sobel kernels must cover
        repeat_channels = target.shape[1]

        # 3x3 Sobel kernels for horizontal (x) and vertical (y) gradients,
        # shaped (out_channels, in_channels, kH, kW) for F.conv2d
        sobel_x = torch.Tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
        sobel_x = sobel_x.view((1, 1, 3, 3))
        # Variable is deprecated in current PyTorch; moving the kernel with
        # sobel_x.to(input.device) is the modern equivalent of this wrapping
        sobel_x = torch.autograd.Variable(sobel_x.cuda())

        sobel_y = torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
        sobel_y = sobel_y.view((1, 1, 3, 3))
        sobel_y = torch.autograd.Variable(sobel_y.cuda())

        # Tile the kernels across channels so one convolution sums the
        # gradients over all input channels
        if repeat_channels != 1:
            sobel_x = sobel_x.repeat(1, repeat_channels, 1, 1)
            sobel_y = sobel_y.repeat(1, repeat_channels, 1, 1)

        smooth_loss = 0
        grad_loss = 0

        if self.smooth_error:
            # Prediction/target difference in log space
            diff = thLog(input.clamp(min=self.clamp_value)) - thLog(
                target.clamp(min=self.clamp_value))

            # Sobel gradients of the difference (1/8 normalizes the kernel)
            gx_diff = F.conv2d(diff, (1.0 / 8.0) * sobel_x, padding=1)
            gy_diff = F.conv2d(diff, (1.0 / 8.0) * sobel_y, padding=1)

            # Squared gradient magnitude of the error map
            gradients_diff = torch.pow(gx_diff, 2) + torch.pow(gy_diff, 2)

            if mask is None:
                smooth_loss = gradients_diff.sum()
                if self.size_average:
                    smooth_loss = smooth_loss * (1.0 / gradients_diff.numel())
            else:
                # Weight by the validity mask (tiled to 3 channels) and
                # normalize by its sum
                gradients_diff = mul(gradients_diff, mask.repeat(1, 3, 1, 1))
                smooth_loss = gradients_diff.sum()
                if self.size_average:
                    smooth_loss = smooth_loss * (1.0 / mask.sum())

        if self.gradient_loss_on:
            # Compare the squared Sobel gradient magnitudes of prediction and
            # target in log space through a masked Huber loss
            input = thLog(input.clamp(min=self.clamp_value))
            target = thLog(target.clamp(min=self.clamp_value))

            gx_input = F.conv2d(input, (1.0 / 8.0) * sobel_x, padding=1)
            gy_input = F.conv2d(input, (1.0 / 8.0) * sobel_y, padding=1)

            gx_target = F.conv2d(target, (1.0 / 8.0) * sobel_x, padding=1)
            gy_target = F.conv2d(target, (1.0 / 8.0) * sobel_y, padding=1)

            gradients_input = torch.pow(gx_input, 2) + torch.pow(gy_input, 2)
            gradients_target = torch.pow(gx_target, 2) + torch.pow(
                gy_target, 2)

            grad_loss = self.masked_huber_loss(gradients_input,
                                               gradients_target, mask)

        return smooth_loss + grad_loss
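For reference, a minimal standalone sketch of the Sobel-gradient step these examples share; thLog and mul in the snippets are presumably aliases of torch.log and torch.mul, and the tensors below are illustrative only:

import torch
import torch.nn.functional as F

depth = torch.rand(2, 1, 64, 64)  # toy (N, C, H, W) maps

# Sobel kernels shaped (out_channels, in_channels, kH, kW), scaled by 1/8
sobel_x = torch.tensor([[1., 0., -1.], [2., 0., -2.], [1., 0., -1.]])
sobel_y = torch.tensor([[1., 2., 1.], [0., 0., 0.], [-1., -2., -1.]])
sobel_x = sobel_x.view(1, 1, 3, 3) / 8.0
sobel_y = sobel_y.view(1, 1, 3, 3) / 8.0

gx = F.conv2d(depth, sobel_x, padding=1)  # horizontal gradient
gy = F.conv2d(depth, sobel_y, padding=1)  # vertical gradient
grad_sq = gx.pow(2) + gy.pow(2)           # squared gradient magnitude, shape (2, 1, 64, 64)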
Example 2
    def forward(self, normals, depth, boundary):
        repeat_channels = depth.shape[1]

        # Same 3x3 Sobel kernel setup as in Example 1
        sobel_x = torch.Tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]])

        sobel_x = sobel_x.view((1, 1, 3, 3))
        sobel_x = torch.autograd.Variable(sobel_x.cuda())

        sobel_y = torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])

        sobel_y = sobel_y.view((1, 1, 3, 3))
        sobel_y = torch.autograd.Variable(sobel_y.cuda())
        if repeat_channels != 1:
            sobel_x = sobel_x.repeat(1, repeat_channels, 1, 1)
            sobel_y = sobel_y.repeat(1, repeat_channels, 1, 1)

        # Unit-normalized 2D gradient of the depth map
        gx_depth = F.conv2d(depth, (1.0 / 8.0) * sobel_x, padding=1)
        gy_depth = F.conv2d(depth, (1.0 / 8.0) * sobel_y, padding=1)

        g_depth = torch.cat((gx_depth, gy_depth), 1)
        g_depth = F.normalize(g_depth, p=2, dim=1)

        # Unit-normalized x/y components of the predicted normals
        normal2d = normals[:, :2, ...]
        normal2d = F.normalize(normal2d, p=2, dim=1)

        # Per-pixel cosine similarity between depth gradient and normals
        prod = mul(g_depth, normal2d)
        prod = prod.sum(1).unsqueeze(1)

        # Penalize misalignment, weighted by the negative log of the
        # (clamped) boundary map
        prod = (1.0 - prod).clamp(min=0)

        prod = torch.abs(
            mul(prod, (-1.0) * thLog(boundary.clamp(min=self.clamp_value))))

        return prod.mean()
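A small sketch of the per-pixel cosine term this example builds, with made-up inputs (the names below are illustrative, not taken from the original class):

import torch
import torch.nn.functional as F

g_depth = F.normalize(torch.rand(1, 2, 4, 4), p=2, dim=1)   # unit depth gradient (gx, gy)
normal2d = F.normalize(torch.rand(1, 2, 4, 4), p=2, dim=1)  # unit normal components (nx, ny)

cos = (g_depth * normal2d).sum(dim=1, keepdim=True)  # cosine similarity per pixel, in [-1, 1]
penalty = (1.0 - cos).clamp(min=0)                   # zero where gradient and normal agree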
Example 3
    def forward(self, depth, boundary, mask=None):
        repeat_channels = depth.shape[1]

        # Same 3x3 Sobel kernel setup as in Example 1
        sobel_x = torch.Tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]])

        sobel_x = sobel_x.view((1, 1, 3, 3))
        sobel_x = torch.autograd.Variable(sobel_x.cuda())

        sobel_y = torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])

        sobel_y = sobel_y.view((1, 1, 3, 3))
        sobel_y = torch.autograd.Variable(sobel_y.cuda())

        if repeat_channels != 1:
            sobel_x = sobel_x.repeat(1, repeat_channels, 1, 1)
            sobel_y = sobel_y.repeat(1, repeat_channels, 1, 1)

        # Squared Sobel gradient magnitude of the depth map
        gx = F.conv2d(depth, (1.0 / 8.0) * sobel_x, padding=1)
        gy = F.conv2d(depth, (1.0 / 8.0) * sobel_y, padding=1)

        g_depth = torch.pow(gx, 2) + torch.pow(gy, 2)
        # Depth gradients weighted by the log of the (clamped) boundary map
        loss = torch.abs(
            mul(g_depth, thLog(boundary.clamp(min=self.clamp_value))))

        if mask is None:
            return loss.sum() / float(depth.numel())
        else:
            # Average only over valid (masked-in) pixels
            loss = mul(loss, mask)
            return loss.sum() / float(mask.sum())
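The final division is the usual masked-mean pattern of averaging only over valid pixels; a standalone sketch with toy tensors:

import torch

loss_map = torch.rand(2, 1, 8, 8)               # per-pixel loss values
mask = (torch.rand(2, 1, 8, 8) > 0.5).float()   # 1.0 where the pixel is valid

# average over valid pixels only; clamp guards against an empty mask
masked_mean = (loss_map * mask).sum() / mask.sum().clamp(min=1.0)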
Example 4
    def forward(self, input, target, mask):
        # Residual in log space (if enabled) or as a plain difference
        if self.use_log:
            n = thLog(input.clamp(min=self.clamp_val)) - thLog(
                target.clamp(min=self.clamp_val))
        else:
            n = input - target

        # Masked absolute residual
        n = torch.abs(n)
        n = mul(n, mask)

        n = n.squeeze(1)
        # Reverse-Huber (berHu) penalty: linear below the adaptive threshold
        # c, quadratic above it
        c = 0.2 * n.max()
        cond = n < c
        loss = torch.where(cond, n, (n**2 + c**2) / (2 * c + 1e-9))

        loss = loss.sum()

        if self.size_average:
            return loss / mask.sum()

        return loss
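The torch.where line above is a reverse-Huber (berHu) penalty; a standalone sketch with the same adaptive 0.2 * max threshold:

import torch

n = torch.abs(torch.randn(4, 4))   # absolute residuals
c = 0.2 * n.max()                  # adaptive threshold
# linear below c, quadratic above c (the 1e-9 guards against c == 0)
berhu = torch.where(n < c, n, (n ** 2 + c ** 2) / (2 * c + 1e-9))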
Example 5
    def forward(self, b_pred, b_gt):
        N = b_gt.shape[0]
        b_pred = b_pred.view(-1, 1)
        b_gt = b_gt.view(-1, 1)
        b_gt = b_gt.float()

        # Class-balancing weight: down-weight the majority class
        sm = b_gt.sum()
        sz = b_gt.size()[0]

        self.alpha = 1.0 - sm.float() / float(sz)
        alfa = self.alpha * b_gt + (1.0 - self.alpha) * (1.0 - b_gt)

        # Probability assigned to the true class
        pt = mul(b_gt, b_pred) + mul(1.0 - b_gt, 1.0 - b_pred)

        clamp_val = 1e-7  # to avoid exploding gradients when taking torch.log

        pt = pt.clamp(min=clamp_val, max=1.0 - clamp_val)
        logpt = thLog(pt)
        # Focal term: (1 - pt)^gamma down-weights easy examples
        power_pt = (1.0 - pt)**self.gamma
        power_pt = power_pt * self.beta * logpt
        loss = -1.0 * alfa * power_pt
        loss = loss.sum()

        return (1.0 / N) * loss
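A standalone sketch of this class-balanced focal weighting, with assumed values for gamma and beta (the original reads them from self):

import torch

b_gt = (torch.rand(8, 1) > 0.7).float()   # binary ground-truth labels
b_pred = torch.rand(8, 1)                 # predicted probabilities
gamma, beta = 2.0, 1.0                    # assumed hyper-parameters

alpha = 1.0 - b_gt.mean()                            # down-weight the majority class
alfa = alpha * b_gt + (1.0 - alpha) * (1.0 - b_gt)

pt = b_gt * b_pred + (1.0 - b_gt) * (1.0 - b_pred)   # probability of the true class
pt = pt.clamp(min=1e-7, max=1.0 - 1e-7)

loss = -(alfa * beta * (1.0 - pt) ** gamma * torch.log(pt)).sum() / b_gt.shape[0]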