def edge_loss(target, output):
    """Mean L1 distance between the spatial gradients of ``output`` and ``target``.

    Both tensors go through ``SpatialGradient``, which yields a
    (B, C, 2, H, W) tensor holding two derivative planes per channel.
    """
    pred_grad = SpatialGradient()(output)
    # Guard the expected (B, C, 2, H, W) layout before unpacking the two
    # derivative planes. (NOTE(review): plain asserts vanish under `python -O`.)
    assert(pred_grad.ndim == 5)
    assert(pred_grad.shape[2] == 2)
    true_grad = SpatialGradient()(target)
    # Channel 0 / 1 are treated as the y- / x-derivative here, following the
    # original naming -- TODO confirm against SpatialGradient's channel order.
    diff_y = torch.abs(pred_grad[:, :, 0, :, :] - true_grad[:, :, 0, :, :])
    diff_x = torch.abs(pred_grad[:, :, 1, :, :] - true_grad[:, :, 1, :, :])
    return torch.mean(diff_y + diff_x)
def image_and_light_loss(image_relit, image_gt, light_prediction, light_gt):
    """Relighting loss: per-pixel L1 plus gradient L1 (both normalized by the
    pixel count) plus a squared-error term on the predicted lighting.
    """
    # Pixel-wise L1 between the relit image and ground truth.
    pixel_term = torch.sum(torch.abs(image_relit - image_gt))
    # L1 between spatial gradients encourages matching edges.
    gradient_term = torch.sum(
        torch.abs(SpatialGradient()(image_relit) - SpatialGradient()(image_gt))
    )
    # Squared error on the lighting parameters; the /9 divisor presumably
    # corresponds to 9 lighting coefficients -- TODO confirm.
    light_term = torch.sum((light_prediction - light_gt) ** 2)
    side = image_relit.shape[2]  # spatial size, assumed square -- TODO confirm
    return (pixel_term + gradient_term) / (side * side) + light_term / 9
def L1(I_t, I_tp, L_s, L_sp, image_size=128):
    """Image + lighting L1/L2 loss.

    Args:
        I_t: ground-truth image tensor.
        I_tp: predicted (relit) image tensor.
        L_s: ground-truth lighting parameters.
        L_sp: predicted lighting parameters.
        image_size: side length used to normalize the image terms per pixel.
            Defaults to 128, matching the previously hard-coded ``128 * 128``
            divisor, so existing callers see identical results.

    Returns:
        Scalar tensor: (image L1 + gradient L1) / image_size**2 + light L2 / 9.
    """
    # Pixel-wise L1 between target and prediction.
    img_l1 = torch.sum(torch.abs(I_t - I_tp))
    # L1 between spatial gradients to penalize edge mismatches.
    I_t_grad = SpatialGradient()(I_t)
    I_tp_grad = SpatialGradient()(I_tp)
    grad_l1 = torch.sum(torch.abs(I_t_grad - I_tp_grad))
    # Squared error on lighting; /9 presumably matches 9 lighting
    # coefficients -- TODO confirm.
    light_l2 = torch.sum((L_s - L_sp) ** 2)
    loss = ((img_l1 + grad_l1) / (image_size * image_size)) + (light_l2 / 9)
    return loss
def __init__(self, patch_size: int = 19, eps: float = 1e-10):
    """Set up the patch gradient operator and Gaussian weighting window.

    Args:
        patch_size: side length of the square patches to process.
        eps: small constant, presumably guarding divisions downstream --
            TODO confirm usage.
    """
    super(PatchAffineShapeEstimator, self).__init__()
    self.patch_size: int = patch_size
    self.eps: float = eps
    # First-order Sobel derivatives.
    self.gradient: nn.Module = SpatialGradient(mode='sobel', order=1)
    # Gaussian window over the patch; sigma scales with the patch size.
    sigma: float = float(patch_size) / math.sqrt(2.0)
    self.weighting: torch.Tensor = get_gaussian_kernel2d(
        (patch_size, patch_size), (sigma, sigma), True
    )
def __init__(self, r=3, tau=0.1):
    """Configure the NMS loss.

    Args:
        r: radius passed to ``get_filter_dict`` -- semantics defined by that
            helper, not visible here.
        tau: threshold-like hyperparameter -- TODO confirm usage downstream.
    """
    super(StealNMSLoss, self).__init__()
    self.r = r
    self.tau = tau
    self.eps = 1e-7
    # Running/accumulated loss value, reset to zero at construction.
    self.nms_loss = 0
    # Spatial-gradient operator with library defaults.
    self.grad2d = SpatialGradient()
    self.filter_dict = get_filter_dict(r)
def L1_alternate(I_t, I_tp, L_s, L_sp):
    """Alternate image/lighting loss using max column-wise L1 norms.

    Takes the L1 norm across the columns of the image (and gradient)
    differences, reduces with a global max, averages over the batch, and adds
    a mean-squared lighting term.
    """
    batch = I_t.size()[0]
    # L1 norm across the columns of the pixel difference, then the global
    # maximum averaged over the batch size.
    image_term = torch.max(torch.norm((I_t - I_tp), p=1, dim=3)) / batch
    # Same reduction applied to the spatial-gradient difference (gradient
    # tensors carry an extra derivative axis, hence dim=4).
    grad_t = SpatialGradient()(I_t)
    grad_tp = SpatialGradient()(I_tp)
    gradient_term = torch.max(torch.norm((grad_t - grad_tp), p=1, dim=4)) / batch
    # Mean squared error on the lighting parameters.
    light_term = torch.mean((L_s - L_sp) ** 2)
    return image_term + gradient_term + light_term
def __init__(self, patch_size: int = 32, num_angular_bins: int = 36, eps: float = 1e-8):
    """Set up gradient, angular-histogram smoothing, and patch weighting.

    Args:
        patch_size: side length of the square input patches.
        num_angular_bins: number of orientation-histogram bins.
        eps: small constant, presumably guarding divisions downstream --
            TODO confirm usage.
    """
    super().__init__()
    self.patch_size = patch_size
    self.num_ang_bins = num_angular_bins
    self.eps = eps
    # First-order Sobel derivatives.
    self.gradient = SpatialGradient(mode='sobel', order=1)
    # 3-tap circular smoothing over the angular histogram; the fixed
    # [0.33, 0.34, 0.33] kernel is written once and kept out of autograd.
    self.angular_smooth = nn.Conv1d(
        1, 1, kernel_size=3, padding=1, bias=False, padding_mode="circular"
    )
    with torch.no_grad():
        self.angular_smooth.weight[:] = torch.tensor([[[0.33, 0.34, 0.33]]])
    # Gaussian window down-weighting pixels far from the patch center.
    sigma = float(patch_size) / math.sqrt(2.0)
    self.weighting = get_gaussian_kernel2d(
        (patch_size, patch_size), (sigma, sigma), True
    )
def __init__(self) -> None:
    """Configure a plain finite-difference gradient operator."""
    super().__init__()
    # Small constant, presumably guarding normalizations downstream --
    # TODO confirm usage.
    self.eps = 1e-8
    # First-order finite-difference gradient, explicitly unnormalized.
    self.grad = SpatialGradient(mode='diff', order=1, normalized=False)
def __init__(self):
    """Configure a first-order Sobel spatial-gradient operator."""
    super().__init__()
    self.spatial_gradient = SpatialGradient(mode='sobel', order=1)
def __init__(self):
    """Set up the edge-detection building blocks."""
    super().__init__()
    # Finite-difference spatial gradient.
    self.spatial = SpatialGradient(mode='diff')
    # 3x3 max-pool, stride 1, padding 1: preserves spatial size while
    # dilating responses.
    self.max_pool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)