Example #1
def canny_detector(image, low, high, sigma=1):
    # Step 1: Smooth the image with a Gaussian to reduce noise
    gauss = filters.gauss1D(sigma)[np.newaxis]
    im = utils.convolve2d(image, gauss)
    im = utils.convolve2d(im, gauss.T)

    # Step 2: Get directional derivatives of the smoothed image
    derivative = np.array([[-1, 1]])
    im_x = canny_convolution(im, derivative)
    im_y = canny_convolution(im, derivative.T)

    # Step 3: Create array consisting of gradient direction/magnitude
    height, width = image.shape
    gradient = np.empty((height, width, 4), dtype=np.float64)

    gradient[:, :, 0] = im_x
    gradient[:, :, 1] = im_y
    gradient[:, :, 2] = gradient_magnitude(im_x, im_y)
    gradient[:, :, 3] = gradient_direction(im_x, im_y)

    # Step 4: Non-Max Suppression
    suppressed = non_max_suppression(gradient)
    # sup_2 = suppressed[:,:,2].astype('uint8')
    # sup_2 = sup_2 * int(255 / np.max(sup_2) - 1)

    # Step 5: Thresholding
    thresholded = thresholds(suppressed, low, high)

    # Step 6: Hysteresis
    final = canny_hysteresis(thresholded)
    return final
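Several helpers used by canny_detector (canny_convolution, non_max_suppression, thresholds, canny_hysteresis) are not shown in this listing. As a point of reference, the two gradient helpers could plausibly look like the following minimal NumPy sketch (hypothetical stand-ins, not the original implementations):

import numpy as np

# Hypothetical stand-ins for the gradient helpers called in canny_detector above;
# the actual implementations are not part of this listing.
def gradient_magnitude(im_x, im_y):
    # Euclidean norm of the gradient at every pixel
    return np.sqrt(im_x**2 + im_y**2)

def gradient_direction(im_x, im_y):
    # Gradient angle in radians, in (-pi, pi]
    return np.arctan2(im_y, im_x)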
Example #2
    def grad_P(self, L, P):
        # compute
        utils.convolve2d(L, P, output=self._J)
        utils.dx(self._J, self._dxJ)
        utils.dy(self._J, self._dyJ)
        utils.dx(L, self._dxL)
        utils.dy(L, self._dyL)
        #~ 
        #~ utils.dx_b(self._dxL, self._dxxL)
        #~ utils.dy_b(self._dyL, self._dyyL)
        #~ utils.dx_b(self._dyL, self._dxyL)
        #~ 
        #~ utils.dx_b(self._dxJ, self._dxxJ)
        #~ utils.dy_b(self._dyJ, self._dyyJ)
        #~ utils.dx_b(self._dyJ, self._dxyJ)
        
        R = self._J - self.I0
        dxR = self._dxJ - self._dxI0
        dyR = self._dyJ - self._dyI0
        #~ dxxR = self._dxxJ - self._dxxI0
        #~ dyyR = self._dyyJ - self._dyyI0
        #~ dxyR = self._dxyJ - self._dxyI0

        dP = self.w0 * utils.grad_P(P.shape, L, R)
        dP += self.w1 * utils.grad_P(P.shape, self._dxL, dxR)
        dP += self.w1 * utils.grad_P(P.shape, self._dyL, dyR)
        #~ dP += self.w2 * utils.grad_P(P.shape, self._dxxL, dxxR)
        #~ dP += self.w2 * utils.grad_P(P.shape, self._dyyL, dyyR)
        #~ dP += self.w2 * utils.grad_P(P.shape, self._dxyL, dxyR)
        
        #~ if self._simplex_normal is None:
            #~ self._simplex_normal = np.ones(P.shape)/np.sqrt(P.size)
        #~ dPnorm = np.dot(dP.flatten(), self._simplex_normal.flatten()) * self._simplex_normal
        dP -= dP.mean()
        return dP / self.I0.size
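utils.grad_P itself is not part of the listing. For the data term w0 * ||L ⊛ P - I0||^2, its gradient with respect to P is the correlation of the residual R with L, restricted to the kernel support. A hedged sketch under the assumption of a centred, same-size convolution and an odd-sized kernel (illustration only, not the library's implementation):

import numpy as np
from scipy.signal import fftconvolve

# Sketch of what utils.grad_P might compute for the data term: the gradient of
# 0.5 * ||L (*) P - I0||^2 with respect to P is the correlation of the residual
# R with L, cropped to the support of P. Assumes centred "same"-size
# convolutions and an odd-sized kernel.
def grad_P(P_shape, L, R):
    full = fftconvolve(R, L[::-1, ::-1], mode='full')   # correlation of R with L
    cy = (full.shape[0] - P_shape[0]) // 2
    cx = (full.shape[1] - P_shape[1]) // 2
    return full[cy:cy + P_shape[0], cx:cx + P_shape[1]]

The w1 terms above reuse the same operation with the derivative images in place of L and R, which is exactly how the method assembles dP.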
Example #3
    def __call__(self, L, P, psf_only=False):
        # compute
        utils.convolve2d(L, P, output=self._J)
        utils.dx(self._J, output=self._dxJ)
        utils.dy(self._J, output=self._dyJ)
        utils.dx(L, output=self._dxL)
        utils.dy(L, output=self._dyL)
        utils.dx_b(self._dxJ, output=self._dxxJ)
        utils.dy_b(self._dyJ, output=self._dyyJ)
        utils.dx_b(self._dyJ, output=self._dxyJ)
        # energy for data compatibility

        R = self._J - self.I0
        dxR = self._dxJ - self._dxI0
        dyR = self._dyJ - self._dyI0
        dxxR = self._dxxJ - self._dxxI0
        dyyR = self._dyyJ - self._dyyI0
        dxyR = self._dxyJ - self._dxyI0

        E = self.w0 * utils.norm2(R)
        E += self.w1 * utils.norm2(dxR)
        E += self.w1 * utils.norm2(dyR)
        #~ E += self.w2 * utils.norm2(dxxR)
        #~ E += self.w2 * utils.norm2(dyyR)
        #~ E += self.w2 * utils.norm2(dxyR)

        if not psf_only:
            # energy for global prior
            E += self.lambda1 * utils.global_prior(self._dxL, self.a, self.b)
            E += self.lambda1 * utils.global_prior(self._dyL, self.a, self.b)
            # energy for local prior
            E += self.lambda2 * utils.local_prior(self._dxL, self._dxI0, self.M)
            E += self.lambda2 * utils.local_prior(self._dyL, self._dyI0, self.M)

        return E / self.I0.size
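utils.norm2, utils.global_prior and utils.local_prior are likewise not shown. norm2 is presumably a sum of squares over the residual image; a trivial stand-in:

import numpy as np

# Plausible stand-in for utils.norm2 as used in the energy above
# (a sum of squared entries); the prior helpers are not sketched here.
def norm2(arr):
    return float(np.sum(arr * arr))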
Example #4
    def grad_P(self, L, P):
        # compute
        utils.convolve2d(L, P, output=self._J)
        utils.dx(self._J, self._dxJ)
        utils.dy(self._J, self._dyJ)
        utils.dx(L, self._dxL)
        utils.dy(L, self._dyL)
        #~
        #~ utils.dx_b(self._dxL, self._dxxL)
        #~ utils.dy_b(self._dyL, self._dyyL)
        #~ utils.dx_b(self._dyL, self._dxyL)
        #~
        #~ utils.dx_b(self._dxJ, self._dxxJ)
        #~ utils.dy_b(self._dyJ, self._dyyJ)
        #~ utils.dx_b(self._dyJ, self._dxyJ)

        R = self._J - self.I0
        dxR = self._dxJ - self._dxI0
        dyR = self._dyJ - self._dyI0
        #~ dxxR = self._dxxJ - self._dxxI0
        #~ dyyR = self._dyyJ - self._dyyI0
        #~ dxyR = self._dxyJ - self._dxyI0

        dP = self.w0 * utils.grad_P(P.shape, L, R)
        dP += self.w1 * utils.grad_P(P.shape, self._dxL, dxR)
        dP += self.w1 * utils.grad_P(P.shape, self._dyL, dyR)
        #~ dP += self.w2 * utils.grad_P(P.shape, self._dxxL, dxxR)
        #~ dP += self.w2 * utils.grad_P(P.shape, self._dyyL, dyyR)
        #~ dP += self.w2 * utils.grad_P(P.shape, self._dxyL, dxyR)

        #~ if self._simplex_normal is None:
        #~ self._simplex_normal = np.ones(P.shape)/np.sqrt(P.size)
        #~ dPnorm = np.dot(dP.flatten(), self._simplex_normal.flatten()) * self._simplex_normal
        dP -= dP.mean()
        return dP / self.I0.size
Example #5
def central_difference(image):

    central_y = filters.central_y()
    central_x = filters.central_x()

    # Directional derivatives: x uses the horizontal kernel, y the vertical one
    x = utils.convolve2d(image, central_x)
    y = utils.convolve2d(image, central_y)
    return utils.combine_arrays(x, y)
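filters.central_x, filters.central_y and utils.combine_arrays are not shown. Assuming standard central-difference kernels and a gradient-magnitude combiner, they might look like this (guesses for illustration only):

import numpy as np

# Assumed forms of the kernels and the combiner used above; these are guesses,
# not the actual filters/utils modules.
def central_x():
    # 1x3 row kernel: derivative along the x (column) direction
    return np.array([[-1.0, 0.0, 1.0]]) / 2.0

def central_y():
    # 3x1 column kernel: derivative along the y (row) direction
    return np.array([[-1.0], [0.0], [1.0]]) / 2.0

def combine_arrays(x, y):
    # One common choice: per-pixel gradient magnitude
    return np.sqrt(x**2 + y**2)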
Example #6
def marr_hildreth_detector(image, log, gaussian=0):
    if gaussian:
        gauss = filters.gauss1D(2)
        gauss = gauss[np.newaxis]
        image = utils.convolve2d(image, gauss)
        image = utils.convolve2d(image, gauss.T)

    edges = marr_hildreth_convolution(image, log)

    return edges
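The log argument is a Laplacian-of-Gaussian kernel; the listing does not show how it is built. One hypothetical construction:

import numpy as np

# Hypothetical construction of the LoG kernel passed in as `log` above.
def log_kernel(sigma=1.0, size=9):
    half = size // 2
    y, x = np.mgrid[-half:half + 1, -half:half + 1]
    r2 = x**2 + y**2
    # Laplacian of Gaussian, up to a constant factor
    kernel = (r2 - 2.0 * sigma**2) / sigma**4 * np.exp(-r2 / (2.0 * sigma**2))
    # Shift to zero mean so flat regions produce no response
    return kernel - kernel.mean()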
Example #7
    @property
    def M(self):
        if self._M is None:
            if self._P is None:
                raise ValueError("The kernel has not been set")
            # Box filter the size of the kernel gives the local mean and mean of squares
            var_k = np.ones(self._P.shape) / self._P.size
            I0_m = utils.convolve2d(self.I0, var_k)
            I02_m = utils.convolve2d(self.I0**2, var_k)
            # Local variance: E[x^2] - E[x]^2, averaged over channels if present
            var = I02_m - I0_m**2
            if var.ndim == 3:
                var = var.mean(-1)
            self._M = (var < self.t * self.t).astype("uint8")
            print("smooth parts", self._M.mean())
        return self._M
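M builds a binary mask of "smooth" pixels by thresholding the local variance, which it obtains from two box-filter convolutions. A standalone sketch of the same idea for a 2-D grayscale I0, using scipy.signal as a stand-in for utils.convolve2d:

import numpy as np
from scipy.signal import convolve2d

# Standalone illustration of the local-variance mask above; not the original code.
def smooth_mask(I0, kernel_shape, t):
    box = np.ones(kernel_shape) / float(np.prod(kernel_shape))
    local_mean = convolve2d(I0, box, mode='same')
    local_mean_sq = convolve2d(I0**2, box, mode='same')
    var = local_mean_sq - local_mean**2      # E[x^2] - E[x]^2 per pixel
    return (var < t * t).astype('uint8')     # 1 where the patch looks smooth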
Example #8
def sobel_detector(image, sigma=1):
    # Step 1: Smooth the image with a Gaussian
    gauss = filters.gauss1D(sigma)
    gauss = gauss[np.newaxis]

    # Two separable 1D Gaussian passes save time over a single 2D filter (2n vs n^2 operations per pixel)
    im = utils.convolve2d(image, gauss)
    im = utils.convolve2d(im, gauss.T)

    # Step 2: Apply the Sobel edge detector (central differences on the smoothed image)
    im = central_difference(im)

    return im
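The separability claim in the comment can be checked directly: convolving with a 1-D Gaussian along each axis equals a single convolution with the 2-D outer-product kernel. A quick check with scipy.signal and a stand-in Gaussian (filters.gauss1D is not shown in this listing):

import numpy as np
from scipy.signal import convolve2d

# Quick check of the separability claim above, using a stand-in 1-D Gaussian.
g = np.exp(-0.5 * np.arange(-3, 4)**2)
g /= g.sum()
g = g[np.newaxis]                       # 1 x 7 row kernel

image = np.random.rand(64, 64)
separable = convolve2d(convolve2d(image, g, mode='valid'), g.T, mode='valid')
full_2d = convolve2d(image, g.T @ g, mode='valid')
print(np.allclose(separable, full_2d))  # True: two 1-D passes equal one 2-D pass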
Example #9
    @property
    def M(self):
        if self._M is None:
            if self._P is None:
                raise ValueError("The kernel has not been set")
            var_k = np.ones(self._P.shape) / self._P.size
            I0_m = utils.convolve2d(self.I0, var_k)
            I02_m = utils.convolve2d(self.I0**2, var_k)
            var = I02_m - I0_m**2
            if var.ndim == 3:
                var = var.mean(-1)
            self._M = (var < self.t * self.t).astype("uint8")
            print("smooth parts", self._M.mean())
        return self._M
Example #10
    def grad_L(self, L, P):
        # compute
        utils.convolve2d(L, P, output=self._J)
        utils.dx(self._J, self._dxJ)
        utils.dy(self._J, self._dyJ)

        utils.dx(L, self._dxL)
        utils.dy(L, self._dyL)

        utils.dx_b(self._dxJ, self._dxxJ)
        utils.dy_b(self._dyJ, self._dyyJ)
        utils.dx_b(self._dyJ, self._dxyJ)

        R = self._J - self.I0
        dxR = self._dxJ - self._dxI0
        dyR = self._dyJ - self._dyI0
        dxxR = self._dxxJ - self._dxxI0
        dyyR = self._dyyJ - self._dyyI0
        dxyR = self._dxyJ - self._dxyI0
        # energy for data compatibility

        dxP = utils.dx(P)
        dyP = utils.dy(P)
        dxxP = utils.dx_b(dxP)
        dyyP = utils.dy_b(dyP)
        dxyP = utils.dx_b(dyP)

        dL = np.zeros(L.shape)

        dL += self.w0 * utils.grad_L(P, R)
        dL += self.w1 * utils.grad_L(dxP, dxR)
        dL += self.w1 * utils.grad_L(dyP, dyR)
        #~ dL += self.w2 * utils.grad_L(dxxP, dxxR)
        #~ dL += self.w2 * utils.grad_L(dyyP, dyyR)
        #~ dL += self.w2 * utils.grad_L(dxyP, dxyR)

        dL += self.lambda1 * utils.grad_global_prior_x(self._dxL, self.a,
                                                       self.b)
        dL += self.lambda1 * utils.grad_global_prior_y(self._dyL, self.a,
                                                       self.b)

        dL += self.lambda2 * utils.grad_local_prior_x(self._dxL, self._dxI0,
                                                      self.M)
        dL += self.lambda2 * utils.grad_local_prior_y(self._dyL, self._dyI0,
                                                      self.M)

        return (dL - dL.mean()) / self.I0.size
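As with grad_P, utils.grad_L is not shown. For the data term, the gradient of the energy with respect to L is the correlation of the residual with the kernel, at the size of L. A hedged sketch under the same centred, same-size convolution assumption with an odd-sized P (illustration only):

import numpy as np
from scipy.signal import fftconvolve

# Sketch of what utils.grad_L(P, R) might compute for the data term: the
# gradient of 0.5 * ||L (*) P - I0||^2 with respect to L is the correlation of
# the residual R with the kernel P, with the same size as L.
def grad_L(P, R):
    return fftconvolve(R, P[::-1, ::-1], mode='same')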
Example #11
    def grad_L(self, L, P):
        # compute
        utils.convolve2d(L, P, output=self._J)
        utils.dx(self._J, self._dxJ)
        utils.dy(self._J, self._dyJ)
        
        utils.dx(L, self._dxL)
        utils.dy(L, self._dyL)
        
        utils.dx_b(self._dxJ, self._dxxJ)
        utils.dy_b(self._dyJ, self._dyyJ)
        utils.dx_b(self._dyJ, self._dxyJ)

        R = self._J - self.I0
        dxR = self._dxJ - self._dxI0
        dyR = self._dyJ - self._dyI0
        dxxR = self._dxxJ - self._dxxI0
        dyyR = self._dyyJ - self._dyyI0
        dxyR = self._dxyJ - self._dxyI0
        # energy for data compatibility

        
        dxP = utils.dx(P)
        dyP = utils.dy(P)
        dxxP = utils.dx_b(dxP)
        dyyP = utils.dy_b(dyP)
        dxyP = utils.dx_b(dyP)

        dL = np.zeros(L.shape)
        
        dL += self.w0 * utils.grad_L(P, R)
        dL += self.w1 * utils.grad_L(dxP, dxR)
        dL += self.w1 * utils.grad_L(dyP, dyR)
        #~ dL += self.w2 * utils.grad_L(dxxP, dxxR)
        #~ dL += self.w2 * utils.grad_L(dyyP, dyyR)
        #~ dL += self.w2 * utils.grad_L(dxyP, dxyR)

        dL += self.lambda1 * utils.grad_global_prior_x(self._dxL, self.a, self.b)
        dL += self.lambda1 * utils.grad_global_prior_y(self._dyL, self.a, self.b)

        dL += self.lambda2 * utils.grad_local_prior_x(self._dxL, self._dxI0, self.M)
        dL += self.lambda2 * utils.grad_local_prior_y(self._dyL, self._dyI0, self.M)
        
        return (dL - dL.mean()) / self.I0.size
Example #12
    def __call__(self, L, P, psf_only=False):
        # compute
        utils.convolve2d(L, P, output=self._J)
        utils.dx(self._J, output=self._dxJ)
        utils.dy(self._J, output=self._dyJ)
        utils.dx(L, output=self._dxL)
        utils.dy(L, output=self._dyL)
        utils.dx_b(self._dxJ, output=self._dxxJ)
        utils.dy_b(self._dyJ, output=self._dyyJ)
        utils.dx_b(self._dyJ, output=self._dxyJ)
        # energy for data compatibility

        R = self._J - self.I0
        dxR = self._dxJ - self._dxI0
        dyR = self._dyJ - self._dyI0
        dxxR = self._dxxJ - self._dxxI0
        dyyR = self._dyyJ - self._dyyI0
        dxyR = self._dxyJ - self._dxyI0

        E = self.w0 * utils.norm2(R)
        E += self.w1 * utils.norm2(dxR)
        E += self.w1 * utils.norm2(dyR)
        #~ E += self.w2 * utils.norm2(dxxR)
        #~ E += self.w2 * utils.norm2(dyyR)
        #~ E += self.w2 * utils.norm2(dxyR)

        if not psf_only:
            # energy for global prior
            E += self.lambda1 * utils.global_prior(self._dxL, self.a, self.b)
            E += self.lambda1 * utils.global_prior(self._dyL, self.a, self.b)
            # energy for local prior
            E += self.lambda2 * utils.local_prior(self._dxL, self._dxI0,
                                                  self.M)
            E += self.lambda2 * utils.local_prior(self._dyL, self._dyI0,
                                                  self.M)

        return E / self.I0.size