Example #1
 def splice(self, image, mask, gan_out):
     if mask.shape[-1] > 0:
         mask = (np.sum(mask, -1, keepdims=True) < 1)
         mask = 1 - mask  # invert mask for blending
         mask = mask.astype('uint8') * 255
         mask = GaussianBlur(mask, (29, 29), 0)
         # mask_img = np.zeros([mask.shape[0], mask.shape[1],3]).astype('uint8')
         # for i in range(3):
         #     mask_img[:,:,i] = mask
         mask_img = mask.astype(float) / 255
         # proper blending courtesy of https://www.learnopencv.com/alpha-blending-using-opencv-cpp-python/
         fg_o = gan_out.astype(float)
         bg_o = image.astype(float)
         fg = np.zeros([mask.shape[0], mask.shape[1], 3]).astype(float)
         bg = np.zeros([mask.shape[0], mask.shape[1], 3]).astype(
             float
         )  # create foreground and background images with proper rgb channels
         cover = image
         for i in range(3):
             # Multiply the fg with the mask matte
             fg[:, :, i] = multiply(mask_img, fg_o[:, :, i])
             # Multiply the bg with ( 1 - mask_img )
             bg[:, :, i] = multiply(1.0 - mask_img, bg_o[:, :, i])
             # Add the masked fg and bg.
             cover[:, :, i] = add(fg[:, :, i], bg[:, :, i])
     else:
         #error case, return image
         cover = image
     return cover
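The loop above is the standard alpha-compositing formula from the linked LearnOpenCV article: output = alpha * foreground + (1 - alpha) * background, with the feathered mask as alpha. A minimal self-contained sketch of the same blend (the arrays below are made-up stand-ins, not this project's GAN output):

import numpy as np
import cv2

# Hypothetical stand-in inputs: a foreground, a background and a 0-255 mask.
fg = np.full((64, 64, 3), 200, dtype=np.float32)
bg = np.full((64, 64, 3), 30, dtype=np.float32)
mask = np.zeros((64, 64), dtype=np.uint8)
mask[16:48, 16:48] = 255

# Feather the mask and normalise it to [0, 1], as splice() does.
alpha = cv2.GaussianBlur(mask, (29, 29), 0).astype(np.float32) / 255.0
alpha = alpha[:, :, None]                 # broadcast over the colour channels

out = (alpha * fg + (1.0 - alpha) * bg).astype(np.uint8)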
Example #2
def blur(img, blur_amount=5):
    if(blur_amount == 7):
        dst2 = GaussianBlur(img,(7,7),0)
        dst = bilateralFilter(dst2, 7, 80, 80)
    else:
        dst2 = GaussianBlur(img,(5,5),0)
        dst = bilateralFilter(dst2, 7, 10 * blur_amount, 80)
    return dst
Example #3
 def gaussian_blur(self, image=None, kernel=(5, 5)) -> ndarray:
     '''
     Applies a Gaussian blur kernel
     :param image:  numpy.ndarray input image
     :param kernel: tuple kernel size
     :return: <numpy.ndarray>
     '''
     if image is not None:
         assert isinstance(image, ndarray), \
             'image must be <numpy.ndarray>, for gaussian blur'
         return GaussianBlur(image, kernel, 0)
     self.image_tf = GaussianBlur(self.gray, kernel, 0)
     return self.image_tf
Example #4
def findFire(data):
    '''Locates the brightest area in the frame by applying a differential
        gaussian filter and creating a byte array light vs dark. The centroid
        of the light region is found and returned as the location of the fire'''
    data = GaussianBlur(data,(3,3),2)
    mask = zeros(data.shape)
    mask[data > (data.mean() + data.max())/2.] = 1
    mom = moments(mask)
    try:
        x, y = mom['m10']/mom['m00'], mom['m01']/mom['m00']
    except ZeroDivisionError:
        x, y = nan, nan
    return x, y
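The centroid comes directly from the raw image moments: x = m10 / m00 and y = m01 / m00. A small standalone check of that formula with cv2.moments on a synthetic mask (the blob position is arbitrary):

import numpy as np
from cv2 import moments

mask = np.zeros((100, 100), dtype=np.uint8)
mask[40:60, 20:30] = 1                    # blob centred near x = 24.5, y = 49.5

mom = moments(mask)
x, y = mom['m10'] / mom['m00'], mom['m01'] / mom['m00']
print(x, y)                               # ~24.5, ~49.5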
Example #5
def gen_blue_noise(mask, kpc_per_pix, beam, height=73, width=78, fsc=5):
    '''
    Height and width are in the unit of pixel.
    1 pixel in CALIFA is 1 arcsec.
    The fine-structure constant (fsc) serves to produce a higher-resolution sky map.
    The kernel size must be odd and here the default value is 15.
    '''
    noise = GaussianBlur(np.random.normal(0, 1, (width * fsc, height * fsc)),
                         (15, 15), beam[0] / kpc_per_pix * fsc)[::fsc, ::fsc]
    y, x = np.meshgrid(np.arange(height), np.arange(width))
    return (noise.reshape(-1)[mask], x.reshape(-1)[mask] * kpc_per_pix,
            y.reshape(-1)[mask] * kpc_per_pix,
            min(config.fixed_bin_size, kpc_per_pix))
Example #6
def findFire(data):
    '''Locates the brightest area in the frame by applying a differential
        gaussian filter and creating a byte array light vs dark. The centroid
        of the light region is found and returned as the location of the fire'''
    data = GaussianBlur(data,(3,3),2)
    mask = zeros(data.shape)
    mask[data > (data.mean() + data.max())/1.5] = 1
    mom = moments(mask)
    imwrite('mask{0}{1}{2}.bmp'.format(mom['m00'],mom['m02'],mom['m20']),mask)
    if mom['m00']:
        x, y = mom['m10']/mom['m00'], mom['m01']/mom['m00']
    else:
        x, y = nan, nan
    return x, y
Example #7
def sub_pixel_shift_test():
    img = imread('../Images/Moon_Tile-024_043939_stacked_with_blurr_pp.tif',
                 IMREAD_GRAYSCALE)
    show_image("Original image", img, fullscreen=True)
    spx_shifty = 5.2
    spx_shiftx = 3.5

    img_resized, img_shifted = subpixel_shifted_frame(img, spx_shifty,
                                                      spx_shiftx)
    # for i in range(10):
    #     show_image("Image resized", img_resized, fullscreen=True)
    #     show_image("Image shifted", img_shifted, fullscreen=True)

    gauss_width_reference = 15
    gauss_width_frame = 19
    reference_frame_blurred_intermediate = GaussianBlur(
        img_resized, (gauss_width_reference, gauss_width_reference),
        0).astype(float32)
    reference_frame_blurred = GaussianBlur(
        reference_frame_blurred_intermediate,
        (gauss_width_reference, gauss_width_reference), 0).astype(float32)
    frame_blurred = GaussianBlur(img_shifted,
                                 (gauss_width_frame, gauss_width_frame), 0)

    y_ap = 170
    x_ap = 200
    half_box_width = 24
    y_low = y_ap - half_box_width
    y_high = y_ap + half_box_width
    x_low = x_ap - half_box_width
    x_high = x_ap + half_box_width
    reference_box_second_phase = reference_frame_blurred[y_low:y_high,
                                                         x_low:x_high]
    reference_box_first_phase = reference_box_second_phase[::2, ::2]

    search_width = 10
    shift_y_local_first_phase, shift_x_local_first_phase, success_first_phase, \
    shift_y_local_second_phase, shift_x_local_second_phase, success_second_phase = \
        Miscellaneous.multilevel_correlation(reference_box_first_phase, frame_blurred,
                                             gauss_width_frame,
                                             reference_box_second_phase, y_low, y_high, x_low, x_high,
                                             search_width,
                                             weight_matrix_first_phase=None, subpixel_solve=True)

    print("Shift in y, first phase: " + str(shift_y_local_first_phase) +
          ", second phase: " + str(shift_y_local_second_phase) + ", total: " +
          str(shift_y_local_first_phase + shift_y_local_second_phase))
    print("Shift in x, first phase: " + str(shift_x_local_first_phase) +
          ", second phase: " + str(shift_x_local_second_phase) + ", total: " +
          str(shift_x_local_first_phase + shift_x_local_second_phase))
Example #8
File: data.py Project: viraj-shah18/SRFBN
    def __getitem__(self, index):
        upscale_factor = 4
        all_patch_sizes = [60, 50, 40]
        patch_size = all_patch_sizes[upscale_factor - 2]

        lr_image_path = os.path.join(self.LR_path, self.LR_images[index])
        hr_image_path = os.path.join(self.HR_path, self.HR_images[index])

        lr_image = Image.open(lr_image_path)
        hr_image = Image.open(hr_image_path)
        lr_image.load()
        hr_image.load()

        lr_image_data = np.asarray(lr_image, dtype=np.float32)
        hr_image_data = np.asarray(hr_image, dtype=np.float32)

        seed = 42
        lr_patch, hr_patch = get_patch(lr_image_data, hr_image_data,
                                       patch_size, upscale_factor, seed)

        # on testing, found several completely white patches, so they are replaced on the fly
        while 35 > np.mean(lr_patch) or np.mean(lr_patch) > 220:
            seed += 1
            lr_patch, hr_patch = get_patch(lr_image_data, hr_image_data,
                                           patch_size, upscale_factor, seed)

        lr_patch, hr_patch = lr_patch / 255, hr_patch / 255

        if self.mode == "DN":
            lr_patch = random_noise(lr_patch)

        elif self.mode == "BD":
            lr_patch = GaussianBlur(lr_patch, (7, 7), 1.6)

        return lr_patch, hr_patch
Example #9
def makebackground(imagepath, imagex, imagey, screenx, screeny):
    from cv2 import GaussianBlur, BORDER_DEFAULT
    scaleby = find_scale_factor(imagex, imagey, screenx, screeny, True)
    background = resize_image(imagepath, imagex, imagey, scaleby)
    background = GaussianBlur(background, (31, 31), 0, borderType=BORDER_DEFAULT)  # BORDER_DEFAULT is the border mode, not sigma
    background = crop(background, screenx, screeny)
    return background
Example #10
File: cv5.py Project: abadied/CV5
def get_derivatives(img):
    I = GaussianBlur(src=img, ksize=(11, 11), sigmaX=7)
    h_x1, h_x2 = cv2.getDerivKernels(1, 0, 3, normalize=True)
    h_y1, h_y2 = cv2.getDerivKernels(0, 1, 3, normalize=True)
    img_x = cv2.sepFilter2D(I, -1, h_x1, h_x2)
    img_y = cv2.sepFilter2D(I, -1, h_y1, h_y2)
    return img_x, img_y
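getDerivKernels(1, 0, 3) returns the separable kernel pair for a 3x3 Sobel x-derivative, and sepFilter2D applies it in two 1-D passes. A hedged standalone sketch of the same calls, turning the two derivative images into a gradient magnitude on a synthetic edge:

import numpy as np
import cv2

img = np.zeros((64, 64), dtype=np.float32)
img[:, 32:] = 1.0                                   # vertical step edge

I = cv2.GaussianBlur(img, (11, 11), 7)
dx_kx, dx_ky = cv2.getDerivKernels(1, 0, 3, normalize=True)
dy_kx, dy_ky = cv2.getDerivKernels(0, 1, 3, normalize=True)
img_x = cv2.sepFilter2D(I, -1, dx_kx, dx_ky)        # d/dx
img_y = cv2.sepFilter2D(I, -1, dy_kx, dy_ky)        # d/dy
magnitude = np.sqrt(img_x ** 2 + img_y ** 2)        # peaks along the edge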
Example #11
    def execute(self, title="", size=None, showPlot=True):
        field = np.zeros(self._frameSize)

        #allCords = np.array([])
        for i, track in enumerate(self._exp._tracks):
            #print('Track %d' % i)
            if track.getMaxDistTravelled() > 100:
                cords = track._trackCords.astype(int)  # np.int is removed in recent NumPy
                field[cords[:, 0], cords[:, 1]] += 1
                #allCords = np.vstack((allCords, cords)) if allCords.size else cords

        # First we roll the image to center the plate.
        rightBorder = np.min(np.where(field > 0)[1])
        leftBorder = field.shape[1] - np.max(np.where(field > 0)[1])
        allBorders = rightBorder + leftBorder
        correctBorder = np.floor(allBorders / 2)
        field = np.roll(field, int(correctBorder - rightBorder), axis=1)
        nField = (field - np.min(field)) / (np.max(field) - np.min(field))
        #DEBUG
        nField = (np.log10(nField))
        nField[nField == -np.inf] = 0
        #DEBUG
        #nField = blur(nField, (24, 24))
        nField = GaussianBlur(nField, (65, 65), sigmaX=15, sigmaY=15,
                              borderType=BORDER_DEFAULT)

        # Saving the results
        self._results['mat'] = nField

        if showPlot == True:
            plt.style.use("dark_background")
            plt.imshow(nField, cmap=plt.get_cmap('gnuplot2_r'))
            plt.title(title)
            plt.axis('off')
            plt.show()
Example #12
def generateBaseImage(image, sigma, assumed_blur):
    """Generate base image from input image by upsampling by 2 in both directions and blurring
    """
    logger.debug('Generating base image...')
    image = resize(image, (0, 0), fx=2, fy=2, interpolation=INTER_LINEAR)
    sigma_diff = sqrt(max((sigma ** 2) - ((2 * assumed_blur) ** 2), 0.01))
    return GaussianBlur(image, (0, 0), sigmaX=sigma_diff, sigmaY=sigma_diff)  # the image blur is now sigma instead of assumed_blur
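With the conventional SIFT starting values (an assumption here; the snippet does not fix them) of sigma = 1.6 and assumed_blur = 0.5, the doubled image already carries a blur of 2 * 0.5 = 1.0, so the extra blur to apply is sqrt(1.6^2 - 1.0^2) ≈ 1.25:

from math import sqrt

sigma, assumed_blur = 1.6, 0.5    # typical SIFT defaults, assumed for illustration
sigma_diff = sqrt(max((sigma ** 2) - ((2 * assumed_blur) ** 2), 0.01))
print(round(sigma_diff, 3))       # 1.249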
Example #13
File: handler.py Project: wozimer/applypy
    def gaussian(self, kernel_size, sigma_x, *args, region: tuple = None):
        """
        Smooths a rectangular area of the image using Gaussian blur.
        :param sigma_x: Gaussian kernel standard deviation in the X direction.
        :param kernel_size: Gaussian kernel size. kernel_size.width and kernel_size.height can differ,
        but both must be positive and odd. Or they can be zeros, in which case they are computed from sigma.
        :param region: a tuple of the top-left and bottom-right points of the smoothing area.
        The whole image will be smoothed if not provided.
        :return: smoothed image.
        """

        if not region:
            top_left, bottom_right = (0, 0), (self._obj.width,
                                              self._obj.height)
        else:
            top_left, bottom_right = region

        smoothed = self._obj.copy()
        smoothed_region = smoothed[top_left[1]:bottom_right[1],
                                   top_left[0]:bottom_right[0]]
        smoothed_region = GaussianBlur(smoothed_region, kernel_size, sigma_x,
                                       *args)
        smoothed[top_left[1]:bottom_right[1],
                 top_left[0]:bottom_right[0]] = smoothed_region

        return self._obj.new(smoothed)
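The same slice, blur and write-back pattern works on a plain NumPy image without the applypy wrapper. A minimal sketch, assuming a hypothetical input file and made-up region coordinates:

from cv2 import GaussianBlur, imread

img = imread('photo.jpg')                        # hypothetical input path
top_left, bottom_right = (50, 80), (250, 220)    # (x, y) corners of the region to smooth

smoothed = img.copy()
region = smoothed[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
smoothed[top_left[1]:bottom_right[1],
         top_left[0]:bottom_right[0]] = GaussianBlur(region, (15, 15), 0)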
Example #14
 def get_Train_Test_Validation(self):
     url = [
         "data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4",
         "data_batch_5", "test_batch"
     ]
     X = []
     y = []
     d = self._uncrypt(url)
     for index in range(len(url)):
         n = d[url[index]][b'data'].shape[0]
         doc = url[index]
         for count in range(n):
             # Pick a random sigma value for the Gaussian smoothing
             sigma = randint(0, 3)
             src = self.CreateImage(d[doc][b'data'][count, :])
             y.append(src)
             dst = GaussianBlur(src, (5, 5), sigma)
             X.append(dst)
     #The proportion of the data is 70% Train, 15% Validation and 15% Test
     Xtrain, X_test, ytrain, y_test = train_test_split(np.asarray(X),
                                                       np.asarray(y),
                                                       test_size=0.3)
     Xtest, Xval, ytest, yval = train_test_split(X_test,
                                                 y_test,
                                                 test_size=0.5)
     return (Xtrain, ytrain, Xval, yval, Xtest, ytest)
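The 70/15/15 proportions follow from the two calls: test_size=0.3 holds out 30% of the data, and splitting that held-out set in half (test_size=0.5) yields 15% validation and 15% test. A quick check of the arithmetic on dummy data:

import numpy as np
from sklearn.model_selection import train_test_split

X = np.arange(1000).reshape(-1, 1)
y = np.arange(1000)

X_train, X_rest, y_train, y_rest = train_test_split(X, y, test_size=0.3)
X_test, X_val, y_test, y_val = train_test_split(X_rest, y_rest, test_size=0.5)
print(len(X_train), len(X_val), len(X_test))    # 700 150 150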
Example #15
 def _find_edges(im):
     gray = cvtColor(im, COLOR_BGR2GRAY)
     # Apply histogram equalization - The parameters below are good
     clahe = createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
     gray = clahe.apply(gray)
     gray = GaussianBlur(gray, (5, 5), 0)
     return Canny(gray, 75, 200)
Example #16
def project_object_edge(img, dimension):
    """ scale the image, binarise with Othu and project to one dimension

    :param ndarray img:
    :param int dimension: select dimension for projection
    :return list(float):

    >>> img = np.zeros((20, 10, 3))
    >>> img[2:6, 1:7, :] = 1
    >>> img[10:17, 4:6, :] = 1
    >>> project_object_edge(img, 0).tolist()  # doctest: +NORMALIZE_WHITESPACE
    [0.0, 0.0, 0.7, 0.7, 0.7, 0.7, 0.0, 0.0, 0.0, 0.0,
     0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.0, 0.0, 0.0]
    """
    if dimension not in (0, 1):
        raise ValueError('not supported dimension %i' % dimension)
    if img.ndim != 3:
        raise ValueError('unsupported image shape %r' % img.shape)
    img_gray = np.mean(img, axis=-1)
    img_gray = GaussianBlur(img_gray, (5, 5), 0)
    p_low, p_high = np.percentile(img_gray, (1, 95))
    img_gray = rescale_intensity(img_gray, in_range=(p_low, p_high))
    img_bin = img_gray > threshold_otsu(img_gray)
    img_edge = np.mean(img_bin, axis=1 - dimension)
    return img_edge
Example #17
def gaussianBlur(originalImage, space):
    from cv2 import GaussianBlur
    from random import randint
    min_blur = int(space['blur_extent_min'])
    max_blur = min_blur + int(space['blur_extent_distance'])
    amountToBlur = randint(min_blur, max_blur)
    if amountToBlur % 2 == 0: amountToBlur += 1
    return GaussianBlur(originalImage, (int(amountToBlur), int(amountToBlur)),0)
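cv2.GaussianBlur only accepts odd kernel sizes, which is what the += 1 guard enforces. A hedged usage sketch; the space dict keys match the snippet, but the values and the input image are made up:

import numpy as np

space = {'blur_extent_min': 1, 'blur_extent_distance': 4}
img = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)  # stand-in image
blurred = gaussianBlur(img, space)   # kernel between 1x1 and 5x5, forced to be odd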
Example #18
def field_flattening(image, filter_type="gaussian", ksize=31, sigma=101):
    """
    Function that will field flatten a single channel image
    """
    if filter_type == "gaussian":
        blurred_image = GaussianBlur(image.astype(np.uint8),
                                     ksize=(ksize, ksize),
                                     sigmaX=sigma)
    if filter_type == "median":
        blurred_image = medianBlur(image.astype(np.uint8), ksize=ksize)

    flat_image = np.divide(image.astype(float), blurred_image.astype(float))

    flat_image[np.isnan(flat_image)] = 0
    flat_image = (min_max_rescaling(flat_image)) * 255

    return flat_image
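Field flattening divides the image by a heavily blurred copy of itself, so slow illumination gradients cancel while local detail survives. A standalone sketch of that core step; min_max_rescaling above is a project helper, so the rescaling is written inline here as an assumption:

import numpy as np
from cv2 import GaussianBlur

img = (np.random.rand(256, 256) * 200 + 20).astype(np.float32)   # synthetic single-channel image
background = GaussianBlur(img.astype(np.uint8), (31, 31), 101).astype(float)

flat = np.divide(img.astype(float), background,
                 out=np.zeros_like(img, dtype=float), where=background != 0)
flat = (flat - flat.min()) / (flat.max() - flat.min()) * 255      # inline min-max rescale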
Example #19
def generateBaseImage(image, sigma, assumed_blur):
    """
    通过对原图像的两个方向的向上采样并模糊,来生成basic image.
    """
    logger.info('Generating base image...')
    image = resize(image, (0, 0), fx=2, fy=2, interpolation=INTER_LINEAR)
    sigma_diff = sqrt(max((sigma ** 2) - ((2 * assumed_blur) ** 2), 0.01))
    return GaussianBlur(image, (0, 0), sigmaX=sigma_diff, sigmaY=sigma_diff)
Example #20
def toCanny(bw, gaussian):
    if gaussian == 0:
        canny = Canny(bw, 200, 230, 3)
        return canny
    else:
        img1 = GaussianBlur(bw, (gaussian, gaussian), 0)
        cannyGaus = Canny(img1, 10, 30, 453)
        return cannyGaus
Example #21
def detect(image):
    """Detect marker from the camera image"""
    markers = []
    # Stage 1: Detect edges in image
    gray = cvtColor(image, COLOR_BGR2GRAY)
    clahe = createCLAHE(clipLimit=1, tileGridSize=(6, 6))
    cl1 = clahe.apply(gray)
    _, thresh = threshold(cl1, 60, 255, THRESH_OTSU)
    blurred = GaussianBlur(thresh, (5, 5), 0)
    edges = Canny(blurred, 75, 100)

    # Stage 2: Find contours
    contours = findContours(edges, RETR_TREE, CHAIN_APPROX_SIMPLE)[-2]  # the contours element, for both OpenCV 3.x and 4.x return signatures
    contours = sorted(contours, key=contourArea, reverse=True)[:]

    for contour in contours:
        # Stage 3: Shape check
        perimeter = arcLength(contour, True)
        approx = approxPolyDP(contour, 0.01 * perimeter, True)

        if len(approx) == QUADRILATERAL_POINTS:
            area = contourArea(approx)
            # (x, y, w, h) = boundingRect(approx)
            # ar = float(h) / float(w)
            # if area > 100 and ar >= 0.8 and ar <= 1.2:
            if area > 700:
                # putText(image, str(area), (10, 30), FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
                drawContours(image, [contour], -1, (0, 255, 0), 1)

                # Stage 4: Perspective warping
                topdown_quad = get_topdown_quad(thresh, approx.reshape(4, 2))

                # Stage 5: Border check
                if topdown_quad[int((topdown_quad.shape[0] / 100.0) * 5),
                                int((topdown_quad.shape[1] / 100.0) *
                                    5)] > BLACK_THRESHOLD:
                    continue

                # Stage 6: Get marker pattern
                marker_pattern = None

                try:
                    marker_pattern = get_marker_pattern(
                        topdown_quad, THRESHOLD_PERCENT)
                except:
                    continue

                if not marker_pattern:
                    continue

                # Stage 7: Match marker pattern
                marker_found, marker_rotation, marker_name = match_marker_pattern(
                    marker_pattern)

                if marker_found:
                    markers.append([marker_name, marker_rotation])

    return markers, image
Example #22
    def get_gradient_x(self):
        print('creating sobel features')

        self.x_train = self.x_train.astype(np.double)
        x_train = [sobel_features(GaussianBlur(x.reshape((32,32)), ksize=(5,5), \
                    sigmaX=1), magnitude, direction, sx, sy, x2) \
                   for x in self.x_train]
        print('got features')
        return x_train
Example #23
 def __init__(self, image, g_h, g_w, tr):
     _, self.image = threshold(
         GaussianBlur(
             cvtColor(image, COLOR_BGR2GRAY),
             (g_w, g_h),
             0
         ),
         tr, 255, THRESH_BINARY
     )
Example #24
 def get():
     valid, frame = mov.read()
     if not valid:
         return (False, None)
     frame = frame.astype(np.float32)
     frame = cvtColor(frame, COLOR_RGB2GRAY)  # COLOR_RGB2GRAY is the cv2 constant name
     if blur:
         frame = GaussianBlur(frame, (self.kernel, self.kernel), 0)
     return valid, frame
Example #25
def Blur(ImageArray, Distort):
    """
    Param:
        ImageArray: the input image array
        Distort: sets the blur amount; used as the Gaussian kernel size, so it must be a positive odd integer
    Return:
        Returns a blurred image array
    """
    return GaussianBlur(ImageArray, (Distort, Distort), 0)
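cv2.GaussianBlur requires the kernel size to be positive and odd, so Distort must be an odd integer. A short usage sketch with an illustrative value and a stand-in image, assuming GaussianBlur is imported from cv2 as in the rest of these examples:

import numpy as np

image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)  # stand-in image array
blurred = Blur(image, 9)    # Distort = 9 gives a 9x9 Gaussian kernel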
Example #26
def preprocess(imgOriginal, PreprocessCvcSel, PreprocessMode,
               PreprocessGaussKernel, PreprocessThreshBlockSize,
               PreprocessThreshweight, PreprocessMorphKernel,
               PreprocessMedianBlurKernel, PreprocessCannyThr):
    """ CSC, Contrast stretch (morph.), Blurring and Adaptive-Threshold """

    # Color-Space-Conversion (CSC): switch from BGR to HSV and take the requested component:
    imgHSV = cvtColor(imgOriginal, COLOR_BGR2HSV)
    imgHSV_H, imgHSV_S, imgHSV_V = split(imgHSV)

    if PreprocessCvcSel == "H":
        imgGrayscale = imgHSV_H
    elif PreprocessCvcSel == "S":
        imgGrayscale = imgHSV_S
    elif PreprocessCvcSel == "V":
        imgGrayscale = imgHSV_V
    else:
        error("Unsupported PreprocessCvcSel mode: %s" % PreprocessCvcSel)

    # -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- ..

    if PreprocessMode == "Legacy":

        # Increase Contrast (morphological):
        imgMaxContrastGrayscale = maximizeContrast(imgGrayscale,
                                                   PreprocessMorphKernel)

        # Blurring:
        imgBlurred = GaussianBlur(imgMaxContrastGrayscale,
                                  PreprocessGaussKernel, 0)

        # Adaptive Threshold:
        imgThresh = adaptiveThreshold(imgBlurred, 255.0,
                                      ADAPTIVE_THRESH_GAUSSIAN_C,
                                      THRESH_BINARY_INV,
                                      PreprocessThreshBlockSize,
                                      PreprocessThreshweight)

    # -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- ..

    elif PreprocessMode == "BlurAndCanny":

        # Blurring:
        imgBlurred = medianBlur(imgGrayscale, PreprocessMedianBlurKernel)

        # Canny Edge Detection:
        imgThresh = Canny(imgBlurred, PreprocessCannyThr / 2,
                          PreprocessCannyThr)

    # -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- ..

    else:
        error("Unsupported PreprocessMode mode: %s" % PreprocessMode)

    imgGrayscale = imgBlurred

    return imgGrayscale, imgThresh
Example #27
def generar_Imagen_Base(imagen, sigma, assumed_blur):
    """Genere una imagen base a partir de la imagen de entrada submuestreando en 2 en ambas direcciones y difuminando
    """
    logger.debug('Generando imagen base...')
    imagen = resize(imagen, (0, 0), fx=2, fy=2, interpolation=INTER_LINEAR)
    sigma_diff = sqrt(max((sigma ** 2) - ((2 * assumed_blur) ** 2), 0.01))

    # the image blur is now sigma instead of assumed_blur
    return GaussianBlur(imagen, (0, 0), sigmaX=sigma_diff, sigmaY=sigma_diff)
Example #28
File: SIFT.py Project: albgp/SIFT
 def computeFirstImg(self):
     img_upsampled = resize(self.img, (0, 0),
                            fx=2,
                            fy=2,
                            interpolation=INTER_LINEAR)
     delta_sigma = np.sqrt(self.sigma**2 - (2 * self.assumed_blur)**2)
     self.first_img = GaussianBlur(img_upsampled, (0, 0),
                                   sigmaX=delta_sigma,
                                   sigmaY=delta_sigma)
Example #29
    def __call__(self, sample):
        im_arr = np.array(sample)

        im_arr = self.gaussian.augment_image(im_arr)
        im_arr = self.poisson.augment_image(im_arr)
        im_arr = GaussianBlur(im_arr, (3, 3), 0.0)
        im_arr = fastNlMeansDenoisingColored(im_arr, None, 6, 6, 4, 12)
        image = Image.fromarray(im_arr)

        return image
Example #30
def sharpen(src):
    src = asarray(src)
    w, h = get_size(src)
    #    w, h = pychron.size()
    #    im = new_dst(w, h)
    #    kern = CreateMat(3, 3, pychron.type)
    #    print type(kern), type(pychron)
    im = GaussianBlur(src, (3, 3), 3)
    addWeighted(src, 1.5, im, -0.5, 0, im)
    return im
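The addWeighted call is a classic unsharp mask: sharpened = 1.5 * src - 0.5 * blurred, which amplifies whatever the small Gaussian removed. A standalone sketch of the same weighting on a synthetic input:

import numpy as np
from cv2 import GaussianBlur, addWeighted

src = np.random.randint(0, 256, (64, 64), dtype=np.uint8)   # stand-in grayscale image
blurred = GaussianBlur(src, (3, 3), 3)
sharpened = addWeighted(src, 1.5, blurred, -0.5, 0)          # 1.5*src - 0.5*blurred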
Example #31
def imagePreProcessing(img_url):
    urllib.request.urlretrieve(img_url, "api/image.jpg")
    img = imread('api/image.jpg')
    img = GaussianBlur(img, (5, 5), 0)
    gray = cvtColor(img, COLOR_BGR2GRAY)
    kernel1 = getStructuringElement(MORPH_ELLIPSE, (11, 11))

    close = morphologyEx(gray, MORPH_CLOSE, kernel1)
    div = float32(gray) / (close)
    # freeResources()
    return uint8(normalize(div, div, 0, 255, NORM_MINMAX))
def ToEdgeByAccuratey(img, rati=3, canny=(50, 100), deg=5):
    grayImg = cvtColor(img, cv2.COLOR_BGR2GRAY)
    gauImg = GaussianBlur(grayImg, (rati,rati), 3)
    cannyImg = Canny(gauImg, canny[0], canny[1])
    
    kernel = np.ones((deg,deg), np.uint8)
#     openingImg = cv2.morphologyEx(cannyImg, cv2.MORPH_OPEN, kernel)
    dilateImg = dilate(cannyImg, kernel, iterations =1)
    
    ret, thr = threshold(dilateImg, 127, 255, 0)
    
    return thr