def canny(filename):
    # 1 read the image as grayscale
    img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)

    # 2 smooth filter
    #img = cv2.GaussianBlur(img, (3,3), 1.5)
    img_smoothed = cv2.adaptiveBilateralFilter(img, (3, 3), 75)

    # 3 get the gradient
    #grad = canny_operator(img)
    grad, angles = scharr(img_smoothed)

    # 4 Non-Maximum Suppression
    suppress = non_maxm_suppression(grad, angles)

    # 5 double thresholding (hysteresis) and edge linking
    high = get_high_threshold(suppress, 0.7)
    low = round(high*0.4)
    img_edge = edge_linking(suppress, high, low)

    opencv_canny = cv2.Canny(img, 50, 150)
    plt.subplot(131), plt.imshow(
        img, cmap=plt.cm.gray), plt.title("Original Image")
    plt.subplot(132), plt.imshow(
        img_edge, cmap=plt.cm.gray), plt.title("My Canny")
    plt.subplot(133), plt.imshow(
        opencv_canny, cmap=plt.cm.gray), plt.title("OpenCV's Canny")
    plt.show()
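The helper functions here (scharr, non_maxm_suppression, get_high_threshold, edge_linking) are not shown on this page. Purely as a sketch of step 5, get_high_threshold could pick the gradient magnitude below which the given fraction of pixels fall; this hypothetical version assumes that is what the 0.7 ratio means:

import numpy as np

def get_high_threshold(grad, ratio):
    # Hypothetical sketch: take the gradient magnitude below which
    # `ratio` (here 0.7) of all pixels fall as the high threshold.
    return np.percentile(grad, ratio * 100)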
def enhance(image):
    # Options:
    #  - Anisotropic diffusion
    #  - Bilateral filter
    #  - Median filter
    #  - Homomorphic Filtering (normalize brightness, increase contrast)
    # http://www.123seminarsonly.com/Seminar-Reports/029/42184313-10-1-1-100-81.pdf
    image = cv2.adaptiveBilateralFilter(image, ksize=(21, 21), sigmaSpace=19)
    return image
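The comment above lists several smoothing candidates, but only the adaptive bilateral filter is exercised. For comparison, a minimal sketch of the other stock OpenCV options (parameter values are placeholders; anisotropic diffusion and homomorphic filtering have no single built-in call in OpenCV 2.4):

import cv2

def enhance_alternatives(image):
    # Median filter: robust to salt-and-pepper noise.
    median = cv2.medianBlur(image, 5)
    # Plain (non-adaptive) bilateral filter: edge-preserving smoothing.
    bilateral = cv2.bilateralFilter(image, 9, 75, 75)
    return median, bilateral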
Example #3
def sharp(mangaStr):
	manga = str(mangaStr)

	#print manga
	img = cv2.imread(manga)
	img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

	blurred = cv2.adaptiveBilateralFilter(img, (5, 5), 75)

	# sharpen (unsharp mask): 1.5*original - 0.5*blurred
	img = cv2.addWeighted(img, 1.5, blurred, -0.5, 0)
	cv2.imwrite('berserkSharpening.jpg', img)
Example #4
def dinamicContrast(alpha, beta, mangaStr):

	manga = str(mangaStr)
	img = cv2.imread(manga)

	mul_img = cv2.multiply(img, np.array([alpha]))  # mul_img = img*alpha
	new_img = cv2.add(mul_img, beta)                # new_img = img*alpha + beta

	cv2.imwrite('original_image.jpg', img)

	blurred = cv2.adaptiveBilateralFilter(new_img, (5, 5), 75)
	# sharpen (unsharp mask): 1.5*adjusted - 0.5*blurred
	new_img = cv2.addWeighted(new_img, 1.5, blurred, -0.5, 0)
	cv2.imwrite('new_image.jpg', new_img)
Example #5
def bilateral_tonemap(image, debug, sigmaRange, sigmaSpace, contrast, gamma):
    """Tonemap image (HDR) using Durand 2002"""
    # Compute intensity as the average of the three color channels.
    intensity = np.mean(image, axis=2)

    # Compute log intensity.
    logintensity = np.log(intensity)

    # Make sigma space a factor of the image resolution.
    width, height = image.shape[0:2]
    sigmaSpace = sigmaSpace * min(width, height)

    # Apply the bilateral filter to the result, with a kernel size of 14
    # and the provided sigmaRange/sigmaSpace. OpenCV can be used here.
    baseImage = cv2.adaptiveBilateralFilter(logintensity, (14, 14),
                                            sigmaSpace,
                                            maxSigmaColor=sigmaRange)
Example #6
    def run(self, image):
        assert image.size > 0
        hed = cv2.split(rgb2hed(image))[1]
        hed = img_as_ubyte(1.0 - hed)
        # hed = 1.0 - hed
        hed = rescale_intensity(hed)
        im = hed
        # im = img_as_ubyte(hed)
        # com.debug_im(im)
        im[im >= 115] = 255
        im[im < 115] = 0
        im = rank.enhance_contrast(im, disk(5))
        im = morph.close(im, disk(3))

        can = cv2.adaptiveBilateralFilter(im,
                                          self.bilateral_kernel,
                                          self.sigma_color)
        return can
Example #7
def bilat_adjust(mat):

    bilat = cv2.bilateralFilter(mat, -1, 7, 7)  # initial preview; d computed from sigmaSpace

    cv2.namedWindow('bilat')
    cv2.createTrackbar('sigC', 'bilat', 0, 100, nothing)
    cv2.createTrackbar('sigD', 'bilat', 0, 100, nothing)
    while (1):
        cv2.imshow('bilat', bilat)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break

        n = -1
        sigC = cv2.getTrackbarPos('sigC', 'bilat')
        sigD = cv2.getTrackbarPos('sigD', 'bilat')
        bilat = cv2.bilateralFilter(mat, n, sigC, sigD)

    cv2.destroyAllWindows()

    return bilat, sigC, sigD
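Usage is interactive: the window shows the filtered result while the trackbars drive sigmaColor and sigmaSpace, and Esc returns the last result plus the chosen sigmas. A minimal driver, assuming an image loaded with cv2.imread ('input.png' is a placeholder path):

import cv2

img = cv2.imread('input.png')
filtered, sigC, sigD = bilat_adjust(img)
print sigC, sigD  # tuned parameters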
Example #9
def find_blocks(image):
    img = image

    # Smoothen image
    img = cv2.adaptiveBilateralFilter(img, (9, 9), 100)

    # Detect edges
    img = cv2.Canny(img, 35, 100)

    # Close edges
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=3)

    # Find contours
    contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # Draw contours
    #draw_contours(contours, hierarchy[0], image)

    # Find blocks from contours
    blocks = blocks_from_contour_tree(contours, hierarchy[0])

    return blocks
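blocks_from_contour_tree is not shown in this snippet. Purely as an illustration, a minimal stand-in could keep only top-level contours and return their bounding boxes (the real helper presumably uses the hierarchy more thoroughly):

import cv2

def blocks_from_contour_tree(contours, hierarchy):
    # Hypothetical stand-in. Each hierarchy row is
    # [next, prev, first_child, parent]; parent == -1 means top level.
    blocks = []
    for contour, node in zip(contours, hierarchy):
        if node[3] == -1:
            blocks.append(cv2.boundingRect(contour))  # (x, y, w, h)
    return blocks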
Example #10
def extract_text(filename, is_url=False, debug=False):
    if is_url:
        req = urllib.urlopen(filename)
        arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
        # src = cv2.imdecode(arr, -1)  # 'load it as it is'
        src = cv2.imdecode(
            arr, cv2.CV_LOAD_IMAGE_GRAYSCALE)  # 'load it as grayscale'
    else:
        src = cv2.imread(filename, cv2.CV_LOAD_IMAGE_GRAYSCALE)

    # smooth
    # src = cv2.GaussianBlur(src,(3,3),0)
    src = cv2.adaptiveBilateralFilter(src, (9, 9), 75, maxSigmaColor=55)
    if debug:
        cv2.imwrite("/tmp/1blurred.png", src)

    # resize image
    orig_size = src.shape[:2]
    # such that the smaller dimension is at least 1000 pixels
    normalized_size = max(1000, max(orig_size))
    max_dim_idx = max(enumerate(orig_size), key=lambda l: l[1])[0]
    min_dim_idx = [idx for idx in [0, 1] if idx != max_dim_idx][0]
    new_size = [0, 0]
    new_size[min_dim_idx] = normalized_size
    new_size[max_dim_idx] = int(
        float(orig_size[max_dim_idx]) / orig_size[min_dim_idx] *
        normalized_size)

    # src = cv2.resize(src=src, dsize=(0,0), dst=src, fx=4, fy=4)
    src = cv2.resize(src=src, dsize=tuple(new_size), dst=src, fx=0, fy=0)

    # # smooth
    # # src = cv2.GaussianBlur(src,(5,5),0)
    # src = cv2.adaptiveBilateralFilter(src,(1,1),75,75)
    # cv2.imwrite("/tmp/2blurred2.png", src)

    # erode + dilate
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
    skel = np.zeros(src.shape, np.uint8)

    eroded = cv2.erode(src, element)
    if debug:
        cv2.imwrite("/tmp/3eroded.png", eroded)
    temp = cv2.dilate(eroded, element2)
    if debug:
        cv2.imwrite("/tmp/4dilated.png", temp)
    # temp = cv2.subtract(src,temp)
    # skel = cv2.bitwise_or(skel,temp)
    src = temp.copy()

    # black and white
    # src = cv2.adaptiveThreshold(src, 255, adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C, thresholdType=cv2.THRESH_BINARY, blockSize=15, C=2)
    # _, src = cv2.threshold(src, 150, 255, cv2.THRESH_BINARY)
    # For large text I think we need the first parameter to be higher
    src = cv2.adaptiveThreshold(src, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 23, 3)
    # _,src = cv2.threshold(src,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    if debug:
        cv2.imwrite("/tmp/5thresh.png", src)

    # TODO: optimize by not writing image to disk
    cv2.imwrite("/tmp/dst.png", src)

    if debug:
        file = urllib.urlopen(filename)
        im = cStringIO.StringIO(
            file.read())  # constructs a StringIO holding the image
        img = Image.open(im)
        original_text = pytesseract.image_to_string(img)
        print "ORIGINAL", filter(None, original_text.split('\n'))[:3]

        print "-----------------------------------"

    img = Image.open("/tmp/dst.png")
    final_text = pytesseract.image_to_string(img)
    if debug:
        print "FINAL", filter(None, final_text.split('\n'))[:30]
    return final_text
def adaptive_bilateral_filter(
    img,
    kernel_size,
):
    """(src, ksize, sigmaSpace[, dst[, maxSigmaColor[, anchor[, borderType]]]]) → dst"""
    return cv2.adaptiveBilateralFilter(img, (kernel_size, kernel_size), 75,
                                       maxSigmaColor=75)
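Note that cv2.adaptiveBilateralFilter exists only in OpenCV 2.4.x; it was removed in OpenCV 3.0. If a wrapper like this must also run on newer builds, one option is a sketch like the following, falling back to the plain bilateral filter (roughly comparable smoothing, not an exact substitute):

import cv2

def adaptive_bilateral_filter_compat(img, kernel_size=9, sigma_space=75):
    if hasattr(cv2, "adaptiveBilateralFilter"):
        # OpenCV 2.4.x path
        return cv2.adaptiveBilateralFilter(img, (kernel_size, kernel_size),
                                           sigma_space)
    # OpenCV >= 3.0 path: plain bilateral filter as a stand-in
    return cv2.bilateralFilter(img, kernel_size, sigma_space, sigma_space)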
Example #12
File: track.py Project: nipponnp/aboutme
        # orig_height,orig_width=orig_img.shape[:2]

        # Part of the image to be considered for lane detection
        upper_threshold = 0.4
        lower_threshold = 0.2
        # Copy the part of original image to temporary image for analysis.
        img = orig_img[int(upper_threshold *
                           orig_height):int((1 - lower_threshold) *
                                            orig_height), :]
        # Convert temp image to GRAY scale
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        height, width = img.shape[:2]

        # Image processing to extract better information from the images.
        # Adaptive Bilateral Filter:
        img = cv2.adaptiveBilateralFilter(img, ksize=(5, 5), sigmaSpace=2)
        # Equalize the histogram to account for better contrast in the images.
        img = cv2.equalizeHist(img)
        # Apply Canny Edge Detector to detect the edges in the image.
        bin_img = cv2.Canny(img, 30, 60, apertureSize=3)

        # Thresholds for lane detection. Empirical values, found by trial and error.
        xl_low = int(-1 * orig_width)  # low threshold for left x_intercept
        xl_high = int(0.8 * orig_width)  # high threshold for left x_intercept
        xr_low = int(0.2 * orig_width)  # low threshold for right x_intercept
        xr_high = int(2 * orig_width)  # high threshold for right x_intercept
        xl_phase_threshold = 15  # Minimum angle for left x_intercept
        xr_phase_threshold = 14  # Minimum angle for right x_intercept
        xl_phase_upper_threshold = 80  # Maximum angle for left x_intercept
        xr_phase_upper_threshold = 80  # Maximum angle for right x_intercept
Example #13
    #for i in range(0,439):
    #	for j in range(0,639):
    #		if v_th[i][j] == 1 and v[i][j] < val_th and h[i][j] < hhigh_th and h[i][j] > hlow_th:
    #			v_th[i][j]=1
    #		else:
    #			v_th[i][j]=0;

    #h = cv2.getTrackbarPos('h', 'value')
    #templateWindowSize = cv2.getTrackbarPos('templateWindowSize', 'value')*2 +1
    #searchWindowSize = cv2.getTrackbarPos('searchWindowSize', 'value')*2 + 1
    #dst = cv2.fastNlMeansDenoising(v_th, h, templateWindowSize, searchWindowSize)
    #ksize = cv2.getTrackbarPos('ksize','value')*2 + 1
    #dst =cv2.medianBlur(v_th, ksize)
    #cv2.imshow('value',v_th)

    sigmaSpace = cv2.getTrackbarPos('sigmaSpace','value')
    sigmaColor = cv2.getTrackbarPos('sigmaColor','value')
    #dst = cv2.medianBlur(v_th, ksize)
    #dst = cv2.boxFilter(v_th, -1, (ksize, ksize))
    dst = cv2.bilateralFilter(v_th, ksize, sigmaColor, sigmaSpace)
    
import numpy as np
import cv2

img = cv2.imread('/home/sine/Pictures/vel.png',0)
dst = cv2.adaptiveBilateralFilter(img, (5, 5), 5, maxSigmaColor=5, anchor=(-1, -1))
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image',dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #14
    def find_plates(self):
        """
        Find the license plates in the image

        :rtype: list[(numpy.array, numpy.array)]
        :return: List of tuples containing the plate image and the plate rectangle location.
            The plates returned must be a grayscale image with black background and white characters
        """

        # Create a grayscale version of the image
        processing_img = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)

        # Blur the image
        kernel_size = (7, 7)
        processing_img = cv2.adaptiveBilateralFilter(processing_img, kernel_size, 15)
        # processing_img = cv2.GaussianBlur(processing_img, (7, 7), 3)

        if __debug__:
            display.show_image(processing_img, self.label, 'Gray')

        # Threshold the image using an adaptive algorithm
        processing_img = cv2.adaptiveThreshold(processing_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                               cv2.THRESH_BINARY_INV, 11, 2)

        if __debug__:
            display.show_image(processing_img, self.label, 'Threshold')

        # Find the contours in the image. MODIFIES source image, hence a copy is used
        contours, hierarchy = cv2.findContours(processing_img.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

        if __debug__:
            display.draw_contours(self.image, contours, self.label)

        rectangles = []
        for i in contours:
            area = cv2.contourArea(i)  # Calculate the area of the contour
            if area > 200:  # Trivial check
                peri = cv2.arcLength(i, True)  # Calculate a contour perimeter
                approx = cv2.approxPolyDP(i, 0.045 * peri, True)  # Approximate the curve using a polygon

                # Consider the polygon only if it is convex and has 4 edges
                if len(approx) == 4 and cv2.isContourConvex(approx):
                    if self._check_size(approx):
                        rectangles.append(approx)

        processing_plates = display.get_parts_of_image(processing_img, rectangles)
        ret = []

        # Experimental: Mask every color pixel in every plate rectangle from the original picture
        # mask_pixels = display.get_white_pixels(self.image, rectangles)

        for i, processing_plate in enumerate(processing_plates):
            img_height, img_width = processing_plate.shape
            img_area = img_height * img_width

            # masked_plate = self._filter_white(processing_plate, mask_pixels[i])
            # processing_plate = masked_plate

            # If the area of the plate is below 4500, perform hq2x on the plate
            if img_area < 4500:
                ret.append((
                    cv2.cvtColor(image.hq2x_zoom(processing_plate), cv2.COLOR_BGR2GRAY),
                    rectangles[i]
                ))
            else:
                ret.append((
                    processing_plate,
                    rectangles[i]
                ))

        return ret
Example #15
    def img_callback(self, data):
        xr_phase = 0
        xl_phase = 0

        try:
            orig_img = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        (rows, cols, channels) = orig_img.shape
        # cv_image = imutils.resize(orig_img, width=min(400, cols))

        xl_int_pf = ParticleFilter(N=1000,
                                   x_range=(0, 1500),
                                   sensor_err=1,
                                   par_std=100)
        xl_phs_pf = ParticleFilter(N=1000,
                                   x_range=(15, 90),
                                   sensor_err=0.3,
                                   par_std=1)
        xr_int_pf = ParticleFilter(N=1000,
                                   x_range=(100, 1800),
                                   sensor_err=1,
                                   par_std=100)
        xr_phs_pf = ParticleFilter(N=1000,
                                   x_range=(15, 90),
                                   sensor_err=0.3,
                                   par_std=1)

        #tracking queues
        xl_int_q = [0] * 15
        xl_phs_q = [0] * 15
        count = 0

        # Scale down the image - Just for better display.
        orig_height, orig_width = orig_img.shape[:2]
        orig_img = cv2.resize(orig_img, (orig_width / 2, orig_height / 2),
                              interpolation=cv2.INTER_CUBIC)
        orig_height, orig_width = orig_img.shape[:2]

        # Part of the image to be considered for lane detection
        upper_threshold = 0.4
        lower_threshold = 0.2
        # Copy the part of original image to temporary image for analysis.
        img = orig_img[int(upper_threshold *
                           orig_height):int((1 - lower_threshold) *
                                            orig_height), :]
        # Convert temp image to GRAY scale
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        height, width = img.shape[:2]

        # Image processing to extract better information from the images.
        # Adaptive Bilateral Filter:
        img = cv2.adaptiveBilateralFilter(img, ksize=(5, 5), sigmaSpace=2)
        # Equalize the histogram to account for better contrast in the images.
        img = cv2.equalizeHist(img)
        # Apply Canny Edge Detector to detect the edges in the image.
        bin_img = cv2.Canny(img, 30, 60, apertureSize=3)

        # Thresholds for lane detection. Empirical values, found by trial and error.
        xl_low = int(-1 * orig_width)  # low threshold for left x_intercept
        xl_high = int(0.8 * orig_width)  # high threshold for left x_intercept
        xr_low = int(0.2 * orig_width)  # low threshold for right x_intercept
        xr_high = int(2 * orig_width)  # high threshold for right x_intercept
        xl_phase_threshold = 15  # Minimum angle for left x_intercept
        xr_phase_threshold = 14  # Minimum angle for right x_intercept
        xl_phase_upper_threshold = 80  # Maximum angle for left x_intercept
        xr_phase_upper_threshold = 80  # Maximum angle for right x_intercept

        # Arrays/Containers for intercept values and phase angles.
        xl_arr = np.zeros(xl_high - xl_low)
        xr_arr = np.zeros(xr_high - xr_low)
        xl_phase_arr = []
        xr_phase_arr = []
        # Intercept Bandwidth: Used to assign weights to neighboring pixels.
        intercept_bandwidth = 6

        # Run Probabilistic Hough Transform to extract line segments from Binary image.
        lines = cv2.HoughLinesP(bin_img,
                                rho=1,
                                theta=np.pi / 180,
                                threshold=30,
                                minLineLength=20,
                                maxLineGap=5)

        # Loop for every single line detected by Hough Transform
        # print len(lines[0])
        for x1, y1, x2, y2 in lines[0]:
            if (x1 < x2 and y1 > y2 and x1 < 0.6 * width and x2 > 0.2 * width):
                norm = cv2.norm(float(x1 - x2), float(y1 - y2))
                phase = cv2.phase(np.array(x2 - x1, dtype=np.float32),
                                  np.array(y1 - y2, dtype=np.float32),
                                  angleInDegrees=True)
                if (phase < xl_phase_threshold
                        or phase > xl_phase_upper_threshold
                        or x1 > 0.5 * orig_width):  #Filter out the noisy lines
                    continue
                xl = int(x2 - (height + lower_threshold * orig_height - y2) /
                         np.tan(phase * np.pi / 180))
                # Show the Hough Lines
                # cv2.line(orig_img,(x1,y1+int(orig_height*upper_threshold)),(x2,y2+int(orig_height*upper_threshold)),(0,0,255),2)

                # If the line segment is a lane, get weights for x-intercepts
                try:
                    for i in range(xl - intercept_bandwidth,
                                   xl + intercept_bandwidth):
                        xl_arr[i - xl_low] += (norm**0.5) * y1 * (
                            1 - float(abs(i - xl)) /
                            (2 * intercept_bandwidth)) * (phase**2)
                except IndexError:
                    # print "Debug: Left intercept range invalid:", xl
                    continue
                xl_phase_arr.append(phase[0][0])

            elif (x1 < x2 and y1 < y2 and x2 > 0.6 * width
                  and x1 < 0.8 * width):
                norm = cv2.norm(float(x1 - x2), float(y1 - y2))
                phase = cv2.phase(np.array(x2 - x1, dtype=np.float32),
                                  np.array(y2 - y1, dtype=np.float32),
                                  angleInDegrees=True)
                if (phase < xr_phase_threshold
                        or phase > xr_phase_upper_threshold
                        or x2 < 0.5 * orig_width):  #Filter out the noisy lines
                    continue
                xr = int(x1 + (height + lower_threshold * orig_height - y1) /
                         np.tan(phase * np.pi / 180))
                # Show the Hough Lines
                # cv2.line(orig_img,(x1,y1+int(orig_height*upper_threshold)),(x2,y2+int(orig_height*upper_threshold)),(0,0,255),2)
                # If the line segment is a lane, get weights for x-intercepts
                try:
                    for i in range(xr - intercept_bandwidth,
                                   xr + intercept_bandwidth):
                        xr_arr[i - xr_low] += (norm**0.5) * y2 * (
                            1 - float(abs(i - xr)) /
                            (2 * intercept_bandwidth)) * (phase**2)
                except IndexError:
                    # print "Debug: Right intercept range invalid:", xr
                    continue
                xr_phase_arr.append(phase[0][0])
            else:
                pass  # Invalid line - filter out horizontal and other noisy lines.

        # Sort the phase array and get the best estimate for phase angle.
        try:
            xl_phase_arr.sort()
            xl_phase = xl_phase_arr[-1] if (
                xl_phase_arr[-1] < np.mean(xl_phase_arr) + np.std(xl_phase_arr)
            ) else np.mean(xl_phase_arr) + np.std(xl_phase_arr)
        except IndexError:
            # print "Debug: ", fname + " has no left x_intercept information"
            pass
        try:
            xr_phase_arr.sort()
            xr_phase = xr_phase_arr[-1] if (
                xr_phase_arr[-1] < np.mean(xr_phase_arr) + np.std(xr_phase_arr)
            ) else np.mean(xr_phase_arr) + np.std(xr_phase_arr)
        except IndexError:
            # print "Debug: ", fname + " has no right x_intercept information"
            pass

        # Get the index of x-intercept (700 is for positive numbers for particle filter.)
        pos_int = np.argmax(xl_arr) + xl_low + 700
        # Apply Particle Filter.
        xl_int = xl_int_pf.filterdata(data=pos_int)
        xl_phs = xl_phs_pf.filterdata(data=xl_phase)

        # Draw lines for display
        cv2.line(orig_img, (int(xl_int - 700), orig_height),
                 (int(xl_int - 700) +
                  int(orig_height * 0.3 / np.tan(xl_phs * np.pi / 180)),
                  int(0.7 * orig_height)), (0, 255, 255), 2)
        # Apply Particle Filter.
        xr_int = xr_int_pf.filterdata(data=np.argmax(xr_arr) + xr_low)
        xr_phs = xr_phs_pf.filterdata(data=xr_phase)
        # Draw lines for display
        cv2.line(orig_img, (int(xr_int), orig_height),
                 (int(xr_int) -
                  int(orig_height * 0.3 / np.tan(xr_phs * np.pi / 180)),
                  int(0.7 * orig_height)), (0, 255, 255), 2)

        # print "Degbug: %5d\t %5d\t %5d\t %5d %s"%(xl_int-700,np.argmax(xl_arr)+xl_low,xr_int,np.argmax(xr_arr)+xr_low,fname)
        fname = "test_frame"
        intercepts.append((xl_int[0] - 700, xr_int[0]))

        # Show image
        cv2.imshow('Lane Markers', orig_img)
        key = cv2.waitKey(30)
        if key == 27:
            cv2.destroyAllWindows()
            sys.exit(0)
Example #17
File: filters.py Project: ajdroid/DIP15
					
	print Wr
				

	return src


# if __name__ == "__main__":
spnoisy = cv2.imread('../spnoisy.jpg',0)
unifnoisy = cv2.imread('../unifnoisy.jpg',0)
spunifnoisy = cv2.imread('../spunifnoisy.jpg',0)

for i in [10]:
	dst = bilateralFilter(unifnoisy,i*i,4*i*i)
# 	cv2.imshow('unifnoise cleared ' + str(i), dst)
	cv2.imshow('unifnoise cleared cvb' + str(i)+'.jpg', cv2.adaptiveBilateralFilter(unifnoisy,(5,5),i))
	cv2.imshow('spunifnoise cleared cvb' + str(i)+'.jpg', cv2.adaptiveBilateralFilter(spunifnoisy,(5,5),i))
	cv2.imshow('spnoise cleared cvb' + str(i)+'.jpg', cv2.adaptiveBilateralFilter(spnoisy,(5,5),i))
# cv2.imshow('spnoisy', spnoisy)
for i in [3,5]:
	dst = medianFilter(spnoisy,i)
	# cv2.imshow('spnoise cleared ' + str(i), dst)
	cv2.imshow('spnoise cleared cv m' + str(i)+'.jpg', cv2.medianBlur(spnoisy,i))
	cv2.imshow('spunifnoise cleared cv m' + str(i)+'.jpg', cv2.medianBlur(spunifnoisy,i))
	cv2.imshow('uninoise cleared cv m' + str(i)+'.jpg', cv2.medianBlur(unifnoisy,i))

cv2.waitKey(0)



# dst = cv2.filter2D()
Example #18
def adaptivefilter(img):
    return cv2.adaptiveBilateralFilter(img, (11, 11), 20)
Example #19
def bilateral_tonemap(image, debug, sigmaRange, sigmaSpace, contrast, gamma):
    # Compute intensity as the average of the three color channels.
    intensity = np.mean(image, axis=2)
    # compute log intensity
    logintensity = np.log(intensity)

    # Make sigma space a factor of the image resolution
    width, height = image.shape[0:2]
    sigmaSpace = sigmaSpace * min(width, height)

    # apply the bilateral filter to the result. use a kernel size of 14 and the provided sigmaRange sigmaSpace
    # You can use opencv here if you like.
    baseImage = cv2.adaptiveBilateralFilter(logintensity,
                                            (14, 14),
                                            sigmaSpace,
                                            maxSigmaColor=sigmaRange)

    # compute detail image
    detail = np.subtract(logintensity, baseImage)

    # Compute the min and max values of the base image
    max_value = np.max(baseImage)
    min_value = np.min(baseImage)

    # Reduce the dynamic range of the base image as discussed in the slides.
    # note that dR = log(contrast)
    contrastReducedBaseImage = np.multiply(
        np.subtract(baseImage, max_value),
        np.divide(np.log(contrast), np.subtract(max_value, min_value)))

    # Reconstruct the intensity from the adjusted base image and the detail image.
    reconst = np.exp(contrastReducedBaseImage + detail)

    # Put the colors back in (stack the channels to H x W x 3).
    colorReconstructedImage = np.dstack([
        reconst * image[:, :, c] / intensity for c in range(3)
    ])

    # Apply any gamma correction requested (all channels at once).
    gammaCorrected = cv2.pow(colorReconstructedImage, gamma)

    # Normalize the results. Do each channel separately.
    cv2.normalize(gammaCorrected,
                  gammaCorrected,
                  0,
                  1,
                  cv2.NORM_MINMAX,
                  dtype=cv2.CV_32F)
    # Write the debug info if requested.
    if (debug):
        print "intensity range ", np.min(intensity), np.max(intensity)
        print "logintensity range ", np.min(logintensity), np.max(logintensity)
        print "baseImage range ", np.min(baseImage), np.max(baseImage)
        print "detail range ", np.min(detail), np.max(detail)
        print "contrastReducedBaseImage range ", np.min(
            contrastReducedBaseImage), np.max(contrastReducedBaseImage)
        for c in range(0, 3):
            print "gammaCorrected " + str(c) + " range ", np.min(
                gammaCorrected[:, :, c]), np.max(gammaCorrected[:, :, c])

    cv2.imwrite("debug-intensity.png", intensity * 255)
    cv2.imwrite("debug-logintensity.png", logintensity * 255)
    cv2.imwrite("debug-baseImage.png", np.exp(baseImage) * 255)
    cv2.imwrite("debug-detail.png", detail * 255)
    cv2.imwrite("debug-contrastReducedBaseImage.png",
                np.exp(contrastReducedBaseImage) * 255)
    cv2.imwrite("debug-reconst.png", reconst * 255)
    scipy.misc.imsave("debug-colorReconstructedImage.png",
                      colorReconstructedImage)

    # Convert to 8-bit and return
    return (gammaCorrected * 255).astype(np.uint8)
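One caveat with this implementation: np.log(intensity) produces -inf wherever a pixel is exactly black, which then corrupts the filtering and normalization downstream. A small guard before the log avoids this (the epsilon value is arbitrary, not from the original):

eps = 1e-6
logintensity = np.log(np.maximum(intensity, eps))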
Example #21
"""
4. Bilateral Filtering
bilateralFilter(src, d, sigmaColor, sigmaSpace[, dst[, borderType]]) → dst
Parameters:
src – Source 8-bit or floating-point, 1-channel or 3-channel image.
dst – Destination image of the same size and type as src .
d – Diameter of each pixel neighborhood that is used during filtering.
    If it is non-positive, it is computed from sigmaSpace .
sigmaColor – Filter sigma in the color space. A larger value of the parameter means that
             farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together,
             resulting in larger areas of semi-equal color.
sigmaSpace – Filter sigma in the coordinate space. A larger value of the parameter means that farther
             pixels will influence each other as long as their colors are close enough
             (see sigmaColor). When d>0 , it specifies the neighborhood size regardless of sigmaSpace.
             Otherwise, d is proportional to sigmaSpace .

"""
img = cv2.imread('./data/logo.png')

bilateralFilter = cv2.bilateralFilter(img, 9, 75, 75)
adaptiveBilateralFilter = cv2.adaptiveBilateralFilter(img, (9, 9), 75, maxSigmaColor=75)

plt.subplot(131), plt.imshow(img), plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(132), plt.imshow(bilateralFilter), plt.title('Bilateral Filtering')
plt.xticks([]), plt.yticks([])
plt.subplot(133), plt.imshow(adaptiveBilateralFilter), plt.title('Adaptive Bilateral Filtering')
plt.xticks([]), plt.yticks([])
plt.show()
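One caveat with this demo: cv2.imread returns BGR channel order while plt.imshow expects RGB, so the colors in these subplots appear swapped. Converting before display fixes it, e.g.:

plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))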
    def find_plates(self):
        """
        Find the license plates in the image

        :rtype: list[(numpy.array, numpy.array)]
        :return: List of tuples containing the plate image and the plate rectangle location
            The plates returned must be a grayscale image with black background and white characters
        """

        # Create a grayscale version of the image
        processing_img = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        img_height, img_width = processing_img.shape
        img_area = img_height * img_width

        # Blur the image
        processing_img = cv2.adaptiveBilateralFilter(processing_img, (11, 11), 100)

        if __debug__:
            display.show_image(processing_img, self.label, 'Gray')

        # Apply Sobel filter on the image
        sobel_img = cv2.Sobel(processing_img, cv2.CV_8U, 1, 0, ksize=3, scale=1, delta=0)
        if __debug__:
            display.show_image(sobel_img, self.label, 'Sobel')

        # sobel_img = cv2.morphologyEx(sobel_img, cv2.MORPH_TOPHAT, (3, 3))
        # if __debug__:
        # display.show_image(sobel_img)

        # Apply Otsu's Binary Thresholding
        ret, sobel_img = cv2.threshold(sobel_img, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
        if __debug__:
            display.show_image(sobel_img, self.label, 'Otsu Threshold')

        # TODO: Variable kernel size depending on image size and/or perspective
        k_size = (50, 5)  # Kernel for a very upclose picture
        # k_size = (10, 5)  # Kernel for a distant picture
        element = cv2.getStructuringElement(cv2.MORPH_RECT, k_size)

        # Apply the Close morphology Transformation
        sobel_img = cv2.morphologyEx(sobel_img, cv2.MORPH_CLOSE, element)
        if __debug__:
            display.show_image(sobel_img, self.label, 'Closed Morphology')

        # Find the contours in the image
        contours, hierarchy = cv2.findContours(sobel_img.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        if __debug__:
            display.draw_contours(self.image, contours, self.label)

        rectangles = []
        for itc in contours:
            mr = cv2.minAreaRect(itc)  # Minimum enclosing rectangle
            # mr = (center x, center y), (width, height), angle-of-rotation
            box = cv2.cv.BoxPoints(mr)
            box_points = np.array([[(box[i][0], box[i][1])] for i in range(len(box))])
            if self._check_size(box_points):
                rectangles.append(np.int0(box))  # Rotated minimum enclosing rectangle

        processing_plates = display.get_parts_of_image(processing_img, rectangles)
        ret = []
        # if __debug__:
        # display.display_rectangles(self.image, rectangles)

        for i, processing_plate in enumerate(processing_plates):
            if processing_plate is not None and len(processing_plate) > 0:
                processing_plate = cv2.bitwise_not(processing_plate)
                a, processing_plate = cv2.threshold(processing_plate, 50, 255, cv2.THRESH_OTSU)

                img_height, img_width = processing_plate.shape  # shape is (rows, cols)
                img_area = img_height * img_width

                if img_area < 4500:
                    ret.append((cv2.cvtColor(image.hq2x_zoom(processing_plate), cv2.COLOR_BGR2GRAY), rectangles[i]))
                else:
                    ret.append((processing_plate, rectangles[i]))

        return ret
Example #24
File: test.py Project: Tk01/multivision
def displayVectorizedEdgeData(image, vectorizedEdgeData,i):
    m,n = image.shape
    img = np.zeros((m,n,3), np.uint8)
    
    for (x,y) in vectorizedEdgeData:
        img[x,y] = (255,255,255)
    
    img2=cv2.resize(img,(300,150))
    return img2
    
def findVectorizedEdgeData(img,(x1,y1),(x2,y2)):
    # top half
    filter_length = 5
    sigma = 1
   # result = cv2.bilateralFilter(img,12,17,17)
    result1 = cv2.adaptiveBilateralFilter(img,(13,13),13)  
    #cv2.imshow('img_res',result1)
    #cv2.waitKey(0)
    edges1 = cv2.Canny(np.uint8(result1), 1, 15,L2gradient=True)
    # bottom half
    filter_length = 5
    sigma = 1
    result = cv2.bilateralFilter(img  ,9,20,20)
    #cv2.imshow('img_res',result1)
    #cv2.waitKey(0)
    edges = cv2.Canny(np.uint8(result), 1, 20,L2gradient=True)
    #result = cv2.GaussianBlur(img, (filter_length,filter_length),sigma)  
    mid = (y1 + y2 ) / 2
    
    edges[0:mid][:] = edges1[0:mid][:]
    
Example #25
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    h, s, v = cv2.split(hsv)
    l, a, b = cv2.split(lab)
    a = cv2.equalizeHist(a)
    #h=cv2.equalizeHist(h)
    testwrite(a, "A", imnum, None, None)
    testwrite(h, "H", imnum, None, None)

    sigD = 2
    T = 121
    t = 27
    n = 2

    ksize = (4 * sigD + 1, 4 * sigD + 1)
    ab = a
    ab = cv2.adaptiveBilateralFilter(a, ksize, sigD)
    retval, at = cv2.threshold(ab, t, 1, cv2.THRESH_BINARY_INV)
    lt = 67
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                       (2 * n + 1, 2 * n + 1))
    ao = cv2.morphologyEx(at, cv2.MORPH_OPEN, kernel)
    testwrite(ab, "AB", imnum, None, None)
    testwrite(at * 255, "AT", imnum, None, None)

    contours, hierarchy = cv2.findContours(ao, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    nn = np.size(contours)
    area = np.zeros([nn, 1])
    cx = np.zeros([nn, 1])
    cy = np.zeros([nn, 1])
    loc = np.zeros([nn, 1])
Example #26
    def find_plates(self):
        """
        Find the license plates in the image

        :rtype: list[(numpy.array, numpy.array)]
        :return: List of tuples containing the plate image and the plate rectangle location
            The plates returned must be a grayscale image with black background and white characters
        """

        # Create a grayscale version of the image
        gray_img = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)

        # Blur the image
        gray_img = cv2.adaptiveBilateralFilter(gray_img, (11, 11), 100)

        if __debug__:
            display.show_image(gray_img, 'Gray')

        blur_kernel_size = (3, 3)
        thresh = cv2.adaptiveThreshold(gray_img, 255,
                                       cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY_INV, 17, 2)
        blurred = cv2.GaussianBlur(thresh, blur_kernel_size, 0)

        if __debug__:
            display.show_image(blurred, 'Blurred')

        edges = cv2.Canny(blurred, 100, 100, 3)
        if __debug__:
            display.show_image(edges, 'Canny edges')

        contours, hierarchy = cv2.findContours(edges.copy(), cv2.RETR_LIST,
                                               cv2.CHAIN_APPROX_SIMPLE)
        if __debug__:
            display.draw_contours(self.image, contours)

        rectangles = []
        for i in contours:
            area = cv2.contourArea(i)
            if area > 50:
                peri = cv2.arcLength(i, True)
                approx = cv2.approxPolyDP(i, 0.02 * peri, True)

                if len(approx) == 4 and cv2.isContourConvex(approx):
                    if self._check_size(approx):
                        rectangles.append(approx)

        processing_plates = display.get_parts_of_image(self.image, rectangles)
        ret = []

        for i, processing_plate in enumerate(processing_plates):
            processing_plate = cv2.cvtColor(processing_plate,
                                            cv2.COLOR_BGR2GRAY)
            processing_plate = cv2.bitwise_not(processing_plate)
            a, processing_plate = cv2.threshold(processing_plate, 50, 255,
                                                cv2.THRESH_OTSU)

            img_height, img_width = processing_plate.shape
            img_area = img_height * img_width

            # If the area of the plate is below 4500, perform hq2x on the plate
            if img_area < 4500:
                ret.append((cv2.cvtColor(image.hq2x_zoom(processing_plate),
                                         cv2.COLOR_BGR2GRAY), rectangles[i]))
            else:
                ret.append((processing_plate, rectangles[i]))

        return ret
            str(kbuffersize), rose_subsequence_mm_outputname
        ])

        frame_mask = mask
        recover_base = cv2.imread(rose_subsequence_mm_outputname)

        #using bilateral filtering
        temp = np.zeros((recover_base.shape[0] * 3, recover_base.shape[1],
                         recover_base.shape[2]),
                        dtype=np.uint8)
        for i in range(0, recover_base.shape[0]):
            temp[3 * i + 0, :, :] = recover_base[i, :, :]
            temp[3 * i + 1, :, :] = recover_base[i, :, :]
            temp[3 * i + 2, :, :] = recover_base[i, :, :]

        temp2 = cv2.adaptiveBilateralFilter(temp, (3, 15), 200.0)

        for i in range(0, recover_base.shape[0]):
            recover_base[i, :, :] = temp2[3 * i + 1, :, :]

        _frame = np.zeros(firstframe.shape, dtype=np.uint8)

        for i in range(
                0, KEYFRAME_INDICES[index] - KEYFRAME_INDICES[index - 1] +
                kbuffersize + 1):

            _frame = lerp_image(
                first_keyframe, last_keyframe, i,
                KEYFRAME_INDICES[index] - KEYFRAME_INDICES[index - 1],
                frame_mask)
            _frame[frame_mask == 0] = (recover_base[:, i, :]).reshape(
Example #29
    def adaptive_bilateral_filter_do(self, image):
        return cv2.adaptiveBilateralFilter(
            image,
            (self.config.adaptive_bilateral_filter_kernel_x.get_value_as_int(),
             self.config.adaptive_bilateral_filter_kernel_y.get_value_as_int()),
            self.config.adaptive_bilateral_filter_sigma_space.get_value_as_int(),
            self.config.adaptive_bilateral_filter_max_sigma_color.get_value_as_int())