Example #1
    def preproc_map_img(self, map_img):
        """ Preprocesses the map image Soft, erode or whtaever it is necessary to improve the input"""
        #Apply threshold to have just black and white
        thresh_img=cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
        cv.Threshold(map_img, thresh_img, 250, 255, cv.CV_THRESH_BINARY)

        #Blur map's thresholded image
        soft_img=cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
        cv.Smooth(thresh_img, soft_img, cv.CV_GAUSSIAN, 9, 9)

        #Dilate the blurred map to get its skeleton
        dilated_img = cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
        cv.Dilate(soft_img, dilated_img, iterations=20)

        #Create inverse image
#        dilated_inverted_img=cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
#        for r in range(0,dilated_img.rows):
#            for c in range(0,dilated_img.cols):
#                dilated_inverted_img[r,c]=255-dilated_img[r,c]

        #Enhance image edges for the Hough transform
        canny_img=cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
        cv.Canny(soft_img, canny_img, 200,220)

        preprocessed_map = dilated_img
        return preprocessed_map
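For comparison, a rough sketch of the same preprocessing chain written against the modern cv2/NumPy API (assuming map_img is a single-channel uint8 array; the 3x3 kernel mirrors cv.Dilate's default):

import cv2
import numpy as np

def preproc_map_img_cv2(map_img):
    # Threshold to pure black and white, blur, then dilate, as above
    _, thresh_img = cv2.threshold(map_img, 250, 255, cv2.THRESH_BINARY)
    soft_img = cv2.GaussianBlur(thresh_img, (9, 9), 0)
    dilated_img = cv2.dilate(soft_img, np.ones((3, 3), np.uint8), iterations=20)
    return dilated_img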
Example #2
    def cannyGradient(self, image, t1=20, t2=250):
        '''Returns the canny gradient'''
        #Checks whether inputs are correct
        if self.image_check(image) < 0:
            return -1

        #Convert the image to grayscale if it is not already single-channel
        if image.channels > 1:
            gsimage = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
            cv.CvtColor(image, gsimage, cv.CV_BGR2GRAY)
        else:
            gsimage = image

        #Gets the edges from the image
        edges = cv.CreateImage(cv.GetSize(gsimage), cv.IPL_DEPTH_8U, 1)

        #Warning: threshold1 and threshold2 should be selected by experiment
        cv.Canny(gsimage, edges, threshold1=t1, threshold2=t2)

        if self.visualize:
            cv.NamedWindow("Original")
            cv.NamedWindow("Edges")
            while True:
                cv.ShowImage("Original", gsimage)
                cv.ShowImage("Edges", edges)
                c = cv.WaitKey(5)
                if c > 0:
                    break
        cv.DestroyAllWindows()
        return edges
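The same edge extraction with the modern cv2 API, as a minimal sketch (assuming a BGR or grayscale uint8 NumPy array as input):

import cv2

def canny_gradient_cv2(image, t1=20, t2=250):
    # Convert to grayscale only when the input has more than one channel
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if image.ndim == 3 else image
    # As noted above, threshold1 and threshold2 should be tuned by experiment
    return cv2.Canny(gray, t1, t2)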
Example #3
import urllib2

import cv2
import cv2.cv as cv
import numpy as np


def url_jpg_contours(url):
    position = 100
    filedata = urllib2.urlopen(url).read()
    imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
    cv.SetData(imagefiledata, filedata, len(filedata))
    im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    col_edge = cv.CreateImage((im.width, im.height), 8, 3)

    # convert to grayscale
    gray_im = cv.CreateImage((im.width, im.height), 8, 1)
    edge_im = cv.CreateImage((im.width, im.height), 8, 1)
    cv.CvtColor(im, gray_im, cv.CV_BGR2GRAY)
    cv.Canny(gray_im, edge_im, position, position * 3, 3)
    cv.SetZero(col_edge)
    # copy edge points
    cv.Copy(im, col_edge, edge_im)
    edge_im_array = np.asarray(edge_im[:])

    ret, edge_im_array = cv2.threshold(edge_im_array, 127, 255,
                                       cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(edge_im_array, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    scale = 10000.0
    points = []
    for contour in contours:
        for i in contour:
            for j in i:
                lng_offset = j[0] / scale
                lat_offset = j[1] / scale
                points.append([lng_offset, lat_offset])
    return points
Example #4
import cv2.cv as cv
from math import pi


def find_Lines(im):
    out = cv.CreateImage(cv.GetSize(im), 8, 1)
    tmp = cv.CreateImage(cv.GetSize(im), 8, 3)
    storage = cv.CreateMemStorage(0)
    cv.Canny(im, out, 50, 200, 3)
    cv.CvtColor(out, tmp, cv.CV_GRAY2BGR)
    return cv.HoughLines2(out, storage, cv.CV_HOUGH_STANDARD, 1, pi / 180, 100,
                          0, 0)
Example #5
import math

import cv2.cv as cv


def lines2():
    im = cv.LoadImage('roi_edges.jpg', cv.CV_LOAD_IMAGE_GRAYSCALE)
    pi = math.pi
    x = 0
    dst = cv.CreateImage(cv.GetSize(im), 8, 1)
    cv.Canny(im, dst, 200, 200)
    cv.Threshold(dst, dst, 100, 255, cv.CV_THRESH_BINARY)
    color_dst_standard = cv.CreateImage(cv.GetSize(im), 8, 3)
    cv.CvtColor(im, color_dst_standard,
                cv.CV_GRAY2BGR)  #Create output image in RGB to put red lines
    lines = cv.HoughLines2(dst, cv.CreateMemStorage(0), cv.CV_HOUGH_STANDARD,
                           1, pi / 100, 71, 0, 0)
    klaver = 0
    kraver = 0
    kl = []
    kr = []

    for (rho, theta) in lines[:100]:
        a = math.cos(theta)
        b = math.sin(theta)
        x0 = a * rho
        y0 = b * rho
        pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a)))
        pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a)))
        # slope of the line through pt1 and pt2
        k = ((y0 - 1000 * (a)) - (y0 + 1000 * (a))) / ((x0 - 1000 * (-b)) -
                                                       (x0 + 1000 * (-b)))

        if abs(k) < 0.4:
            # ignore near-horizontal lines
            pass
        elif k > 0:
            # right lane candidate
            kr.append(k)
            kraver = sum(kr) / len(kr)
            cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2, 4)
        elif k < 0:
            # left lane candidate
            kl.append(k)
            klaver = sum(kl) / len(kl)
            cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2, 4)
        #print k
    #  cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2, 4)
    cv.SaveImage('lane.jpg', color_dst_standard)
    print 'Average left lane slope:', klaver, '  Average right lane slope:', kraver
    cv.ShowImage("Hough Standard", color_dst_standard)
    cv.WaitKey(0)
Example #6
import cv2.cv as cv


def Canny(image, low_thr=50, hi_thr=150):
    # PERFECT: the pupils (at close range) seem to be captured
    # well... TODO: tweak the Canny parameters
    yuv = cv.CreateImage(cv.GetSize(image), 8, 3)
    gray = cv.CreateImage(cv.GetSize(image), 8, 1)
    cv.CvtColor(image, yuv, cv.CV_BGR2YCrCb)
    cv.Split(yuv, gray, None, None, None)
    canny = cv.CreateImage(cv.GetSize(image), 8, 1)
    cv.Canny(gray, canny, low_thr, hi_thr)
    cv.NamedWindow('Canny')
    cv.ShowImage('Canny', canny)
Example #7
def find_squares4(color_img):
    """
    Finds multiple squares in an image.

    Steps:
    -Use Canny edge detection to highlight contours, and dilation to connect
    the edge segments.
    -Threshold the result to a binary edge map.
    -Use cv.FindContours: returns a cv.CvSequence of cv.CvContours.
    -Filter each candidate: approximate the polygon, keep only contours with
    4 vertices, enough area, and ~90 degree angles.

    Returns all square contours in one flat list of arrays, 4 (x, y) points each.
    """
    #select even sizes only
    width, height = (color_img.width & -2, color_img.height & -2)
    timg = cv.CloneImage(color_img)  # make a copy of input image
    gray = cv.CreateImage((width, height), 8, 1)

    # select the maximum ROI in the image
    cv.SetImageROI(timg, (0, 0, width, height))

    # down-scale and upscale the image to filter out the noise
    pyr = cv.CreateImage((width / 2, height / 2), 8, 3)
    cv.PyrDown(timg, pyr, 7)
    cv.PyrUp(pyr, timg, 7)

    tgray = cv.CreateImage((width, height), 8, 1)
    squares = []

    # Find squares in every color plane of the image
    # Two methods, we use both:
    # 1. Canny to catch squares with gradient shading. Use upper threshold
    # from slider, set the lower to 0 (which forces edges merging). Then
    # dilate canny output to remove potential holes between edge segments.
    # 2. Binary thresholding at multiple levels
    N = 11
    for c in [0, 1, 2]:
        #extract the c-th color plane
        cv.SetImageCOI(timg, c + 1)
        cv.Copy(timg, tgray, None)
        cv.Canny(tgray, gray, 0, 50, 5)
        cv.Dilate(gray, gray)
        squares = squares + find_squares_from_binary(gray)

        # Look for more squares at several threshold levels
        for l in range(1, N):
            cv.Threshold(tgray, gray, (l + 1) * 255 / N, 255,
                         cv.CV_THRESH_BINARY)
            squares = squares + find_squares_from_binary(gray)

    return squares
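find_squares_from_binary is not shown in this example; the "~90 degree angles" filter mentioned in the docstring is usually implemented with a cosine test along the lines of this sketch (angle_cos is a hypothetical helper, not part of the code above):

import numpy as np

def angle_cos(p0, p1, p2):
    # Cosine of the corner angle at p1 formed with p0 and p2;
    # values close to 0 correspond to roughly 90 degree corners.
    d1 = np.asarray(p0, dtype=float) - np.asarray(p1, dtype=float)
    d2 = np.asarray(p2, dtype=float) - np.asarray(p1, dtype=float)
    return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2) + 1e-10))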
Example #8
def render_outline_image(image_id, threshold):
    im = cv.LoadImage("Image" + str(image_id) + ".bmp", cv.CV_LOAD_IMAGE_COLOR)
    gray = cv.CreateImage((im.width, im.height), 8, 1)
    edge = cv.CreateImage((im.width, im.height), 8, 1)
    im_bw1 = cv.CreateImage((im.width, im.height), 8, 1)
    cv.CvtColor(im, gray, cv.CV_BGR2GRAY)
    cv.Not(gray, edge)
    im_white = cv.LoadImage("white.bmp", cv.CV_LOAD_IMAGE_COLOR)
    white = cv.CreateImage((im_white.width, im_white.height), 8, 1)
    cv.Canny(gray, edge, threshold, 125 * 3, 3)
    # cv.Not(white, edge)
    cv.SaveImage("edge_image.png", edge)
    jpg1 = wx.Image('edge_image.png', wx.BITMAP_TYPE_ANY).ConvertToBitmap()
    os.remove("edge_image.png")
    return jpg1
Example #9
def on_trackbar(position):

    cv.Smooth(gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.Not(gray, edge)

    # run the edge detector on gray scale
    cv.Canny(gray, edge, position, position * 3, 3)

    # reset
    cv.SetZero(col_edge)

    # copy edge points
    cv.Copy(im, col_edge, edge)

    # show the im
    cv.ShowImage(win_name, col_edge)
Example #10
    def on_trackbar(self, position):

        cv.Smooth(self.source_image, self.edge, cv.CV_BLUR, 3, 3, 0)
        cv.Not(self.source_image, self.edge)

        # run the edge detector on gray scale
        cv.Canny(self.source_image, self.edge, position, position * 3, 3)

        # reset
        cv.SetZero(self.col_edge)

        # copy edge points
        cv.Copy(self.source_color, self.col_edge, self.edge)

        # show the im
        cv.ShowImage(win_name, self.col_edge)
        self.process_image(position)
Example #11
def on_trackbar(position):
    '''
    position is the value of the track bar
    '''
    img_result = cv.CreateImage(src_img_size, 8, 1)
    cv.Canny(img_gray, img_result, position, position*2, 3)
    cv.ShowImage("contours", img_result)
    storage = cv.CreateMemStorage()
    contours = cv.FindContours(img_result, storage,  cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
    print contours
    # draw contours in red and green
    cv.DrawContours (img_result, #dest image
        contours, #input contours
        _red, #color of external contour
        _green, #color of internal contour
        levels, #maxlevel of contours to draw
        _contour_thickness,
        cv.CV_AA, #line type
        (0, 0)) #offset
    pass
Example #12
from PIL import Image

import cv2.cv as cv


def analyzeImage(f, name):

    im = Image.open(f)
    try:
        if (im.size[0] == 1 or im.size[1] == 1):
            return
        print(name + ' : ' + str(im.size[0]) + ',' + str(im.size[1]))
        le = 1
        if (type(im.getpixel((0, 0))) == type((1, 2))):
            le = len(im.getpixel((0, 0)))
        gray = cv.CreateImage((im.size[0], im.size[1]), 8, 1)
        edge1 = cv.CreateImage((im.size[0], im.size[1]), 32, 1)
        edge2 = cv.CreateImage((im.size[0], im.size[1]), 8, 1)
        edge3 = cv.CreateImage((im.size[0], im.size[1]), 32, 3)

        for h in range(im.size[1]):
            for w in range(im.size[0]):
                p = im.getpixel((w, h))
                if (type(p) == type(1)):
                    gray[h, w] = p
                else:
                    gray[h, w] = p[0]

        cv.CornerHarris(gray, edge1, 5, 5, 0.1)
        cv.Canny(gray, edge2, 20, 100)

        cv.NamedWindow("win")
        cv.ShowImage("win", gray)
        cv.NamedWindow("win2")
        cv.ShowImage("win2", edge1)
        cv.NamedWindow("win3")
        cv.ShowImage("win3", edge2)

        cv.WaitKey()

        f.close()
    except Exception, e:
        print e
        print 'ERROR: problem handling ' + name
Example #13
    def ImagePro(self, capture, orig, processed, storage, grid):
        orig = cv.QueryFrame(capture)
        #cv.Normalize(orig)
        # filter for all yellow and blue - everything else is black
        processed = processor.colorFilterCombine(orig, "yellow", "blue", s)

        # Some processing and smoothing for easier circle detection
        cv.Canny(processed, processed, 5, 70, 3)
        cv.Smooth(processed, processed, cv.CV_GAUSSIAN, 7, 7)

        #cv.ShowImage('processed2', processed)

        # Find&Draw circles
        processor.find_circles(processed, storage, 100)

        #if it is in the range of 1 to 9, we can try and recalibrate our filter
        #if 1 <= storage.rows < 10:
        #    s = autocalibrate(orig, storage)

        processor.draw_circles(storage, orig)

        #warp = processor.update_grid(storage, orig, grid)

        # Delete and recreate the storage so it has the correct width
        #del(storage)
        #storage = cv.CreateMat(orig.width, 1, cv.CV_32FC3)

        #cv.ShowImage('output', orig)

        #return processed
        #cv.ShowImage('grid', warp)

        #warp = perspective_transform(orig)
        #cv.ShowImage('warped', warp)
        mask = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 3)
        cv.Resize(orig, mask)
        return mask
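processor.find_circles is not included in this snippet; with the modern API, circle detection on a smoothed edge image is commonly done with cv2.HoughCircles, roughly as in this sketch (parameter values are illustrative only, and cv2.HOUGH_GRADIENT assumes OpenCV 3 or newer):

import cv2
import numpy as np

def find_circles_cv2(gray, min_dist=100):
    # dp=2 halves the accumulator resolution; param2 is the accumulator threshold
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 2, min_dist,
                               param1=70, param2=40, minRadius=5, maxRadius=120)
    return [] if circles is None else np.uint16(np.around(circles[0]))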
Example #14
    else:
        url = 'http://code.opencv.org/svn/opencv/trunk/opencv/doc/pics/building.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        src = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)

    cv.NamedWindow("Source", 1)
    cv.NamedWindow("Hough", 1)

    while True:
        dst = cv.CreateImage(cv.GetSize(src), 8, 1)
        color_dst = cv.CreateImage(cv.GetSize(src), 8, 3)
        storage = cv.CreateMemStorage(0)
        lines = 0
        cv.Canny(src, dst, 50, 200, 3)
        cv.CvtColor(dst, color_dst, cv.CV_GRAY2BGR)

        if USE_STANDARD:
            lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_STANDARD, 1,
                                   pi / 180, 100, 0, 0)
            for (rho, theta) in lines[:100]:
                a = cos(theta)
                b = sin(theta)
                x0 = a * rho
                y0 = b * rho
                pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a)))
                pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a)))
                cv.Line(color_dst, pt1, pt2, cv.RGB(255, 0, 0), 3, 8)
        else:
            lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_PROBABILISTIC, 1,
Example #15
    def process_image(self, slider_pos):
        """
        This function finds contours, draws them and their approximation by ellipses.
        """
        use_this = self.source_image
        if self.intensity == False:
            cv.Smooth(self.source_image, self.edge, cv.CV_BLUR, 9, 9, 0)
            cv.Not(self.source_image, self.edge)

            # run the edge detector on gray scale
            cv.Canny(self.source_image, self.edge, slider_pos, slider_pos * 3,
                     3)

            # reset
            cv.SetZero(self.col_edge)

            # copy edge points
            cv.Copy(self.source_color, self.col_edge, self.edge)
            use_this = self.edge

        stor = cv.CreateMemStorage()

        # Create the destination images
        image02 = cv.CloneImage(use_this)
        cv.Zero(image02)
        image04 = cv.CreateImage(cv.GetSize(self.source_image),
                                 cv.IPL_DEPTH_8U, 3)
        cv.Zero(image04)

        # Threshold the source image. This is needed for cv.FindContours().
        cv.Threshold(use_this, image02, slider_pos, 255, cv.CV_THRESH_BINARY)

        # Find all contours.
        cont = cv.FindContours(image02, stor, cv.CV_RETR_LIST,
                               cv.CV_CHAIN_APPROX_NONE, (0, 0))

        for c in contour_iterator(cont):
            # Number of points must be at least 6 for cv.FitEllipse2
            if len(c) >= 6:
                # Copy the contour into an array of (x,y)s
                PointArray2D32f = cv.CreateMat(1, len(c), cv.CV_32FC2)
                for (i, (x, y)) in enumerate(c):
                    PointArray2D32f[0, i] = (x, y)

                # Draw the current contour in gray
                gray = cv.CV_RGB(100, 100, 100)
                cv.DrawContours(image04, c, gray, gray, 0, 1, 8, (0, 0))

                # Fits ellipse to current contour.
                (center, size, angle) = cv.FitEllipse2(PointArray2D32f)

                # Convert ellipse data from float to integer representation.
                center = (cv.Round(center[0]), cv.Round(center[1]))
                size = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5))

                # Draw ellipse in random color
                color = cv.CV_RGB(random.randrange(256), random.randrange(256),
                                  random.randrange(256))
                cv.Ellipse(image04, center, size, angle, 0, 360, color, 2,
                           cv.CV_AA, 0)

        # Show image. HighGUI use.
        cv.ShowImage("Result", image04)
Example #16
cv.ShowImage("Blue", b)
cv.ShowImage("Green", g)
cv.ShowImage("Red", r)
cv.ShowImage("Merged", merged)  #merged=fucionado

cv.SaveImage("blue.png", b)
cv.SaveImage("verde.png", g)
cv.SaveImage("rojo.png", r)
cv.SaveImage("fucion.png", merged)  #merged=fucionado
cv.WaitKey(0)
enter = raw_input("Show Canny")
#canny
#grayscale image
pi = math.pi
dst = cv.CreateImage(cv.GetSize(b), 8, 1)
cv.Canny(b, dst, 100, 200)
cv.Threshold(dst, dst, 100, 255, cv.CV_THRESH_BINARY)
color_dst_standard = cv.CreateImage(cv.GetSize(b), 8, 3)
cv.CvtColor(b, color_dst_standard, cv.CV_GRAY2BGR)
lines = cv.HoughLines2(dst, cv.CreateMemStorage(0), cv.CV_HOUGH_STANDARD, 1,
                       pi / 180, 100, 0, 0)
cv.ShowImage("Greyscale edges canny", dst)
cv.SaveImage("Greyscale edges canny.png", dst)
cv.WaitKey(0)

#blue channel
pi = math.pi
dst = cv.CreateImage(cv.GetSize(b), 8, 1)
cv.Canny(b, dst, 100, 200)
cv.Threshold(dst, dst, 100, 255, cv.CV_THRESH_BINARY)
color_dst_standard = cv.CreateImage(cv.GetSize(b), 8, 3)
Example #17
import time
import math
import matplotlib.pyplot as plt
import cv2.cv as cv2  # note: the legacy cv API is aliased as cv2 in this example
import numpy as np

# Photo with a square background
img = Image('/home/pi/Documents/Lab3/foto4.png')
(r, g, b) = img.splitChannels(False)

r.save('/home/pi/Documents/Lab3/red4.png')
g.save('/home/pi/Documents/Lab3/green4.png')
b.save('/home/pi/Documents/Lab3/blue4.png')

img = cv2.LoadImage('/home/pi/Documents/Lab3/foto4.png',
                    cv2.CV_LOAD_IMAGE_GRAYSCALE)

pi = math.pi

dst = cv2.CreateImage(cv2.GetSize(img), 8, 1)
cv2.Canny(img, dst, 100, 200)
cv2.Threshold(dst, dst, 100, 255, cv2.CV_THRESH_BINARY)
color_dst_standard = cv2.CreateImage(cv2.GetSize(img), 8, 3)
cv2.CvtColor(img, color_dst_standard, cv2.CV_GRAY2BGR)
lines = cv2.HoughLines2(dst, cv2.CreateMemStorage(0), cv2.CV_HOUGH_STANDARD, 1,
                        pi / 180, 100, 0, 0)
cv2.ShowImage('Image', img)
cv2.ShowImage("Cannied", dst)

cv2.WaitKey(0)
Example #18
dst = cv.CreateImage(size, 8, 3)
smoothed = cv.CreateImage(size, 8, 3)
dst3 = cv.CreateImage(size, 8, 3)
cv.Resize(im, dst)

for i in range(4):
    cv.Smooth(dst, smoothed, cv.CV_BILATERAL, 30, 1, 32, 32)
    cv.Smooth(smoothed, dst, cv.CV_BILATERAL, 30, 1, 32, 32)
    cv.Smooth(dst, smoothed, cv.CV_BILATERAL, 30, 1, 32, 32)

lap = cv.CreateImage(cv.GetSize(smoothed), cv.IPL_DEPTH_16S, 3)
#laplace = cv.Laplace(smoothed, lap)
gray = cv.CreateImage(cv.GetSize(smoothed), 8, 1)
canny = cv.CreateImage(cv.GetSize(smoothed), 8, 1)
cv.CvtColor(smoothed, gray, cv.CV_BGR2GRAY)
cv.Canny(gray, canny, 20, 20, 3)

#ret, thresh = cv.threshold(imgray, 127, 255, 0)

cv.NamedWindow('Original')
cv.MoveWindow('Original', 10, 10)
cv.ShowImage('Original', im)
cv.NamedWindow('Smoothed')
cv.MoveWindow('Smoothed', 600, 100)
cv.ShowImage('Smoothed', smoothed)
cv.NamedWindow('Laplace')
cv.MoveWindow('Laplace', 600, 100)
cv.ShowImage('Canny', canny)
cv.ShowImage('Gray', gray)
cv.WaitKey(0)
#cv.SaveImage("smoothed.png",dst)
Example #19
import cv2.cv as cv

capture = cv.CaptureFromFile('img/paulvideo.avi')

nbFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
wait = int(1 / fps * 1000 / 1)

dst = cv.CreateImage(
    (int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)),
     int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))), 8, 1)

for f in xrange(nbFrames):

    frame = cv.QueryFrame(capture)

    cv.CvtColor(frame, dst, cv.CV_BGR2GRAY)
    cv.Canny(dst, dst, 125, 350)
    cv.Threshold(dst, dst, 128, 255, cv.CV_THRESH_BINARY_INV)

    cv.ShowImage("The Video", frame)
    cv.ShowImage("The Dst", dst)
    cv.WaitKey(wait)
Example #20
def extract_features(filename, is_url=False):
    '''Extracts features to be used in text image classifier.
    :param filename: input image
    :param is_url: is input image a url or a file path on disk
    :return: tuple of features:
    (average_slope, median_slope, average_tilt, median_tilt, median_differences, average_differences, nr_straight_lines)
    Most relevant ones are average_slope, average_differences and nr_straight_lines.
    '''

    if is_url:
        filedata = urllib2.urlopen(filename).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        src = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        src = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE)

    # normalize size
    normalized_size = 400

    # smaller dimension will be 400, longer dimension will be proportional
    orig_size = cv.GetSize(src)

    max_dim_idx = max(enumerate(orig_size), key=lambda l: l[1])[0]
    min_dim_idx = [idx for idx in [0, 1] if idx != max_dim_idx][0]
    new_size = [0, 0]
    new_size[min_dim_idx] = normalized_size
    new_size[max_dim_idx] = int(
        float(orig_size[max_dim_idx]) / orig_size[min_dim_idx] *
        normalized_size)
    dst = cv.CreateImage(new_size, 8, 1)
    cv.Resize(src, dst)
    # cv.SaveImage("/tmp/resized.jpg",dst)
    src = dst

    dst = cv.CreateImage(cv.GetSize(src), 8, 1)
    color_dst = cv.CreateImage(cv.GetSize(src), 8, 3)
    storage = cv.CreateMemStorage(0)

    cv.Canny(src, dst, 50, 200, 3)
    cv.CvtColor(dst, color_dst, cv.CV_GRAY2BGR)

    slopes = []
    # difference between xs or ys - variant of slope
    tilts = []
    # x coordinates of horizontal lines
    horizontals = []
    # y coordinates of vertical lines
    verticals = []

    if USE_STANDARD:
        coords = cv.HoughLines2(dst, storage, cv.CV_HOUGH_STANDARD, 1,
                                pi / 180, 50, 50, 10)
        lines = []
        for coord in coords:
            (rho, theta) = coord
            a = cos(theta)
            b = sin(theta)
            x0 = a * rho
            y0 = b * rho
            pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a)))
            pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a)))
            lines += [(pt1, pt2)]

    else:
        lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_PROBABILISTIC, 1,
                               pi / 180, 50, 50, 10)

    # eliminate duplicates - there are many especially with the standard version
    # first round the coordinates to integers divisible with 5 (to eliminate different but really close ones)
    # TODO
    # lines = list(set(map(lambda l: tuple([int(p) - int(p)%5 for p in l]), lines)))

    nr_straight_lines = 0
    for line in lines:
        (pt1, pt2) = line

        # compute slope, rotate the line so that the slope is smallest
        # (slope is either delta x/ delta y or the reverse)
        # add smoothing term in denominator in case of 0
        slope = min(
            abs(pt1[1] - pt2[1]),
            (abs(pt1[0] - pt2[0]))) / (max(abs(pt1[1] - pt2[1]),
                                           (abs(pt1[0] - pt2[0]))) + 0.01)
        # if slope < 0.1:
        # if slope < 5:
        if slope < 0.05:
            if abs(pt1[0] - pt2[0]) < abs(pt1[1] - pt2[1]):
                # means it's a horizontal line
                horizontals.append(pt1[0])
            else:
                verticals.append(pt1[1])
        if slope < 0.05:
            # if slope < 5:
            # if slope < 0.1:
            nr_straight_lines += 1
        slopes.append(slope)
        tilts.append(min(abs(pt1[1] - pt2[1]), (abs(pt1[0] - pt2[0]))))
        # print slope
    average_slope = sum(slopes) / float(len(slopes))
    median_slope = npmedian(nparray(slopes))
    average_tilt = sum(tilts) / float(len(tilts))
    median_tilt = npmedian(nparray(tilts))
    differences = []
    horizontals = sorted(horizontals)
    verticals = sorted(verticals)
    print "x_differences:"
    for (i, x) in enumerate(horizontals):
        if i > 0:
            # print abs(horizontals[i] - horizontals[i-1])
            differences.append(abs(horizontals[i] - horizontals[i - 1]))
    print "y_differences:"
    for (i, y) in enumerate(verticals):
        if i > 0:
            # print abs(verticals[i] - verticals[i-1])
            differences.append(abs(verticals[i] - verticals[i - 1]))

    print filename
    print "average_slope:", average_slope
    print "median_slope:", median_slope
    print "average_tilt:", average_tilt
    print "median_tilt:", median_tilt
    median_differences = npmedian(nparray(differences))
    print "median_differences:", median_differences
    if not differences:
        # big random number for average difference
        average_differences = 50
    else:
        average_differences = sum(differences) / float(len(differences))
    print "average_differences:", average_differences
    print "nr_lines:", nr_straight_lines

    # print "sorted xs:", sorted(lines)

    return (average_slope, median_slope, average_tilt, median_tilt,
            median_differences, average_differences, nr_straight_lines)
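The slope measure used above can be isolated for clarity; a small sketch (line_slope is a hypothetical helper name). It returns the smaller coordinate difference divided by the larger one, so an almost axis-aligned segment such as (0, 0)-(100, 3) scores about 0.03:

def line_slope(pt1, pt2):
    # Ratio of the smaller to the larger coordinate difference, with a small
    # smoothing term in the denominator to avoid division by zero.
    dx = abs(pt1[0] - pt2[0])
    dy = abs(pt1[1] - pt2[1])
    return min(dx, dy) / (max(dx, dy) + 0.01)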
Example #21
    frame = cv.QueryFrame(cap)
    cv.ShowImage('camera', frame)

    # Convert to greyscale
    grey = cv.CreateImage(cv.GetSize(frame), frame.depth, 1)
    cv.CvtColor(frame, grey, cv.CV_RGB2GRAY)
    cv.ShowImage('greyscale', grey)

    # Gaussian blur to remove noise
    blur = cv.CreateImage(cv.GetSize(grey), cv.IPL_DEPTH_8U, grey.channels)
    cv.Smooth(grey, blur, cv.CV_GAUSSIAN, 5, 5)
    cv.ShowImage('Gaussian Blur', blur)

    # And do Canny edge detection
    canny = cv.CreateImage(cv.GetSize(blur), blur.depth, blur.channels)
    cv.Canny(blur, canny, 10, 100, 3)
    cv.ShowImage('Edge Detect', canny)

    c = cv.WaitKey(50)
    if c == 27:
        exit(0)

    # Apparently not supported for my cameras:
    # print "FPS:", cv.GetCaptureProperty(cap, cv.CV_CAP_PROP_FPS)

    print "Frame", frames
    frames += 1

    if frames % 10 == 0:
        currtime = time.time()
        numsecs = currtime - start_time
Example #22
def findSquares4(img, storage):
    N = 11
    sz = (img.width & -2, img.height & -2)
    timg = cv.CloneImage(img)  # make a copy of input image
    gray = cv.CreateImage(sz, 8, 1)
    pyr = cv.CreateImage((sz[0] / 2, sz[1] / 2), 8, 3)
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.CreateSeq(0, sizeof_CvSeq, sizeof_CvPoint, storage)
    squares = CvSeq_CvPoint.cast(squares)

    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.GetSubRect(timg, (0, 0, sz[0], sz[1]))

    # down-scale and upscale the image to filter out the noise
    cv.PyrDown(subimage, pyr, 7)
    cv.PyrUp(pyr, subimage, 7)
    tgray = cv.CreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cv.Split(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if(l == 0):
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.Canny(tgray, gray, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.Dilate(gray, gray, None, 1)
            else:
                # apply threshold if l!=0:
                #     tgray(x, y) = gray(x, y) < (l+1)*255/N ? 255 : 0
                cv.Threshold(tgray, gray, (l+1)*255/N, 255, cv.CV_THRESH_BINARY)

            # find contours and store them all as a list
            count, contours = cv.FindContours(gray, storage, sizeof_CvContour,
                cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))

            if not contours:
                continue

            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.ApproxPoly(contour, sizeof_CvContour, storage,
                    cv.CV_POLY_APPROX_DP, cv.ContourPerimeter(contours)*0.02, 0)
                # square contours should have 4 vertices after approximation
                # relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of an area is used because
                # area may be positive or negative - in accordance with the
                # contour orientation
                if(result.total == 4 and
                    abs(cv.ContourArea(result)) > 1000 and
                    cv.CheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if(i >= 2):
                            t = abs(angle(result[i], result[i-2], result[i-1]))
                            if s<t:
                                s=t
                    # if cosines of all angles are small
                    # (all angles are ~90 degrees) then write the quadrangle
                    # vertices to the resultant sequence
                    if(s < 0.3):
                        for i in range(4):
                            squares.append(result[i])

    return squares
Example #23
def url_jpg_contours():
    position = 100
    url = 'http://i12.tietuku.com/05ef0b29030fa46c.jpg'
    filedata = urllib2.urlopen(url).read()
    imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
    print imagefiledata  #<cvmat(type=42424000 8UC1 rows=1 cols=48230 step=48230 )>
    cv.SetData(imagefiledata, filedata, len(filedata))
    im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    col_edge = cv.CreateImage((im.width, im.height), 8, 3)

    # convert to grayscale
    gray_im = cv.CreateImage((im.width, im.height), 8, 1)
    edge_im = cv.CreateImage((im.width, im.height), 8, 1)
    cv.CvtColor(im, gray_im, cv.CV_BGR2GRAY)
    cv.Canny(gray_im, edge_im, position, position * 3, 3)
    cv.SetZero(col_edge)
    # copy edge points
    cv.Copy(im, col_edge, edge_im)
    #ret, edge_jpg = cv2.imencode('.jpg', edge_im, [int(cv.CV_IMWRITE_JPEG_QUALITY), 80])
    edge_im_array = np.asarray(edge_im[:])

    print type(edge_im_array)
    #edge_jpg_gray = cv2.cvtColor(edge_im_array,cv2.COLOR_BGR2GRAY)
    ret, edge_im_array = cv2.threshold(edge_im_array, 127, 255,
                                       cv2.THRESH_BINARY)
    print type(edge_im_array)
    # CHAIN_APPROX_SIMPLE compresses horizontal, vertical and diagonal segments,
    # keeping only their end points; a rectangular contour needs just 4 points
    contours, hierarchy = cv2.findContours(edge_im_array, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    contours_img = cv2.cvtColor(edge_im_array, cv2.COLOR_GRAY2BGR)
    url_str_len_contours = str(len(contours))  # number of contours
    str_len_contours = str(len(contours))  # number of contours

    # data processing

    first_contours = contours[0]  # coordinate set of the first contour, a numpy.ndarray

    first_contours_list = first_contours.tolist()
    #print contours                      # all contour coordinate sets
    #print contours[-1]                  # coordinates of the last contour, a numpy.ndarray
    #print contours[0][0].tolist()[0]    # first point of the first contour, [[375, 241]]; .tolist()[0] gives [375, 241]
    #print contours[0][0].tolist()[0][0] # x coordinate of the first contour's starting point
    #print contours[0][0].tolist()[0][1] # y coordinate of the first contour's starting point

    #print [i[0][0] for i in contours]
    #print [i[0][0] for i in contours[0]]

    scale = 1  # no scaling
    contours_img = cv2.resize(contours_img, (0, 0), fx=scale, fy=scale)
    print "Url_jpg_contours_num:%s" % url_str_len_contours
    for cnt in contours:
        color = np.random.randint(0, 255, (3)).tolist()
        cv2.drawContours(contours_img, [cnt * scale], 0, color, 1)
    cv2.imshow("URL_canny_img", edge_im_array)
    cv2.imshow("URL_contours_img", contours_img)

    # write the contour list to a text file
    edge_im_array_pix = str(np.size(edge_im_array))
    contours_img_pix = str(np.size(contours_img))

    ss = open("Contours" + ".log", 'w')
    ss.write("edge_im_array_pix nums:" + "%s" % edge_im_array_pix + "\n")
    ss.write("contours_img_pix nums:" + "%s" % contours_img_pix + "\n")
    ss.write("_url_contours num:" + "%s" % str_len_contours + "\n")
    for ele in contours:
        ss.write("%s" % ele)
    ss.write("**" * 50 + "\n")
    ss.close()
    #return contours
    cv2.waitKey(0)
Example #24
    img_r = cv.CloneMat(img)
    img_g = cv.CreateImage((img.cols, img.rows), 8, 1)
    img_b = cv.CreateImage((img.cols, img.rows), 8, 1)
    img_a = cv.CreateImage((img.cols, img.rows), 8, 1)
    cv.Set(img_g, 10)
    cv.Set(img_b, 100)
    cv.Set(img_a, 100)

    cv.Merge(img_b, img_g, img_r, img_a, rgb_img)
    """Precorner detect"""
    corners = cv.CreateMat(float_img.rows, float_img.cols, float_img.type)
    cv.PreCornerDetect(float_img, corners, 3)
    """Canny"""
    edges = cv.CreateImage((img.cols, img.rows), 8, 1)
    print img.rows, img.cols, edges.height
    cv.Canny(img, edges, 20.0, 160.0)
    disp2 = edges
    """Good features to track"""
    eig_image = cv.CreateMat(img.rows, img.cols, cv.CV_32FC1)
    temp_image = cv.CreateMat(img.rows, img.cols, cv.CV_32FC1)
    features_x_y_vector = cv.GoodFeaturesToTrack(img,
                                                 eig_image,
                                                 temp_image,
                                                 10,
                                                 0.002,
                                                 1.0,
                                                 useHarris=True)
    disp3 = cv.CreateMat(img.rows, img.cols, cv.CV_8UC1)
    cv.Set(disp3, 0)
    for (x, y) in features_x_y_vector:
        disp3[y, x] = 255
Example #25
import cv2.cv as cv
import math

im = cv.LoadImage('14_108_eae2591dbc033d9.jpg', cv.CV_LOAD_IMAGE_GRAYSCALE)

pi = math.pi  #Pi value

dst = cv.CreateImage(cv.GetSize(im), 8, 1)

cv.Canny(im, dst, 200, 200)
cv.Threshold(dst, dst, 100, 255, cv.CV_THRESH_BINARY)

#---- Standard ----
color_dst_standard = cv.CreateImage(cv.GetSize(im), 8, 3)
cv.CvtColor(im, color_dst_standard,
            cv.CV_GRAY2BGR)  #Create output image in RGB to put red lines

lines = cv.HoughLines2(dst, cv.CreateMemStorage(0), cv.CV_HOUGH_STANDARD, 1,
                       pi / 180, 100, 0, 0)
for (rho, theta) in lines[:100]:
    a = math.cos(theta)  #Calculate orientation in order to print them
    b = math.sin(theta)
    x0 = a * rho
    y0 = b * rho
    pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a)))
    pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a)))
    cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2,
            4)  #Draw the line

#---- Probabilistic ----
color_dst_proba = cv.CreateImage(cv.GetSize(im), 8, 3)
Example #26
orig2 = cv.QueryFrame(capture2)
processed = cv.CreateImage((orig.width, orig.height), cv.IPL_DEPTH_8U, 1)
grid = cv.CreateImage((orig.width * 2, orig.height), cv.IPL_DEPTH_8U, 3)
storage = cv.CreateMat(orig.width, 1, cv.CV_32FC3)
s = []

draw_grid(grid)
while True:
    orig = cv.QueryFrame(capture)
    orig2 = cv.QueryFrame(capture2)
    #cv.Normalize(orig)
    # filter for all yellow and blue - everything else is black
    processed = colorFilterCombine(orig, "yellow", "blue", s)

    # Some processing and smoothing for easier circle detection
    cv.Canny(processed, processed, 5, 70, 3)
    cv.Smooth(processed, processed, cv.CV_GAUSSIAN, 7, 7)

    #cv.ShowImage('processed2', processed)

    # Find&Draw circles
    find_circles(processed, storage, 100)

    #if it is in the range of 1 to 9, we can try and recalibrate our filter
    #if 1 <= storage.rows < 10:
    #    s = autocalibrate(orig, storage)

    draw_circles(storage, orig)

    warp = update_grid(storage, orig, grid)
Example #27
def main():
    cap = cv.CaptureFromCAM(0)
    cv.NamedWindow("camera", cv.CV_WINDOW_NORMAL)
    cv.SetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_WIDTH, 720)
    cv.SetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_HEIGHT, 540)
    cols = int(cv.GetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_WIDTH))
    rows = int(cv.GetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_HEIGHT))
    grey = cv.CreateImage((cols, rows), 8, 1)
    cumulated = cv.CreateImage((cols, rows), 8, 1)

    equalize = True
    laplace = False

    settings = {
        "canny_avg": 10,
    }

    threshold1 = 600
    threshold2 = 200

    settings_names = sorted(settings.keys())
    setting_current = 0
    setting_name = settings_names[setting_current]

    while True:
        im = cv.QueryFrame(cap)
        cv.CvtColor(im, grey, cv.CV_BGR2GRAY)

        if equalize:
            cv.Smooth(grey, grey, param1=5, param2=5)
            cv.EqualizeHist(grey, grey)

        if laplace:
            cv.Canny(grey, grey, threshold1, threshold2)
            avg = cv.Avg(cumulated)[0]
            if avg > settings["canny_avg"] * 1.2:
                threshold1 *= 1.1
                threshold2 = threshold1 / 2.5
            if avg < settings["canny_avg"] / 1.2:
                threshold1 /= 1.1
                threshold2 = threshold1 / 2.5

        cv.ShowImage("camera", grey)

        key = cv.WaitKey(1)
        if key not in (-1, 1114085, 1245157): # None, block
            print("Key %d" % key)
            if key in ( # Capture one frame
                1048675, # c
                99, # c
                ):
                filenames = save_image(cap, 1)
                print("Capturing: %s" % ", ".join(list(filenames)))
            if key in ( # Capture ten frames
                1114179, # C
                1179715, # C (block)
                65603, # C
                131139, # C (block)
                ):
                filenames = save_image(cap, 10)
                print("Capturing: %s" % ", ".join(list(filenames)))

            elif key in ( # Toggle equalization
                1114181, # e
                1048677, # E
                1179717, # E (block)
                1245285, # e (block)
                101,     # e
                65605,   # E
                131141,  # E (block)
                196709,  # e (block)
                ):
                equalize = not equalize
                print("Equalize: %s" % equalize)

            elif key in ( # Toggle laplace
                1179724, # l
                1048684, # L (block)
                1114188, # L
                108,
                65612,
                131148,
                196716,
                ):
                laplace = not laplace 
                print("Laplace: %s" % laplace)

            elif key in ( # Increment value
                1113938, # Up
                65362,
                ):
                settings[setting_name] += 1
                print("%s := %d" % (setting_name, settings[setting_name]))

            elif key in ( # Decrement value
                1113940, # Down
                65364,
                ):
                settings[setting_name] -= 1
                print("%s := %d" % (setting_name, settings[setting_name]))

            elif key in ( # Next setting
                1113939, # Right
                65363,
                ):
                setting_current = (setting_current + 1) % len(settings_names)
                setting_name = settings_names[setting_current]
                print("%s : %d" % (setting_name, settings[setting_name]))

            elif key in ( # Prev setting
                1113937, # Left
                65361,
                ):
                setting_current = (setting_current - 1) % len(settings_names)
                setting_name = settings_names[setting_current]
                print("%s : %d" % (setting_name, settings[setting_name]))

            elif key in ( # Exit
                27, # ESC
                1048603, # ESC
                1114193, # q
                1048689, # Q
                1179729, # Q (block)
                1245297, # q (block)
                113,
                65617,
                131153,
                196721,
                ):
                break