Example 1
def main():
    root = "/Users/soswow/Documents/Face Detection/test/negative"
    #    root = "/Users/soswow/Documents/Face Detection/test/sets/negative"
    #    root = "/Users/soswow/Documents/Face Detection/test/edge_view/positive"
    #    root = "/Users/soswow/Documents/Face Detection/test/sobel/positive"
    #    root = "/Users/soswow/Documents/Face Detection/test/sets/positive"
    #    root = "/Users/soswow/Documents/Face Detection/test/falses"

    for folder in os.listdir(root):
        path = p.join(root, folder)
        if p.isdir(path):
            sum = cv.CreateMat(32, 32, cv.CV_32F)
            cv.Zero(sum)
            k = 0
            for path, _ in directory_files(path):
                try:
                    img = cv.LoadImage(path, iscolor=False)
                except IOError:
                    continue
                mat = cv.CreateMat(32, 32, cv.CV_32F)
                cv.Zero(mat)
                cv.Convert(cv.GetMat(img), mat)
                cv.Add(mat, sum, sum)
                k += 1
            avg = cv.CreateMat(32, 32, cv.CV_32F)
            cv.Zero(avg)
            count = cv.CreateMat(32, 32, cv.CV_32F)
            cv.Zero(count)
            cv.Set(count, k)
            cv.Div(sum, count, avg)
            new_img = cv.CreateImage((32, 32), 8, 0)
            cv.Zero(new_img)
            cv.Convert(avg, new_img)
            cv.SaveImage(p.join(root, "%s-avg.png" % folder), new_img)
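For reference, a minimal NumPy/cv2 sketch of the same per-folder averaging (the function name, the direct use of os.listdir, and the assumption that every image is already 32x32 greyscale are mine, not the author's):

import os
import cv2
import numpy as np

def average_folder(folder):
    # Accumulate the 32x32 greyscale images in float32, then divide by the count.
    acc, k = np.zeros((32, 32), np.float32), 0
    for name in os.listdir(folder):
        img = cv2.imread(os.path.join(folder, name), cv2.IMREAD_GRAYSCALE)
        if img is None or img.shape != (32, 32):
            continue
        acc += img.astype(np.float32)
        k += 1
    return (acc / max(k, 1)).astype(np.uint8)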
Example 2
def main():
    if len(sys.argv) < 2:
        print "Come on, give me some files to play with"
        return

    print "Reading image " + sys.argv[1]

    incoming = cv.LoadImageM(sys.argv[1])
    w, h = (incoming.cols, incoming.rows)
    nw, nh = (int(w * 1.5 + 0.5), int(h * 1.5 + 0.5))
    img = cv.CreateImage(cv.GetSize(incoming), cv.IPL_DEPTH_32F, 3)
    cv.Convert(incoming, img)

    n = 0
    for f in sys.argv[1:]:
        incoming = cv.LoadImageM(f)
        w, h = (incoming.cols, incoming.rows)
        nw, nh = (int(w * 1.5 + 0.5), int(h * 1.5 + 0.5))
        new = cv.CreateImage(cv.GetSize(incoming), cv.IPL_DEPTH_32F, 3)
        cv.Convert(incoming, new)

        n += 1
        print "Read in image [%04d] [%s]" % (n, f)
        img = imageBlend(img, new, 1.0 / n)

        del (new)

    out = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_16U, 3)
    cv.ConvertScale(img, out, 256.)
    cv.SaveImage("out-16-up.png", out)
    print "Written out-16-up.png"
Example 3
def get_normalized_rgb_planes(r, g, b):
    size = cv.GetSize(r)
    #    r,g,b = get_three_planes(img)

    nr_plane = cv.CreateImage(size, 8, 1)
    ng_plane = cv.CreateImage(size, 8, 1)
    nb_plane = cv.CreateImage(size, 8, 1)

    r32 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    g32 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    b32 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    sum = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    cv.Zero(sum)
    cv.Convert(r, r32)
    cv.Convert(g, g32)
    cv.Convert(b, b32)

    cv.Add(r32, g32, sum)
    cv.Add(b32, sum, sum)

    tmp = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    cv.Div(r32, sum, tmp)
    cv.ConvertScale(tmp, nr_plane, scale=255)
    cv.Div(g32, sum, tmp)
    cv.ConvertScale(tmp, ng_plane, scale=255)
    cv.Div(b32, sum, tmp)
    cv.ConvertScale(tmp, nb_plane, scale=255)

    #    res = image_empty_clone(img)
    #    cv.Merge(nr_plane,ng_plane,nb_plane,None,res)
    return nr_plane, ng_plane, nb_plane
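The same normalised-RGB computation can be sketched with NumPy alone; this is an illustrative variant, not the author's code, and the zero-sum guard is my own assumption:

import numpy as np

def get_normalized_rgb_planes_np(r, g, b):
    # Divide each channel by R+G+B and rescale to 0..255.
    r32, g32, b32 = (p.astype(np.float32) for p in (r, g, b))
    s = r32 + g32 + b32
    s[s == 0] = np.inf  # black pixels then normalise to 0 instead of dividing by zero
    return tuple(np.clip(p / s * 255, 0, 255).astype(np.uint8) for p in (r32, g32, b32))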
Example 4
def EarthMovers_dist(s1, s2):
    x = np.array(s1)
    y = np.array(s2)
    #print("x = ", x ," y = " , y)
    a = np.zeros((len(x), 2))
    b = np.zeros((len(y), 2))
    for i in range(0, len(a)):
        a[i][0] = x[i]
        a[i][1] = i + 1.0

    for i in range(0, len(b)):
        b[i][0] = y[i]
        b[i][1] = i + 1.0
    #print("a = ", a ," b = " , b)
    # Convert from numpy array to CV_32FC1 Mat
    a64 = cv.fromarray(a)
    a32 = cv.CreateMat(a64.rows, a64.cols, cv.CV_32FC1)
    cv.Convert(a64, a32)

    b64 = cv.fromarray(b)
    b32 = cv.CreateMat(b64.rows, b64.cols, cv.CV_32FC1)
    cv.Convert(b64, b32)

    # Calculate Earth Mover's
    dis = cv.CalcEMD2(
        a32, b32, cv.CV_DIST_L1
    )  #CV_DIST_L2 -- Euclidean Distance, CV_DIST_L1 --- Manhattan Distance
    return dis
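With the modern API the same Earth Mover's distance can be computed through cv2.EMD; a hedged sketch (the function name and array layout mirror the example above):

import cv2
import numpy as np

def earth_movers_dist_cv2(s1, s2):
    # Each signature row is (weight, coordinate), as built by the loops above.
    a = np.array([(v, i + 1.0) for i, v in enumerate(s1)], dtype=np.float32)
    b = np.array([(v, i + 1.0) for i, v in enumerate(s2)], dtype=np.float32)
    emd, _, _ = cv2.EMD(a, b, cv2.DIST_L1)  # cv2.DIST_L2 for Euclidean ground distance
    return emd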
Example 5
def edge_threshold(image, roi=None, debug=0):
    thresholded = cv.CloneImage(image)
    horizontal = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_16S, 1)
    magnitude32f = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
    vertical = cv.CloneImage(horizontal)
    v_edge = cv.CloneImage(image)
    magnitude = cv.CloneImage(horizontal)

    storage = cv.CreateMemStorage(0)
    mag = cv.CloneImage(image)
    cv.Sobel(image, horizontal, 0, 1, 1)
    cv.Sobel(image, vertical, 1, 0, 1)
    cv.Pow(horizontal, horizontal, 2)
    cv.Pow(vertical, vertical, 2)

    cv.Add(vertical, horizontal, magnitude)
    cv.Convert(magnitude, magnitude32f)
    cv.Pow(magnitude32f, magnitude32f, 0.5)
    cv.Convert(magnitude32f, mag)
    if roi:
        cv.And(mag, roi, mag)
    cv.Normalize(mag, mag, 0, 255, cv.CV_MINMAX, None)
    cv.Threshold(mag, mag, 122, 255, cv.CV_THRESH_BINARY)
    draw_image = cv.CloneImage(image)
    and_image = cv.CloneImage(image)
    results = []

    threshold_start = 17
    for window_size in range(threshold_start, threshold_start + 1, 1):
        r = 20
        for threshold in range(0, r):
            cv.AdaptiveThreshold(image, thresholded, 255, \
                cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, window_size, threshold)
            contour_image = cv.CloneImage(thresholded)
            contours = cv.FindContours(contour_image, storage, cv.CV_RETR_LIST)
            cv.Zero(draw_image)
            cv.DrawContours(draw_image, contours, (255, 255, 255),
                            (255, 255, 255), 1, 1)
            if roi:
                cv.And(draw_image, roi, draw_image)
            cv.And(draw_image, mag, and_image)
            m1 = np.asarray(cv.GetMat(draw_image))
            m2 = np.asarray(cv.GetMat(mag))
            total = mag.width * mag.height  #cv.Sum(draw_image)[0]

            coverage = cv.Sum(and_image)[0] / (mag.width * mag.height)
            if debug:
                print threshold, coverage
                cv.ShowImage("main", draw_image)
                cv.ShowImage("main2", thresholded)
                cv.WaitKey(0)
            results.append((coverage, threshold, window_size))

    results.sort(lambda x, y: cmp(y, x))
    _, threshold, window_size = results[0]
    cv.AdaptiveThreshold(image, thresholded, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, \
        cv.CV_THRESH_BINARY, window_size, threshold)

    return thresholded
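The call that the loop above tunes maps onto cv2.adaptiveThreshold in the modern API; a minimal sketch, assuming an 8-bit single-channel input:

import cv2

def adaptive_threshold_cv2(gray, window_size=17, offset=5):
    # Mean-of-neighbourhood thresholding with an additive offset
    # (the "threshold" variable swept in the source); window_size must be odd.
    return cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                 cv2.THRESH_BINARY_INV, window_size, offset)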
Example 6
def histEMD(hist1, hist2, hist1weights, hist2weights):
    a64 = cv.fromarray(np.hstack((hist1weights, hist1)).copy())
    a32 = cv.CreateMat(a64.rows, a64.cols, cv.CV_32FC1)
    cv.Convert(a64, a32)

    b64 = cv.fromarray(np.hstack((hist2weights, hist2)).copy())
    b32 = cv.CreateMat(b64.rows, b64.cols, cv.CV_32FC1)
    cv.Convert(b64, b32)

    return cv.CalcEMD2(a32, b32, cv.CV_DIST_L2)
Example 7
def edgedetect(I_np, LOW_T=75, RATIO=3):
    I_cv = cv.fromarray(I_np)
    I_cv8U = cv.CreateMat(I_cv.rows, I_cv.cols, cv.CV_8U)
    cv.Convert(I_cv, I_cv8U)
    edges = cv.CreateMat(I_cv8U.rows, I_cv8U.cols, cv.CV_8U)

    cv.Canny(I_cv8U, edges, LOW_T, LOW_T * RATIO)
    edges_32f = cv.CreateMat(edges.rows, edges.cols, cv.CV_32F)
    cv.Convert(edges, edges_32f)
    edges_np = np.array(edges_32f)
    return edges_np
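A hedged cv2/NumPy restatement of the same edge detection, with no intermediate Mat conversions:

import cv2
import numpy as np

def edgedetect_cv2(I_np, LOW_T=75, RATIO=3):
    # Canny needs an 8-bit image; the result is returned as float32 like the original.
    edges = cv2.Canny(I_np.astype(np.uint8), LOW_T, LOW_T * RATIO)
    return edges.astype(np.float32)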
Example 8
def sobel(im, xorder=1, yorder=0, aperture_size=3, sigma=None):
    '''
    void cv.Sobel(src, dst, xorder, yorder, apertureSize = 3) 
    @param im: Input image
    @param xorder: The order of the x derivative (see cv.Sobel openCV docs) 
    @param yorder: The order of the y derivative (see cv.Sobel openCV docs)
    @param aperture_size: How large a convolution window to use
    @param sigma: Optional smoothing parameter to be applied prior to detecting edges
    '''
    gray = im.asOpenCVBW()
    edges = cv.CreateImage(cv.GetSize(gray), 8, 1)

    if sigma != None:
        cv.Smooth(gray, gray, cv.CV_GAUSSIAN,
                  int(sigma) * 4 + 1,
                  int(sigma) * 4 + 1, sigma, sigma)

    #sobel requires a destination image with larger bit depth...
    #...so we have to convert it back to 8 bit for the pv Image...
    dst32f = cv.CreateImage(cv.GetSize(gray), cv.IPL_DEPTH_32F, 1)

    cv.Sobel(gray, dst32f, xorder, yorder, aperture_size)
    cv.Convert(dst32f, edges)
    edges = pv.Image(edges)

    return edges
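A rough cv2 counterpart of the wrapper above (the function name and the clip-to-8-bit step are my own choices; cv.Convert saturates in the same way):

import cv2
import numpy as np

def sobel_cv2(gray, xorder=1, yorder=0, aperture_size=3, sigma=None):
    if sigma is not None:
        k = int(sigma) * 4 + 1  # same odd kernel size as the original smoothing
        gray = cv2.GaussianBlur(gray, (k, k), sigma)
    dst32f = cv2.Sobel(gray, cv2.CV_32F, xorder, yorder, ksize=aperture_size)
    return np.clip(dst32f, 0, 255).astype(np.uint8)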
Example 9
	def loadVectTransFromFile(self,filename):
		#print mnemosyne_dir+filename
		self.VectTrans = cv.Load(mnemosyne_dir+filename, cv.CreateMemStorage(), name="Translation")
		if self.VectTrans.type==5: #Everything should be in CV_64FC1
			tmpVectTrans = self.VectTrans
			self.VectTrans = cv.CreateMat(3,1,cv.CV_64FC1)
			cv.Convert(tmpVectTrans, self.VectTrans)
Example 10
def ConvertForEmd(histogram):
    array = [(histogram[i][0], i) for i in xrange(HISTOGRAM_BIN_N)
             if histogram[i] > 0]
    f64 = cv.fromarray(numpy.array(array))
    f32 = cv.CreateMat(f64.rows, f64.cols, cv.CV_32FC1)
    cv.Convert(f64, f32)
    return f32
Example 11
def extractContour(segmented_image, orig_img):
    contour_img = []
    contour_img = zeros(
        (segmented_image.shape[0], segmented_image.shape[1], 1),
        uint8)  # Create a new image container for contour image
    image_with_border = []
    image_with_border = cv.CreateMat(
        orig_img.height, orig_img.width,
        cv.CV_8UC3)  # Create a new image container for final image
    cv.Convert(orig_img,
               image_with_border)  # Copy original image to this container
    for loopVar1 in range(0, segmented_image.shape[0]):  # Loop for all pixels
        for loopVar2 in range(0, segmented_image.shape[1]):
            if any(segmented_image[loopVar1]
                   [loopVar2]) == True:  # For each non-zero pixel value
                try:
                    # Check if any 4-connected pixel is zero, if yes, then make it 255 (border pixel)
                    if (any(segmented_image[loopVar1 - 1][loopVar2]) == False
                            or any(segmented_image[loopVar1 + 1][loopVar2])
                            == False
                            or any(segmented_image[loopVar1][loopVar2 - 1])
                            == False
                            or any(segmented_image[loopVar1][loopVar2 + 1])
                            == False):
                        contour_img[loopVar1][loopVar2] = 255
                    else:  # If its not a border pixel, make it zero.
                        contour_img[loopVar1][loopVar2] = 0
                except IndexError:  # If its a border pixel, then all 4 neighbors won't exist, then make it zero
                    contour_img[loopVar1][loopVar2] = 0
                image_with_border[loopVar1, loopVar2] = [255, 255, 255]
            else:  # If its not a border pixel, make it zero in both contour and final image
                contour_img[loopVar1][loopVar2] = 0
                image_with_border[loopVar1, loopVar2] = [0, 0, 0]
        print loopVar1
    return contour_img, image_with_border  # Return contour and final images
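The pixel-by-pixel scan above can be vectorised; a hedged NumPy sketch (edge padding is my own choice so the image frame itself is not marked, roughly matching the IndexError handling in the loop):

import numpy as np

def border_mask(segmented):
    # A border pixel is a non-zero pixel with at least one zero 4-connected neighbour.
    fg = np.any(segmented != 0, axis=-1) if segmented.ndim == 3 else segmented != 0
    p = np.pad(fg, 1, mode='edge')
    interior = p[:-2, 1:-1] & p[2:, 1:-1] & p[1:-1, :-2] & p[1:-1, 2:]
    return np.where(fg & ~interior, 255, 0).astype(np.uint8)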
Example 12
def edge_magnitude(image):
    magnitude32f = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
    horizontal = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_16S, 1)
    vertical = cv.CloneImage(horizontal)
    magnitude = cv.CloneImage(horizontal)

    mag = cv.CloneImage(image)
    cv.Sobel(image, horizontal, 0, 1, 1)
    cv.Sobel(image, vertical, 1, 0, 1)
    cv.Pow(horizontal, horizontal, 2)
    cv.Pow(vertical, vertical, 2)

    cv.Add(vertical, horizontal, magnitude)
    cv.Convert(magnitude, magnitude32f)
    cv.Pow(magnitude32f, magnitude32f, 0.5)
    cv.Convert(magnitude32f, mag)
    return mag
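The same Sobel-magnitude pipeline in the modern API, as a hedged sketch:

import cv2
import numpy as np

def edge_magnitude_cv2(gray):
    # Float derivatives, Euclidean magnitude, then saturate back to 8 bits.
    dx = cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=1)
    dy = cv2.Sobel(gray, cv2.CV_32F, 0, 1, ksize=1)
    mag = cv2.magnitude(dx, dy)
    return np.clip(mag, 0, 255).astype(np.uint8)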
Example 13
	def loadVectRotFromFile(self,filename):
		#print mnemosyne_dir+filename
		print (mnemosyne_dir+filename)
		self.VectRot = cv.Load(mnemosyne_dir+filename, cv.CreateMemStorage(), name="Rotation")
		if self.VectRot.type==5: #Everything should be in CV_64FC1
			tmpVectRot = self.VectRot
			self.VectRot = cv.CreateMat(3,1,cv.CV_64FC1)
			cv.Convert(tmpVectRot, self.VectRot)
Example 14
 def get_mask(self):
     width = self.reference.width
     height = self.reference.height
     mask = zeros([height, width], dtype=int32)
     cvmask = cv.CreateMat(height, width, cv.CV_8UC1)
     mask[self.ypos:self.ypos + self.size,
          self.xpos:self.xpos + self.size] = 255
     cv.Convert(mask, cvmask)
     return cvmask
Example 15
 def vers_iplimage(self, profondeur=cv.IPL_DEPTH_8U):
     "Renvoie l'iplimage équivalente (format OpenCV)"
     temp = cv.CreateImage((self.largeur, self.hauteur),
                           cv.IPL_DEPTH_64F, 1)
     for (i, ligne) in enumerate(self.tab):
         for (j, valeur) in enumerate(ligne):
             cv.Set2D(temp, i, j, valeur)
     img = cv.CreateImage((self.largeur, self.hauteur), profondeur, 1)
     cv.Convert(temp, img)
     return img
Example 16
def main():
    '''Testing Main method'''
    import sys, cv

    if len(sys.argv) == 1:
        print 'No image.'
        sys.exit(1)

    im_file = sys.argv[1]
    image = cv.LoadImageM(im_file, cv.CV_LOAD_IMAGE_GRAYSCALE)

    size = 3
    ori = (pi * 1) / 4
    ste = 0.75

    kern = cv.fromarray(SteerableFilter.get_filter(ori, size, False, ste))
    kern_t = cv.fromarray(SteerableFilter.get_filter(ori, size, True, ste))

    kernel = cv.CreateMat(size, size, cv.CV_32F)
    kernel_t = cv.CreateMat(size, size, cv.CV_32F)

    cv.Convert(kern, kernel)
    cv.Convert(kern_t, kernel_t)

    dst = cv.CreateMat(image.rows, image.cols, image.type)
    dst_t = cv.CreateMat(image.rows, image.cols, image.type)
    cv.Filter2D(image, dst, kernel)
    cv.Filter2D(image, dst_t, kernel_t)

    cv.ShowImage('Source', image)
    cv.ShowImage('Result', dst)
    cv.ShowImage('Result (Transpose)', dst_t)

    cv.WaitKey(0)
    cv.WaitKey(0)
    cv.WaitKey(0)
Example 17
def FFT(image, flag=0):
    w = image.width
    h = image.height
    iTmp = cv.CreateImage((w, h), cv.IPL_DEPTH_32F, 1)
    cv.Convert(image, iTmp)
    iMat = cv.CreateMat(h, w, cv.CV_32FC2)
    mFFT = cv.CreateMat(h, w, cv.CV_32FC2)
    for i in range(h):
        for j in range(w):
            if flag == 0:
                num = -1 if (i + j) % 2 == 1 else 1
            else:
                num = 1
            iMat[i, j] = (iTmp[i, j] * num, 0)
    cv.DFT(iMat, mFFT, cv.CV_DXT_FORWARD)
    return mFFT
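The (-1)**(i+j) factor above shifts the zero-frequency component to the centre of the spectrum; with NumPy the same effect is obtained with fftshift, sketched here as an assumed equivalent:

import numpy as np

def fft_centered(gray, center=True):
    f = np.fft.fft2(np.asarray(gray, dtype=np.float32))
    return np.fft.fftshift(f) if center else f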
Example 18
    def test_ConvertIPLImage32FToPvImage(self):
        im = pv.Image(pv.LENA)
        im = im.resize((512, 400))
        cv_im = im.asOpenCV()
        mat = im.asMatrix3D()
        cv_32 = cv.CreateImage(cv.GetSize(cv_im), cv.IPL_DEPTH_32F, 3)
        cv.Convert(cv_im, cv_32)

        for x in range(50):
            for y in range(50):
                for c in range(3):
                    self.assertAlmostEqual(cv_im[y, x][2 - c], mat[c, x, y])
                    self.assertAlmostEqual(cv_im[y, x][2 - c], cv_32[y,
                                                                     x][2 - c])

        _ = pv.Image(cv_32)
Example 19
    def __produce_gradient_image(self, i, scale):
        size = cv.GetSize(i)
        grey_image = cv.CreateImage(size, 8, 1)

        size = [s / scale for s in size]
        grey_image_small = cv.CreateImage(size, 8, 1)

        cv.CvtColor(i, grey_image, cv.CV_RGB2GRAY)

        df_dx = cv.CreateImage(cv.GetSize(i), cv.IPL_DEPTH_16S, 1)
        cv.Sobel(grey_image, df_dx, 1, 1)
        cv.Convert(df_dx, grey_image)
        cv.Resize(grey_image,
                  grey_image_small)  #, interpolation=cv.CV_INTER_NN)
        cv.Resize(grey_image_small,
                  grey_image)  #, interpolation=cv.CV_INTER_NN)
        return grey_image
Example 20
    def get_average(self, imagesArray):
        bitDepth = imagesArray[0].depth
        if bitDepth > 8:
            print 'ERROR: Image bit depth too large. Adjust get_average method parameters.'
            sys.exit()
        
        imageSize = cv.GetSize(imagesArray[0])
        averageImage = cv.CreateImage(imageSize, cv.IPL_DEPTH_16U, 3)
        cv.Set(averageImage,0)
        for image in imagesArray:
            imageExpansion = cv.CreateImage(imageSize, cv.IPL_DEPTH_16U, 3)
            cv.Convert(image, imageExpansion)
            cv.Add(averageImage, imageExpansion, averageImage)

        nImages = 1/float(len(imagesArray))
        imageReduction = cv.CreateImage(imageSize, cv.IPL_DEPTH_8U, 3)
        cv.CvtScale(averageImage,imageReduction,nImages)

        return imageReduction
Example 21
    def analyse(self, painting):
        data = super(SimpleRHOG, self).analyse(painting)
        segmented = numpy.ones((10, 10), numpy.float32)
        for i in range(1, 10):
            for j in range(1, 10):
                avg = 0
                for x in range(1, int(data.rows / 10)):
                    for y in range(1, int(data.cols / 10)):
                        (val, _, _, _) = cv.Get2D(data, x * i, y * j)
                        avg += val
                avg /= int(data.rows / 10) * int(data.cols / 10)
                segmented[i - 1][j - 1] = avg

        segs = cv.fromarray(segmented)
        grad_img = cv.CreateImage((segs.rows, segs.cols), cv.IPL_DEPTH_32F,
                                  segs.channels)
        cv.Convert(segs, grad_img)
        hist = cv.CreateHist([15], cv.CV_HIST_ARRAY, [[0, 2 * math.pi]])
        cv.CalcHist([grad_img], hist)
        return hist
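The nested loops above reduce the gradient image to a coarse 10x10 grid; if the intent is a block average, a NumPy restatement could look like this (an assumption on my part, since the loops sample data[x*i, y*j] rather than averaging contiguous blocks):

import numpy as np

def block_average(data, blocks=10):
    # Crop to a multiple of the grid, then average each cell with a reshape.
    h = (data.shape[0] // blocks) * blocks
    w = (data.shape[1] // blocks) * blocks
    cells = data[:h, :w].reshape(blocks, h // blocks, blocks, w // blocks)
    return cells.mean(axis=(1, 3)).astype(np.float32)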
Example 22
    def drawDepthImage(self):
        """ Draw the caputered image to the screen. This is very slooow, so ommit this function in competition """

        image = self.depthImage

        if image != 0:
            # make sure 8 bit image is displayed
            if image.depth == 16:
                newImage = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1)
                cv.Convert(image, newImage)
                image = newImage

            # generate pygame image
            image_rgb = cv.CreateMat(image.height, image.width, cv.CV_8UC3)
            cv.CvtColor(image, image_rgb, cv.CV_GRAY2RGB)
            pygameImage = pygame.image.frombuffer(image_rgb.tostring(),
                                                  cv.GetSize(image_rgb), "RGB")

            # draw the image
            self.screen.blit(pygameImage, (0, 0))
Example 23
def avgnimages(il, n, ngood=2):
    global maxlen
    global off
    global scale
    global correction

    if n > maxlen:
        n = maxlen
        print "Error: avgnimages: n>maxlen"

    for i in range(n - ngood):  # capture n-ngood more frames (so the deque holds n)
        il.appendleft(cv.CreateImage((list(il)[0].width, list(il)[0].height), list(il)[0].depth, list(il)[0].nChannels))
        FormatImage(cv.QueryFrame(capture), list(il)[0], off, scale, correction)

    ideep = cv.CreateImage((list(il)[0].width, list(il)[0].height), cv.IPL_DEPTH_16U, list(il)[0].nChannels)
    cv.SetZero(ideep)
    itmp = cv.CreateImage((list(il)[0].width, list(il)[0].height), cv.IPL_DEPTH_16U, list(il)[0].nChannels)
    for i in range(n):
        cv.Convert(list(il)[i], itmp)
        cv.Add(itmp, ideep, ideep)

    cv.ConvertScale(ideep, list(il)[0], 1 / float(n))
Example 24
    def getAverageValues(self, images):
        """ get the average values over all the images
            adds them all together and then divides them with the numb. images
        """
        if len(images) == 0:
            return None
        if len(images) == 1:
            return images[0]

        imageSize = (images[0].width, images[0].height)

        sumImage = cv.CreateImage(imageSize, cv.IPL_DEPTH_32S, 1)

        cv.Set(sumImage, 0)
        for image in images:
            tempImage = cv.CreateImage(imageSize, cv.IPL_DEPTH_32S, 1)
            cv.Convert(image, tempImage)
            cv.Add(sumImage, tempImage, sumImage)

        nImages = 1 / float(len(images))
        meanImage = cv.CreateImage(imageSize, cv.IPL_DEPTH_8U, 1)
        cv.CvtScale(sumImage, meanImage, nImages)

        return meanImage
Example 25
def detect(image, debug=False, display=None):
    work_image = cv.CreateImage((image.width, image.height), 8, 1)
    cv.CvtColor(image, work_image, cv.CV_BGR2GRAY)
    image = work_image
    edge = cv.CloneImage(image)
    thresholded = cv.CloneImage(image)
    v_edges = cv.CloneImage(image)
    h_edges = cv.CloneImage(image)
    vertical = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_16S, 1)
    cv.Sobel(image, vertical, 1, 0, 1)
    cv.Abs(vertical, vertical)
    cv.Convert(vertical, v_edges)
    storage = cv.CreateMemStorage(0)
    result = np.asarray(cv.GetMat(v_edges), dtype=np.float)
    threshold = 6
    rects = []
    while len(rects) < 1 and threshold > 0:
        rects = []
        cv.Convert(vertical, v_edges)
        cv.AdaptiveThreshold(v_edges, v_edges, 255,
                             cv.CV_ADAPTIVE_THRESH_MEAN_C,
                             cv.CV_THRESH_BINARY_INV, 17, threshold)
        threshold -= 1
        storage = cv.CreateMemStorage(0)
        contour_image = cv.CloneImage(v_edges)
        contours = cv.FindContours(contour_image, storage, cv.CV_RETR_LIST,
                                   cv.CV_CHAIN_APPROX_NONE, (0, 0))
        ext.filter_contours(contours, 30, ext.LESSTHAN)
        max_size = int(image.width * image.height * 0.1)
        # ext.filter_contours(contours, 200**2, ext.GREATERTHAN)
        ext.filter_contours(contours, max_size, ext.GREATERTHAN)

        if display:
            cv.Merge(v_edges, v_edges, v_edges, None, display)
        seeds = []
        if contours:
            seq = contours
            rects = []
            while seq:
                c = ext.as_contour(ext.wrapped(seq))
                r = (c.rect.x, c.rect.y, c.rect.width, c.rect.height)
                rects.append(r)
                if display:
                    cv.Rectangle(
                        display, (c.rect.x, c.rect.y),
                        (c.rect.x + c.rect.width, c.rect.y + c.rect.height),
                        (0, 0, 255), 1)
                seq = seq.h_next()
    rects.sort(lambda x, y: cmp(x[0] + x[2] / 2, y[0] + y[2] / 2))
    seeds = rects[:]
    seeds.sort(lambda x, y: cmp(y[2] * y[3], x[2] * x[3]))
    groups = []
    skip = False
    for seed in seeds:
        if seed not in rects:
            break
        found = False
        for group in groups:
            if seed in group:
                found = True
        if found:
            continue
        r = seed

        start = seed
        start_index = rects.index(seed)
        groups.append([seed])
        i = start_index - 1
        # delta = max(150, seed[2]/2)
        delta = seed[2] * 0.66
        if debug:
            print "left", seed, delta
            col = (randint(0, 255), randint(0, 255), randint(0, 255))
            cv.Rectangle(display, (r[0], r[1]), (r[0] + r[2], r[1] + r[3]),
                         (255, 255, 255), 3)
            cv.Rectangle(display, (r[0], r[1]), (r[0] + r[2], r[1] + r[3]),
                         col, -1)
            cv.ShowImage("main", display)
            if not skip:
                c = cv.WaitKey(0)
            if c == ord("a"):
                skip = True
        # scan left
        while 1:
            if i < 0:
                break
            rect = rects[i]
            if rect[0] + rect[2] < seed[0] - delta:
                if debug:
                    print "esc1", rect
                break
            if in_vertical(seed, start, rect):
                seed = rect
                groups[-1].append(rect)
                r = rect
                if debug:
                    print rect
                    cv.Rectangle(display, (r[0], r[1]),
                                 (r[0] + r[2], r[1] + r[3]), col, -1)
                    cv.ShowImage("main", display)
                    if not skip:
                        c = cv.WaitKey(0)
                    if c == ord("a"):
                        skip = True
            else:
                if debug:
                    print "rej1", rect
            i -= 1
        # scan right
        seed = start
        start_index = rects.index(seed)
        i = start_index + 1
        if debug:
            print
            print "right", seed
        while 1:
            if i >= len(rects):
                break
            rect = rects[i]
            if rect[0] > seed[0] + seed[2] + delta:
                if debug:
                    print "esc2", rect, rect[0] + rect[2] / 2, seed[
                        0] + seed[2] / 2 + delta
                break
            if in_vertical(seed, start, rect):
                seed = rect
                groups[-1].append(rect)
                r = rect
                if debug:
                    print rect
                    cv.Rectangle(display, (r[0], r[1]),
                                 (r[0] + r[2], r[1] + r[3]), col, -1)
                    cv.ShowImage("main", display)
                    if not skip:
                        c = cv.WaitKey(0)
                    if c == ord("a"):
                        skip = True
            else:
                if debug:
                    print "rej2", rect
            i += 1
        if debug:
            print

    # find min and max extent of group
    group_rects = []
    for group in groups:
        min_x, min_y = 1E6, 1E6
        max_x, max_y = -1, -1
        dev = []
        col = (randint(0, 255), randint(0, 255), randint(0, 255))
        for rect in group:
            r = rect
            if display:
                if r == group[0]:
                    cv.Rectangle(display, (r[0], r[1]),
                                 (r[0] + r[2], r[1] + r[3]), (255, 255, 255),
                                 3)
                cv.Rectangle(display, (r[0], r[1]), (r[0] + r[2], r[1] + r[3]),
                             col, -1)
            min_x = min(min_x, r[0])
            min_y = min(min_y, r[1])
            max_x = max(max_x, r[0] + r[2])
            max_y = max(max_y, r[1] + r[3])
        if display:
            cv.Rectangle(display, (min_x, min_y), (max_x, max_y), (0, 255, 0),
                         1)
        width = max_x - min_x
        height = max_y - min_y
        rect = (min_x, min_y, width, height)
        group_rects.append(rect)
    return group_rects
Example 26
def segment_rect(image,
                 rect,
                 debug=False,
                 display=None,
                 target_size=None,
                 group_range=(3, 25)):
    global next
    skip = False
    best_chars = []
    best_threshold = None
    thresholded = cv.CloneImage(image)
    contour_image = cv.CloneImage(image)
    edges = cv.CloneImage(image)

    min_x, min_y, width, height = rect
    # cv.SetImageROI(thresholded, rect)
    cv.SetImageROI(contour_image, rect)
    cv.SetImageROI(image, rect)
    cv.SetImageROI(edges, rect)

    horizontal = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_16S, 1)
    magnitude32f = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
    vertical = cv.CloneImage(horizontal)
    magnitude = cv.CloneImage(horizontal)
    cv.Sobel(image, horizontal, 0, 1, 3)
    cv.Sobel(image, vertical, 1, 0, 3)
    cv.Pow(horizontal, horizontal, 2)
    cv.Pow(vertical, vertical, 2)
    cv.Add(vertical, horizontal, magnitude)
    cv.Convert(magnitude, magnitude32f)
    cv.Pow(magnitude32f, magnitude32f, 0.5)
    cv.Convert(magnitude32f, edges)

    original_rect = rect
    if display:
        cv.SetImageROI(display, rect)
    for threshold in range(1, 20, 1):
        cv.SetImageROI(thresholded, original_rect)
        #for i in range(30, 60, 1):
        if display:
            cv.Merge(image, image, image, None, display)
        cv.Copy(image, thresholded)
        #cv.Threshold(thresholded, thresholded, i, 255, cv.CV_THRESH_BINARY_INV)
        cv.AdaptiveThreshold(thresholded, thresholded, 255,
                             cv.CV_ADAPTIVE_THRESH_MEAN_C,
                             cv.CV_THRESH_BINARY_INV, 17, threshold)
        #cv.AdaptiveThreshold(thresholded, thresholded, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, 5, i)
        # skip rects greater than 50% thresholded
        summed = cv.Norm(thresholded, None, cv.CV_L1,
                         None) / 255 / thresholded.width / thresholded.height
        if summed > 0.5:
            continue
        if debug:
            cv.ShowImage("edge", thresholded)
        storage = cv.CreateMemStorage(0)
        cv.Copy(thresholded, contour_image)
        contours = cv.FindContours(contour_image, storage, cv.CV_RETR_LIST,
                                   cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
        ext.filter_contours(contours, 20, ext.LESSTHAN)
        groups = []
        rects = []
        edge_counts = []
        overlappings = {}
        if contours:
            seq = contours
            while seq:
                c = ext.as_contour(ext.wrapped(seq))
                r = (c.rect.x, c.rect.y, c.rect.width, c.rect.height)
                rects.append(r)
                seq = seq.h_next()
            similarity = 0.45  #0.3
            rects.sort(lambda x, y: cmp(y[2] * y[3], x[2] * x[3]))
            for rect in rects:
                if debug:
                    print
                    print "R", rect, len(groups)
                cv.SetImageROI(edges,
                               (original_rect[0] + rect[0],
                                original_rect[1] + rect[1], rect[2], rect[3]))
                edge_count = cv.Sum(edges)[0] / 255 / (rect[2] * rect[3])
                edge_counts.append(edge_count)
                #                cv.ShowImage("edges", edges)
                #                cv.WaitKey(0)
                if debug and target_size:
                    print "X", target_size, rect
                    print(target_size[0] - rect[2]) / target_size[0]
                    print(target_size[1] - rect[3]) / target_size[1]
                if rect[2] > rect[3] or float(rect[3])/rect[2] < 3./3 or edge_count < 0.1\
                or (rect[2] == image.width and rect[3] == image.height) \
                or (target_size and not 0 < (target_size[0] - rect[2]) / target_size[0] < 0.3 \
                and not 0 < (target_size[1] - rect[3]) / target_size[1] < 0.05):
                    if debug:
                        print "rej", rect[2], ">", rect[3], "edge=", edge_count
                        cv.Rectangle(display, (rect[0], rect[1]),
                                     (rect[0] + rect[2], rect[1] + rect[3]),
                                     (0, 0, 255), 1)
                        cv.ShowImage("main", display)
                        if not skip and not next:
                            c = cv.WaitKey(0)
                            if c == ord("a"):
                                skip = True
                            if c == ord("z"):
                                next = True
                    continue
                added = False
                for group_id, group in enumerate(groups):
                    avg_width, avg_height, avg_y = 0, 0, 0
                    overlap = None
                    c = 0
                    for r in group:
                        avg_y += r[1] + r[3] / 2.0
                        avg_width += r[2]
                        avg_height += r[3]
                        irect = intersect(r, rect)
                        if irect[2] * irect[3] > 0.2 * r[2] * r[3]:
                            overlappings.setdefault(group_id,
                                                    []).append([r, rect])
                    avg_y /= float(len(group))
                    avg_width /= float(len(group))
                    avg_height /= float(len(group))
                    if debug:
                        print group
                    if (abs(avg_width - rect[2]) / avg_width < similarity or \
                     (rect[2] < avg_width)) and \
                    abs(avg_height - rect[3])/ avg_height < similarity and \
                    abs(avg_y - (rect[1] + rect[3]/2.0)) / avg_y < similarity:
                        group.append(rect)
                        added = True
                    else:
                        pass
                if not added:
                    # first char in group
                    groups.append([rect])
                if debug:
                    print "now:"
                    for g in groups:
                        print g
                    cv.Rectangle(display, (rect[0], rect[1]),
                                 (rect[0] + rect[2], rect[1] + rect[3]),
                                 (255, 0, 0), 1)
                    cv.ShowImage("main", display)
                    if not skip and not next:
                        c = cv.WaitKey(0)
                        if c == ord("a"):
                            skip = True
                        if c == ord("z"):
                            next = True
        if groups:
            #handle overlapping regions, default to average width match
            for group_id, over in overlappings.items():
                group = groups[group_id]
                avg_width = 0
                avg_height = 0
                for r in group:
                    avg_width += r[2]
                    avg_height += r[3]
                avg_width /= float(len(group))
                avg_height /= float(len(group))
                for r1, r2 in over:
                    if r2 not in group or r1 not in group:
                        continue
                    if debug:
                        print "over", r1, r2, r1[2] * r1[3], r2[2] * r2[
                            3], avg_width
                    d1 = abs(r1[2] - avg_width) + abs(r1[3] - avg_height)
                    d2 = abs(r2[2] - avg_width) + abs(r2[3] - avg_height)
                    if d1 < d2:
                        group.remove(r2)
                    else:
                        group.remove(r1)

            #group = max(groups, key=len)
            # from longest groups, find largest area
            groups.sort(key=len)
            groups.reverse()
            max_area = 0
            max_index = -1
            for i, g in enumerate(groups[:5]):
                area = 0
                for r in g:
                    area += r[2] * r[3]
                if area > max_area:
                    max_area = area
                    max_index = i
            group = groups[max_index]
            # vertical splitting
            avg_width, avg_height, avg_y = 0, 0, 0
            if debug:
                print "G", group
            for r in group:
                avg_y += r[1] + r[3] / 2.0
                avg_width += r[2]
                avg_height += r[3]
            avg_y /= float(len(group))
            avg_width /= float(len(group))
            avg_height /= float(len(group))
            band_rects = []
            bound = bounding_rect(group)
            for i, rect in enumerate(rects):
                if edge_counts[i] < 0.1:
                    continue
                if (abs(avg_width - rect[2]) / avg_width < similarity or \
                 (rect[2] < avg_width)) and \
                 (abs(avg_height - rect[3]) / avg_height < similarity or  \
                 (rect[3] < avg_height)) and \
                abs(avg_y - (rect[1] + rect[3]/2.0)) < avg_height/2:
                    band_rects.append(rect)

            band_rects.sort(lambda x, y: cmp(y[2] * y[3], x[2] * x[3]))

            for i, rect_a in enumerate(band_rects[:-1]):
                if rect_a[2] * rect_a[3] < 0.2 * avg_width * avg_height:
                    continue
                merge_rects = []
                for rect_b in band_rects[i + 1:]:
                    w = avg_width
                    m1 = rect_a[0] + rect_a[2] / 2
                    m2 = rect_b[0] + rect_b[2] / 2
                    if abs(m1 - m2) < w:
                        merge_rects.append(rect_b)
                if debug:
                    print "M", merge_rects
                if merge_rects:
                    merge_rects.append(rect_a)
                    rect = bounding_rect(merge_rects)
                    area = 0
                    for r in merge_rects:
                        area += r[2] * r[3]
                    if (abs(avg_width - rect[2]) / avg_width < similarity or \
                    (rect[2] < avg_width)) and \
                    abs(avg_height - rect[3])/ avg_height < similarity and \
                    area > 0.5*(avg_width*avg_height) and \
                    abs(avg_y - (rect[1] + rect[3]/2.0)) / avg_y < similarity:
                        for r in merge_rects:
                            if r in group:
                                group.remove(r)
                        # merge into group
                        new_group = []
                        merged = False
                        for gr in group:
                            area2 = max(gr[2] * gr[3], rect[2] * rect[3])
                            isect = intersect(gr, rect)
                            if isect[2] * isect[3] > 0.4 * area2:
                                x = min(gr[0], rect[0])
                                y = min(gr[1], rect[1])
                                x2 = max(gr[0] + gr[2], rect[0] + rect[2])
                                y2 = max(gr[1] + gr[3], rect[1] + rect[3])
                                new_rect = (x, y, x2 - x, y2 - y)
                                new_group.append(new_rect)
                                merged = True
                            else:
                                new_group.append(gr)
                        if not merged:
                            new_group.append(rect)
                        group = new_group
                        cv.Rectangle(display, (rect[0], rect[1]),
                                     (rect[0] + rect[2], rect[1] + rect[3]),
                                     (255, 0, 255), 2)
            # avoid splitting
            split = False
            # select higher threshold if innovates significantly
            best_width = 0.0
            if best_chars:
                best_area = 0.0
                for rect in best_chars:
                    best_area += rect[2] * rect[3]
                    best_width += rect[2]
                best_width /= len(best_chars)
                area = 0.0
                overlapped = 0.0
                avg_width = 0.0
                avg_height = 0.0
                for rect in group:
                    area += rect[2] * rect[3]
                    avg_width += rect[2]
                    avg_height += rect[3]
                    for char in best_chars:
                        section = intersect(rect, char)
                        if section[2] * section[3] > 0:
                            overlapped += section[2] * section[3]
                avg_width /= len(group)
                avg_height /= len(group)
                quotient = overlapped / area
                quotient2 = (area - overlapped) / best_area
                if debug:
                    print area, overlapped, best_area
                    print group
                    print "QUO", quotient
                    print "QUO2", quotient2
            else:
                quotient = 0
                quotient2 = 1
                best_area = 0

            group.sort(lambda x, y: cmp(x[0] + x[2] / 2, y[0] + y[2] / 2))
            best_chars.sort(lambda x, y: cmp(x[0] + x[2] / 2, y[0] + y[2] / 2))
            if group_range[0] <= len(group) <= group_range[1] and avg_width > 5 and avg_height > 10 and \
            ((quotient2 > 0.05 and (best_area == 0 or abs(area - best_area)/best_area < 0.4))
            or (quotient2 > 0.3 and area > best_area)):
                if debug:
                    print "ASSIGNED", group
                best_chars = group
                best_threshold = threshold  #get_patch(thresholded, original_rect)
            else:
                if debug:
                    print "not", quotient2, len(
                        group), avg_width, avg_height, area, best_area

        # best_chars = groups
        if debug:
            for rect in best_chars:
                cv.Rectangle(display, (rect[0], rect[1]),
                             (rect[0] + rect[2], rect[1] + rect[3]),
                             (0, 255, 0), 1)
            cv.ShowImage("main", display)
            if not skip and not next:
                c = cv.WaitKey(0)
                if c == ord("a"):
                    skip = True
                if c == ord("z"):
                    next = True
    best_chars.sort(lambda x, y: cmp(x[0], y[0]))
    cv.ResetImageROI(thresholded)
    cv.ResetImageROI(contour_image)
    cv.ResetImageROI(image)
    cv.ResetImageROI(edges)
    if display:
        cv.ResetImageROI(display)
    return best_chars, best_threshold
Example 27
# -*- coding: utf-8 -*-
"""
Created on Tue May 22 16:41:21 2012

@author: IntelligentSystems
"""

import cv
import os
import glob

count = 0
average = None
path = 'Clips/'

for infile in glob.glob(os.path.join(path, '*.bmp')):
    image = cv.LoadImage(infile, False)
    if average is None:
        average = cv.CreateImage(cv.GetSize(image), 32, 1)
        image_aux = cv.CloneImage(average)
        cv.Convert(image, average)
    else:
        count = count + 1
        scale = 1.0 / (count + 1)  # running mean: the k-th frame gets weight 1/k
        cv.Convert(image, image_aux)
        cv.Sub(image_aux, average, image_aux)
        cv.Scale(image_aux, image_aux, scale)
        cv.Add(average, image_aux, average)

cv.Convert(average, image)
cv.SaveImage('background.bmp', image)
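The loop implements a running mean (each new frame k pulls the average by 1/k of its difference from the current average); a hedged NumPy/cv2 restatement of the same idea, with the path and greyscale loading assumed:

import cv2
import glob
import os
import numpy as np

average, n = None, 0
for infile in glob.glob(os.path.join('Clips/', '*.bmp')):
    frame = cv2.imread(infile, cv2.IMREAD_GRAYSCALE)
    if frame is None:
        continue
    frame = frame.astype(np.float32)
    n += 1
    average = frame if n == 1 else average + (frame - average) / n
if average is not None:
    cv2.imwrite('background.bmp', average.astype(np.uint8))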
Example 28
def old_GeneratePerceptualHash(path):

    # I think what I should be doing here is going cv2.imread( path, flags = cv2.CV_LOAD_IMAGE_GRAYSCALE )
    # then efficiently resize

    thumbnail = GeneratePILImage(path)

    # convert to 32 x 32 greyscale

    if thumbnail.mode == 'P':

        thumbnail = thumbnail.convert(
            'RGBA'
        )  # problem with some P images converting to L without RGBA step in between

    if thumbnail.mode == 'RGBA':

        # this is some code i picked up somewhere
        # another great example of PIL failing; it turns all alpha to pure black on a RGBA->RGB

        thumbnail.load()

        canvas = PILImage.new('RGB', thumbnail.size, (255, 255, 255))

        canvas.paste(thumbnail, mask=thumbnail.split()[3])

        thumbnail = canvas

    thumbnail = thumbnail.convert('L')

    thumbnail = thumbnail.resize((32, 32), PILImage.ANTIALIAS)

    # convert to mat

    numpy_thumbnail_8 = cv.CreateMatHeader(32, 32, cv.CV_8UC1)

    cv.SetData(numpy_thumbnail_8, thumbnail.tostring())

    numpy_thumbnail_32 = cv.CreateMat(32, 32, cv.CV_32FC1)

    cv.Convert(numpy_thumbnail_8, numpy_thumbnail_32)

    # compute dct

    dct = cv.CreateMat(32, 32, cv.CV_32FC1)

    cv.DCT(numpy_thumbnail_32, dct, cv.CV_DXT_FORWARD)

    # take top left 8x8 of dct

    dct = cv.GetSubRect(dct, (0, 0, 8, 8))

    # get mean of dct, excluding [0,0]

    mask = cv.CreateMat(8, 8, cv.CV_8U)

    cv.Set(mask, 1)

    mask[0, 0] = 0

    channel_averages = cv.Avg(dct, mask)

    average = channel_averages[0]

    # make a monochromatic, 64-bit hash of whether the entry is above or below the mean

    bytes = []

    for i in range(8):

        byte = 0

        for j in range(8):

            byte <<= 1  # shift byte one left

            value = dct[i, j]

            if value > average: byte |= 1

        bytes.append(byte)

    answer = str(bytearray(bytes))

    # we good

    return answer
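The comments above describe the classic DCT perceptual hash; a condensed sketch with the cv2/NumPy API (the function name, cv2.imread loading, and packing the 64 bits into an integer are my own choices, not the author's method):

import cv2
import numpy as np

def phash_64(path):
    grey = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    if grey is None:
        raise IOError('could not read ' + path)
    small = cv2.resize(grey, (32, 32), interpolation=cv2.INTER_AREA).astype(np.float32)
    dct = cv2.dct(small)[:8, :8]            # keep the top-left 8x8 of the DCT
    mean = (dct.sum() - dct[0, 0]) / 63.0   # mean excluding the DC term
    h = 0
    for bit in (dct > mean).flatten():
        h = (h << 1) | int(bool(bit))
    return h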
Example 29
def find_loop(input_data, IterationClosing=6):
    """
        This function detect support (or loop) and return the coordinates if there is a detection,
        and -1 if not.
        in : filename : string image Filename / Format accepted :
        in : IterationClosing : int : Number of iteration for closing contour procedure
        Out : tupple of coordiante : (string, coordinate X, coordinate Y) where string take value
             'Coord' or 'No loop detected depending if loop was detected or not. If no loop was
              detected coordinate X and coordinate y take the value -1.
     """
    # Global variable definitions
    global AIRE_MIN_REL
    global AIRE_MIN
    global NORM_IMG
    global NiteClosing
    global pointRef
    # Load the image
    try:
        if type(input_data) == str:
            #Image filename is passed
            img_ipl = cv.LoadImageM(input_data)
        elif type(input_data) == np.ndarray:
            img_ipl = cv.fromarray(input_data)
        else:
            print "ERROR : Input image could not be opened, check format or path"
            return (
                "ERROR : Input image could not be opened, check format or path",
                -10, -10)
    except:
        print "ERROR : Input image could not be opened, check format or path"
        return (
            "ERROR : Input image could not be opened, check format or path",
            -10, -10)
    img_cont = img_ipl  # img used for
    NORM_IMG = img_ipl.width * img_ipl.height
    AIRE_MIN = NORM_IMG * AIRE_MIN_REL
    # Processing
    # Convert the input image to greyscale
    img_gray_ini = cv.CreateImage((img_ipl.width, img_ipl.height), 8, 1)
    cv.CvtColor(img_ipl, img_gray_ini, cv.CV_BGR2GRAY)
    #Removing Offset from image
    img_gray_resize = cv.CreateImage(
        (img_ipl.width - 2 * Offset[0], img_ipl.height - 2 * Offset[1]), 8, 1)
    cv.SetImageROI(img_gray_ini,
                   (Offset[0], Offset[1], img_ipl.width - 2 * Offset[0],
                    img_ipl.height - 2 * Offset[1]))
    cv.Copy(img_gray_ini, img_gray_resize)
    # Create the image used for processing
    img_gray = cv.CreateImage((img_gray_resize.width, img_gray_resize.height),
                              8, 1)
    img_trait = cv.CreateImage((img_gray.width, img_gray.height), 8, 1)
    # The image used for processing is the same as img_gray_resize
    cv.Copy(img_gray_resize, img_gray)
    # Smooth the image with an asymmetric kernel
    cv.Smooth(img_gray, img_gray, param1=11, param2=9)
    cv.Canny(img_gray, img_trait, 40, 60)
    # Laplacian treatment
    # Creating buffer image
    img_lap_ini = cv.CreateImage((img_gray.width, img_gray.height), 32, 1)
    img_lap = cv.CreateImage((img_lap_ini.width - 2 * Offset[0],
                              img_lap_ini.height - 2 * Offset[1]), 32, 1)
    # Creating buffer img
    img_lap_tmp = cv.CreateImage((img_lap.width, img_lap.height), 32, 1)
    #Computing laplacian
    cv.Laplace(img_gray, img_lap_ini, 5)
    #Applying Offset to avoid border effect
    cv.SetImageROI(img_lap_ini,
                   (Offset[0], Offset[1], img_lap_ini.width - 2 * Offset[0],
                    img_lap_ini.height - 2 * Offset[1]))
    #Copying laplacian treated image to final laplacian image
    cv.Copy(img_lap_ini, img_lap)
    # Apply an asymmetric smoothing
    cv.Smooth(img_lap, img_lap, param1=21, param2=11)
    # Define the kernel for the closing algorithm
    MKernel = cv.CreateStructuringElementEx(7, 3, 3, 1, cv.CV_SHAPE_RECT)
    # Closing contour procedure
    cv.MorphologyEx(img_lap, img_lap, img_lap_tmp, MKernel, cv.CV_MOP_CLOSE,
                    NiteClosing)
    # Convert the image to an 8-bit image
    img_lap8_ini = cv.CreateImage((img_lap.width, img_lap.height), 8, 1)
    cv.Convert(img_lap, img_lap8_ini)
    # Add white border to image
    mat_bord = WhiteBorder(np.asarray(img_lap8_ini[:]), XSize, YSize)
    img_lap8 = cv.CreateImage(
        (img_lap.width + 2 * XSize, img_lap.height + 2 * YSize), 8, 1)
    img_lap8 = cv.fromarray(mat_bord)
    #Compute threshold
    seuil_tmp = Seuil_var(img_lap8)
    # If seuil_tmp is non-zero
    if seuil_tmp != 0:
        seuil = seuil_tmp
    # Otherwise seuil is fixed at 20, which prevents false positive detections
    else:
        seuil = 20
    #Compute thresholded image
    img_lap_bi = cv.CreateImage((img_lap8.width, img_lap8.height), 8, 1)
    img_lap_color = cv.CreateImage((img_lap8.width, img_lap8.height), 8, 3)
    img_trait_lap = cv.CreateImage((img_lap8.width, img_lap8.height), 8, 1)
    #Compute thresholded image
    cv.Threshold(img_lap8, img_lap_bi, seuil, 255, cv.CV_THRESH_BINARY)
    #Gaussian smoothing on laplacian
    cv.Smooth(img_lap_bi, img_lap_bi, param1=11, param2=11)
    # Convert the greyscale laplacian image to a binary image using "seuil" as the threshold value
    cv.Threshold(img_lap_bi, img_lap_bi, 1, 255, cv.CV_THRESH_BINARY_INV)
    cv.CvtColor(img_lap_bi, img_lap_color, cv.CV_GRAY2BGR)
    # Compute edges in the laplacian image
    cv.Canny(img_lap_bi, img_trait_lap, 0, 2)
    #Find contour
    seqlapbi = cv.FindContours(img_trait_lap, cv.CreateMemStorage(),
                               cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
    # Filter the contours
    try:
        contour_list = parcourt_contour(seqlapbi, img_lap_color)
    except:
        # If an error is trapped then no loop was detected
        return (0, 0, ("No loop detected", -1, -1))


    # If the contour list is not empty
    NCont = len(contour_list)
    if (NCont > 0):
        # The CvSeq is inverted: X(i) becomes i(X)
        indice = MapCont(contour_list[0], img_lap_color.width,
                         img_lap_color.height)
        # The target coordinates are computed in the processed image
        point_shift = integreCont(indice, contour_list[0])
        # The coordinates in the original image are computed taking the offset and white border into account
        point = (point_shift[0], point_shift[1] + 2 * Offset[0] - XSize,
                 point_shift[2] + 2 * Offset[1] - YSize)
    else:
        #Else no loop is detected
        point = ("No loop detected", -1, -1)
        Aire_Max = 0

    return point
Example 30
# Otsu threshold
image = loadGreyscale()
cv.Threshold(image, image, threshold, color, cv.CV_THRESH_OTSU)
showWindow("Otsu threshold")

# Dilation
image = loadGreyscale()
element_shape = cv.CV_SHAPE_RECT
pos = 1
element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                        element_shape)
cv.Dilate(image, image, element, 2)
showWindow("Dilate")

# Erosion
image = loadGreyscale()
cv.Erode(image, image, element, 2)
showWindow("Erode")

# Morphology
image = loadGreyscale()
cv.MorphologyEx(image, image, image, element, cv.CV_MOP_CLOSE, 2)
showWindow("Morphology")

# Laplace
image = loadGreyscale()
dst_16s2 = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_16S, 1)
cv.Laplace(image, dst_16s2)
cv.Convert(dst_16s2, image)
showWindow('Laplace')
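With cv2.threshold the Otsu flag computes the threshold itself and returns it alongside the binarised image; a minimal sketch, assuming an 8-bit single-channel input:

import cv2

def otsu_cv2(gray):
    t, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return t, binary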