Example No. 1
    def _preprocess(self, frame):
        '''
        Formats a raw RGB image into a processed image on which template
        matching is performed.

        In this case, the Laplacian of both the image and the template is
        calculated, and the matching is done on those preprocessed frames.
        In the future, this function should implement multiple preprocessing
        methods that can be selected at init.

        '''

        dst = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F,
                             frame.channels)
        cv.Laplace(frame, dst, 19)
        return dst
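
A possible usage sketch (not part of the original class; the name match_on_laplacian is illustrative): both the frame and the template would be run through the same preprocessing before cv.MatchTemplate is called.

import cv

def match_on_laplacian(frame, template, preprocess):
    # preprocess is assumed to behave like _preprocess above: both inputs get
    # the same Laplacian treatment before matching
    pf = preprocess(frame)
    pt = preprocess(template)
    # result size follows the MatchTemplate convention: (W - w + 1, H - h + 1)
    (W, H) = cv.GetSize(pf)
    (w, h) = cv.GetSize(pt)
    result = cv.CreateImage((W - w + 1, H - h + 1), cv.IPL_DEPTH_32F, 1)
    cv.MatchTemplate(pf, pt, result, cv.CV_TM_CCORR_NORMED)
    # location of the best match
    (_, _, _, max_loc) = cv.MinMaxLoc(result)
    return max_loc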
Example No. 2
def camera_capture():

    # /dev/video0
    c = cv.CaptureFromCAM(0)
    #assert type(c) ==  "cv.Capture"

    # or use QueryFrame. It's the same
    cv.GrabFrame(c)
    image = cv.RetrieveFrame(c)
    #image = cv.QueryFrame(c)
    assert image is not None

    dst = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_16S, 3)
    #im = cv.CloneImage(image)
    cv.Laplace(image, dst)  # writes the result into dst (cv.Laplace returns None)
    cv.SaveImage("my-camera.png", dst)

    print cv.GetCaptureProperty(c, cv.CV_CAP_PROP_FRAME_HEIGHT)
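
The Laplacian above lands in a 16-bit signed buffer; a small follow-up helper (an assumed addition, not in the original snippet) scales it back to 8 bits so it can be displayed or written out safely.

import cv

def to_displayable(lap_16s, channels=3):
    # Scale the absolute value of a 16-bit signed Laplacian into an 8-bit image
    out = cv.CreateImage(cv.GetSize(lap_16s), cv.IPL_DEPTH_8U, channels)
    cv.ConvertScaleAbs(lap_16s, out)
    return out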
Example No. 3
def plot_contours(src_image, dest_image):
    cv.NamedWindow("debug", cv.CV_WINDOW_AUTOSIZE)

    # Better to use HSV when doing extraction
    (width, height) = cv.GetSize(src_image)  # GetSize returns (width, height)
    image_hsv = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 3)
    h_plane = cv.CreateImage((width, height), 8, 1)
    s_plane = cv.CreateImage((width, height), 8, 1)
    v_plane = cv.CreateImage((width, height), 8, 1)
    image_bin = cv.CreateImage((width, height), 8, 1)
    cv.CvtColor(src_image, image_hsv, cv.CV_RGB2HSV)
    # split out the hue plane before operating on it
    cv.Split(image_hsv, h_plane, s_plane, v_plane, None)
    cv.Laplace(h_plane, h_plane, 3)
    cv.Threshold(h_plane, h_plane, 40.0, 255.0, cv.CV_THRESH_BINARY)
    cv.Smooth(h_plane, h_plane, cv.CV_BLUR, 5,
              5)  # It's suggested that smoothing first gives better results
    cv.ShowImage("debug", h_plane)

    # Create binary image to be used for contour detection
    # image_gray = cv.CreateImage(cv.GetSize(src_image), 8, 1)
    # cv.CvtColor(image_hsv, image_gray, cv.CV_BGR2GRAY)
    # cv.Laplace(image_gray, image_gray, 3)
    # cv.Threshold(image_gray, image_gray, 40.0, 255.0, cv.CV_THRESH_BINARY)
    # cv.Smooth(image_gray, image_gray, cv.CV_BLUR, 5, 5) # Its suggested that smoothing first gives better results
    # cv.ShowImage("debug", image_gray)

    contours = cv.FindContours(h_plane, cv.CreateMemStorage(),
                               cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_SIMPLE)
    #cv.DrawContours(dest_image, contours, (255,0,0), (0,255,0), 1, 2)

    # Draw bounding box + mass center of shape
    while contours:
        rect = cv.MinAreaRect2(contours)
        box = cv.BoxPoints(rect)
        for i in range(4):
            cv.Line(dest_image, box[i], box[(i + 1) % 4], (0, 0, 255), 1, 8)

        mom = cv.Moments(contours)
        for j in mom:
            print j
        # momPoint = cv.CvPoint(mom.m10/mom.m00,mom.m01/mom.m00)
        # cv.Circle(dest_image, (mom.m10/mom.m00,mom.m01/mom.m00), 2, (0,255,255))
        # r0 = cv.BoundingRect(cnt)
        # Rectangle(dest_image, pt1, pt2, (0,255,0))
        contours = contours.h_next()
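
The commented-out mass-centre lines can be reconstructed from the spatial moments; a sketch (the helper name is illustrative) that guards against a zero area:

import cv

def contour_center(contour):
    # Mass centre of a contour from its spatial moments
    mom = cv.Moments(contour)
    m00 = cv.GetSpatialMoment(mom, 0, 0)
    if m00 == 0:
        return None
    return (cv.GetSpatialMoment(mom, 1, 0) / m00,
            cv.GetSpatialMoment(mom, 0, 1) / m00)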
Example No. 4
def get_length(filenames, selections_name='selections.txt', select=True):
    """Fits length
    """
    selections = ImageInspections(filenames=filenames)
    try:
        selections.analysis.from_file(selections_name)
    except:
        pass
    if select:
        if selections.configure_traits():
            selections.analysis.to_file(selections_name)

    fit_f = (FitFunction(name='elastomer.gauss1'),
             FitFunction(name='elastomer.gauss1'),
             FitFunction(name='elastomer.gauss1'))

    fitters = [DataFitter(function=fit) for fit in fit_f]

    centers = tuple(
        [rect.center.copy() for rect in selections.analysis.selections])
    vshift = 0
    hshift = 0

    fit_f[0].set_parameters(s=2, a=1, x0=centers[0][0])
    fit_f[1].set_parameters(s=2, a=-1, x0=centers[1][0])
    fit_f[2].set_parameters(s=2, a=1, x0=centers[2][1])

    length = []

    cv.NamedWindow('Image')

    for index, fname in enumerate(filenames):
        print('%d of %d' % (index, len(filenames)))
        mat = cv.LoadImageM(fname)
        #mattmp = cv.CreateMat(mat.rows,mat.cols,cv.CV_16SC3)
        #cv.Laplace(mat,mattmp,7)
        #a = numpy.asarray(mattmp)
        print('%s loaded' % fname)

        for i in range(2):
            rectangle = selections.analysis.selections[i]
            fit = fit_f[i]
            rectangle.center[1] = centers[i][1] + vshift
            fit.set_parameters(x0=rectangle.center[0])
            #ims = (rectangle.slice_image(a))[:,:,0]
            matsl = cv.GetSubRect(mat, rectangle.box)
            mattmp = cv.CreateMat(matsl.rows, matsl.cols, cv.CV_16SC3)
            cv.Laplace(matsl, mattmp, 7)
            ims = numpy.asarray(mattmp)[:, :, 0]

            find_edge(ims, rectangle, fitters[i], direction=0)

        rect0 = selections.analysis.selections[0]
        rect1 = selections.analysis.selections[1]
        hshift = ((-centers[0][0] + rect0.center[0]) +
                  (-centers[1][0] + rect1.center[0])) / 2

        for i in range(1):
            rectangle = selections.analysis.selections[2 + i]
            fit = fit_f[2 + i]
            rectangle.center[0] = centers[2][0] + hshift
            fit.set_parameters(x0=rectangle.center[1])
            #ims = (rectangle.slice_image(a))[:,:,0]
            matsl = cv.GetSubRect(mat, rectangle.box)
            mattmp = cv.CreateMat(matsl.rows, matsl.cols, cv.CV_16SC3)
            cv.Laplace(matsl, mattmp, 7)
            ims = numpy.asarray(mattmp)[:, :, 0]
            find_edge(ims, rectangle, fitters[2 + i], direction=1)

        rect2 = selections.analysis.selections[2]
        vshift = (-centers[2][1] + rect2.center[1])
        print('fits done')

        cv.Circle(mat, tuple(rect0.center), 5, (2**15, 0, 2**15))
        cv.Circle(mat, tuple(rect1.center), 5, (2**15, 0, 2**15))
        cv.Circle(mat, tuple(rect2.center), 5, (2**15, 0, 2**15))
        cv.ShowImage('Image', mat)
        c = cv.WaitKey(10)  #exit on escape key
        if c == 27:
            break
        w = rect1.center[0] - rect0.center[0]

        length.append(w)
    cv.DestroyWindow('Image')
    return length
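
The per-rectangle Laplacian used twice in the loop above can be factored into a helper; a sketch under the same assumptions (a BGR CvMat and an (x, y, width, height) box as accepted by cv.GetSubRect):

import cv
import numpy

def laplacian_of_roi(mat, box, aperture=7):
    # Laplacian of a sub-rectangle, returned as the first channel of a numpy array
    roi = cv.GetSubRect(mat, box)
    tmp = cv.CreateMat(roi.rows, roi.cols, cv.CV_16SC3)
    cv.Laplace(roi, tmp, aperture)
    return numpy.asarray(tmp)[:, :, 0]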
Example No. 5
def find_loop(input_data, IterationClosing=6):
    """
        This function detect support (or loop) and return the coordinates if there is a detection,
        and -1 if not.
        in : filename : string image Filename / Format accepted :
        in : IterationClosing : int : Number of iteration for closing contour procedure
        Out : tupple of coordiante : (string, coordinate X, coordinate Y) where string take value
             'Coord' or 'No loop detected depending if loop was detected or not. If no loop was
              detected coordinate X and coordinate y take the value -1.
     """
    # Global variable definitions
    global AIRE_MIN_REL
    global AIRE_MIN
    global NORM_IMG
    global NiteClosing
    global pointRef
    # Load the image
    try:
        if type(input_data) == str:
            #Image filename is passed
            img_ipl = cv.LoadImageM(input_data)
        elif type(input_data) == np.ndarray:
            img_ipl = cv.fromarray(input_data)
        else:
            print "ERROR : Input image could not be opened, check format or path"
            return (
                "ERROR : Input image could not be opened, check format or path",
                -10, -10)
    except:
        print "ERROR : Input image could not be opened, check format or path"
        return (
            "ERROR : Input image could not be opened, check format or path",
            -10, -10)
    img_cont = img_ipl  # img used for
    NORM_IMG = img_ipl.width * img_ipl.height
    AIRE_MIN = NORM_IMG * AIRE_MIN_REL
    # Processing
    # Convert the input image to greyscale
    img_gray_ini = cv.CreateImage((img_ipl.width, img_ipl.height), 8, 1)
    cv.CvtColor(img_ipl, img_gray_ini, cv.CV_BGR2GRAY)
    #Removing Offset from image
    img_gray_resize = cv.CreateImage(
        (img_ipl.width - 2 * Offset[0], img_ipl.height - 2 * Offset[1]), 8, 1)
    cv.SetImageROI(img_gray_ini,
                   (Offset[0], Offset[1], img_ipl.width - 2 * Offset[0],
                    img_ipl.height - 2 * Offset[1]))
    cv.Copy(img_gray_ini, img_gray_resize)
    # create the image used for processing
    img_gray = cv.CreateImage((img_gray_resize.width, img_gray_resize.height),
                              8, 1)
    img_trait = cv.CreateImage((img_gray.width, img_gray.height), 8, 1)
    # the image used for processing is the same as img_gray_resize
    cv.Copy(img_gray_resize, img_gray)
    # Smooth the image with an asymmetric kernel
    cv.Smooth(img_gray, img_gray, param1=11, param2=9)
    cv.Canny(img_gray, img_trait, 40, 60)
    # Laplacian treatment
    # Creating buffer image
    img_lap_ini = cv.CreateImage((img_gray.width, img_gray.height), 32, 1)
    img_lap = cv.CreateImage((img_lap_ini.width - 2 * Offset[0],
                              img_lap_ini.height - 2 * Offset[1]), 32, 1)
    # Creating buffer img
    img_lap_tmp = cv.CreateImage((img_lap.width, img_lap.height), 32, 1)
    #Computing laplacian
    cv.Laplace(img_gray, img_lap_ini, 5)
    #Applying Offset to avoid border effect
    cv.SetImageROI(img_lap_ini,
                   (Offset[0], Offset[1], img_lap_ini.width - 2 * Offset[0],
                    img_lap_ini.height - 2 * Offset[1]))
    #Copying laplacian treated image to final laplacian image
    cv.Copy(img_lap_ini, img_lap)
    # Apply an asymmetric smoothing
    cv.Smooth(img_lap, img_lap, param1=21, param2=11)
    # Define the kernel for the closing algorithm
    MKernel = cv.CreateStructuringElementEx(7, 3, 3, 1, cv.CV_SHAPE_RECT)
    # Closing contour procedure
    cv.MorphologyEx(img_lap, img_lap, img_lap_tmp, MKernel, cv.CV_MOP_CLOSE,
                    NiteClosing)
    # Convert the image to 8 bits
    img_lap8_ini = cv.CreateImage((img_lap.width, img_lap.height), 8, 1)
    cv.Convert(img_lap, img_lap8_ini)
    # Add white border to image
    mat_bord = WhiteBorder(np.asarray(img_lap8_ini[:]), XSize, YSize)
    img_lap8 = cv.CreateImage(
        (img_lap.width + 2 * XSize, img_lap.height + 2 * YSize), 8, 1)
    img_lap8 = cv.fromarray(mat_bord)
    #Compute threshold
    seuil_tmp = Seuil_var(img_lap8)
    # If seuil_tmp is not zero
    if seuil_tmp != 0:
        seuil = seuil_tmp
    # Otherwise seuil is fixed at 20, which prevents false positive detections
    else:
        seuil = 20
    # Allocate buffers for the thresholded images
    img_lap_bi = cv.CreateImage((img_lap8.width, img_lap8.height), 8, 1)
    img_lap_color = cv.CreateImage((img_lap8.width, img_lap8.height), 8, 3)
    img_trait_lap = cv.CreateImage((img_lap8.width, img_lap8.height), 8, 1)
    #Compute thresholded image
    cv.Threshold(img_lap8, img_lap_bi, seuil, 255, cv.CV_THRESH_BINARY)
    #Gaussian smoothing on laplacian
    cv.Smooth(img_lap_bi, img_lap_bi, param1=11, param2=11)
    # Convert the greyscale Laplacian image to a binary image
    cv.Threshold(img_lap_bi, img_lap_bi, 1, 255, cv.CV_THRESH_BINARY_INV)
    cv.CvtColor(img_lap_bi, img_lap_color, cv.CV_GRAY2BGR)
    #Compute edge in laplacian image
    cv.Canny(img_lap_bi, img_trait_lap, 0, 2)
    #Find contour
    seqlapbi = cv.FindContours(img_trait_lap, cv.CreateMemStorage(),
                               cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
    #contour is filtered
    try:
        contour_list = parcourt_contour(seqlapbi, img_lap_color)
    except:
        # If an error is trapped then no loop was detected
        return (0, 0, ("No loop detected", -1, -1))


    # If the contour list is not empty
    NCont = len(contour_list)
    if (NCont > 0):
        # The CvSeq is inverted: X(i) becomes i(X)
        indice = MapCont(contour_list[0], img_lap_color.width,
                         img_lap_color.height)
        # The coordinates of the target are computed in the processed image
        point_shift = integreCont(indice, contour_list[0])
        # The coordinates in the original image are computed taking the Offset and the white border into account
        point = (point_shift[0], point_shift[1] + 2 * Offset[0] - XSize,
                 point_shift[2] + 2 * Offset[1] - YSize)
    else:
        #Else no loop is detected
        point = ("No loop detected", -1, -1)
        Aire_Max = 0

    return point
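
A short usage sketch (the filename is illustrative, and the module-level globals such as Offset and AIRE_MIN_REL must already be defined):

point = find_loop("loop_image.jpg")
if point[0] == "Coord":
    print "Loop detected at", point[1], point[2]
else:
    print point[0]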
Example No. 6
# Otsu threshold
image = loadGreyscale()
cv.Threshold(image, image, threshold, color, cv.CV_THRESH_OTSU)
showWindow("Otsu threshold")

# Dilation
image = loadGreyscale()
element_shape = cv.CV_SHAPE_RECT
pos = 1
element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                        element_shape)
cv.Dilate(image, image, element, 2)
showWindow("Dilate")

# Erosion
image = loadGreyscale()
cv.Erode(image, image, element, 2)
showWindow("Erode")

# Morphology
image = loadGreyscale()
cv.MorphologyEx(image, image, image, element, cv.CV_MOP_CLOSE, 2)
showWindow("Morphology")

# Laplace
image = loadGreyscale()
dst_16s2 = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_16S, 1)
cv.Laplace(image, dst_16s2)
cv.Convert(dst_16s2, image)
showWindow('Laplace')
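
loadGreyscale and showWindow (and the threshold/color values used in the Otsu call) come from elsewhere in the original script; minimal stand-ins, purely an assumption about their behaviour, could look like:

import cv

def loadGreyscale(path="input.png"):
    # Load the working image as a single-channel 8-bit greyscale image
    return cv.LoadImage(path, cv.CV_LOAD_IMAGE_GRAYSCALE)

def showWindow(title):
    # Display the current global 'image' and wait for a key press
    cv.NamedWindow(title, cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage(title, image)
    cv.WaitKey(0)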
Example No. 7
def laplace(im):
    im = rgb2gray(im)
    new_im = new_from(im, depth=cv.IPL_DEPTH_16S)
    cv.Laplace(im, new_im)
    cv.ConvertScaleAbs(new_im, im)
    return im
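
rgb2gray and new_from are helpers defined elsewhere in that module; a plausible reading of new_from (an assumption, not the original code) is an allocator that copies the size and optionally overrides depth and channel count:

import cv

def new_from(im, depth=None, channels=None):
    # Allocate an empty image the same size as 'im'
    if depth is None:
        depth = im.depth
    if channels is None:
        channels = im.nChannels
    return cv.CreateImage(cv.GetSize(im), depth, channels)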
Example No. 8
from PIL import Image
import cv
import cv2
import numpy as np
import subprocess
from datetime import datetime
from urllib2 import urlopen
from cStringIO import StringIO as StringIO2

# File settings
saveWidth = 1280
saveHeight = 960


# Save a full size image to disk
def saveImage(width, height):
    time = datetime.now()
    filename = "capture-%04d%02d%02d-%02d%02d%02d.jpg" % (
        time.year, time.month, time.day, time.hour, time.minute, time.second)
    subprocess.call("raspistill -w 1296 -h 972 -t 0 -e jpg -q 15 -o %s" %
                    filename,
                    shell=True)
    return filename


filename = saveImage(saveWidth, saveHeight)
src = cv.LoadImageM(filename, cv.CV_LOAD_IMAGE_COLOR)
image = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1)
cv.CvtColor(src, image, cv.CV_RGB2GRAY)
laplace = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1)
cv.Laplace(image, laplace)  # Laplace the greyscale copy into a separate buffer
cv.NamedWindow('testImage', cv.CV_WINDOW_AUTOSIZE)
cv.ShowImage('testImage', laplace)
cv.WaitKey(0)
Example No. 9
    def loc(self):
        """Compute the Laplacian of the current frame."""
        dst = cv.CreateImage(cv.GetSize(self.frame), cv.IPL_DEPTH_16S, 3)
        # cv.Laplace writes into dst and returns None
        laplace = cv.Laplace(self.frame, dst)
        return (laplace, dst)
Example No. 10
    def repeat(begin, unmute, last, hold, beginhold):
        """Actual finger detection function, passes mute and click status"""

        #captures input frame
        frame = cv.QueryFrame(capture)

        #creates horizontally flipped copy of input frame to work with
        cv.Copy(frame, sframe)
        cv.Flip(sframe, sframe, 1)

        #makes mask of skintones
        dog = skin(sframe, ccolor)

        #inverts skintone mask to all non-skin areas
        cv.ConvertScale(dog, dog, -1, 255)

        #makes greyscale copy of frame
        cv.CvtColor(sframe, grey, cv.CV_BGR2GRAY)

        #replaces nonskin areas with white
        cv.Add(grey, white, grey, dog)

        #implements laplacian edge detection on greyscale image
        dst_16s2 = cv.CreateImage(cv.GetSize(bg), cv.IPL_DEPTH_16S, 1)
        cv.Laplace(grey, dst_16s2, 5)
        cv.Convert(dst_16s2, grey)

        #creates a threshold to binarize the image
        cv.Threshold(grey, grey, 75, 255, cv.CV_THRESH_BINARY)

        #creates contours on greyscale image
        storage = cv.CreateMemStorage(0)
        contours = cv.FindContours(grey, storage, cv.CV_RETR_TREE,
                                   cv.CV_CHAIN_APPROX_SIMPLE)

        #sets final display frame background to black
        cv.Set(cframe, 0)

        #sets minimum range for object detection
        mx = 20000
        #initializes hand position to previous
        best = last
        #creates some cvSeq maxcont by copying contours
        maxcont = contours

        #goes through all contours and finds bounding box
        while contours:
            bound_rect = cv.BoundingRect(list(contours))

            #if bounding box area is greater than min range or current max box
            if bound_rect[3] * bound_rect[2] > mx:

                #sets max to current object, creates position at center of box, and sets display contour to current
                mx = bound_rect[3] * bound_rect[2]
                maxcont = contours

            #goes to next contour
            contours = contours.h_next()

        #draws largest contour on final frame
        cv.DrawContours(cframe, maxcont, 255, 127, 0)

        if maxcont:
            #creates convex hull of largest contour
            chull = cv.ConvexHull2(maxcont, storage, cv.CV_CLOCKWISE, 1)
            cv.PolyLine(cframe, [chull], 1, 255)
            chulllist = list(chull)
            chull = cv.ConvexHull2(maxcont, storage, cv.CV_CLOCKWISE, 0)
            cdefects = cv.ConvexityDefects(maxcont, chull, storage)

            #filters small convexity defects and draws large ones
            truedefects = []
            for j in cdefects:
                if j[3] > 30:
                    truedefects.append(j)
                    cv.Circle(cframe, j[2], 6, 255)

            #if hand is in a pointer position, detects tip of convex hull
            if cdefects and len(truedefects) < 4:
                tipheight = 481
                tiploc = 0
                for j in chulllist:
                    if j[1] < tipheight:
                        tipheight = j[1]
                        tiploc = chulllist.index(j)
                best = chulllist[tiploc]

        #keeps last position if movement too quick, or smooths slower movement
        xdiff = best[0] - last[0]
        ydiff = best[1] - last[1]
        dist = math.sqrt(xdiff**2 + ydiff**2)
        if dist > 100:
            best = last
        else:
            best = (last[0] + xdiff * .75, last[1] + ydiff * .75)

        #draws main position circle
        cv.Circle(cframe, (int(best[0]), int(best[1])), 20, 255)

        #displays image with contours
        cv.ShowImage("w2", cframe)
        cv.MoveWindow('w2', 600, 0)
        #delay between frame capture
        c = cv.WaitKey(10)

        if not hold:
            #if largest contour covers half the screen
            if mx > 153600 / 2:
                #begins timer if not yet started
                if begin == 0: begin = time.time()
                else:

                    #sets volume to new volume, or 0 if muted
                    #in Linux
                    if sysname == True:
                        os.system('amixer set Master %s' %
                                  (.64 * unmute * (100 - best[1] / 4.8)))
                    #in Mac
                    else:
                        os.system(
                            'osascript -e \'set volume output volume %s\'' %
                            (.64 * unmute * (100 - best[1] / 4.8)))

                    #if 3 seconds have passed, stops timer and switches mute status
                    if time.time() - begin > 3:
                        unmute = 1 - unmute
                        begin = 0

            #stops timer and sets volume to new, if unmuted
            else:
                begin = 0
                #in Linux
                if sysname == True:
                    os.system('amixer set Master %s' %
                              (int(.64 * unmute *
                                   (100 - best[1] / 4.8)) * .75))
                #in Mac
                else:
                    os.system('osascript -e \'set volume output volume %s\'' %
                              (int(.64 * unmute *
                                   (100 - best[1] / 4.8)) * .75))

        #returns timer start, mute status, and previous hand position
        return (begin, unmute, best, hold, beginhold)
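
The capture object and the working buffers (sframe, grey, white, bg, cframe), as well as skin(), ccolor and the sysname flag, are created outside this excerpt; a rough guess at the buffer setup, assuming 640x480 frames, might be:

import math, os, time
import cv

capture = cv.CaptureFromCAM(0)
size = (640, 480)                                  # assumed frame size
sframe = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)  # flipped working copy
grey = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)    # greyscale buffer
cframe = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)  # final display frame
bg = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)      # used only for its size here
white = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
cv.Set(white, 255)                                 # all-white fill for the skin mask step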
Example No. 11
    def detect(self):
        self.detected = 0
        cv.Smooth(self.grey, self.dst2, cv.CV_GAUSSIAN, 3)
        cv.Laplace(self.dst2, self.d)
        cv.CmpS(self.d, 8, self.d2, cv.CV_CMP_GT)

        if self.onlyBlackCubes:
            # can also detect on black lines for improved robustness
            cv.CmpS(self.grey, 100, self.b, cv.CV_CMP_LT)
            cv.And(self.b, self.d2, self.d2)

        # these weights should be adaptive. We should always detect 100 lines
        if self.lastdetected > self.dects:
            self.THR = self.THR + 1

        if self.lastdetected < self.dects:
            self.THR = max(2, self.THR - 1)

        self.li = cv.HoughLines2(self.d2, cv.CreateMemStorage(),
                                 cv.CV_HOUGH_PROBABILISTIC, 1, 3.1415926 / 45,
                                 self.THR, 10, 5)

        # store angles for later
        angs = []
        for (p1, p2) in self.li:
            # cv.Line(sg,p1,p2,(0,255,0))
            a = atan2(p2[1] - p1[1], p2[0] - p1[0])
            if a < 0:
                a += pi
            angs.append(a)

        # log.info("THR %d, lastdetected %d, dects %d, houghlines %d, angles: %s" % (self.THR, self.lastdetected, self.dects, len(self.li), pformat(angs)))

        # lets look for lines that share a common end point
        t = 10
        totry = []

        for i in range(len(self.li)):
            p1, p2 = self.li[i]

            for j in range(i + 1, len(self.li)):
                q1, q2 = self.li[j]

                # test lengths are approximately consistent
                dd1 = sqrt((p2[0] - p1[0]) * (p2[0] - p1[0]) +
                           (p2[1] - p1[1]) * (p2[1] - p1[1]))
                dd2 = sqrt((q2[0] - q1[0]) * (q2[0] - q1[0]) +
                           (q2[1] - q1[1]) * (q2[1] - q1[1]))

                if max(dd1, dd2) / min(dd1, dd2) > 1.3:
                    continue

                matched = 0
                if areclose(p1, q2, t):
                    IT = (avg(p1, q2), p2, q1, dd1)
                    matched = matched + 1

                if areclose(p2, q2, t):
                    IT = (avg(p2, q2), p1, q1, dd1)
                    matched = matched + 1

                if areclose(p1, q1, t):
                    IT = (avg(p1, q1), p2, q2, dd1)
                    matched = matched + 1

                if areclose(p2, q1, t):
                    IT = (avg(p2, q1), q2, p1, dd1)
                    matched = matched + 1

                if matched == 0:
                    # not touching at corner... try also inner grid segments hypothesis?
                    self.p1 = (float(p1[0]), float(p1[1]))
                    self.p2 = (float(p2[0]), float(p2[1]))
                    self.q1 = (float(q1[0]), float(q1[1]))
                    self.q2 = (float(q2[0]), float(q2[1]))
                    success, (ua, ub), (x, y) = intersect_seg(
                        self.p1[0], self.p2[0], self.q1[0], self.q2[0],
                        self.p1[1], self.p2[1], self.q1[1], self.q2[1])

                    if success and ua > 0 and ua < 1 and ub > 0 and ub < 1:
                        # if they intersect
                        # cv.Line(sg, p1, p2, (255,255,255))
                        ok1 = 0
                        ok2 = 0

                        if abs(ua - 1.0 / 3) < 0.05:
                            ok1 = 1

                        if abs(ua - 2.0 / 3) < 0.05:
                            ok1 = 2

                        if abs(ub - 1.0 / 3) < 0.05:
                            ok2 = 1

                        if abs(ub - 2.0 / 3) < 0.05:
                            ok2 = 2

                        if ok1 > 0 and ok2 > 0:
                            # ok these are inner lines of grid
                            # flip if necessary
                            if ok1 == 2:
                                self.p1, self.p2 = self.p2, self.p1

                            if ok2 == 2:
                                self.q1, self.q2 = self.q2, self.q1

                            # both lines now go from p1->p2, q1->q2 and
                            # intersect at 1/3
                            # calculate IT
                            z1 = (self.q1[0] + 2.0 / 3 *
                                  (self.p2[0] - self.p1[0]), self.q1[1] +
                                  2.0 / 3 * (self.p2[1] - self.p1[1]))
                            z2 = (self.p1[0] + 2.0 / 3 *
                                  (self.q2[0] - self.q1[0]), self.p1[1] +
                                  2.0 / 3 * (self.q2[1] - self.q1[1]))
                            z = (self.p1[0] - 1.0 / 3 *
                                 (self.q2[0] - self.q1[0]), self.p1[1] -
                                 1.0 / 3 * (self.q2[1] - self.q1[1]))
                            IT = (z, z1, z2, dd1)
                            matched = 1

                # only single one matched!! Could be corner
                if matched == 1:

                    # also test angle
                    a1 = atan2(p2[1] - p1[1], p2[0] - p1[0])
                    a2 = atan2(q2[1] - q1[1], q2[0] - q1[0])

                    if a1 < 0:
                        a1 += pi

                    if a2 < 0:
                        a2 += pi

                    ang = abs(abs(a2 - a1) - pi / 2)

                    if ang < 0.5:
                        totry.append(IT)
                        # cv.Circle(sg, IT[0], 5, (255,255,255))

        # now check if any points in totry are consistent!
        # t=4
        res = []
        for i in range(len(totry)):

            p, p1, p2, dd = totry[i]
            a1 = atan2(p1[1] - p[1], p1[0] - p[0])
            a2 = atan2(p2[1] - p[1], p2[0] - p[0])

            if a1 < 0:
                a1 += pi

            if a2 < 0:
                a2 += pi

            dd = 1.7 * dd
            evidence = 0

            # cv.Line(sg,p,p2,(0,255,0))
            # cv.Line(sg,p,p1,(0,255,0))

            # affine transform to local coords
            A = matrix([[p2[0] - p[0], p1[0] - p[0], p[0]],
                        [p2[1] - p[1], p1[1] - p[1], p[1]], [0, 0, 1]])
            Ainv = A.I

            v = matrix([[p1[0]], [p1[1]], [1]])

            # check likelihood of this coordinate system. iterate all lines
            # and see how many align with grid
            for j in range(len(self.li)):

                # test angle consistency with either one of the two angles
                a = angs[j]
                ang1 = abs(abs(a - a1) - pi / 2)
                ang2 = abs(abs(a - a2) - pi / 2)

                if ang1 > 0.1 and ang2 > 0.1:
                    continue

                # test position consistency.
                q1, q2 = self.li[j]
                qwe = 0.06

                # test one endpoint
                v = matrix([[q1[0]], [q1[1]], [1]])
                vp = Ainv * v

                # project it
                if vp[0, 0] > 1.1 or vp[0, 0] < -0.1:
                    continue

                if vp[1, 0] > 1.1 or vp[1, 0] < -0.1:
                    continue

                if abs(vp[0, 0] - 1 / 3.0) > qwe and abs(vp[0, 0] - 2 / 3.0) > qwe and \
                        abs(vp[1, 0] - 1 / 3.0) > qwe and abs(vp[1, 0] - 2 / 3.0) > qwe:
                    continue

                # the other end point
                v = matrix([[q2[0]], [q2[1]], [1]])
                vp = Ainv * v

                if vp[0, 0] > 1.1 or vp[0, 0] < -0.1:
                    continue

                if vp[1, 0] > 1.1 or vp[1, 0] < -0.1:
                    continue

                if abs(vp[0, 0] - 1 / 3.0) > qwe and abs(vp[0, 0] - 2 / 3.0) > qwe and \
                        abs(vp[1, 0] - 1 / 3.0) > qwe and abs(vp[1, 0] - 2 / 3.0) > qwe:
                    continue

                # cv.Circle(sg, q1, 3, (255,255,0))
                # cv.Circle(sg, q2, 3, (255,255,0))
                # cv.Line(sg,q1,q2,(0,255,255))
                evidence += 1

            res.append((evidence, (p, p1, p2)))

        minch = 10000
        res.sort(reverse=True)
        # log.info("dects %s, res:\n%s" % (self.dects, pformat(res)))

        if len(res) > 0:
            minps = []
            pt = []

            # among good observations find best one that fits with last one
            for i in range(len(res)):

                if res[i][0] > 0.05 * self.dects:
                    # OK WE HAVE GRID
                    p, p1, p2 = res[i][1]
                    p3 = (p2[0] + p1[0] - p[0], p2[1] + p1[1] - p[1])

                    # cv.Line(sg,p,p1,(0,255,0),2)
                    # cv.Line(sg,p,p2,(0,255,0),2)
                    # cv.Line(sg,p2,p3,(0,255,0),2)
                    # cv.Line(sg,p3,p1,(0,255,0),2)
                    # cen=(0.5*p2[0]+0.5*p1[0],0.5*p2[1]+0.5*p1[1])
                    # cv.Circle(sg, cen, 20, (0,0,255),5)
                    # cv.Line(sg, (0,cen[1]), (320,cen[1]),(0,255,0),2)
                    # cv.Line(sg, (cen[0],0), (cen[0],240), (0,255,0),2)

                    w = [p, p1, p2, p3]
                    p3 = (self.prevface[2][0] + self.prevface[1][0] -
                          self.prevface[0][0], self.prevface[2][1] +
                          self.prevface[1][1] - self.prevface[0][1])
                    tc = (self.prevface[0], self.prevface[1], self.prevface[2],
                          p3)
                    ch = compfaces(w, tc)

                    # log.info("ch %s, minch %s" % (ch, minch))
                    if ch < minch:
                        minch = ch
                        minps = (p, p1, p2)

            # log.info("minch %d, minps:\n%s" % (minch, pformat(minps)))

            if len(minps) > 0:
                self.prevface = minps

                if minch < 10:
                    # good enough!
                    self.succ += 1
                    self.pt = self.prevface
                    self.detected = 1
                    # log.info("detected %d, succ %d" % (self.detected, self.succ))

            else:
                self.succ = 0

            # log.info("succ %d\n\n" % self.succ)

            # we matched a few times same grid
            # coincidence? I think NOT!!! Init LK tracker
            if self.succ > 2:

                # initialize features for LK
                pt = []
                for i in [1.0 / 3, 2.0 / 3]:
                    for j in [1.0 / 3, 2.0 / 3]:
                        pt.append(
                            (self.p0[0] + i * self.v1[0] + j * self.v2[0],
                             self.p0[1] + i * self.v1[1] + j * self.v2[1]))

                self.features = pt
                self.tracking = True
                self.succ = 0
                log.info("non-tracking -> tracking: succ %d" % self.succ)
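
This method leans on the math module and numpy, plus project helpers (areclose, avg, intersect_seg, compfaces, log); the standard imports it assumes would be roughly:

from math import atan2, sqrt, pi
from numpy import matrix
import cv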
Example No. 12
    def repeat1(begin, unmute, last, hold, beginhold):
        """actual function for moving and clicking mouse"""
        def click_down():
            """Simulates a down click"""
            fake_input(d, ButtonPress, 1)
            d.sync()

        def click_up():
            """Simulates an up click"""
            fake_input(d, ButtonRelease, 1)
            d.sync()

        #captures input frame
        frame = cv.QueryFrame(capture)

        #initializes mouse behavior
        d = Display()
        s = d.screen()
        root = s.root

        #creates horizontally flipped copy of input frame to work with
        cv.Copy(frame, sframe)
        cv.Flip(sframe, sframe, 1)

        #makes mask of skintones
        dog = skin(sframe, ccolor)

        #inverts skintone mask to all non-skin areas
        cv.ConvertScale(dog, dog, -1, 255)

        #makes greyscale copy of frame
        cv.CvtColor(sframe, grey, cv.CV_BGR2GRAY)

        #replaces nonskin areas with white
        cv.Add(grey, white, grey, dog)

        #implements laplacian edge detection on greyscale image
        dst_16s2 = cv.CreateImage(cv.GetSize(bg), cv.IPL_DEPTH_16S, 1)
        cv.Laplace(grey, dst_16s2, 5)
        cv.Convert(dst_16s2, grey)

        #creates a threshold to binarize the image
        cv.Threshold(grey, grey, 75, 255, cv.CV_THRESH_BINARY)

        #creates contours on greyscale image
        storage = cv.CreateMemStorage(0)
        contours = cv.FindContours(grey, storage, cv.CV_RETR_TREE,
                                   cv.CV_CHAIN_APPROX_SIMPLE)

        #sets final display frame background to black
        cv.Set(cframe, 0)

        #sets minimum range for object detection
        mx = 20000
        #initializes hand position to previous
        best = last
        #creates some cvSeq maxcont by copying contours
        maxcont = contours

        #goes through all contours and finds bounding box
        while contours:
            bound_rect = cv.BoundingRect(list(contours))

            #if bounding box area is greater than min range or current max box
            if bound_rect[3] * bound_rect[2] > mx:

                #sets max to current object, creates position at center of box, and sets display contour to current
                mx = bound_rect[3] * bound_rect[2]
                maxcont = contours

            #goes to next contour
            contours = contours.h_next()

        #draws largest contour on final frame
        cv.DrawContours(cframe, maxcont, 255, 127, 0)

        if maxcont:
            #draws and finds convex hull and convexity defects
            chull = cv.ConvexHull2(maxcont, storage, cv.CV_CLOCKWISE, 1)
            cv.PolyLine(cframe, [chull], 1, 255)
            chulllist = list(chull)
            chull = cv.ConvexHull2(maxcont, storage, cv.CV_CLOCKWISE, 0)
            cdefects = cv.ConvexityDefects(maxcont, chull, storage)

            #filters smaller convexity defects and displays larger ones
            truedefects = []
            for j in cdefects:
                if j[3] > 30:
                    truedefects.append(j)
                    cv.Circle(cframe, j[2], 6, 255)

            #Finds highest point of convex hull if hand follows smooth vertical shape
            if cdefects and len(truedefects) < 4:
                tipheight = 481
                tiploc = 0
                for j in chulllist:
                    if j[1] < tipheight:
                        tipheight = j[1]
                        tiploc = chulllist.index(j)
                best = chulllist[tiploc]

            #if hand is open, begin click
            if len(truedefects) >= 4:
                if beginhold == 0:
                    beginhold = time.time()
                else:
                    #if .05 seconds have passed, clicks down
                    if (time.time() - beginhold > .05) and not hold:
                        hold = True
                        beginhold = 0
                        click_down()

            #unclicks if hand returns to smooth
            else:
                if hold:
                    click_up()
                    hold = False
                beginhold = 0

        #keeps last position if movement too quick, or smooths slower movement
        xdiff = best[0] - last[0]
        ydiff = best[1] - last[1]
        dist = math.sqrt(xdiff**2 + ydiff**2)
        if dist > 100:
            best = last
        else:
            best = (last[0] + xdiff * .75, last[1] + ydiff * .75)

        #displays main position circle
        cv.Circle(cframe, (int(best[0]), int(best[1])), 20, 255)
        #displays image with contours
        cv.ShowImage("w2", cframe)
        cv.MoveWindow('w2', 500, 0)
        #delay between frame capture
        c = cv.WaitKey(10)

        #Mouse Move/ Bottom Pointer
        Dx, Dy = mousedelta(last, best)
        root.warp_pointer((best[0] - 320) * 1600 / 600 + 800,
                          best[1] * 900 / 360)
        d.sync()

        return (begin, unmute, best, hold, beginhold)
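
This variant also drives the mouse through python-xlib; the imports it assumes (an inference, they are not shown in the excerpt) would be roughly:

import math, time
import cv
from Xlib.display import Display
from Xlib.X import ButtonPress, ButtonRelease
from Xlib.ext.xtest import fake_input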
Example No. 13
#!/usr/bin/python
import cv #Import functions from OpenCV

cv.NamedWindow('a_window', cv.CV_WINDOW_AUTOSIZE)
storage = cv.CreateMemStorage()

image = cv.LoadImage('amundi.jpg', cv.CV_LOAD_IMAGE_COLOR)  # Load the image

grey = cv.CreateImage(cv.GetSize(image), 8, 1)
cv.CvtColor(image, grey, cv.CV_BGR2GRAY)

#cv.EqualizeHist(grey, grey)
cv.Laplace(grey, grey, 3)

#clone = cv.CloneImage(image)
#contours = cv.FindContours(grey, storage, cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_SIMPLE, (0,0))

#cv.DrawContours(image, contours, -1, (255,0,0), 5)

cv.ShowImage('a_window', grey) #Show the image
cv.WaitKey(10000)
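
A hedged sketch of how the commented-out contour section could be enabled, binarising the Laplacian first as several of the other examples do:

cv.Threshold(grey, grey, 40, 255, cv.CV_THRESH_BINARY)
contours = cv.FindContours(grey, storage, cv.CV_RETR_LIST,
                           cv.CV_CHAIN_APPROX_SIMPLE)
if contours:
    cv.DrawContours(image, contours, (255, 0, 0), (0, 255, 0), 1, 2)
cv.ShowImage('a_window', image)
cv.WaitKey(10000)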

Example No. 14
        sys.exit(-1)

    cv.NamedWindow("Laplacian", 1)

    while True:
        frame = cv.QueryFrame(capture)
        if frame:
            if not laplace:
                planes = [
                    cv.CreateImage((frame.width, frame.height), 8, 1)
                    for i in range(3)
                ]
                laplace = cv.CreateImage((frame.width, frame.height),
                                         cv.IPL_DEPTH_16S, 1)
                colorlaplace = cv.CreateImage((frame.width, frame.height), 8,
                                              3)

            cv.Split(frame, planes[0], planes[1], planes[2], None)
            for plane in planes:
                cv.Laplace(plane, laplace, 3)
                cv.ConvertScaleAbs(laplace, plane, 1, 0)

            cv.Merge(planes[0], planes[1], planes[2], None, colorlaplace)

            cv.ShowImage("Laplacian", colorlaplace)

        if cv.WaitKey(10) != -1:
            break

    cv.DestroyWindow("Laplacian")
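
The excerpt starts after its setup; the missing preamble presumably creates the capture and leaves the Laplacian buffers unallocated until the first frame, roughly:

import sys
import cv

laplace = None
planes = None
capture = cv.CaptureFromCAM(0)   # or cv.CreateFileCapture(sys.argv[1])
if not capture:
    print "Could not initialize capturing..."
    sys.exit(-1)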
Example No. 15
import cv

image = cv.LoadImage('miches.jpg', cv.CV_LOAD_IMAGE_GRAYSCALE)

# buffer for the smoothed copy of the original image
blur_image = cv.CreateImage(cv.GetSize(image), image.depth, image.nChannels)

# smooth the original image
cv.Smooth(image, blur_image, cv.CV_BLUR, 15, 15)

# buffer for the high-pass result
fil = cv.CreateImage(cv.GetSize(image), image.depth, image.nChannels)

# subtraction of the original minus the smoothed one
cv.AbsDiff(image, blur_image, fil)
cv.ShowImage('Image', fil)
cv.WaitKey()
cv.SaveImage('result_image.jpg', fil)
# ---------------------------------------------------------------------



from PIL import Image
from numpy import *
from pylab import *

import cv

im = cv.LoadImageM("miches.jpg", 1)
dst = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_16S, 3)
laplace = cv.Laplace(im, dst)
cv.SaveImage("miches_laplace.png", dst)
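
cv.SaveImage generally expects an 8-bit image; if writing the 16-bit signed Laplacian fails, an extra conversion step (an assumed addition) can be used:

dst8 = cv.CreateImage(cv.GetSize(dst), cv.IPL_DEPTH_8U, 3)
cv.ConvertScaleAbs(dst, dst8)
cv.SaveImage("miches_laplace_8bit.png", dst8)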
Example No. 16
import cv

img = cv.LoadImageM("2005_Nickel_Proof_Obv.tif", cv.CV_LOAD_IMAGE_GRAYSCALE)
eig_image = cv.CreateMat(img.rows, img.cols, cv.CV_32FC1)
temp_image = cv.CreateMat(img.rows, img.cols, cv.CV_32FC1)

# create the source image
canny_image = cv.CreateImage(cv.GetSize(img), 8, 1)

window_name = "Good Features To Track"
# create window and display the original picture in it
#cv.NamedWindow(window_name, 1)
#cv.SetZero(laplace)
#cv.ShowImage(window_name, img)

cv.Laplace(img, temp_image, 3)
cv.ShowImage("Laplace", temp_image)
#cv.SetZero(temp_image)
cv.Canny(img, canny_image, 50, 150)
cv.ShowImage("Canny", canny_image)

for (x, y) in cv.GoodFeaturesToTrack(img,
                                     eig_image,
                                     temp_image,
                                     20,
                                     0.04,
                                     1.0,
                                     useHarris=True):
    print "good feature at", x, y
    #Circle(img, center, radius, color, thickness=1, lineType=8, shift=0)
    cv.Circle(img, (x, y), 6, (255, 0, 0), 1, cv.CV_AA, 0)
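
The detected corners are drawn on img but the annotated image is never displayed in this excerpt; a small addition to show it and wait for a key:

cv.ShowImage(window_name, img)
cv.WaitKey(0)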