def draw_gui(image):
    # Reverse areas
    cv.cvRectangle(image, box_backwards_left[0], box_backwards_left[1],
                   cv.CV_RGB(255, 0, 0), 3, 8, 0)
    cv.cvRectangle(image, box_backwards_right[0], box_backwards_right[1],
                   cv.CV_RGB(255, 0, 0), 3, 8, 0)
    # Forward areas
    cv.cvRectangle(image, box_forward_left[0], box_forward_left[1],
                   cv.CV_RGB(0, 255, 0), 3, 8, 0)
    cv.cvRectangle(image, box_forward_right[0], box_forward_right[1],
                   cv.CV_RGB(0, 255, 0), 3, 8, 0)
def detectObject(image):
    grayscale = cv.cvCreateImage(size, 8, 1)
    cv.cvFlip(image, None, 1)
    cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)
    storage = cv.cvCreateMemStorage(0)
    cv.cvClearMemStorage(storage)
    cv.cvEqualizeHist(grayscale, grayscale)
    cascade = cv.cvLoadHaarClassifierCascade(haar_file, cv.cvSize(1, 1))
    objects = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                     cv.CV_HAAR_DO_CANNY_PRUNING,
                                     cv.cvSize(100, 100))

    # Draw dots where hands are
    if objects:
        for i in objects:
            #cv.cvRectangle(image, cv.cvPoint( int(i.x), int(i.y)),
            #               cv.cvPoint(int(i.x+i.width), int(i.y+i.height)),
            #               cv.CV_RGB(0,255,0), 3, 8, 0)
            center = cv.cvPoint(int(i.x + i.width / 2),
                                int(i.y + i.height / 2))
            cv.cvCircle(image, center, 10, cv.CV_RGB(0, 0, 0), 5, 8, 0)
            # Left side check
            if (box_forward_left[0].x < center.x < box_backwards_left[1].x
                    and box_forward_left[0].y < center.y < box_backwards_left[1].y):
                set_speed('left', center)
            # Right side check
            if (box_forward_right[0].x < center.x < box_backwards_right[1].x
                    and box_forward_right[0].y < center.y < box_backwards_right[1].y):
                set_speed('right', center)
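# The wrapped comparisons above only test whether the detected hand centre lies
# inside a control area. A small helper like the following (hypothetical, not
# part of the original snippet) would make that intent explicit:
def point_in_rect(pt, top_left, bottom_right):
    """Return True if pt lies strictly inside the axis-aligned rectangle."""
    return (top_left.x < pt.x < bottom_right.x and
            top_left.y < pt.y < bottom_right.y)
# With it, the left-side check reduces to:
#     if point_in_rect(center, box_forward_left[0], box_backwards_left[1]):
#         set_speed('left', center)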
Example 3
def draw_bounding_boxes(cascade_list, img, r, g, b, width):
    if cascade_list:
        for rect in cascade_list:
            opencv.cvRectangle(
                img, opencv.cvPoint(int(rect.x), int(rect.y)),
                opencv.cvPoint(int(rect.x + rect.width),
                               int(rect.y + rect.height)),
                opencv.CV_RGB(r, g, b), width)
Example 4
def drawBox(x):
    """
    This is a template for a function that can be fed into VideoCapturePlayer
    It must take a CvMat, and return a CvMat.
    It draws a rectangle on the screen."""
    pt1, pt2 = cv.CvPoint(), cv.CvPoint()
    pt1.x = pt1.y = 200
    pt2.x = pt2.y = 250

    cv.cvRectangle(x, pt1, pt2, cv.CV_RGB(30, 0, 200))
    return x
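# Usage sketch (an assumption about the VideoCapturePlayer wiring mentioned in
# the docstring; the import path, keyword name and main() call are not
# confirmed by this snippet):
#     from pycam import VideoCapturePlayer
#     VideoCapturePlayer(processFunction=drawBox).main()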
Example 5
def draw_ellipse(image,
                 center,
                 axes,
                 angle,
                 start_angle=0.0,
                 end_angle=360.0,
                 color=(255, 0, 0),
                 thickness=1):
    center = cv.cvPoint(rnd(center[0]), rnd(center[1]))
    axes = cv.cvSize(rnd(axes[0]), rnd(axes[1]))
    color = cv.CV_RGB(color[0], color[1], color[2])
    cv.cvEllipse(image, center, axes, angle, start_angle, end_angle, color,
                 thickness)
Example 6
    def HarrisPoints(self, imgfile):
        self.points = []
        self.drawimg = highgui.cvLoadImage(imgfile)
        c = 1
        try:
            gray = cv.cvCreateImage(cv.cvGetSize(self.drawimg), 8, 1)
            cv.cvCvtColor(self.drawimg, gray, cv.CV_BGR2GRAY)
            eig = cv.cvCreateImage(cv.cvGetSize(self.drawimg), 32, 1)
            tmpimg = cv.cvCreateImage(cv.cvGetSize(self.drawimg), 32, 1)
            p = cv.cvGoodFeaturesToTrack(gray, eig, tmpimg, 100, 0.1, 20, None,
                                         7, 1, 0.04)
            for x in p:
                cv.cvCircle(self.drawimg, x, 3, cv.CV_RGB(0, 255, 0), 8, 0)
                self.points.append(x)

        except Exception as e:
            print(e)
            print('ERROR: problem handling ' + imgfile)
    def detect_face(self, img):
        """ Detect faces within an image, then draw around them.
			The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned 
			for accurate yet slow object detection. For a faster operation on real video 
			images the settings are: 
			scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING, 
			min_size=<minimum possible face size
		"""
        min_size = cv.cvSize(20, 20)
        image_scale = 1.3
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0
        gray = cv.cvCreateImage(cv.cvSize(img.width, img.height), 8, 1)
        small_img = cv.cvCreateImage(
            cv.cvSize(cv.cvRound(img.width / image_scale),
                      cv.cvRound(img.height / image_scale)), 8, 1)
        cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
        cv.cvResize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.cvEqualizeHist(small_img, small_img)
        cv.cvClearMemStorage(self.storage)

        if (self.cascade):
            t = cv.cvGetTickCount()
            faces = cv.cvHaarDetectObjects(small_img, self.cascade,
                                           self.storage, haar_scale,
                                           min_neighbors, haar_flags, min_size)
            t = cv.cvGetTickCount() - t
            #print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.));
            if faces:
                for r in faces:
                    pt1 = cv.cvPoint(int(r.x * image_scale),
                                     int(r.y * image_scale))
                    pt2 = cv.cvPoint(int((r.x + r.width) * image_scale),
                                     int((r.y + r.height) * image_scale))
                    cv.cvRectangle(img, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8,
                                   0)
        return img
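# Usage sketch (not from the original example): assuming `detector` is an
# instance of the surrounding class with self.cascade and self.storage already
# initialised, detect_face can be driven from a plain highgui capture loop:
#     capture = highgui.cvCreateCameraCapture(0)
#     highgui.cvNamedWindow('Faces', 1)
#     while True:
#         frame = highgui.cvQueryFrame(capture)
#         if not frame:
#             break
#         highgui.cvShowImage('Faces', detector.detect_face(frame))
#         if highgui.cvWaitKey(10) == '\x1b':  # ESC exits
#             break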
Example 8
def filter_and_render_mixed(image, corners):
    """
    Takes a numpy array of corners and a cvMat image.
    
    """
    n = 15
    footprint = ones((n, n))
    mx = maximum_filter(corners, footprint=footprint)
    local_maxima = (corners == mx) * (corners != zeros(
        corners.shape))  # make sure to remove completely dark points

    points = nonzero(local_maxima)
    del local_maxima

    points = array([points[0], points[1]]).transpose()
    L = []

    for each in points:
        L.append((corners[each[0], each[1]], each[0], each[1], None))
        i = cv.cvPoint(int(each[0]), int(each[1]))
        cv.cvCircle(image, i, 2, cv.CV_RGB(0, 0, 200), 3)

    #cv.cvCvtColor(grayimage, image, cv.CV_GRAY2RGB)
    return image
Example 9
def draw_target(img, x, y):
    width = 10
    color = cv.CV_RGB(0, 255, 0)

    size = cv.cvGetSize(img)

    #cv.cvSet2D(img,x,y,color);

    for i in range(width):
        for j in range(width):
            if i == 0 or j == 0 or i == width - 1 or j == width - 1:
                px = x + j - width / 2
                py = y + i - width / 2

                if px < 0:
                    px = 0
                if py < 0:
                    py = 0
                if px >= size.width:
                    px = size.width - 1
                if py >= size.height:
                    py = size.height - 1

                cv.cvSet2D(img, py, px, color)
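# For targets away from the image border, the same hollow square could likely
# be drawn with a single call (a sketch; note that cvRectangle clips rather
# than clamps at the edges, unlike the pixel loop above):
#     cv.cvRectangle(img, cv.cvPoint(x - width / 2, y - width / 2),
#                    cv.cvPoint(x + width / 2 - 1, y + width / 2 - 1),
#                    color, 1, 8, 0)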
Example 10
    def detect_squares(self, img):
        """ Find squares within the video stream and draw them """
        N = 11
        thresh = 5
        sz = cv.cvSize(img.width & -2, img.height & -2)
        timg = cv.cvCloneImage(img)
        gray = cv.cvCreateImage(sz, 8, 1)
        pyr = cv.cvCreateImage(cv.cvSize(sz.width / 2, sz.height / 2), 8, 3)
        # create empty sequence that will contain points -
        # 4 points per square (the square's vertices)
        squares = cv.cvCreateSeq(0, cv.sizeof_CvSeq, cv.sizeof_CvPoint,
                                 self.storage)
        squares = cv.CvSeq_CvPoint.cast(squares)

        # select the maximum ROI in the image
        # with the width and height divisible by 2
        subimage = cv.cvGetSubRect(timg, cv.cvRect(0, 0, sz.width, sz.height))

        # down-scale and upscale the image to filter out the noise
        cv.cvPyrDown(subimage, pyr, 7)
        cv.cvPyrUp(pyr, subimage, 7)
        tgray = cv.cvCreateImage(sz, 8, 1)
        # find squares in every color plane of the image
        for c in range(3):
            # extract the c-th color plane
            channels = [None, None, None]
            channels[c] = tgray
            cv.cvSplit(subimage, channels[0], channels[1], channels[2], None)
            for l in range(N):
                # hack: use Canny instead of zero threshold level.
                # Canny helps to catch squares with gradient shading
                if (l == 0):
                    # apply Canny. Take the upper threshold from slider
                    # and set the lower to 0 (which forces edges merging)
                    cv.cvCanny(tgray, gray, 0, thresh, 5)
                    # dilate canny output to remove potential
                    # holes between edge segments
                    cv.cvDilate(gray, gray, None, 1)
                else:
                    # apply threshold if l!=0:
                    #     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                    cv.cvThreshold(tgray, gray, (l + 1) * 255 / N, 255,
                                   cv.CV_THRESH_BINARY)

                # find contours and store them all as a list
                count, contours = cv.cvFindContours(gray, self.storage,
                                                    cv.sizeof_CvContour,
                                                    cv.CV_RETR_LIST,
                                                    cv.CV_CHAIN_APPROX_SIMPLE,
                                                    cv.cvPoint(0, 0))

                if not contours:
                    continue

                # test each contour
                for contour in contours.hrange():
                    # approximate contour with accuracy proportional
                    # to the contour perimeter
                    result = cv.cvApproxPoly(
                        contour, cv.sizeof_CvContour, self.storage,
                        cv.CV_POLY_APPROX_DP,
                        cv.cvContourPerimeter(contour) * 0.02, 0)
                    # square contours should have 4 vertices after approximation
                    # relatively large area (to filter out noisy contours)
                    # and be convex.
                    # Note: absolute value of an area is used because
                    # area may be positive or negative - in accordance with the
                    # contour orientation
                    if (result.total == 4
                            and abs(cv.cvContourArea(result)) > 1000
                            and cv.cvCheckContourConvexity(result)):
                        s = 0
                        for i in range(5):
                            # find minimum angle between joint
                            # edges (maximum of cosine)
                            if (i >= 2):
                                t = abs(
                                    self.squares_angle(result[i],
                                                       result[i - 2],
                                                       result[i - 1]))
                                if s < t:
                                    s = t
                        # if cosines of all angles are small
                        # (all angles are ~90 degrees) then write the quadrangle
                        # vertices to the resultant sequence
                        if (s < 0.3):
                            for i in range(4):
                                squares.append(result[i])

        i = 0
        while i < squares.total:
            pt = []
            # read 4 vertices
            pt.append(squares[i])
            pt.append(squares[i + 1])
            pt.append(squares[i + 2])
            pt.append(squares[i + 3])

            # draw the square as a closed polyline
            cv.cvPolyLine(img, [pt], 1, cv.CV_RGB(0, 255, 0), 3, cv.CV_AA, 0)
            i += 4

        return img
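    # detect_squares calls self.squares_angle, which is not included in this
    # snippet. A plausible implementation (an assumption, mirroring the angle()
    # helper of the classic OpenCV squares sample, and assuming `import math`
    # at module level) returns the cosine of the angle at pt0 between the
    # vectors pt0->pt1 and pt0->pt2:
    def squares_angle(self, pt1, pt2, pt0):
        dx1 = pt1.x - pt0.x
        dy1 = pt1.y - pt0.y
        dx2 = pt2.x - pt0.x
        dy2 = pt2.y - pt0.y
        return ((dx1 * dx2 + dy1 * dy2) /
                math.sqrt((dx1 * dx1 + dy1 * dy1) *
                          (dx2 * dx2 + dy2 * dy2) + 1e-10))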
Example 11
    def timerEvent(self, ev):
        # Fetch a frame from the video camera
        frame = highgui.cvQueryFrame(self.cap)
        img_orig = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                    cv.IPL_DEPTH_8U, frame.nChannels)
        if (frame.origin == cv.IPL_ORIGIN_TL):
            cv.cvCopy(frame, img_orig)
        else:
            cv.cvFlip(frame, img_orig, 0)

        # Create a grey frame to clarify data
        img_grey = cv.cvCreateImage(cv.cvSize(img_orig.width, img_orig.height),
                                    8, 1)
        cv.cvCvtColor(img_orig, img_grey, cv.CV_BGR2GRAY)
        # Detect objects within the frame
        self.faces_storage = cv.cvCreateMemStorage(0)
        faces = self.detect_faces(img_grey)
        self.circles_storage = cv.cvCreateMemStorage(0)
        circles = self.detect_circles(img_grey)
        self.squares_storage = cv.cvCreateMemStorage(0)
        squares = self.detect_squares(img_grey, img_orig)
        self.lines_storage = cv.cvCreateMemStorage(0)
        lines = self.detect_lines(img_grey, img_orig)

        # Draw faces
        if faces:
            for face in faces:
                pt1, pt2 = self.face_points(face)
                cv.cvRectangle(img_orig, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8,
                               0)

        # Draw lines
        if lines:
            for line in lines:
                cv.cvLine(img_orig, line[0], line[1], cv.CV_RGB(255, 255, 0),
                          3, 8)
        # Draw circles
        if circles:
            for circle in circles:
                cv.cvCircle(
                    img_orig,
                    cv.cvPoint(cv.cvRound(circle[0]), cv.cvRound(circle[1])),
                    cv.cvRound(circle[2]), cv.CV_RGB(0, 0, 255), 3, 8, 0)

        # Draw squares
        if squares:
            i = 0
            while i < squares.total:
                pt = []
                # read 4 vertices
                pt.append(squares[i])
                pt.append(squares[i + 1])
                pt.append(squares[i + 2])
                pt.append(squares[i + 3])
                ## draw the square as a closed polyline
                cv.cvPolyLine(img_orig, [pt], 1, cv.CV_RGB(0, 255, 0), 3,
                              cv.CV_AA, 0)
                i += 4

        # Resize the image to display properly within the window
        #	CV_INTER_NN - nearest-neighbor interpolation,
        #	CV_INTER_LINEAR - bilinear interpolation (used by default)
        #	CV_INTER_AREA - resampling using pixel area relation. (preferred for image decimation)
        #	CV_INTER_CUBIC - bicubic interpolation.
        img_display = cv.cvCreateImage(cv.cvSize(self.width(), self.height()),
                                       8, 3)
        cv.cvResize(img_orig, img_display, cv.CV_INTER_NN)
        img_pil = adaptors.Ipl2PIL(img_display)
        s = StringIO()
        img_pil.save(s, "PNG")
        s.seek(0)
        q_img = QImage()
        q_img.loadFromData(s.read())
        bitBlt(self, 0, 0, q_img)
Example 12
def draw_line(image, pnt1, pnt2, color=(255, 0, 0)):
    cv.cvLine(image, cv.cvPoint(rnd(pnt1[0]), rnd(pnt1[1])),
              cv.cvPoint(rnd(pnt2[0]), rnd(pnt2[1])), cv.CV_RGB(*color))
Example 13
            image = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3)
            image.origin = frame.origin
            hsv = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3)
            hue = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
            mask = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
            backproject = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
            hist = cv.cvCreateHist([hdims], cv.CV_HIST_ARRAY, hranges, 1)

        # flip the image
        cv.cvFlip(frame, image, 1)

        cv.cvCvtColor(image, hsv, cv.CV_BGR2HSV)

        cv.cvLine(image, cv.cvPoint(0, image.height / 2),
                  cv.cvPoint(image.width, image.height / 2),
                  cv.CV_RGB(0, 255, 0), 2, 8, 0)

        cv.cvLine(image, cv.cvPoint(image.width / 2, 0),
                  cv.cvPoint(image.width / 2, image.height),
                  cv.CV_RGB(0, 255, 0), 2, 8, 0)

        if track_object:
            _vmin = vmin
            _vmax = vmax

            cv.cvInRangeS(hsv, cv.cvScalar(0, smin, min(_vmin, _vmax), 0),
                          cv.cvScalar(180, 256, max(_vmin, _vmax), 0), mask)

            cv.cvSplit(hsv, hue, None, None, None)

            if track_object < 0:
Example 14
def render_harris_points(image, filtered_coords):
    """This function renders points directly on an image with opencv"""
    pnts = [cv.cvPoint(int(i[1]), int(i[0])) for i in filtered_coords]
    for pnt in pnts:
        cv.cvCircle(image, pnt, 2, cv.CV_RGB(0, 200, 0), 3)
Example 15
            cv.cvCalcBackProject(hue, backproject, obj_hist)
            cv.cvAnd(backproject, mask, backproject)

            #niter, track_comp, track_box =
            cv.cvCamShift(
                backproject, track_window,
                cv.cvTermCriteria(cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10,
                                  1), track_comp, track_box)
            track_window = track_comp.rect

            #if backproject_mode:
            #    cvCvtColor( backproject, image, CV_GRAY2BGR )

            if not frame.origin:
                track_box.angle = -track_box.angle
            cv.cvEllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3,
                            cv.CV_AA, 0)

        # we can now display the images
        highgui.cvShowImage('Camera', frame)
        highgui.cvShowImage('Histogram', histimg)

        # handle events
        k = highgui.cvWaitKey(10)

        if k == '\x1b':
            # user has pressed the ESC key, so exit
            break
    highgui.cvReleaseCapture(capture)
Example 16
def process_image(slider_pos):
    """
    Define trackbar callback functon. This function find contours,
    draw it and approximate it by ellipses.
    """
    stor = cv.cvCreateMemStorage(0)

    # Threshold the source image. This is needed for cv.cvFindContours().
    cv.cvThreshold(image03, image02, slider_pos, 255, cv.CV_THRESH_BINARY)

    # Find all contours.
    nb_contours, cont = cv.cvFindContours(image02, stor, cv.sizeof_CvContour,
                                          cv.CV_RETR_LIST,
                                          cv.CV_CHAIN_APPROX_NONE,
                                          cv.cvPoint(0, 0))

    # Clear the working images.
    cv.cvZero(image02)
    cv.cvZero(image04)

    # This loop draws each contour and approximates it with an ellipse.
    for c in cont.hrange():
        # Number of points in the contour.
        count = c.total

        # The contour must have at least 6 points (required by cv.cvFitEllipse2).
        if (count < 6):
            continue

        # Alloc memory for contour point set.
        PointArray = cv.cvCreateMat(1, count, cv.CV_32SC2)
        PointArray2D32f = cv.cvCreateMat(1, count, cv.CV_32FC2)

        # Get contour point set.
        cv.cvCvtSeqToArray(c, PointArray,
                           cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))

        # Convert the integer CvPoint set to floating point (CV_32FC2).
        cv.cvConvert(PointArray, PointArray2D32f)

        # Fit an ellipse to the current contour.
        box = cv.cvFitEllipse2(PointArray2D32f)

        # Draw current contour.
        cv.cvDrawContours(image04, c, cv.CV_RGB(255, 255, 255),
                          cv.CV_RGB(255, 255, 255), 0, 1, 8, cv.cvPoint(0, 0))

        # Convert ellipse data from float to integer representation.
        center = cv.CvPoint()
        size = cv.CvSize()
        center.x = cv.cvRound(box.center.x)
        center.y = cv.cvRound(box.center.y)
        size.width = cv.cvRound(box.size.width * 0.5)
        size.height = cv.cvRound(box.size.height * 0.5)
        box.angle = -box.angle

        # Draw ellipse.
        cv.cvEllipse(image04, center, size, box.angle, 0, 360,
                     cv.CV_RGB(0, 0, 255), 1, cv.CV_AA, 0)

    # Show the image using HighGUI.
    highgui.cvShowImage("Result", image04)
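# Usage sketch (an assumption following the classic fitellipse sample, not
# shown in this snippet): process_image is registered as a HighGUI trackbar
# callback, with the globals image02/image03/image04 created beforehand.
#     slider_pos = 70
#     highgui.cvNamedWindow("Result", 1)
#     highgui.cvCreateTrackbar("Threshold", "Result", slider_pos, 255,
#                              process_image)
#     process_image(slider_pos)
#     highgui.cvWaitKey(0)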
Example 17
################################### TYPICAL USAGE ############################
## 1 - run the program: > python fingerTracker.py (-h to see the options)
## 2 - tune the parameters for optimal detection.  (see: trackbars + key 'b' to refresh the background subtraction)
## 3 - key 's' to save the configuration (see: background.bmp and config)
## 4 - restart the program with the -noGUI option => the program runs without a graphical interface
############################################################################
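## Sketch of the key handling described in the notes above (hypothetical
## helper names, not taken from fingerTracker.py itself):
##     k = highgui.cvWaitKey(10)
##     if k == 'b':
##         refresh_background()      # re-capture the background frame
##     elif k == 's':
##         save_configuration()      # write background.bmp and the config file
##     elif k == '\x1b':
##         break                     # ESC quits the main loop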

##################################TODO######################################
##      * pointer inertia for identitePointeur => do not change the id for no reason
##      * better initialisation before calling identitePointeur
##      * when no point is seen for a certain time, reset the pointer information
##
#############################################################################
# definition of some constants
color = cv.CV_RGB(0, 255, 255)
## light blue
color2 = cv.CV_RGB(0, 0, 255)
## dark blue
critere = cv.CvTermCriteria()  ## utility for cvMeanShift / cvCamShift
critere.type = cv.CV_TERMCRIT_ITER + cv.CV_TERMCRIT_EPS
##
critere.epsilon = 0.0
##

#  ########  tuning parameters ########  #
nb_div_zone = [4]
## the image is divided into four nb_div_zone times (see: trackbar)
seuil_binary = [254]
## threshold used to binarize the image (see: trackbar)
gain = [3]