Example #1
def runtracking():
    global rgb_image, hsv_image, hsvmouse, pausecam, hsvgreen, hsvyellow, hsvblue, hsvred, homographycomputed
    global hsvyellowtab, hsvrange
    global homography, pose_flag
    global hsvyellowmin, hsvyellowmax, hsvgreenmin, hsvgreenmax, hsvbluemin, hsvbluemax, hsvredmin, hsvredmax
    global cycloppoint, righteyepoint, lefteyepoint
    global capture, size_image
    global yellowmask_image, greenmask_image, redmask_image, bluemask_image
    global p_num, modelepoints, blob_centers
    global rx, ry, rz
    global background

    size_thumb = [size_image[0] / 2, size_image[1] / 2]

    thumbgreen = cv.CreateImage(size_thumb, cv.IPL_DEPTH_8U, 1)
    thumbred = cv.CreateImage(size_thumb, cv.IPL_DEPTH_8U, 1)
    thumbblue = cv.CreateImage(size_thumb, cv.IPL_DEPTH_8U, 1)
    thumbyellow = cv.CreateImage(size_thumb, cv.IPL_DEPTH_8U, 1)

    cv.NamedWindow("GreenBlobDetection", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("GreenBlobDetection", thumbgreen)

    cv.NamedWindow("YellowBlobDetection", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("YellowBlobDetection", thumbyellow)

    cv.NamedWindow("BlueBlobDetection", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("BlueBlobDetection", thumbblue)

    cv.NamedWindow("RedBlobDetection", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("RedBlobDetection", thumbred)

    rgb_image = cv.QueryFrame(capture)
    cv.NamedWindow("Source", cv.CV_WINDOW_AUTOSIZE)

    cv.SetMouseCallback("Source", getObjectHSV)

    print "Hit ESC key to quit..."

    # infinite loop for processing
    while True:

        time.sleep(0.02)
        blobcentergreen = findBlob(rgb_image, hsv_image, greenmask_image,
                                   greenblob_image, hsvrange, hsvgreenmin,
                                   hsvgreenmax, 'g')
        blobcenteryellow = findBlob(rgb_image, hsv_image, yellowmask_image,
                                    yellowblob_image, hsvrange, hsvyellowmin,
                                    hsvyellowmax, 'y')
        blobcenterblue = findBlob(rgb_image, hsv_image, bluemask_image,
                                  blueblob_image, hsvrange, hsvbluemin,
                                  hsvbluemax, 'b')
        blobcenterred = findBlob(rgb_image, hsv_image, redmask_image,
                                 redblob_image, hsvrange, hsvredmin, hsvredmax,
                                 'r')

        if not pausecam:
            if blobcentergreen is not None:
                cv.Resize(greenblob_image, thumbgreen)
                cv.ShowImage("GreenBlobDetection", thumbgreen)
                # print "green center: %d %d %d" % blobcentergreen
            if blobcenteryellow is not None:
                cv.Resize(yellowblob_image, thumbyellow)
                cv.ShowImage("YellowBlobDetection", thumbyellow)
                # print "yellow center: %d %d %d" % blobcenteryellow
            if blobcenterblue is not None:
                cv.Resize(blueblob_image, thumbblue)
                cv.ShowImage("BlueBlobDetection", thumbblue)
                # print "blue center: %d %d %d" % blobcenterblue
            if blobcenterred is not None:
                cv.Resize(redblob_image, thumbred)
                cv.ShowImage("RedBlobDetection", thumbred)
                # print "red center: %d %d %d" % blobcenterred

        cv.ShowImage("Source", rgb_image)
        c = cv.WaitKey(7) % 0x100
        if c == 27:
            break
        if c == ord('p') or c == ord('P'):
            pausecam = not pausecam

        if c == ord('y'):
            hsvyellowtab.append(hsvmouse)
            hsvyellowmin = mintab(hsvyellowtab)
            hsvyellowmax = maxtab(hsvyellowtab)
            print "minyellow"
            print hsvyellowmin
            print "maxyellow"
            print hsvyellowmax
        if c == ord('Y'):
            if (len(hsvyellowtab) > 0):
                hsvyellowtab.pop(len(hsvyellowtab) - 1)
            if (len(hsvyellowtab) != 0):
                hsvyellowmin = mintab(hsvyellowtab)
                hsvyellowmax = maxtab(hsvyellowtab)
            else:
                hsvyellowmin = [255, 255, 255]
                hsvyellowmax = [0, 0, 0]
        if c == ord('g'):
            hsvgreentab.append(hsvmouse)
            hsvgreenmin = mintab(hsvgreentab)
            hsvgreenmax = maxtab(hsvgreentab)
            print "mingreen"
            print hsvgreenmin
            print "maxgreen"
            print hsvgreenmax
        if c == ord('G'):
            if (len(hsvgreentab) > 0):
                hsvgreentab.pop(len(hsvgreentab) - 1)
            if (len(hsvgreentab) != 0):
                hsvgreenmin = mintab(hsvgreentab)
                hsvgreenmax = maxtab(hsvgreentab)
            else:
                hsvgreenmin = [255, 255, 255]
                hsvgreenmax = [0, 0, 0]
        if c == ord('r'):
            hsvredtab.append(hsvmouse)
            hsvredmin = mintab(hsvredtab)
            hsvredmax = maxtab(hsvredtab)
            print "minred"
            print hsvredmin
            print "maxred"
            print hsvredmax
        if c == ord('R'):
            if (len(hsvredtab) > 0):
                hsvredtab.pop(len(hsvredtab) - 1)
            if (len(hsvredtab) != 0):
                hsvredmin = mintab(hsvredtab)
                hsvredmax = maxtab(hsvredtab)
            else:
                hsvredmin = [255, 255, 255]
                hsvredmax = [0, 0, 0]
            print "RRR"
            print "min red"
            print hsvredmin
            print "max red"
            print hsvredmax
        if c == ord('b'):
            hsvbluetab.append(hsvmouse)
            hsvbluemin = mintab(hsvbluetab)
            hsvbluemax = maxtab(hsvbluetab)
            print "minblue"
            print hsvbluemin
            print "maxblue"
            print hsvbluemax
        if c == ord('B'):
            if (len(hsvbluetab) > 0):
                hsvbluetab.pop(len(hsvbluetab) - 1)
            if (len(hsvbluetab) != 0):
                hsvbluemin = mintab(hsvbluetab)
                hsvbluemax = maxtab(hsvbluetab)
            else:
                hsvbluemin = [255, 255, 255]
                hsvbluemax = [0, 0, 0]
        if c == ord('s'):
            f = open("last_range.txt", 'w')
            for hsv in [hsvredmin, hsvredmax, hsvgreenmin, hsvgreenmax,
                        hsvbluemin, hsvbluemax, hsvyellowmin, hsvyellowmax]:
                for v in hsv:
                    f.write(str(int(v)) + ',')
                f.write('\n')
            f.close()
            print 'saved ranges'
        if c == ord('l'):
            f = open("last_range.txt", 'r')
            lines = f.readlines()
            f.close()
            [hsvredmin, hsvredmax, hsvgreenmin, hsvgreenmax, hsvbluemin,
             hsvbluemax, hsvyellowmin, hsvyellowmax] = [
                 [int(v) for v in line.split(',')[:-1]] for line in lines]
            print "loaded ranges:\n"
            print lines
        # if c == ord('R'): step = 0
        if not pausecam:
            rgb_image = cv.QueryFrame(capture)
            cv.Flip(rgb_image, rgb_image, 1)  # flip l/r

        # After blob center detection we need to launch pose estimation
        if (blobcentergreen is not None and blobcenteryellow is not None
                and blobcenterblue is not None and blobcenterred is not None):
            # order is yellow, blue, red, green
            pose_flag = 1
            blob_centers = []
            blob_centers.append((blobcenteryellow[0] - size_image[0] / 2,
                                 blobcenteryellow[1] - size_image[1] / 2))
            blob_centers.append((blobcenterblue[0] - size_image[0] / 2,
                                 blobcenterblue[1] - size_image[1] / 2))
            blob_centers.append((blobcenterred[0] - size_image[0] / 2,
                                 blobcenterred[1] - size_image[1] / 2))
            blob_centers.append((blobcentergreen[0] - size_image[0] / 2,
                                 blobcentergreen[1] - size_image[1] / 2))

            # get the tracking matrix (orientation and position) result with
            # POSIT method in the tracker (camera) referential
            matrix = find_pose(p_num, blob_centers, modelepoints)

            # We want the tracking result in the world referential, i.e. 60 cm
            # from the middle of the screen, with Y up and Z behind you.
            # The tracker referential is the camera referential: X axis
            # pointing left, Y axis pointing down, Z axis pointing behind you,
            # with the camera as origin.

            # We thus pre-multiply to get the tracking results in the world
            # referential rather than in the tracker (camera) referential.
            pre_tranform_matrix = WordToTrackerTransform(matrix)

            # We do not want to track the center of the body referential (the
            # top-right point of the glasses) but the middle of the two eyes
            # in monoscopic (the cyclops eye), or the left and right eyes in
            # stereoscopic.

            # We thus post-multiply the world tracking results, using the
            # referential of the eye in the body (glasses) referential.
            pre_tranform_matrix_post_cylcope_eye = BodyToCyclopsEyeTransform(
                pre_tranform_matrix)
            poscyclope = [
                pre_tranform_matrix_post_cylcope_eye[3][0],
                pre_tranform_matrix_post_cylcope_eye[3][1],
                pre_tranform_matrix_post_cylcope_eye[3][2]
            ]
            # print "poscylope", poscyclope

            pre_tranform_matrix_post_left_eye = BodyToLeftEyeTransform(
                pre_tranform_matrix)
            posleft = [
                pre_tranform_matrix_post_left_eye[3][0],
                pre_tranform_matrix_post_left_eye[3][1],
                pre_tranform_matrix_post_left_eye[3][2]
            ]
            # print "posleft",posleft

            pre_tranform_matrix_post_right_eye = BodyToRightEyeTransform(
                pre_tranform_matrix)
            posright = [
                pre_tranform_matrix_post_right_eye[3][0],
                pre_tranform_matrix_post_right_eye[3][1],
                pre_tranform_matrix_post_right_eye[3][2]
            ]
            # print "posright",posright

            sendPosition("/tracker/head/pos_xyz/cyclope_eye", poscyclope)
            sendPosition("/tracker/head/pos_xyz/left_eye", posleft)
            sendPosition("/tracker/head/pos_xyz/right_eye", posright)
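
# mintab(), maxtab() and find_pose() are used above but not included in the
# snippet. A minimal sketch of plausible implementations (assumed helpers,
# not the author's code; FOCAL_LENGTH in pixels is an assumed constant):

def mintab(hsvtab):
    # element-wise minimum over a list of HSV triples
    return [min(hsv[i] for hsv in hsvtab) for i in range(3)]

def maxtab(hsvtab):
    # element-wise maximum over a list of HSV triples
    return [max(hsv[i] for hsv in hsvtab) for i in range(3)]

def find_pose(p_num, image_points, model_points):
    # Pose from 2D/3D correspondences via the old-cv POSIT API
    # (p_num, the number of points, is unused in this sketch)
    posit_object = cv.CreatePOSITObject(model_points)
    rot, trans = cv.POSIT(posit_object, image_points, FOCAL_LENGTH,
                          (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 100, 1e-4))
    # Row-major 4x4 matrix with the translation in the last row, matching
    # how runtracking() reads matrix[3][0..2] as the position
    return [[rot[0][0], rot[0][1], rot[0][2], 0.0],
            [rot[1][0], rot[1][1], rot[1][2], 0.0],
            [rot[2][0], rot[2][1], rot[2][2], 0.0],
            [trans[0], trans[1], trans[2], 1.0]]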
Example #2
    def OnPaint(self, evt):
        if not self.timer.IsRunning():
            dc = wx.BufferedDC(wx.ClientDC(self), wx.NullBitmap,
                               wx.BUFFER_VIRTUAL_AREA)
            dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0)))
            return

        # Capture the image
        if hasattr(cv, "QueryFrame"):

            # Old OpenCV version
            image_scale = 2
            min_size = (20, 20)
            haar_scale = 1.2
            min_neighbors = 2
            haar_flags = 0

            frame = cv.QueryFrame(CAMERA)
            cv.CvtColor(frame, frame, cv.CV_BGR2RGB)
            Img = wx.EmptyImage(frame.width, frame.height)
            Img.SetData(frame.tostring())
            self.bmp = wx.BitmapFromImage(Img)
            del Img
            largeur, hauteur = frame.width, frame.height

            gray = cv.CreateImage((largeur, hauteur), 8, 1)
            small_img = cv.CreateImage((cv.Round(
                frame.width / image_scale), cv.Round(hauteur / image_scale)),
                                       8, 1)
            cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)
            cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
            cv.EqualizeHist(small_img, small_img)
            listeVisages = cv.HaarDetectObjects(small_img, CASCADE,
                                                cv.CreateMemStorage(0),
                                                haar_scale, min_neighbors,
                                                haar_flags, min_size)

        else:
            # New OpenCV version
            image_scale = 1
            ret, frame = CAMERA.read()
            hauteur, largeur = frame.shape[:2]
            frame = cv.resize(frame, (largeur, hauteur),
                              interpolation=cv.INTER_LINEAR)
            frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
            self.bmp = wx.Bitmap.FromBuffer(largeur, hauteur, frame)

            gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            listeVisages = CASCADE.detectMultiScale(gray, 1.3, 5)

        # Display the image
        x, y = (0, 0)
        try:
            dc = wx.BufferedDC(wx.ClientDC(self), wx.NullBitmap,
                               wx.BUFFER_VIRTUAL_AREA)
            try:
                dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0)))
            except:
                pass
            dc.Clear()
            dc.DrawBitmap(self.bmp, x, y)

            # Draw rectangles around the detected faces
            for (x, y, w, h) in listeVisages:
                dc.SetBrush(wx.TRANSPARENT_BRUSH)
                dc.SetPen(wx.Pen(wx.Colour(255, 0, 0), 2))
                dc.DrawRectangle(x * image_scale, y * image_scale,
                                 w * image_scale, h * image_scale)

            self.listeVisages = listeVisages
            del dc

        except:
            pass
Example #3
    if len(args) != 1:
        parser.print_help()
        sys.exit(1)

    input_name = args[0]
    if input_name.isdigit():
        capture = cv.CreateCameraCapture(int(input_name))
    else:
        capture = None

    cv.NamedWindow("result", 1)

    if capture:
        frame_copy = None
        while True:
            frame = cv.QueryFrame(capture)
            if not frame:
                cv.WaitKey(0)
                break
            if not frame_copy:
                frame_copy = cv.CreateImage((frame.width, frame.height),
                                            cv.IPL_DEPTH_8U, frame.nChannels)
            if frame.origin == cv.IPL_ORIGIN_TL:
                cv.Copy(frame, frame_copy)
            else:
                cv.Flip(frame, frame_copy, 0)

            detect_and_draw(frame_copy, cascade)

            if cv.WaitKey(10) >= 0:
                break
Example #4
if __name__ == "__main__":
    motion = 0
    capture = 0

    if len(sys.argv) == 1:
        capture = cv.CreateCameraCapture(0)
    elif len(sys.argv) == 2 and sys.argv[1].isdigit():
        capture = cv.CreateCameraCapture(int(sys.argv[1]))
    elif len(sys.argv) == 2:
        capture = cv.CreateFileCapture(sys.argv[1])

    if not capture:
        print "Could not initialize capturing..."
        sys.exit(-1)

    cv.NamedWindow("Motion", 1)
    while True:
        image = cv.QueryFrame(capture)
        if (image):
            if (not motion):
                motion = cv.CreateImage((image.width, image.height), 8, 3)
                cv.Zero(motion)
                #motion.origin = image.origin
            update_mhi(image, motion, 30)
            cv.ShowImage("Motion", motion)
            if (cv.WaitKey(10) != -1):
                break
        else:
            break
    cv.DestroyWindow("Motion")
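
# update_mhi() is defined elsewhere in this program; it is a motempl-style
# motion-history update. A minimal sketch under that assumption (buffer names
# and MHI_DURATION are illustrative, following the classic motempl.py sample):
import time

MHI_DURATION = 1.0  # seconds of motion history to keep
mhi = None          # 32F motion-history image, created on first call
prev_gray = None    # previous gray frame

def update_mhi(img, dst, diff_threshold):
    global mhi, prev_gray
    timestamp = time.clock()  # current time, in seconds
    size = cv.GetSize(img)
    gray = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    if mhi is None:
        mhi = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        cv.Zero(mhi)
        prev_gray = cv.CloneImage(gray)
    silh = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    cv.AbsDiff(gray, prev_gray, silh)  # frame difference as the silhouette
    cv.Copy(gray, prev_gray)
    cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY)
    cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION)
    # Scale the float history into a visible 8-bit mask, shown in one channel
    mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    cv.ConvertScale(mhi, mask, 255.0 / MHI_DURATION,
                    (MHI_DURATION - timestamp) * 255.0 / MHI_DURATION)
    cv.Merge(mask, None, None, None, dst)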
Example #5
    def capture(self):
        frame = cv.QueryFrame(self.cam)
        return frame  # (the fragment ends here; returning the frame is assumed)
Example #6
gray = cv.CreateImage((width, height), 8, 1)  # will hold the current frame
prev_gray = cv.CreateImage((width, height), 8, 1)  # frame at t-1 (missing from the source)

# Scratch pyramid buffers for cv.CalcOpticalFlowPyrLK; (width + 8, height / 3)
# is the buffer-size convention from the old C API (the source had the two
# dimensions swapped, and CreateImage expects a channel count of 1 here)
prevPyr = cv.CreateImage((width + 8, height / 3), 8, 1)  # pyramid buffer at t-1
currPyr = cv.CreateImage((width + 8, height / 3), 8, 1)  # idem at t

max_count = 500
qLevel = 0.01
minDist = 10
prev_points = []  #Points at t-1
curr_points = []  #Points at t
lines = []  #To keep all the lines overtime

for f in xrange(nbFrames):  # nbFrames: the video's frame count, set up elsewhere

    frame = cv.QueryFrame(capture)  #Take a frame of the video

    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)  #Convert to gray
    output = cv.CloneImage(frame)

    prev_points = cv.GoodFeaturesToTrack(gray, None, None, max_count, qLevel,
                                         minDist)  #Find points on the image

    #Calculate the movement using the previous and the current frame using the previous points
    curr_points, status, err = cv.CalcOpticalFlowPyrLK(
        prev_gray, gray, prevPyr, currPyr, prev_points, (10, 10), 3,
        (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)

    # If a point's status is ok, keep it and record its motion line.
    # (The loop body is truncated in the source; a minimal completion follows.)
    k = 0
    for i in range(len(curr_points)):
        if status[i]:
            lines.append((prev_points[i], curr_points[i]))
            curr_points[k] = curr_points[i]
            k += 1
    curr_points = curr_points[:k]
    cv.Copy(gray, prev_gray)  # the current frame becomes the previous one
Example #7
    def __init__(self):
        rospy.init_node('avi2ros', anonymous=True)
        
        self.input = rospy.get_param("~input", "")
        self.output = rospy.get_param("~output", "video_output")       
        self.fps = rospy.get_param("~fps", 25)
        self.loop = rospy.get_param("~loop", False)
        self.width = rospy.get_param("~width", "")
        self.height = rospy.get_param("~height", "")
        self.start_paused = rospy.get_param("~start_paused", False)
        self.show_viz = not rospy.get_param("~headless", False)
        self.show_text = True

        image_pub = rospy.Publisher(self.output, Image, queue_size=10)
        
        rospy.on_shutdown(self.cleanup)
        
        video = cv.CaptureFromFile(self.input)
        fps = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FPS))
        
        """ Bring the fps up to the specified rate """
        try:
            fps = int(fps * self.fps / fps)
        except:
            fps = self.fps
    
        if self.show_viz:
            cv.NamedWindow("AVI Video", True) # autosize the display
            cv.MoveWindow("AVI Video", 650, 100)

        bridge = CvBridge()
                
        self.paused = self.start_paused
        self.keystroke = None
        self.restart = False
        
        # Get the first frame to display if we are starting in the paused state.
        frame = cv.QueryFrame(video)
        image_size = cv.GetSize(frame)
        
        if self.width and self.height and (self.width != image_size[0] or self.height != image_size[1]):
            rospy.loginfo("Resizing! " + str(self.width) + " x " + str(self.height))
            resized_frame = cv.CreateImage((self.width, self.height), frame.depth, frame.channels)
            cv.Resize(frame, resized_frame)
            frame = cv.CloneImage(resized_frame)
                        
        text_frame = cv.CloneImage(frame)
        cv.Zero(text_frame)
    
        while not rospy.is_shutdown():
            """ Handle keyboard events """
            self.keystroke = cv.WaitKey(1000 / fps)

            """ Process any keyboard commands """
            if 32 <= self.keystroke < 128:
                cc = chr(self.keystroke).lower()
                if cc == 'q':
                    """ The user pressed the q key, so exit """
                    rospy.signal_shutdown("User hit q key to quit.")
                elif cc == ' ':
                    """ Pause or continue the video """
                    self.paused = not self.paused
                elif cc == 'r':
                    """ Restart the video from the beginning """
                    self.restart = True
                elif cc == 't':
                    """ Toggle display of text help message """
                    self.show_text = not self.show_text
                
            if self.restart:
                #video = cv.CaptureFromFile(self.input)
                print "restarting video from beginning"
                cv.SetCaptureProperty(video, cv.CV_CAP_PROP_POS_AVI_RATIO, 0)
                self.restart = None
    
            if not self.paused:
                frame = cv.QueryFrame(video)
                if frame and self.width and self.height:
                    if self.width != image_size[0] or self.height != image_size[1]:
                        cv.Resize(frame, resized_frame)
                        frame = cv.CloneImage(resized_frame)
                
            if frame is None:
                if self.loop:
                    self.restart = True
            else:
                if self.show_text:
                    frame_size = cv.GetSize(frame)
                    text_font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.2, 1, 0, 1, 8)
                    cv.PutText(text_frame, "Keyboard commands:", (20, int(frame_size[1] * 0.6)), text_font, cv.RGB(255, 255, 0))
                    cv.PutText(text_frame, " ", (20, int(frame_size[1] * 0.65)), text_font, cv.RGB(255, 255, 0))
                    cv.PutText(text_frame, "space - toggle pause/play", (20, int(frame_size[1] * 0.72)), text_font, cv.RGB(255, 255, 0))
                    cv.PutText(text_frame, "     r - restart video from beginning", (20, int(frame_size[1] * 0.79)), text_font, cv.RGB(255, 255, 0))
                    cv.PutText(text_frame, "     t - hide/show this text", (20, int(frame_size[1] * 0.86)), text_font, cv.RGB(255, 255, 0))
                    cv.PutText(text_frame, "     q - quit the program", (20, int(frame_size[1] * 0.93)), text_font, cv.RGB(255, 255, 0))
                
                cv.Add(frame, text_frame, text_frame)
                if self.show_viz:
                    cv.ShowImage("AVI Video", text_frame)
                cv.Zero(text_frame)
                
                try:
                    test = np.asarray(frame[:,:])
                    publishing_image = bridge.cv2_to_imgmsg(test, "bgr8")
                    image_pub.publish(publishing_image)
                except CvBridgeError, e:
                    print e         
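
# A hypothetical invocation of this node (the parameter names come from the
# rospy.get_param() calls above; the package and script names are assumptions):
#
#   rosrun my_package avi2ros.py _input:=/path/to/video.avi \
#       _output:=video_output _fps:=25 _loop:=true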
Example #8
#main
font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 5, 5, 0, 3, 8)  #initialize
SignsList = ["a.jpg", "b.jpg", "c.jpg", "d.jpg", "e.jpg",
             "f.jpg"]  # list containing all the sign image names
imagesList = {}  # filled below, keyed by file name (the source pre-loaded "a.jpg" redundantly)

for e in SignsList:
    imagesList[e] = cv.LoadImage("signs/" + e, cv.CV_LOAD_IMAGE_GRAYSCALE)
    #imagesList.append(cv.LoadImage("signs/"+e,cv.CV_LOAD_IMAGE_GRAYSCALE))
cv.NamedWindow("Input", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("Gesture Space", cv.CV_WINDOW_AUTOSIZE)
matchresult = 1
p_capWebcam = cv.CaptureFromCAM(0)
while 1:
    p_imgOriginal = cv.QueryFrame(p_capWebcam)
    cv.Flip(p_imgOriginal, p_imgOriginal, 1)

    # capture from webcam
    p_gray = cv.CreateImage(cv.GetSize(p_imgOriginal), 8, 1)
    cv.CvtColor(p_imgOriginal, p_gray, cv.CV_BGR2GRAY)
    cv.SetImageROI(p_gray, (400, 200, 200, 200))
    # Region setting of fixed interest
    cv.Threshold(p_gray, p_gray, 100, 255, cv.CV_THRESH_BINARY_INV)

    cv.Rectangle(p_imgOriginal, (400, 200), (600, 400), (255, 0, 0), 4)
    j = 0
    for imageI in imagesList:  # walk the image list and test each image against the ROI (region of interest)
        #image_to_test=cv.LoadImage("signs/"+image_path,cv.CV_LOAD_IMAGE_GRAYSCALE)
        matchresult = compare_2_formes(p_gray, imagesList[imageI])  #comparison
        #print("le match est "+str(matchresult))
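
# compare_2_formes() is not included in the snippet. A minimal sketch of a
# binary-image shape comparison via contour matching (the function name and
# return convention are assumed; cv.MatchShapes is the real old-cv API):
def compare_2_formes(binary_roi, sign_image):
    storage = cv.CreateMemStorage(0)
    # FindContours modifies its input, so work on clones
    c1 = cv.FindContours(cv.CloneImage(binary_roi), storage,
                         cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)
    c2 = cv.FindContours(cv.CloneImage(sign_image), storage,
                         cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)
    if not c1 or not c2:
        return 1.0  # no contour found: report a poor match
    # Hu-moment based distance; lower means more similar shapes
    return cv.MatchShapes(c1, c2, cv.CV_CONTOURS_MATCH_I1)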
Example #9
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    if cascade:
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "time taken for detection = %gms" % (
            t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("video", img)

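# detect_and_draw() relies on module-level tuning globals that this snippet
# does not show. Plausible defaults (assumed values, matching the classic
# facedetect.py sample):
image_scale = 2      # downscale factor for faster detection
haar_scale = 1.2     # scale step between cascade passes
min_neighbors = 2    # neighboring rectangles needed to keep a detection
haar_flags = 0       # e.g. cv.CV_HAAR_DO_CANNY_PRUNING
min_size = (20, 20)  # smallest face size, in pixels of the scaled image
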
if __name__ == '__main__':

    parser = OptionParser(
        usage="usage: %prog [options] [filename|camera_index]")
    parser.add_option(
        "-c",
        "--cascade",
        action="store",
        dest="cascade",
        type="str",
        help="Haar cascade file, default %default",
        default="../data/haarcascades/haarcascade_frontalface_alt.xml")
    (options, args) = parser.parse_args()

    cascade = cv.Load(options.cascade)

    if len(args) != 1:
        parser.print_help()
        sys.exit(1)

    input_name = args[0]
    if input_name.isdigit():
        capture = cv.CreateCameraCapture(int(input_name))
    else:
        capture = None

    cv.NamedWindow("video", 1)

    # size of the video (hard-coded here, so the None branches below never run)
    width = 160
    height = 120

    if width is None:
        width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    else:
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, width)

    if height is None:
        height = int(
            cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
    else:
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)

    if capture:
        frame_copy = None
        while True:
            frame = cv.QueryFrame(capture)
            if not frame:
                cv.WaitKey(0)
                break
            if not frame_copy:
                frame_copy = cv.CreateImage((frame.width, frame.height),
                                            cv.IPL_DEPTH_8U, frame.nChannels)
            if frame.origin == cv.IPL_ORIGIN_TL:
                cv.Copy(frame, frame_copy)
            else:
                cv.Flip(frame, frame_copy, 0)

            detect_and_draw(frame_copy, cascade)

            if cv.WaitKey(10) >= 0:
                break
    else:
        image = cv.LoadImage(input_name, 1)
        detect_and_draw(image, cascade)
        cv.WaitKey(0)

    cv.DestroyWindow("video")
Example #10
#!/usr/bin/env python2

from cv2 import cv

cap = cv.CaptureFromCAM(0)
if cap:
    cv.NamedWindow("cam-test", cv.CV_WINDOW_AUTOSIZE)
    frame = cv.QueryFrame(cap)
    if frame:
        cv.ShowImage("cam-test", frame)
        cv.WaitKey(0)
        cv.SaveImage("cam-test.jpg", frame)
cv.DestroyWindow("cam-test")
Example #11
    def repeat():

        frame = cv.QueryFrame(capture)
        cv.ShowImage(":)", frame)
Example #12
    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        width = frame.width
        height = frame.height
        surface = width * height  # surface area of the image
        cursurface = 0  # holds the area that changed in the current frame

        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
        difference = None

        while True:
            color_image = cv.QueryFrame(self.capture)

            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0) #Remove false positives

            if not difference:  # first frame: initialize difference, temp and moving_average
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
            else:
                cv.RunningAvg(color_image, moving_average, 0.020, None) #Compute the average

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            #Convert the image so that it can be thresholded
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            cv.Dilate(grey_image, grey_image, None, 18) #to get object blobs
            cv.Erode(grey_image, grey_image, None, 10)

            # Find contours
            storage = cv.CreateMemStorage(0)
            contours = cv.FindContours(grey_image, storage, cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)

            backcontours = contours #Save contours

            while contours: #For all contours compute the area
                cursurface += cv.ContourArea(contours)
                contours = contours.h_next()

            avg = (cursurface * 100) / surface  # percentage of the frame area that changed
            if avg > self.ceil:
                print ("Something is moving !")
            # print avg, "%"
            cursurface = 0  # reset the changed area for the next frame

            #Draw the contours on the image
            # Draw the contours on the image
            _red = (0, 0, 255)    # red for external contours
            _green = (0, 255, 0)  # green for internal contours
            levels = 1  # 1: external contours only; 2: internal contours too; ...
            cv.DrawContours(color_image, backcontours, _red, _green, levels, 2,
                            cv.CV_FILLED)

            cv.ShowImage("Target", color_image)

            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
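
# A minimal sketch of the class around run(), assuming the names used above
# (self.capture, self.ceil); the 15% default threshold is illustrative:
class MotionDetector(object):
    def __init__(self, threshold=15):
        self.capture = cv.CaptureFromCAM(0)  # default webcam
        self.ceil = threshold  # percent of the frame area that must change
        cv.NamedWindow("Target", 1)  # window drawn to by run()

# usage: MotionDetector().run()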
Example #13
def savePicture():
    fileName = '{0}.jpg'.format(
        str(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')))
    filePath = os.path.join(picturesDir, fileName)
    img = cv.QueryFrame(capture)
    cv.SaveImage(filePath, img)
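
# savePicture() relies on module-level state that the snippet does not show.
# A minimal sketch of that setup (the pictures directory is an assumed
# example; note that the ':' characters in the timestamp make the filename
# invalid on Windows):
import os
from datetime import datetime
from cv2 import cv

picturesDir = os.path.expanduser("~/pictures")
capture = cv.CaptureFromCAM(0)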
Example #14
    def run(self):
        started = time.time()
        menu=np.zeros((500,600,3), np.uint8)
        menu[5:492,7 :592] = (0, 100, 220)
        cv2.putText(menu, "MENU", (195, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255),2)
        cv2.putText(menu, "_____", (190, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (70, 70, 70), 2)
        cv2.putText(menu, "Button 0 : Number", (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
        cv2.putText(menu, " 1: One  , 2: Two  , 3: Three", (10, 120), cv2.FONT_ITALIC, 0.5, (255, 255, 255), 1)
        cv2.putText(menu, "      4: Four  , 5: Five", (10, 140), cv2.FONT_ITALIC, 0.5, (255, 255, 255), 1)

        cv2.putText(menu, "Button 1 : OS Commands", (10, 170), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
        cv2.putText(menu, " 1: Play Music   , 2: Open Web  , 3: Kill Processes", (10, 190), cv2.FONT_ITALIC, 0.5, (255, 255, 255), 1)
        cv2.putText(menu, "    4: Open Text Files  , 5: Open Image Files", (10, 210), cv2.FONT_ITALIC, 0.5, (255, 255, 255), 1)

        cv2.putText(menu, "Button 2 : Sentences", (10, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
        cv2.putText(menu, " 1: Hey  , 2: How r u ?   , 3: I am Fine !", (10, 260), cv2.FONT_ITALIC, 0.5, (255, 255, 255), 1)
        cv2.putText(menu, "          4: See u  , 5: Bye", (10, 280), cv2.FONT_ITALIC, 0.5, (255, 255, 255), 1)

        cv2.putText(menu, "Button 3 : Animations", (10, 310), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
        cv2.putText(menu, " 1: Horizontal Motion  , 2: Vertical Motion  , 3: Diagonal Motion", (10, 330), cv2.FONT_ITALIC, 0.5, (255, 255, 255), 1)
        cv2.putText(menu, "         4: Fast Motion  , 5: Slow Motion", (10, 350), cv2.FONT_ITALIC, 0.5, (255, 255, 255), 1)

        #cv2.putText(menu, "Button 4 : Arduino", (10, 380), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
        #cv2.putText(menu, " 1: LED On  , 2: LED Off  , 3: Obstacle Detection", (10, 400), cv2.FONT_ITALIC, 0.5, (255, 255, 255), 1)
        #cv2.putText(menu, "      4: Not Defined  , 5: Not Defined", (10, 420), cv2.FONT_ITALIC, 0.5, (255, 255, 255), 1)

        cv2.imshow('Menu', menu)
        cv2.moveWindow('Menu',0,0)
        c_one = 1
        c_two = 1
        c_three = 1
        c_four = 1
        c_zero = 1
        c_check = 7
        c_button=0
        c_reac_time=0
        path = r'E:\project_main\shanta'  # raw string avoids backslash escapes
        files = os.listdir(path)
        coun=0
        print "Gesture Recognizer Started :\n Button 0"
        process=('notepad.exe','WINWORD.EXE','IEXPLORE.exe','ois.exe','dllhost.exe','excel.exe','mpc-hc.exe','wmplayer.exe','wordpad.exe','calc.exe','powerpnt.exe','firefox.exe')
        fle = open('abc.txt', 'w')
        print >> fle, "----------------------------------OUTPUT OF THE GESTURES-----------------------------------"
        print >> fle,"---Instructions---"
        print >> fle, "Button 0 : Number"
        print >> fle, "Button 1 : OS Commands"
        print >> fle, "Button 2 : Sentences"
        print >> fle, "Button 3 : Animations"
        #print >> fle, "Button 4 : Arduino"
        print >> fle, "--------------------------------------------------------------------------------------------"
        print >> fle, "-------------------------------------------OUTPUT-------------------------------------------"
        print >> fle, "Button No. 0"
        str = ""

        while True:

            #######
            #######
            #######
            ret, img = cap.read()
            cv2.rectangle(img, (400, 400), (100, 100), (0, 255, 0), 0)
            crop_img = img[100:400, 100:400]
            grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
            value = (35, 35)
            blurred = cv2.GaussianBlur(grey, value, 0)
            _, thresh1 = cv2.threshold(blurred, 127, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
            #cv2.imshow('Thresholded', thresh1)

            contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

            cnt = max(contours, key=lambda x: cv2.contourArea(x))

            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(crop_img, (x, y), (x + w, y + h), (0, 0, 255), 0)
            hull = cv2.convexHull(cnt)
            #drawing = np.zeros(crop_img.shape, np.uint8)
            #cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 0)
            #cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 0)
            hull = cv2.convexHull(cnt, returnPoints=False)

            count_defects = 0
            cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)
            defects = cv2.convexityDefects(cnt, hull)
            for i in range(defects.shape[0]):  # reuse the defects computed above
                s, e, f, d = defects[i, 0]
                start = tuple(cnt[s][0])
                end = tuple(cnt[e][0])
                far = tuple(cnt[f][0])
                a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
                b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
                c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
                angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)) * 57
                if angle <= 90:
                    count_defects += 1
                    cv2.circle(crop_img, far, 1, [0, 0, 255], -1)
                # dist = cv2.pointPolygonTest(cnt,far,True)
                cv2.line(crop_img, start, end, [0, 255, 0], 2)
                # cv2.circle(crop_img,far,5,[0,0,255],-1)
 
            c_reac_time=c_reac_time+1 
            if count_defects == 1:

                cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                c_one = c_one + 1
                c_two = c_three = c_four = c_zero = 1
                if (c_one == c_check):

                    if(c_button==0):
                        str = "two"
                        cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                    elif(c_button==1):
                        wb.open("www.rknec.edu")
                        str="Opened web"
                    elif(c_button==2):
                        str="How r u ?"
                    elif (c_button == 3):
                        tk = Tk()
                        xspeed=0
                        yspeed=3
                        canvas = Canvas(tk, width=WIDTHH, height=HEIGHTT)
                        tk.title("BALL ANIMATION")
                        canvas.pack()
                        coun = 1
                        ball = canvas.create_oval(10, 10, 60, 60, fill="blue")
                        while True:
                            try:
                                coun = coun + 1
                                if (coun == 500):
                                    tk.destroy()
                                    break
                                canvas.move(ball, xspeed, yspeed)
                                pos = canvas.coords(ball)
                                if pos[3] >= HEIGHTT or pos[1] <= 0:
                                    yspeed = -yspeed
                                # if pos[2] >= WIDTHH or pos[0] <= 0:
                                #     xspeed = -xspeed
                                tk.update()
                                time.sleep(0.01)

                            except Exception as e:
                                break

                        str = "Played Vertical Animation"
                        coun=0
                    cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                    speaker.Speak(str)
                    print >> fle, str


            elif count_defects == 2:

                cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                c_two = c_two + 1
                c_three = c_one = c_four = c_zero = 1
                if (c_two == c_check):

                    if (c_button == 0):
                        str = "Three"
                        cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
                    elif(c_button==1):
                        str = "Killed No Processes"
                        for i in process:
                            if (pc.processExists(i)):
                                 os.system("taskkill /f /im " + i)
                                 str = "Killed Process : " + i
                        cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 1)
                    elif(c_button==2):
                        str="i m fine"

                    elif(c_button==3):
                        tk = Tk()
                        xspeed=3
                        yspeed=3
                        canvas = Canvas(tk, width=WIDTHH, height=HEIGHTT)
                        tk.title("BALL ANIMATION")
                        canvas.pack()
                        coun = 1
                        ball = canvas.create_oval(10, 10, 60, 60, fill="blue")
                        while True:
                            try:
                                coun=coun+1
                                if(coun==500):
                                    tk.destroy()
                                    break
                                canvas.move(ball, xspeed, yspeed)
                                pos = canvas.coords(ball)
                                if pos[3] >= HEIGHTT or pos[1] <= 0:
                                    yspeed = -yspeed
                                if pos[2] >= WIDTHH or pos[0] <= 0:
                                    xspeed = -xspeed
                                tk.update()
                                time.sleep(0.01)

                            except Exception as e:
                                break

                        str="Played Diagonal Animation"
                        coun=0
                    cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 1)
                    speaker.Speak(str)
                    print >> fle, str

            elif count_defects == 3:

                cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                c_three = c_three + 1
                c_two = c_one = c_four = c_zero = 1
                if (c_three == c_check):

                    if (c_button == 0):
                        str = "Four"
                        cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                    elif(c_button==1):
                        files_txt = [i for i in files if i.endswith('.txt')]
                        for i in files_txt:
                            os.startfile( path + "\\" + i)
                        str="Opened text files"
                    elif(c_button==2):
                        str="see u"
                    elif (c_button == 3):
                        tk = Tk()
                        yspeed=7
                        xspeed=7
                        canvas = Canvas(tk, width=WIDTHH, height=HEIGHTT)
                        tk.title("BALL ANIMATION")
                        canvas.pack()
                        coun = 1
                        ball = canvas.create_oval(10, 10, 60, 60, fill="blue")
                        while True:
                            try:
                                coun = coun + 1
                                if (coun == 500):
                                    tk.destroy()
                                    break
                                canvas.move(ball, xspeed, yspeed)
                                pos = canvas.coords(ball)
                                if pos[3] >= HEIGHTT or pos[1] <= 0:
                                     yspeed = -yspeed
                                if pos[2] >= WIDTHH or pos[0] <= 0:
                                    xspeed = -xspeed
                                tk.update()
                                time.sleep(0.01)

                            except Exception as e:
                                break

                        str = "Played Fast Animation"
                        coun=0

                    cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                    speaker.Speak(str)
                    print >> fle, str

            elif count_defects == 4:

                cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                c_four = c_four + 1
                c_two = c_three = c_one = c_zero = 1
                if (c_four == c_check):

                    if (c_button == 0):
                        str = "Five"
                        cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                    elif (c_button == 1):
                        for i in files:
                            if i.endswith('.JPG'):
                                imgs = PIL.Image.open(path + "\\" + i)
                                imgs.show()

                        str = "Opened image files"

                    elif(c_button==2):
                        str="bye"
                    elif (c_button == 3):
                        tk = Tk()
                        yspeed=1
                        xspeed=1
                        canvas = Canvas(tk, width=WIDTHH, height=HEIGHTT)
                        tk.title("BALL ANIMATION")
                        canvas.pack()
                        coun = 1
                        ball = canvas.create_oval(10, 10, 60, 60, fill="blue")
                        while True:
                            try:
                                coun = coun + 1
                                if (coun == 500):
                                    tk.destroy()
                                    break
                                canvas.move(ball, xspeed, yspeed)
                                pos = canvas.coords(ball)
                                if pos[3] >= HEIGHTT or pos[1] <= 0:
                                     yspeed = -yspeed
                                if pos[2] >= WIDTHH or pos[0] <= 0:
                                    xspeed = -xspeed
                                tk.update()
                                time.sleep(0.01)

                            except Exception as e:
                                break

                        str = "Played Slow Animation"
                        coun=0

                    cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                    speaker.Speak(str)
                    print >> fle, str

            elif count_defects == 0:

                cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                c_zero = c_zero + 1
                c_two = c_three = c_four = c_one = 1
                if (c_zero == c_check):

                    if (c_button == 0):
                        str = "one"
                        cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                    elif(c_button==1):
                        os.system('start ratta.mp3')
                        str = "Playing music"
                    elif(c_button==2):
                        str="Hey"
                    elif (c_button == 3):
                        tk = Tk()
                        yspeed=0
                        xspeed=5
                        canvas = Canvas(tk, width=WIDTHH, height=HEIGHTT)
                        tk.title("BALL ANIMATION")
                        canvas.pack()
                        coun = 1
                        ball = canvas.create_oval(10, 10, 60, 60, fill="blue")
                        while True:
                            try:
                                coun = coun + 1
                                if (coun == 500):
                                    tk.destroy()
                                    break
                                canvas.move(ball, xspeed, yspeed)
                                pos = canvas.coords(ball)
                                # if pos[3] >= HEIGHTT or pos[1] <= 0:
                                #     yspeed = -yspeed
                                if pos[2] >= WIDTHH or pos[0] <= 0:
                                    xspeed = -xspeed
                                tk.update()
                                time.sleep(0.01)

                            except Exception as e:
                                break

                        str = "Played Horizontal Animation"
                        coun=0



                    cv2.putText(img, str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 1)
                    speaker.Speak(str)
                    print >>fle, str


            img[1:100, 540:640] = (0, 0, 255)  # draw the red "button" region
            cv2.imshow('Gesture', img)
            cv2.moveWindow('Gesture',650,0)
            #all_img = np.hstack((drawing, crop_img))
            #cv2.imshow('Contours', all_img)
            #####
            #####
            #####



            curframe1 = cv.QueryFrame(self.capture)
            curframe = curframe1[1:100, 540:640]  # watch only the red button region

            instant = time.time()  # get the timestamp of the frame

            self.processImage(curframe)  # Process the image

    #        if not self.isRecording:
            if self.somethingHasMoved():
                self.trigger_time = instant  # update the trigger time
                # wait 5 seconds after the webcam starts, for luminosity adjustment etc.
                if instant > started + 5:
                    if c_reac_time >= 10:
                        c_button = np.mod(c_button + 1, 4)
                        c_reac_time = 0
                        print datetime.now().strftime("%b %d, %H:%M:%S"), "Button No. ", c_button
                        print >> fle, datetime.now().strftime("%b %d, %H:%M:%S"), "Button No. ", c_button


            cv.Copy(self.frame2gray, self.frame1gray)
            c = cv.WaitKey(1) % 0x100
            if c == 27 :
                break
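
    # processImage(), somethingHasMoved() and the gray buffers (frame1gray,
    # frame2gray) are defined elsewhere in this class. A minimal sketch under
    # that assumption (the self.res buffer and 10% trigger are illustrative):
    def processImage(self, frame):
        # Convert the incoming color frame to gray for differencing
        cv.CvtColor(frame, self.frame2gray, cv.CV_RGB2GRAY)

    def somethingHasMoved(self):
        # Count pixels that changed between the previous and current gray frame
        cv.AbsDiff(self.frame1gray, self.frame2gray, self.res)
        cv.Threshold(self.res, self.res, 10, 255, cv.CV_THRESH_BINARY)
        nb_changed = cv.CountNonZero(self.res)
        # Trigger when more than 10% of the watched region changed
        return nb_changed > 0.1 * self.res.width * self.res.height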
Example #15
    def run(self):
        # Initialize
        #log_file_name = "tracker_output.log"
        #log_file = file( log_file_name, 'a' )

        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame(self.capture)

        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage(cv.GetSize(frame),
                                               cv.IPL_DEPTH_32F, 3)
        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage(display_image)

        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)

        # The difference between the running average and the current frame:
        difference = cv.CloneImage(display_image)

        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook = []
        frame_count = 0
        last_frame_entity_list = []

        t0 = time.time()

        # For toggling display:
        image_list = ["camera", "difference", "threshold", "display", "faces"]
        image_index = 0  # Index into image_list

        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1,
                                cv.CV_AA)
        text_coord = (5, 15)
        text_color = cv.CV_RGB(255, 255, 255)

        ###############################
        ### Face detection stuff
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_default.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt2.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_mcs_mouth.xml' )
        haar_cascade = cv.Load('haarcascades/haarcascade_eye.xml')
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt_tree.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_upperbody.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_profileface.xml' )

        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 3

        while True:

            # Capture frame from webcam
            camera_image = cv.QueryFrame(self.capture)

            frame_count += 1
            frame_t0 = time.time()

            # Create an image with interactive feedback:
            display_image = cv.CloneImage(camera_image)

            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage(display_image)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0)

            # Use the Running Average as the static background
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg(color_image, running_average_image, 0.320, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(running_average_image,
                            running_average_in_display_color_depth, 1.0, 0.0)

            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, running_average_in_display_color_depth,
                       difference)

            # Convert the image to greyscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Threshold the image to a black and white motion mask:
            cv.Threshold(grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY)
            # Smooth and threshold again to eliminate "sparkles"
            cv.Smooth(grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0)
            cv.Threshold(grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)

            grey_image_as_array = np.asarray(cv.GetMat(grey_image))
            non_black_coords_array = np.where(grey_image_as_array > 3)
            # Convert from np.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip(non_black_coords_array[1],
                                         non_black_coords_array[0])

            points = []  # was using this to hold either pixel coords or polygon coords
            bounding_box_list = []

            # Now calculate movements using the white pixels as "motion" data
            contour = cv.FindContours(grey_image, mem_storage,
                                      cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)

            while contour:

                bounding_rect = cv.BoundingRect(list(contour))
                point1 = (bounding_rect[0], bounding_rect[1])
                point2 = (bounding_rect[0] + bounding_rect[2],
                          bounding_rect[1] + bounding_rect[3])

                bounding_box_list.append((point1, point2))
                polygon_points = cv.ApproxPoly(list(contour), mem_storage,
                                               cv.CV_POLY_APPROX_DP)

                # To track polygon points only (instead of every pixel):
                #points += list(polygon_points)

                # Draw the contours:
                ###cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(0,255,0), levels, 3, 0, (0,0) )
                cv.FillPoly(grey_image, [list(polygon_points)],
                            cv.CV_RGB(255, 255, 255), 0, 0)
                cv.PolyLine(display_image, [polygon_points], 0,
                            cv.CV_RGB(255, 255, 255), 1, 0, 0)
                #cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)

                contour = contour.h_next()

            # Find the average size of the bbox (targets), then
            # remove any tiny bboxes (which are prolly just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
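            # The corner indices used below (left, right, top, bottom) are
            # module-level constants in the full program; assumed values:
            # left = top = 0 and right = bottom = 1, since each box is
            # (point1, point2) = ((x1, y1), (x2, y2)).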
            box_areas = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]
                box_areas.append(box_width * box_height)

                #cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)

            average_box_area = 0.0
            if len(box_areas):
                average_box_area = float(sum(box_areas)) / len(box_areas)

            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]

                # Only keep the box if it's not a tiny noise box:
                if (box_width * box_height) > average_box_area * 0.1:
                    trimmed_box_list.append(box)

            # Draw the trimmed box list:
            #for box in trimmed_box_list:
            #	cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )

            bounding_box_list = merge_collided_bboxes(trimmed_box_list)

            # Draw the merged box list:
            for box in bounding_box_list:
                cv.Rectangle(display_image, box[0], box[1],
                             cv.CV_RGB(0, 255, 0), 1)

            # Here are our estimate points to track, based on merged & trimmed boxes:
            estimated_target_count = len(bounding_box_list)

            # Don't allow target "jumps" from few to many or many to few.
            # Only change the number of targets up to one target per n seconds.
            # This fixes the "exploding number of targets" when something stops moving
            # and the motion erodes to disparate little puddles all over the place.

            if frame_t0 - last_target_change_t < .350:  # 1 change per 0.35 secs
                estimated_target_count = last_target_count
            else:
                if last_target_count - estimated_target_count > 1:
                    estimated_target_count = last_target_count - 1
                if estimated_target_count - last_target_count > 1:
                    estimated_target_count = last_target_count + 1
                last_target_change_t = frame_t0

            # Clip to the user-supplied maximum:
            estimated_target_count = min(estimated_target_count, max_targets)

            # The estimated_target_count at this point is the maximum number of targets
            # we want to look for.  If kmeans decides that one of our candidate
            # bboxes is not actually a target, we remove it from the target list below.

            # Using the numpy values directly (treating all pixels as points):
            points = np.array(non_black_coords_array, dtype='f')
            center_points = []

            if len(points):

                # If we have all the "target_count" targets from last frame,
                # use the previously known targets (for greater accuracy).
                # Need at least one target to look for:
                k_or_guess = max(estimated_target_count, 1)
                if len(codebook) == estimated_target_count:
                    k_or_guess = codebook

                #points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
                codebook, distortion = vq.kmeans(points, k_or_guess)

                # Convert to tuples (and draw it to screen)
                for center_point in codebook:
                    center_point = (int(center_point[0]), int(center_point[1]))
                    center_points.append(center_point)
                    #cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                    #cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)

            # Now we have targets that are NOT computed from bboxes -- just
            # movement weights (according to kmeans).  If two or more targets
            # fall inside the same bbox, average them into a single target.
            #
            # (Any kmeans targets not within a bbox are also kept.)
            trimmed_center_points = []
            removed_center_points = []

            for box in bounding_box_list:
                # Find the centers within this box:
                center_points_in_box = []

                for center_point in center_points:
                    if box[left][0] < center_point[0] < box[right][0] and \
                       box[top][1] < center_point[1] < box[bottom][1]:

                        # This point is within the box.
                        center_points_in_box.append(center_point)

                # Now see if there is more than one.  If so, merge them:
                if len(center_points_in_box) > 1:
                    # Merge them by averaging:
                    x_list = []
                    y_list = []
                    for point in center_points_in_box:
                        x_list.append(point[0])
                        y_list.append(point[1])

                    average_x = int(float(sum(x_list)) / len(x_list))
                    average_y = int(float(sum(y_list)) / len(y_list))

                    trimmed_center_points.append((average_x, average_y))

                    # Record that they were removed:
                    removed_center_points += center_points_in_box

                if len(center_points_in_box) == 1:
                    trimmed_center_points.append(
                        center_points_in_box[0])  # Just use it.

            # If there are any center_points not within a bbox, just use them.
            # (It's probably a cluster comprised of a bunch of small bboxes.)
            for center_point in center_points:
                if (center_point not in trimmed_center_points
                        and center_point not in removed_center_points):
                    trimmed_center_points.append(center_point)

            # Draw what we found:
            #for center_point in trimmed_center_points:
            #	center_point = ( int(center_point[0]), int(center_point[1]) )
            #	cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
            #	cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
            #	cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
            #	cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)

            # Determine if there are any new (or lost) targets:
            actual_target_count = len(trimmed_center_points)
            last_target_count = actual_target_count

            # Now build the list of physical entities (objects)
            this_frame_entity_list = []

            # An entity is a list: [ name, color, last_time_seen, last_known_coords ]

            for target in trimmed_center_points:

                # Is this a target near a prior entity (same physical entity)?
                entity_found = False
                entity_distance_dict = {}

                for entity in last_frame_entity_list:

                    entity_coords = entity[3]
                    delta_x = entity_coords[0] - target[0]
                    delta_y = entity_coords[1] - target[1]

                    distance = sqrt(pow(delta_x, 2) + pow(delta_y, 2))
                    entity_distance_dict[distance] = entity

                # Walk the candidate entities from nearest to furthest and
                # claim the first one that is not already claimed:
                distance_list = sorted(entity_distance_dict.keys())

                for distance in distance_list:

                    # Yes; see if we can claim the nearest one:
                    nearest_possible_entity = entity_distance_dict[distance]

                    # Don't consider entities that are already claimed:
                    if nearest_possible_entity in this_frame_entity_list:
                        #print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] )
                        continue

                    #print "Target %s: USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1])
                    # Found the nearest entity to claim:
                    entity_found = True
                    nearest_possible_entity[2] = frame_t0  # Update last_time_seen
                    nearest_possible_entity[3] = target  # Update the new location
                    this_frame_entity_list.append(nearest_possible_entity)
                    #log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_t0, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                    break

                if not entity_found:
                    # It's a new entity.
                    color = (random.randint(0, 255), random.randint(0, 255),
                             random.randint(0, 255))
                    name = hashlib.md5(str(frame_t0) +
                                       str(color)).hexdigest()[:6]
                    last_time_seen = frame_t0

                    new_entity = [name, color, last_time_seen, target]
                    this_frame_entity_list.append(new_entity)
                    #log_file.write( "%.3f FOUND %s %d %d\n" % ( frame_t0, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )

            # Now "delete" any not-found entities which have expired:
            entity_ttl = 1.0  # 1 sec.

            for entity in last_frame_entity_list:
                last_time_seen = entity[2]
                if frame_t0 - last_time_seen > entity_ttl:
                    # It's gone.
                    #log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_t0, entity[0], entity[3][0], entity[3][1]  ) )
                    pass
                else:
                    # Save it for next time... not expired yet:
                    this_frame_entity_list.append(entity)

            # For next frame:
            last_frame_entity_list = this_frame_entity_list

            # Draw the found entities to screen:
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
                cv.Circle(display_image, center_point, 20,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 15,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 10,
                          cv.CV_RGB(c[0], c[1], c[2]), 2)
                cv.Circle(display_image, center_point, 5,
                          cv.CV_RGB(c[0], c[1], c[2]), 3)

            #print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break

            # Toggle which image to show
            if chr(c) == 'd':
                image_index = (image_index + 1) % len(image_list)

            image_name = image_list[image_index]

            # Display frame to user
            if image_name == "camera":
                image = camera_image
                cv.PutText(image, "Camera (Normal)", text_coord, text_font,
                           text_color)
            elif image_name == "difference":
                image = difference
                cv.PutText(image, "Difference Image", text_coord, text_font,
                           text_color)
            elif image_name == "display":
                image = display_image
                cv.PutText(image, "Targets (w/AABBs and contours)", text_coord,
                           text_font, text_color)
            elif image_name == "threshold":
                # Convert the image to color.
                cv.CvtColor(grey_image, display_image, cv.CV_GRAY2RGB)
                image = display_image  # Re-use display image here
                cv.PutText(image, "Motion Mask", text_coord, text_font,
                           text_color)
            elif image_name == "faces":
                # Do face detection
                detect_faces(camera_image, haar_cascade, mem_storage)
                image = camera_image  # Re-use camera image here
                cv.PutText(image, "Face Detection", text_coord, text_font,
                           text_color)

            size = cv.GetSize(image)
            large = cv.CreateImage(
                (int(size[0] * display_ratio), int(size[1] * display_ratio)),
                image.depth, image.nChannels)
            cv.Resize(image, large, interpolation=cv.CV_INTER_CUBIC)
            cv.ShowImage("Target", large)

            if self.writer:
                cv.WriteFrame(self.writer, image)

            #log_file.flush()

            # If only using a camera, then there is no time.sleep() needed,
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()

            # If reading from a file, put in a forced delay:
            if not self.writer:
                delta_t = frame_t1 - frame_t0
                if delta_t < (1.0 / 15.0): time.sleep((1.0 / 15.0) - delta_t)

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float(frame_count) / time_delta
        print "Got %d frames. %.1f s. %f fps." % (frame_count, time_delta,
                                                  processed_fps)
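# A minimal, self-contained sketch (not part of the example above) of its two
# core ideas: cluster the moving pixels into target centers with scipy's
# kmeans, then greedily match each center to the nearest entity from the
# previous frame, expiring entities that have not been seen for `ttl` seconds.
# All names here are illustrative, not from the original code.
import time
import numpy as np
import scipy.cluster.vq as vq


def match_targets_to_entities(centers, entities, now, ttl=1.0):
    claimed = []
    for cx, cy in centers:
        # Candidate entities, nearest first:
        candidates = sorted(entities,
                            key=lambda e: (e['pos'][0] - cx) ** 2 +
                                          (e['pos'][1] - cy) ** 2)
        for e in candidates:
            if e not in claimed:
                e['pos'] = (cx, cy)  # update last known location
                e['seen'] = now      # update last_time_seen
                claimed.append(e)
                break
        else:
            claimed.append({'pos': (cx, cy), 'seen': now})  # new entity
    # Keep recently-seen entities even if unmatched this frame:
    return claimed + [e for e in entities
                      if e not in claimed and now - e['seen'] <= ttl]


points = np.array([[10, 10], [12, 11], [200, 150], [202, 149]], dtype='f')
codebook, distortion = vq.kmeans(points, 2)  # two cluster centers
centers = [(int(c[0]), int(c[1])) for c in codebook]
print match_targets_to_entities(centers, [], time.time())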
Example #16
0
    def OnPaint(self, evt):
        if not self.timer.IsRunning():
            dc = wx.BufferedDC(wx.ClientDC(self), wx.NullBitmap,
                               wx.BUFFER_VIRTUAL_AREA)
            dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0)))
            return

        # Capture the image
        frame = cv.QueryFrame(CAMERA)
        cv.CvtColor(frame, frame, cv.CV_BGR2RGB)
        Img = wx.EmptyImage(frame.width, frame.height)
        Img.SetData(frame.tostring())
        self.bmp = wx.BitmapFromImage(Img)
        width, height = frame.width, frame.height

        # Face detection
        min_size = (20, 20)
        image_scale = 2
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0

        gray = cv.CreateImage((frame.width, frame.height), 8, 1)
        small_img = cv.CreateImage((cv.Round(
            frame.width / image_scale), cv.Round(frame.height / image_scale)),
                                   8, 1)
        cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)
        cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.EqualizeHist(small_img, small_img)

        listeVisages = cv.HaarDetectObjects(small_img, CASCADE,
                                            cv.CreateMemStorage(0), haar_scale,
                                            min_neighbors, haar_flags,
                                            min_size)

        # Display the image
        x, y = (0, 0)
        try:
            dc = wx.BufferedDC(wx.ClientDC(self), wx.NullBitmap,
                               wx.BUFFER_VIRTUAL_AREA)
            try:
                dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0)))
            except:
                pass
            dc.Clear()
            dc.DrawBitmap(self.bmp, x, y)

            # Draw rectangles around the detected faces
            if listeVisages:
                for ((x, y, w, h), n) in listeVisages:
                    dc.SetBrush(wx.TRANSPARENT_BRUSH)
                    dc.SetPen(wx.Pen(wx.Colour(255, 0, 0), 2))
                    dc.DrawRectangle(x * image_scale, y * image_scale,
                                     w * image_scale, h * image_scale)

            self.listeVisages = listeVisages
            del dc
            del Img

        except TypeError:
            pass
        except wx.PyDeadObjectError:
            pass
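# The detection above runs on an image downscaled by image_scale, so the
# rectangles it returns must be mapped back to full-frame coordinates before
# drawing -- which is why each (x, y, w, h) is multiplied by image_scale.
# A tiny illustration with made-up values:
image_scale = 2
small_rect = (40, 30, 25, 25)  # (x, y, w, h) in the half-size image
x, y, w, h = [v * image_scale for v in small_rect]
print "full-frame rect:", (x, y, w, h)  # (80, 60, 50, 50)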
Example #17
0
    def run(self):
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
        backproject_mode = False
        print "hitting run section"
        x = 0
        while True:
            #print x
            #x = x + 1
            frame = cv.QueryFrame(self.capture)
            cv.Flip(frame, frame, 1)

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject([self.hue], backproject, hist)
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                print self.track_window
                (iters, (area, value, rect),
                 track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect
                print self.track_window
            try:
                #prints the center x and y value of the tracked ellipse
                coord = track_box[0]
                print "center = {}".format(coord)
                if (coord[0] < 320):
                    print "move right"
                # ser.write("R")
                elif (coord[0] == 320):
                    print "do nothing"
                else:
                    print "move left"
                # ser.write("L")
            except UnboundLocalError:
                print "track_box is None"

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram

            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x, y, w, h = self.selection
                cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

                sel = cv.GetSubRect(self.hue, self.selection)
                cv.CalcArrHist([sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                print track_box
                cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3,
                              cv.CV_AA, 0)

            if not backproject_mode:
                cv.ShowImage("CamShiftDemo", frame)
            else:
                cv.ShowImage("CamShiftDemo", backproject)
            cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
            elif c == ord("b"):
                backproject_mode = not backproject_mode
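# What CalcArrBackProject does, sketched in plain numpy: each pixel of the hue
# plane is replaced by the (scaled) histogram count of its own hue bin, so
# pixels whose colour matches the sampled selection light up. Toy values only.
import numpy as np

hue = np.array([[10, 10, 90],
                [10, 90, 90]], dtype=np.uint8)  # tiny hue plane, values 0..179
# Histogram of a "selection" (here: the left two columns), like CalcArrHist:
hist, _ = np.histogram(hue[:, :2], bins=180, range=(0, 180))
hist = hist.astype(np.float32) * (255.0 / hist.max())  # like ConvertScale
backproject = hist[hue]  # per-pixel bin lookup
print backproject  # hue-10 pixels score 255, hue-90 pixels score 85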
Example #18
0
#! /usr/bin/env python

import cv2.cv as cv

cap = cv.CreateFileCapture("../c/tree.avi")
img = cv.QueryFrame(cap)
print "Got frame of dimensions (", img.width, " x ", img.height, ")"

cv.NamedWindow("win", cv.CV_WINDOW_AUTOSIZE)
cv.ShowImage("win", img)
cv.MoveWindow("win", 200, 200)
cv.WaitKey(0)
Example #19
0
import cv2.cv as cv
import numpy

if __name__ == "__main__":
    cv.NamedWindow("camera", 1)

    capture = cv.CaptureFromCAM(0)

    paste = cv.CreateMat(960, 1280, cv.CV_8UC3)
    topleft = numpy.asarray(cv.GetSubRect(paste, (0, 0, 640, 480)))
    topright = numpy.asarray(cv.GetSubRect(paste, (640, 0, 640, 480)))
    bottomleft = numpy.asarray(cv.GetSubRect(paste, (0, 480, 640, 480)))
    bottomright = numpy.asarray(cv.GetSubRect(paste, (640, 480, 640, 480)))

    while True:
        img = cv.GetMat(cv.QueryFrame(capture))

        n = (numpy.asarray(img)).astype(numpy.uint8)

        # NB: cv.QueryFrame returns BGR, so channel 0 is blue and channel 2 is
        # red; the variable names below follow the original snippet.
        red = n[:, :, 0]
        grn = n[:, :, 1]
        blu = n[:, :, 2]

        topleft[:, :, 0] = 255 - grn
        topleft[:, :, 1] = red
        topleft[:, :, 2] = blu

        topright[:, :, 0] = blu
        topright[:, :, 1] = 255 - red
        topright[:, :, 2] = grn
Example #20
0
x_co = 0
y_co = 0


def on_mouse(event, x, y, flag, param):
    global x_co
    global y_co
    if (event == cv.CV_EVENT_MOUSEMOVE):
        x_co = x
        y_co = y


cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)
# capture=cv.CaptureFromFile("http://192.168.1.2:8080/videofeed?dummy=file.mjpeg")
font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 1, 0, 2, 8)
while True:
    src = cv.QueryFrame(capture)
    cv.Smooth(src, src, cv.CV_BLUR, 3)
    hsv = cv.CreateImage(cv.GetSize(src), 8, 3)
    thr = cv.CreateImage(cv.GetSize(src), 8, 1)
    cv.CvtColor(src, hsv, cv.CV_BGR2HSV)
    cv.SetMouseCallback("camera", on_mouse, 0)
    s = cv.Get2D(hsv, y_co, x_co)
    print "H:", s[0], "      S:", s[1], "       V:", s[2]
    cv.PutText(src,
               str(s[0]) + "," + str(s[1]) + "," + str(s[2]), (x_co, y_co),
               font, (55, 25, 255))
    cv.ShowImage("camera", src)
    if cv.WaitKey(10) == 27:
        break
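# Sanity check for the H/S/V values printed above: OpenCV's 8-bit hue runs
# 0..179 (not 0..359), and cv.Get2D is indexed (row, col) -- hence the
# (y_co, x_co) order in the loop. A pure-blue BGR pixel comes back as
# roughly (120, 255, 255):
import cv2.cv as cv

px = cv.CreateImage((1, 1), 8, 3)
cv.Set(px, (255, 0, 0))  # BGR: pure blue
px_hsv = cv.CreateImage((1, 1), 8, 3)
cv.CvtColor(px, px_hsv, cv.CV_BGR2HSV)
print cv.Get2D(px_hsv, 0, 0)  # (120.0, 255.0, 255.0, 0.0)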
Example #21
0
file_num = 1
input = ['airport', 'hall', 'office', 'pedestrian', 'smoke']
video = 'data/noshake_static/' + input[file_num] + '/input.avi'
capture = cv.CaptureFromFile(video)  # create a capture from the video file

video_dynamic = 'data/noshake_dynamic/waterSurface/input.avi'
# capture = cv.CaptureFromFile(video_dynamic)

# video_shake = 'data/shake/people2/input.avi'
# capture = cv.CaptureFromFile(video_shake)

video_campus = 'data/Campus.avi'
video_shake = 'data/shake/people2/input.avi'
# capture = cv.CaptureFromFile(video)

frame1 = cv.QueryFrame(capture)

# Get the video's frame count, dimensions, codec and frame rate
nbFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
codec = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FOURCC)
fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
duration = nbFrames / fps  # duration in seconds: frame count / frame rate
print 'Num. Frames = ', nbFrames  # total number of frames in the file
print 'Frame Rate = ', fps, 'fps'  # the file's frame rate
print 'Duration = ', duration, 'sec'
print 'codec = ', codec

# Define the outputs
out_list = ['airport', 'hall', 'office', 'pedestrian', 'smoke']
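# Quick check of the duration formula above: a clip of 300 frames at 25 fps
# lasts 300 / 25 = 12 seconds -- frame count divided by frame rate.
nbFrames, fps = 300, 25.0
print 'Duration = ', nbFrames / fps, 'sec'  # 12.0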
Example #22
0
'''
@author Jeremy Barr
@date 5/1/2013
@brief sample script to test camera with OpenCV
'''

import cv2.cv as cv
import time

cv.NamedWindow("webcam", cv.CV_WINDOW_AUTOSIZE)
capture = cv.CaptureFromCAM(0)

cv.ShowImage("webcam", cv.QueryFrame(capture))

print "Press any key or Wait 5 seconds..."
# pause five seconds
cv.WaitKey(5000)
Example #23
0
def parseVedio(_,filename,Id,url):

    def ifFace(img, size):
        gray = cv.CreateImage(size, 8, 1)
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
        newMem1 = cv.CreateMemStorage(0)
        newMem2 = cv.CreateMemStorage(0)
        newMem3 = cv.CreateMemStorage(0)
        cv.EqualizeHist(gray, gray)
        face = cv.HaarDetectObjects(gray, c_f, newMem1, 1.2, 3,
                                    cv.CV_HAAR_DO_CANNY_PRUNING, (50, 50))
        mouth = cv.HaarDetectObjects(gray, c_m, newMem2, 1.2, 2,
                                     cv.CV_HAAR_DO_CANNY_PRUNING, (10, 10))
        # NB: the "body" detector below reuses the mouth cascade c_m.
        body = cv.HaarDetectObjects(gray, c_m, newMem3, 1.2, 2,
                                    cv.CV_HAAR_DO_CANNY_PRUNING, (100, 100))
        if (face and mouth) or body:
            cv.SaveImage("img/out.jpg", img)
            return 1
        else:
            return 0

    capture = cv.CaptureFromFile(filename)
    width = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)
    height = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT)
    size = (int(width), int(height))
    fps = 15
    i = 0
    count = [0]

    def scanFaces(src):
        c = cv.CloneImage(src)
        frams = []
        frams.append(src)  # original image

        cv.Flip(c, None, 0)
        frams.append(c)  # vertically flipped copy

        dst = cv.CreateImage((src.height, src.width),
                             src.depth, src.channels)
        cv.Transpose(src, dst)
        cv.Flip(dst, None, 0)
        frams.append(dst)  # rotated 90 degrees counter-clockwise

        c2 = cv.CloneImage(src)
        cv.Flip(c2, None, 0)
        dst = cv.CreateImage((src.height, src.width),
                             src.depth, src.channels)
        cv.Transpose(c2, dst)
        frams.append(dst)  # rotated 90 degrees clockwise

        for i, img in enumerate(frams):
            count[0] += ifFace(img, (img.width, img.height))

        return count[0] >= 15
            

    while True:
        img = cv.QueryFrame(capture)
        if not img:
            break
        if int((i + 1) % fps) == 0:
            if scanFaces(img):
                mess = "%s: face found" % filename
                yesfd.write("%s %s\n" % (Id, url))
                yesfd.flush()
                print mess
                return None
        i += 1
    mess = "%s: no face" % filename
    nofd.write("%s %s\n" % (Id, url))
    nofd.flush()
    print mess
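# The Transpose + Flip pairs in scanFaces implement 90-degree rotations. A
# small numpy check of the two orders (transpose-then-flip rotates counter-
# clockwise, flip-then-transpose rotates clockwise; flip mode 0 = flipud):
import numpy as np

m = np.arange(6).reshape(3, 2)
assert (np.flipud(m.T) == np.rot90(m)).all()      # like Transpose then Flip
assert (np.flipud(m).T == np.rot90(m, -1)).all()  # like Flip then Transpose
print "rotation identities hold"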
Example #24
0
posy = 0


def getthresholdedimg(im):
    '''Take a BGR image, convert it to HSV for easier colour detection, and
    threshold it so the yellow parts show as white and everything else as
    black. Return the thresholded image.'''
    imghsv = cv.CreateImage(cv.GetSize(im), 8, 3)
    cv.CvtColor(im, imghsv, cv.CV_BGR2HSV)  # Convert image from BGR to HSV
    imgthreshold = cv.CreateImage(cv.GetSize(im), 8, 1)
    cv.InRangeS(imghsv, cv.Scalar(20, 100, 100), cv.Scalar(30, 255, 255),
                imgthreshold)  # Select a range of yellow color
    return imgthreshold


capture = cv.CaptureFromCAM(0)
sleep(5)
frame = cv.QueryFrame(capture)
frame_size = cv.GetSize(frame)
grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
test = cv.CreateImage(cv.GetSize(frame), 8, 3)
cv.NamedWindow("Real")
cv.NamedWindow("Threshold")
while True:
    color_image = cv.QueryFrame(capture)
    imdraw = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.Flip(color_image, color_image, 1)
    cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)
    imgyellowthresh = getthresholdedimg(color_image)
    cv.Erode(imgyellowthresh, imgyellowthresh, None, 3)
    cv.Dilate(imgyellowthresh, imgyellowthresh, None, 10)

    storage = cv.CreateMemStorage(0)
Example #25
0
    def run(self):
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
        backproject_mode = False
        i = 1
        o_x = 0
        o_y = 0
        while True:
            frame = cv.QueryFrame(self.capture)

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject([self.hue], backproject, hist)
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect),
                 track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram

            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x, y, w, h = self.selection
                cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

                sel = cv.GetSubRect(self.hue, self.selection)
                cv.CalcArrHist([sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3,
                              cv.CV_AA, 0)
                #print track_box
                trace_val = track_box[0]
                f_x = trace_val[0]
                f_y = trace_val[1]
                print 'value1', f_x
                print 'value2', f_y
                if i % 10 == 0:
                    o_x = f_x
                    o_y = f_y
                if f_x != o_x:
                    a = round((f_x - o_x) / 10.0)
                    cam.Azimuth(-a)
                if f_y != o_y:
                    a = round((f_y - o_y) / 10.0)
                    cam.Elevation(-a)
                ren1.ResetCameraClippingRange()
                renWin.Render()
                i += 1

            if not backproject_mode:
                cv.ShowImage("CamShiftDemo", frame)
            else:
                cv.ShowImage("CamShiftDemo", backproject)
            cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
            elif c == ord("b"):
                backproject_mode = not backproject_mode
Example #26
0
    def sd_loop(self):
        """
        The main seizure detector loop - call this function to start
        the seizure detector.
        """
        self.timeSeries = []  # array of times that data points were collected.
        self.maxFreq = None
        if (self.X11):
            cv.NamedWindow('Seizure_Detector', cv.CV_WINDOW_AUTOSIZE)
            cv.CreateTrackbar('FeatureTrackbar', 'Seizure_Detector', 0,
                              self.MAX_COUNT, self.onTrackbarChanged)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0, 1, 8)

        # Initialise the video input source
        # ('camera' - may be a file or network stream though).
        #camera = cv.CaptureFromFile("rtsp://192.168.1.18/live_mpeg4.sdp")
        #camera = cv.CaptureFromFile("../testcards/testcard.mpg")
        #camera = cv.CaptureFromFile("/home/graham/laura_sample.mpeg")
        camera = cv.CaptureFromCAM(0)

        # Set the VideoWriter that produces the output video file.
        frameSize = (640, 480)
        videoFormat = cv.CV_FOURCC('p', 'i', 'm', '1')
        # videoFormat = cv.CV_FOURCC('l','m','p','4')
        vw = cv.CreateVideoWriter(self.videoOut, videoFormat, self.outputfps,
                                  frameSize, 1)
        if vw is None:
            print "ERROR - Failed to create VideoWriter...."

        # Get the first frame.
        last_analysis_time = datetime.datetime.now()
        last_feature_search_time = datetime.datetime.now()
        last_frame_time = datetime.datetime.now()
        frame = cv.QueryFrame(camera)

        print "frame="
        print frame

        # Main loop - repeat forever
        while True:
            # Carry out initialisation, memory allocation etc. if necessary
            if self.image is None:
                self.image = cv.CreateImage(cv.GetSize(frame), 8, 3)
                self.image.origin = frame.origin
                grey = cv.CreateImage(cv.GetSize(frame), 8, 1)
                prev_grey = cv.CreateImage(cv.GetSize(frame), 8, 1)
                pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1)
                prev_pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1)
                # self.features = []

            # copy the captured frame to our self.image object.
            cv.Copy(frame, self.image)

            # create a grey version of the image
            cv.CvtColor(self.image, grey, cv.CV_BGR2GRAY)

            # Look for features to track.
            if self.need_to_init:
                #cv.ShowImage ('loop_grey',grey)
                self.initFeatures(grey)
                self.timeSeries = []
                self.maxFreq = None
                last_analysis_time = datetime.datetime.now()
                self.need_to_init = False

            # Now track the features, if we have some.
            if self.features != []:
                # we have points to track, so track them and add them to
                # our time series of positions.
                self.features, status, track_error = cv.CalcOpticalFlowPyrLK(
                    prev_grey, grey, prev_pyramid, pyramid, self.features,
                    (self.win_size, self.win_size), 3,
                    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03),
                    self.flags)
                self.timeSeries.append((last_frame_time, self.features))
                print "Features..."
                for featNo in range(len(self.features)):
                    if (status[featNo] == 0):
                        self.features[featNo] = (-1, -1)
                    print status[featNo], self.features[featNo]
                # and plot them.
                for featNo in range(len(self.features)):
                    pointPos = self.features[featNo]
                    cv.Circle(self.image, (int(pointPos[0]), int(pointPos[1])),
                              3, (0, 255, 0, 0), -1, 8, 0)
                    if (self.alarmActive[featNo] == 2):
                        cv.Circle(self.image,
                                  (int(pointPos[0]), int(pointPos[1])), 10,
                                  (0, 0, 255, 0), 5, 8, 0)
                    if (self.alarmActive[featNo] == 1):
                        cv.Circle(self.image,
                                  (int(pointPos[0]), int(pointPos[1])), 10,
                                  (0, 0, 255, 0), 2, 8, 0)

                    # there will be no maxFreq data until we have
                    # run doAnalysis for the first time.
                    if self.maxFreq is not None:
                        msg = "%d-%3.1f" % (featNo, self.maxFreq[featNo])
                        cv.PutText(
                            self.image, msg,
                            (int(pointPos[0] + 5), int(pointPos[1] + 5)), font,
                            (255, 255, 255))
                # end of for loop over features
            else:
                #print "Oh no, no features to track, and you haven't told me to look for more."
                # no features, so better look for some more...
                self.need_to_init = True

            # Is it time to analyse the captured time series.
            if ((datetime.datetime.now() - last_analysis_time).total_seconds()
                    > self.Analysis_Period):
                if (len(self.timeSeries) > 0):
                    self.doAnalysis()
                    self.doAlarmCheck()
                    last_analysis_time = datetime.datetime.now()
                else:
                    # Not doing analysis - no time series data yet.
                    pass

            # Is it time to re-acquire the features to track.
            if ((datetime.datetime.now() -
                 last_feature_search_time).total_seconds() >
                    self.Feature_Search_Period):
                print "resetting..."
                last_feature_search_time = datetime.datetime.now()
                self.need_to_init = True

            # save current data for use next time around.
            prev_grey, grey = grey, prev_grey
            prev_pyramid, pyramid = pyramid, prev_pyramid

            # we can now display the image
            if (self.X11): cv.ShowImage('Seizure_Detector', self.image)
            cv.WriteFrame(vw, self.image)

            # handle events
            c = cv.WaitKey(10)
            if c == 27:
                # user has press the ESC key, so exit
                break

            # Control frame rate by pausing if we are going too fast.
            frameTime = (datetime.datetime.now() - last_frame_time)\
                .total_seconds()
            actFps = 1.0 / frameTime
            if frameTime < 1.0 / self.inputfps:
                cv.WaitKey(1 + int(1000. * (1. / self.inputfps - frameTime)))

            # Grab the next frame
            last_frame_time = datetime.datetime.now()
            frame = cv.QueryFrame(camera)
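# A stripped-down version of the tracking step above: follow one corner of a
# synthetic white square between two frames with pyramidal Lucas-Kanade. The
# window size, pyramid depth and termination criteria mirror the call in
# sd_loop; everything else is made up for illustration.
import cv2.cv as cv

prev_grey = cv.CreateImage((64, 64), 8, 1)
grey = cv.CreateImage((64, 64), 8, 1)
cv.SetZero(prev_grey)
cv.SetZero(grey)
cv.Rectangle(prev_grey, (20, 20), (30, 30), 255, -1)  # filled square
cv.Rectangle(grey, (25, 20), (35, 30), 255, -1)       # same square, 5 px right

prev_pyramid = cv.CreateImage((64, 64), 8, 1)
pyramid = cv.CreateImage((64, 64), 8, 1)
features = [(20.0, 20.0)]  # track the square's top-left corner
features, status, track_error = cv.CalcOpticalFlowPyrLK(
    prev_grey, grey, prev_pyramid, pyramid, features,
    (10, 10), 3, (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)
print features[0], status[0]  # expect roughly (25.0, 20.0) and status 1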
Example #27
0
i = 0

# Initialise the capture parameters
camera = PiCamera()
camera.resolution = (1920, 1080)
camera.framerate = 32
camera.rotation = 180

# allow time for the camera's autofocus
time.sleep(0.1)
camera.start_preview()
while True:
    key = cv2.waitKey(1) & 0xFF
    if key == ord("s"):
        camera.stop_preview()
    if key == "c":
        camera.capture('/home/pi/Desktop/image' + str(i) + '.jpg')
        i += 1

import cv2.cv as cv

if __name__ == '__main__':
    ma_caméra = cv.CaptureFromCAM(0)
    cv.NamedWindow("Test_Webcam")
    while True:
        ma_frame = cv.QueryFrame(ma_caméra)
        cv.ShowImage("Test_Webcam", ma_frame)

        if (cv.WaitKey(10) % 0x100) == 113:  # 'q' quits
            break
def ActivateCamera():
    capture = cv.CaptureFromCAM(0)

    # declare the zbar image scanner
    zbar_scanner = zbar.ImageScanner()
    checkScan = False
    while not checkScan:
        # grab a frame from the camera
        img = cv.QueryFrame(capture)
        height = int(img.height)
        width = int(img.width)

        SubRect = cv.GetSubRect(img, (1, 1, width - 1, height - 1))

        # cv.Rectangle(img,(0,0),(width,height),(255,0,0))

        # create a single-channel image for zbar
        set_image = cv.CreateImage((SubRect.width, SubRect.height), cv.IPL_DEPTH_8U, 1)

        cv.ConvertImage(SubRect, set_image)

        image = zbar.Image(set_image.width, set_image.height, 'Y800', set_image.tostring())

        zbar_scanner.scan(image)

        for item in image:

            getFromScan = item.data

            splitString = getFromScan.split(":")
            checkProductName = splitString[0]
            priceOfdata = splitString[1]
            checkExpirydate = splitString[2]
            itemCode = splitString[3]
            
            data = {'price': splitString[1] , 'Code': splitString[3], 'name': splitString[0], 'expiryDate': splitString[2], 'Checkin': str(datetime.now())}
            checkDuplicate = False
            counterOuter = 0
            """"""
            # 1.1
            for i in itemlist:

                # 2.1 same product name
                if i['name'] == checkProductName:
                    print "have same product name"
                    checkDuplicate = True
                    # 3.1 same expiryDate
                    if i['expiryDate'] == checkExpirydate:
                        print "have same expiryDate"
                       
                        print i['Code']
                        list_of_ItemCode = CreateArrayOfKeyDict(json.dumps(i['Code']))
                        list_of_Checkin = CreateArrayOfKeyDict(json.dumps(i['Checkin']))
                        counterInner = 0
                        for itemID in list_of_ItemCode:
                            
                            print 'U r in the new for loop'
                            # 4.1 itemCode(itemID) is duplicate
                            print 'itemID is ' + itemID
                            print 'itemCode is ' + itemCode
                            if itemID == itemCode:

                                # 5.1 array of Code is equal 1
                                if len(list_of_ItemCode) == 1:
                                    # remove statement

                                    print 'U are in the code is duplicate condition'

                                    checkDuplicate = True
                                    removeItemKey = itemlist_key[counterOuter]
                                    print removeItemKey
                                    forRemoveDupCode = firebase.ref('CustomerInfo/-KqsdeuVyyatxKELuMs4/Item')
                                    forRemoveDupCode.child(removeItemKey).delete()

                                    # create new data
                                    checkoutData = {'price': splitString[1], 'Code': itemCode, 'name': checkProductName,
                                                    'expiryDate': checkExpirydate,
                                                    'Checkin': list_of_Checkin[counterInner],
                                                    'Checkout': str(datetime.now()),
                                                    'Province': 'Chiang Mai'}
                                    # add to usage database
                                    usageItem = firebase.ref('Usage')
                                    usageItem.push(checkoutData)

                                    itemlist.pop(counterOuter)
                                    itemlist_key.pop(counterOuter)

                                    print ("Successful for removing item" + getFromScan)
                                    microgear.chat("outdoor/temp", json.dumps(itemlist))
                                    time.sleep(5)
                                    return True
                                # 5.2 array of Code is greater than 1
                                else:
                                    # delete item code from dict and update
                                    # update statement

                                    print 'Code  duplicate but length is greater than 1 condition'

                                    checkDuplicate = True
                                    removeItemKey = itemlist_key[counterOuter]
                                    print removeItemKey

                                    # update item code
                                    list_of_ItemCode.remove(itemID)
                                    # getting new values to update data in firebase
                                    newCode5 = ReplaceValuesInDict(json.dumps(list_of_ItemCode))
                                    # update values in firebase
                                    forRemoveExCode = firebase.ref('CustomerInfo/-KqsdeuVyyatxKELuMs4/Item')
                                    forRemoveExCode.child(removeItemKey).child('Code').set(newCode5)
                                    # update local database
                                    i['Code'] = UpdateNewValuesDictKey(list_of_ItemCode)

                                    # create new data
                                    checkoutData = {'price': splitString[1], 'Code': itemCode, 'name': checkProductName,
                                                    'expiryDate': checkExpirydate, 'Checkin': list_of_Checkin[counterInner],
                                                    'Checkout': str(datetime.now()),
                                                    'Province': 'Chiang Mai'}
                                    # update check in
                                    list_of_Checkin.pop(counterInner)
                                    updateCheckinValues = ReplaceValuesInDict(json.dumps(list_of_Checkin))
                                    forupdateNewCheckin = firebase.ref('CustomerInfo/-KqsdeuVyyatxKELuMs4/Item')
                                    forupdateNewCheckin.child(removeItemKey).child('Checkin').set(updateCheckinValues)

                                    i['Checkin'] = UpdateNewValuesDictKey(list_of_Checkin)

                                    # add to usage database

                                    usageItem = firebase.ref('Usage')
                                    usageItem.push(checkoutData)

                                    print ("Successful for removing item Code")

                                    microgear.chat("outdoor/temp", json.dumps(itemlist))
                                    time.sleep(5)
                                    return True
                            counterInner += 1
                        # 4.2 itemCode(itemID) is not duplicate
                        # (this "else" belongs to the for loop above: it runs
                        # when no itemID matched and the loop finished without
                        # returning)
                        else:
                            # update statement with append new itemCode into existing dict
                            print 'update'
                            checkDuplicate = True
                            removeItemKey = itemlist_key[counterOuter]

                            # update item code
                            # add new code to list
                            list_of_ItemCode.append(itemCode)
                            # getting new values to update data in firebase
                            newCode5 = ReplaceValuesInDict(json.dumps(list_of_ItemCode))
                            # update values in firebase
                            forupdateNewCode = firebase.ref('CustomerInfo/-KqsdeuVyyatxKELuMs4/Item')
                            forupdateNewCode.child(removeItemKey).child('Code').set(newCode5)
                            # update local database
                            i['Code'] = UpdateNewValuesDictKey(list_of_ItemCode)

                            # update check in
                            # add new check in time to list
                            list_of_Checkin.append(data['Checkin'])
                            # getting new values to update data in firebase
                            newCodeForCheckin = ReplaceValuesInDict(json.dumps(list_of_Checkin))
                            # update values in firebase
                            forupdateNewCheckin = firebase.ref('CustomerInfo/-KqsdeuVyyatxKELuMs4/Item')
                            forupdateNewCheckin.child(removeItemKey).child('Checkin').set(newCodeForCheckin)
                            # update local database
                            i['Checkin'] = UpdateNewValuesDictKey(list_of_Checkin)

                            microgear.chat("outdoor/temp", json.dumps(itemlist))
                            time.sleep(5)
                            return True


                    # 3.2 expiry date is not duplicate
                    else:
                        # add statement
                        foraddnewDate = firebase.ref('CustomerInfo/-KqsdeuVyyatxKELuMs4/Item')

                        NewItemKey = foraddnewDate.push(data)
                        print("Successful for adding new date")
                        abcd = json.dumps(NewItemKey.values())
                        pkpk = re.sub('[^a-zA-Z_0-9-]+', '', abcd)
                        itemlist.append(data)
                        itemlist_key.append(pkpk)

                        microgear.chat("outdoor/temp", json.dumps(itemlist))
                        time.sleep(5)
                        return True
                counterOuter += 1

            # 1.2 if checkDuplicate == false it will add the new one
            if not checkDuplicate:
                # add statement
                print 'add new item'
                foraddNewitemRef = firebase.ref('CustomerInfo/-KqsdeuVyyatxKELuMs4/Item')

                NewItemKey = foraddNewitemRef.push(data)
                print("Successful for adding new item")
                abcd = json.dumps(NewItemKey.values())
                pkpk = re.sub('[^a-zA-Z_0-9-]+', '', abcd)
                itemlist.append(data)
                itemlist_key.append(pkpk)

                microgear.chat("outdoor/temp", json.dumps(itemlist))
                time.sleep(5)
                return True

        #cv.ShowImage("ISR Scanner", img)

        # short wait keeps the video rendering fast
        cv.WaitKey(1)
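# The scanner above assumes every barcode payload packs four colon-separated
# fields, indexed as splitString[0..3]: "name:price:expiryDate:itemCode".
# A minimal parse of a made-up payload:
getFromScan = "milk:42:2017-12-01:A001"
name, price, expiry, code = getFromScan.split(":")
print {'name': name, 'price': price, 'expiryDate': expiry, 'Code': code}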
Example #29
0
import cv2.cv as cv
import time
cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)
while True:
    img = cv.QueryFrame(capture)
    cv.ShowImage("camera", img)
    if cv.WaitKey(10) == 27:
        break
Example #30
0
def main():
    hc = cv.Load(face_cascade)
    cv.NamedWindow("camera", 1)
    capture = cv.CaptureFromCAM(0)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FORMAT, cv.IPL_DEPTH_32F)

    print "Press ESC to exit"
    print "capture image with keys 1 to 7"
    print "1: Neutral"
    print "2: anger"
    print "3: disgust"
    print "4: fear"
    print "5: happy"
    print "6: sadness"
    print "7: surprise"

    while True:
        img = cv.QueryFrame(capture)
        #img = cv.LoadImage("data/o_happy_24.jpg")

        returned = handel_camera_image(img, hc)

        if returned is not None:
            (img_f, img_r) = returned

            cv.ShowImage("camera", img_f)
            cv.ShowImage("normalized", img_r)

            if show_gabor:
                kernel_var = 50
                gabor_psi = 90

                # One Gabor response per (phase, pulsation) pair, shown in
                # windows "Gabor1" .. "Gabor18": phase steps through
                # 0, 30, ..., 150 and pulsation through 2, 4, 6, in the same
                # order as the original unrolled code.
                window_index = 1
                for gabor_phase in (0, 30, 60, 90, 120, 150):
                    for gabor_pulsation in (2, 4, 6):
                        (img_g_mag, img_g) = gabor.Process(
                            img_r, kernel_var, gabor_pulsation, gabor_phase,
                            gabor_psi)
                        cv.ShowImage("Gabor%d" % window_index, img_g_mag)
                        window_index += 1

        key_pressed = cv.WaitKey(1)
        # print "key pressed: " + str(key_pressed)

        expressions = {49: "neutral", 50: "anger", 51: "disgust", 52: "fear",
                       53: "happy", 54: "sadness", 55: "surprise"}
        if key_pressed == 27:
            break
        elif key_pressed in expressions:  # keys '1' .. '7'
            save_img(expressions[key_pressed], img, img_r)

    cv.DestroyAllWindows()