def run(self):
        """Motion-triggered playback loop.

        Grabs frames from the webcam, runs motion detection via
        self.processImage/self.somethingHasMoved, and plays ./movie.mp4
        (fullscreen, via cvlc) whenever motion is seen after a 5-second
        warm-up.  Press Esc or Enter to quit.
        """
        started = time.time()
        while True:

            curframe = cv.QueryFrame(self.capture)
            instant = time.time()  # Get timestamp of the frame

            self.processImage(curframe)  # Process the image

            if self.somethingHasMoved():
                self.trigger_time = instant  # Update the trigger_time
                if instant > started + 5:  # Wait 5 seconds after the webcam starts for luminosity adjusting etc.
                    print datetime.now().strftime(
                        "%b %d, %H:%M:%S"), "Something is moving !"
                    # Blocks here until the movie finishes playing.
                    os.system("cvlc --play-and-exit --fullscreen ./movie.mp4")
                    started = time.time()  # restart the warm-up window

            if self.show:
                cv.ShowImage("Image", curframe)
                cv.ShowImage("Res", self.res)

            # Current grey frame becomes the reference for the next diff.
            cv.Copy(self.frame2gray, self.frame1gray)
            c = cv.WaitKey(1) % 0x100
            if c == 27 or c == 10:  # Break if user enters Esc (27) or Enter (10).
                break
# Example #2
# 0
    def getDepth(self, image, image2):
        """Rectify a stereo pair, compute an SGBM disparity map, and save
        the reconstructed 3D point cloud to 'out.ply'.

        Args:
            image: left colour frame (IplImage).
            image2: right colour frame (IplImage).

        Side effects: opens debug windows ("win3", "win4", "disparity"),
        prints diagnostics, and writes the point cloud via write_ply.

        NOTE(review): self.map1x/map1y and self.map2x/map2y look like
        per-camera rectification maps and self.Q the reprojection matrix
        from stereo calibration -- confirm where they are initialised.
        """
        # Greyscale copies of both inputs.
        grayScaleFullImage = cv.CreateImage((image.width, image.height), 8, 1)
        cv.CvtColor(image, grayScaleFullImage, cv.CV_BGR2GRAY)

        grayScaleFullImage2 = cv.CreateImage((image2.width, image2.height), 8, 1)
        cv.CvtColor(image2, grayScaleFullImage2, cv.CV_BGR2GRAY)

        [mat_w, mat_h] = self.size

        # Destination matrices for the rectified greyscale images.
        r = cv.CreateMat(mat_h, mat_w, cv.CV_8UC1)
        r2 = cv.CreateMat(mat_h, mat_w, cv.CV_8UC1)
        # Debug output left in by the author.
        print type(r)
        print type(image)
        print type(self.map1x)
        print cv.GetSize(r)
        print cv.GetSize(self.map1x)
        cv.Remap(grayScaleFullImage, r, self.map1x, self.map1y)
        cv.Remap(grayScaleFullImage2, r2, self.map2x, self.map2y)

        cv.ShowImage("win3", r)
        cv.ShowImage("win4", r2)

        # stereo_match that comes in opencv

        # disparity range is tuned for 'aloe' image pair
        window_size = 3
        min_disp = 16
        num_disp = 112 - min_disp
        stereo = cv2.StereoSGBM(minDisparity=min_disp,
            numDisparities=num_disp,
            SADWindowSize=window_size,
            uniquenessRatio=10,
            speckleWindowSize=100,
            speckleRange=32,
            disp12MaxDiff=1,
            P1=8 * 3 * window_size ** 2,
            P2=32 * 3 * window_size ** 2,
            fullDP=False
        )

        print 'computing disparity...'
        # StereoSGBM returns fixed-point disparities scaled by 16.
        disp = stereo.compute(np.asarray(r), np.asarray(r2)).astype(np.float32) / 16.0

        print 'generating 3d point cloud...'
        points = cv2.reprojectImageTo3D(disp, np.asarray(self.Q))

        # Rectified grey image reused as per-point colour.
        colors = cv2.cvtColor(np.asarray(r), cv2.COLOR_GRAY2RGB)
        # Keep only pixels with a valid (above-minimum) disparity.
        mask = disp > disp.min()
        out_points = points[mask]
        out_colors = colors[mask]
        # Resulting .ply file can be easily viewed using MeshLab ( http://meshlab.sourceforge.net )
        out_fn = 'out.ply'
        write_ply('out.ply', out_points, out_colors)
        print '%s saved' % 'out.ply'

        # Normalise disparities to roughly [0, 1] for display.
        cv2.imshow('disparity', (disp - min_disp) / num_disp)
    def cannyGradient(self, image, t1=20, t2=250):
        """Return the Canny edge map of ``image`` as a 1-channel image.

        Args:
            image: input IplImage; colour inputs are converted to greyscale.
            t1: lower hysteresis threshold passed to cv.Canny.
            t2: upper hysteresis threshold passed to cv.Canny.

        Returns:
            A single-channel 8-bit IplImage of edges, or -1 if the input
            fails self.image_check.
        """
        # Checks whether inputs are correct
        if self.image_check(image) < 0:
            return -1

        # Convert to greyscale only when needed.  (The original allocated a
        # scratch image up front that was always discarded.)
        if image.channels > 1:
            gsimage = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
            cv.CvtColor(image, gsimage, cv.CV_BGR2GRAY)
        else:
            gsimage = image

        # Gets the edges from the image
        edges = cv.CreateImage(cv.GetSize(gsimage), cv.IPL_DEPTH_8U, 1)

        # Warning: threshold1 and threshold2 should be selected by experiment
        cv.Canny(gsimage, edges, threshold1=t1, threshold2=t2)

        if self.visualize:
            # Show input and edges until any key is pressed.
            while True:
                cv.NamedWindow("Original")
                cv.ShowImage("Original", gsimage)
                cv.NamedWindow("Edges")
                cv.ShowImage("Edges", edges)
                c = cv.WaitKey(5)
                if c > 0:
                    break
            # Tear down only when we opened preview windows ourselves
            # (matches normalize(); the original destroyed all windows
            # unconditionally, killing unrelated ones).
            cv.DestroyAllWindows()
        return edges
    def update_brightcont(self):
        """Apply the current brightness/contrast settings and redraw.

        Uses Werner D. Streidt's linear mapping
        (http://visca.com/ffactory/archives/5-99/msg00021.html):
        dst = scale * src + offset, then redisplays the adjusted image
        together with its grey-level histogram.
        """
        if self.contrast > 0:
            delta = 127. * self.contrast / 100
            scale = 255. / (255. - delta * 2)
            offset = scale * (self.brightness - delta)
        else:
            delta = -128. * self.contrast / 100
            scale = (256. - delta * 2) / 255.
            offset = scale * self.brightness + delta

        cv.ConvertScale(self.src_image, self.dst_image, scale, offset)
        cv.ShowImage("image", self.dst_image)

        # Recompute the histogram of the adjusted image, then rescale the
        # bins so the tallest bar spans the full drawing height.
        cv.CalcArrHist([self.dst_image], self.hist)
        (_, peak, _, _) = cv.GetMinMaxHistValue(self.hist)
        cv.Scale(self.hist.bins, self.hist.bins,
                 float(self.hist_image.height) / peak, 0)

        # White background, then one filled black bar per histogram bin.
        cv.Set(self.hist_image, cv.ScalarAll(255))
        bar_width = round(float(self.hist_image.width) / hist_size)

        for bin_idx in range(hist_size):
            bar_top = self.hist_image.height - cv.Round(self.hist.bins[bin_idx])
            cv.Rectangle(self.hist_image,
                         (int(bin_idx * bar_width), self.hist_image.height),
                         (int((bin_idx + 1) * bar_width), bar_top),
                         cv.ScalarAll(0), -1, 8, 0)

        cv.ShowImage("histogram", self.hist_image)
    def normalize(self, image):
        """Histogram-equalise ``image`` and return the equalised copy.

        Returns -1 when the input fails self.image_check; otherwise a new
        single-channel 8-bit image.  When self.visualize is set, shows the
        before/after pair until Esc (key code 1048603) is pressed.
        """
        # Reject invalid inputs early.
        if self.image_check(image) < 0:
            return -1

        # Greyscale conversion followed by histogram equalisation.
        size = cv.GetSize(image)
        grey = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
        cv.CvtColor(image, grey, cv.CV_RGB2GRAY)
        equalized = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
        cv.EqualizeHist(grey, equalized)

        if self.visualize:
            while True:
                cv.NamedWindow("Normal")
                cv.ShowImage("Normal", grey)
                cv.WaitKey(5)
                cv.NamedWindow("Histogram Equalized")
                cv.ShowImage("Histogram Equalized", equalized)
                if cv.WaitKey(5) == 1048603:
                    break
            cv.DestroyAllWindows()

        return equalized
# Example #6
# 0
     def show_frame(self):
         """Grab one frame, show four views of it, and drain Arduino input.

         Displays: the raw frame, an RGB->HLS conversion (misleadingly
         named ``grey``), an RGB->HSV conversion, and the colour negative.
         Finally appends any pending serial bytes to self.data.
         """
         color_image = cv.QueryFrame(self.capture)

         # NOTE(review): color_image1, grey_image and moving_average are
         # allocated but never used in this method.
         color_image1 = cv.CreateImage(cv.GetSize(color_image), 8, 3)
         grey_image = cv.CreateImage(cv.GetSize(color_image), cv.IPL_DEPTH_8U, 1)
         moving_average = cv.CreateImage(cv.GetSize(color_image), cv.IPL_DEPTH_32F, 3)

         grey = cv.CreateImage(cv.GetSize(color_image), 8, 3)
         HSV = cv.CreateImage(cv.GetSize(color_image), 8, 3)
         red = cv.CreateImage(cv.GetSize(color_image), 8, 3)

         cv.CvtColor(color_image, grey, cv.CV_RGB2HLS)
         cv.CvtColor(color_image, HSV, cv.CV_RGB2HSV)
         cv.Not(color_image, red)  # colour negative

         cv.ShowImage(self.window1, color_image)
         cv.ShowImage(self.window2, grey)
         cv.ShowImage(self.window3, HSV)
         cv.ShowImage(self.window4, red)

         # Arrange the four windows in a 2x2 grid.
         cv.MoveWindow(self.window1, 30, 120)
         cv.MoveWindow(self.window2, 430, 120)
         cv.MoveWindow(self.window3, 430, 470)
         cv.MoveWindow(self.window4, 30, 470)

         # Read whatever the Arduino has sent since the last frame.
         while self.arduino.inWaiting() > 0:
            self.data += self.arduino.read(1)
# Example #7
# 0
 def run(self):
     """Motion-detection loop with optional 10-second video recording.

     Detects motion after a 5-second warm-up; when self.doRecord is set,
     records 10 seconds of timestamped frames through self.writer.
     Press Esc or Enter to quit.
     """
     started = time.time()
     while True:

         curframe = cv.QueryFrame(self.capture)
         instant = time.time() # Get timestamp of the frame

         self.processImage(curframe) # Process the image

         if not self.isRecording:
             if self.somethingHasMoved():
                 self.trigger_time = instant # Update the trigger_time
                 if instant > started +5:# Wait 5 seconds after the webcam starts for luminosity adjusting etc.
                     print datetime.now().strftime("%b %d, %H:%M:%S"), "Something is moving !"
                     if self.doRecord: # set isRecording=True only if we record a video
                         self.isRecording = True
         else:
             if instant >= self.trigger_time +10: # Record during 10 seconds
                 print datetime.now().strftime("%b %d, %H:%M:%S"), "Stop recording"
                 self.isRecording = False
             else:
                 cv.PutText(curframe,datetime.now().strftime("%b %d, %H:%M:%S"), (25,30),self.font, 0) # Put date on the frame
                 cv.WriteFrame(self.writer, curframe) # Write the frame

         if self.show:
             cv.ShowImage("Image", curframe)
             cv.ShowImage("Res", self.res)

         # Current grey frame becomes the reference for the next diff.
         cv.Copy(self.frame2gray, self.frame1gray)
         c=cv.WaitKey(1) % 0x100
         if c==27 or c == 10: # Break if user enters Esc (27) or Enter (10).
             break
# Example #8
# 0
def detect_and_draw(img, cascade):
    """Detect faces in ``img`` with a Haar cascade and draw red boxes.

    Detection runs on a greyscale, histogram-equalised copy downscaled by
    the global image_scale; boxes are scaled back up to full resolution.
    Results are shown in the "video" and "gray" windows.

    NOTE(review): image_scale, haar_scale, min_neighbors, haar_flags and
    min_size are module-level globals defined elsewhere in the file.
    """
    # create model for face recognition
    #model = cv.reateFisherFaceRecognizer()

    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()  # time the detection pass
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "time taken for detection = %gms" % (
            t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("video", img)
    cv.ShowImage("gray", gray)
# Example #9
# 0
    def eyeRemove(self, region):
        """ Crops an eye from the facePhoto and returns it as a separate photo

        This method takes in a region which is interpreted as a rectangle
        bounding an eye and crops the eye out. It then returns the cropped photo.

        Args:
            region - the eye box as four corner coordinates (x1, y1, x2, y2)

        Return:
            cv2.cv.cvmat eyePhoto - a photo of just the eye
        """
        # GetSubRect wants (x, y, width, height), so convert the two corners.
        crop = (region[0],region[1], region[2] - region[0], region[3] - region[1])
        if DEBUG:
            print "Region passed to eye remove: " + str(region)
            print "And here's crop: " + str(crop)
            print "Before crop we have type: " + str(type(self.facePhoto))
            print self.facePhoto
            cv.ShowImage("We're cropping", self.facePhoto)
            cv.WaitKey(0)
            cv.DestroyWindow("We're cropping")
        # Returns a sub-rectangle view into facePhoto, not a copy.
        eye = cv.GetSubRect(self.facePhoto, crop)
        #eye = face.crop(region)
        if DEBUG:
            print "After crop we have type: " + str(type(eye))
            cv.ShowImage("Cropped", eye)
            cv.WaitKey(0)
            cv.DestroyWindow("Cropped")
        return eye
# Example #10
# 0
    def run(self):
        """CamShift tracking demo loop.

        Builds a hue histogram from the user-selected region, back-projects
        it onto each frame, tracks the region with cv.CamShift, and draws
        the tracked ellipse.  Esc quits.
        """
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
        backproject_mode = True

        while True:
            frame = cv.QueryFrame(self.capture)

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.CalcArrBackProject([self.hue], backproject, hist)

            # Run the cam-shift (if a window is set and != 0)
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect),
                 track_box) = cv.CamShift(backproject, self.track_window,
                                          crit)  # Call the camshift !!
                self.track_window = rect  # Put the current rectangle as the tracked area

            # If mouse is pressed, highlight the current selected rectangle and recompute histogram
            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)  # Get specified area

                # Make the effect of background shadow when selecting a window
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)

                # Draw temporary rectangle
                x, y, w, h = self.selection
                cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

                # Take the same area but in hue image to calculate histogram
                sel = cv.GetSubRect(self.hue, self.selection)
                cv.CalcArrHist([sel], hist, 0)

                # Used to rescale the histogram with the max value (to draw it later on)
                (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)

            elif self.track_window and is_rect_nonzero(
                    self.track_window):  # If window set draw an ellipseBox
                cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3,
                              cv.CV_AA, 0)

            cv.ShowImage("CamShiftDemo", frame)
            cv.ShowImage("Backprojection", backproject)
            cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:  # Esc
                break
# Example #11
# 0
def testkinect1():
    """Cycle through Kinect video modes and preview each for ~10 seconds.

    For each video mode (1, 2, 3, 5 and 6) the Kinect is re-initialised,
    the mode is set and the resulting format printed, then live video is
    shown for 10 seconds.  The first mode additionally reports the format
    in effect before switching; the first two modes pause for one second
    before stopping sync (preserved from the original).
    """

    def _preview_mode(mode, report_before=False, pause_ms=0):
        # Initialise, switch to `mode`, report the format, show ~10 s of
        # live video, then tear everything down.
        [ctx, dev] = initkinect()
        if report_before:
            print(freenect.get_video_format(dev))
        freenect.set_video_mode(dev, 1, mode)
        print(freenect.get_video_format(dev))
        killkinect(ctx, dev)
        old_time = time.time()
        while time.time() - old_time < 10:
            cv.ShowImage('Video', get_video())
            cv2.waitKey(1)
        cv2.destroyAllWindows()
        if pause_ms:
            cv2.waitKey(pause_ms)
        freenect.sync_stop()

    # test video modes
    _preview_mode(1, report_before=True, pause_ms=1000)
    _preview_mode(2, pause_ms=1000)
    _preview_mode(3)
    _preview_mode(5)
    _preview_mode(6)
# Example #12
# 0
def repeat():
    """Grab one frame, locate the red target, and refresh the display.

    Smooths the captured frame, converts it to HSV and RGBA, runs the
    external parallelSumRed detector, prints an estimated distance derived
    from the offset magnitude, draws crosshairs on the detected spot,
    refreshes both windows, and handles Esc (quit) / 'c' (calibrate) keys.
    """
    global capture  #declare as globals since we are assigning to them now
    global camera_index
    global done

    frame = cv.QueryFrame(capture)
    cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 3, 3)

    imgHsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.CvtColor(frame, imgHsv, cv.CV_BGR2HSV)
    #imgHsv2 = GetThresholdedImage(imgHsv)
    #print(numpy.asarray(cv.GetMat(imgHsv)))

    imgRGBA = cv.CreateImage(cv.GetSize(frame), 8, 4)
    cv.CvtColor(frame, imgRGBA, cv.CV_BGR2RGBA)

    cv.Smooth(imgRGBA, imgRGBA, cv.CV_GAUSSIAN, 3, 3)
    (filteredImg, offsetX, offsetY) = parallelSumRed(imgRGBA, 640,
                                                     480)  #3D array

    # Radial distance of the detected spot from the image centre.
    d = numpy.sqrt(offsetX * offsetX + offsetY * offsetY)

    if d != 0:
        # c1/c2 appear to be calibration constants defined elsewhere -- TODO confirm.
        print("Distance = " + str(c1 / d + c2) + "cm")
        print("OffsetX = " + str(offsetX) + "; OffsetY = " + str(offsetY))
        print("")

    # NOTE(review): imgRGB is allocated but never used (conversion below
    # is commented out).
    imgRGB = cv.CreateImage(cv.GetSize(frame), 8, 3)
    #cv.CvtColor(Image.fromarray(filteredImg), imgRGB, cv.CV_RGBA2RGB)

    imgRGBA = cv.fromarray(numpy.reshape(filteredImg, (480, 640, 4)))
    if offsetX != 0 or offsetY != 0:
        # Box plus full-width/-height crosshair lines through the target.
        cv.Rectangle(imgRGBA, (320 + offsetX - 6, 240 + offsetY - 6),
                     (320 + offsetX + 6, 240 + offsetY + 6),
                     (255, 0, 255, 255), 1, 8)
        cv.Line(imgRGBA, (0, 240 + offsetY), (639, 240 + offsetY),
                (255, 0, 255, 255), 1, 8)
        cv.Line(imgRGBA, (320 + offsetX, 0), (320 + offsetX, 479),
                (255, 0, 255, 255), 1, 8)

    cv.ShowImage(HSVWindow, imgRGBA)
    cv.ShowImage(original, frame)

    # Mouse callback receives the HSV and BGR pixel arrays for inspection.
    cv.SetMouseCallback(original, onMouseMove, [
        cv.CV_EVENT_MOUSEMOVE,
        numpy.asarray(cv.GetMat(imgHsv)),
        numpy.asarray(cv.GetMat(frame))
    ])
    #cv.SetMouseCallback(HSVWindow, onMouseMove, [cv.CV_EVENT_MOUSEMOVE, numpy.asarray(cv.GetMat(imgHsv)), numpy.asarray(cv.GetMat(frame))])

    #cv.ShowImage(filtered, imgHsv2)
    c = cv.WaitKey(10)

    if (str(c) == "27"):  #if ESC is pressed
        print("Thank You!")
        done = True
    if (str(c) == "99"):  #'c' for calibration
        calibration(int(input("How many data points: ")))
    def run(self):
        """ROI-selection loop: pick a region, preview its hue histogram.

        A stripped-down CamShift demo: the tracking step is commented out,
        leaving hue-histogram back-projection and interactive rectangle
        selection.  Esc dumps the current selection to 'newtree.yaml' and
        quits; 'b' toggles between frame and back-projection views.
        """
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
        backproject_mode = False
        while True:
            frame = 0
            frame = self.capture  #cv.QueryFrame( self.capture )

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject([self.hue], backproject, hist)
            #             if self.track_window and is_rect_nonzero(self.track_window):
            #                 crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            #                 (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
            #                 self.track_window = rect

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram

            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                #cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x, y, w, h = self.selection
                cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

                # Histogram is taken from the hue plane of the selection.
                sel = cv.GetSubRect(self.hue, self.selection)
                cv.CalcArrHist([sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)


#             elif self.track_window and is_rect_nonzero(self.track_window):
#                 cv.EllipseBox( frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )

            if not backproject_mode:
                cv.ShowImage("SelectROI", frame)
            else:
                cv.ShowImage("SelectROI", backproject)
            cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:
                # Persist the selected rectangle for later runs.
                f = open('newtree.yaml', "w")
                yaml.dump(self.selection, f)
                f.close()
                break
            elif c == ord("b"):
                backproject_mode = not backproject_mode
# Example #14
# 0
def histogramequalization():
    """Load the image chosen via getpath() as greyscale, equalise its
    histogram, and display source and result until a key is pressed."""
    source = cv.LoadImage(getpath(), cv.CV_LOAD_IMAGE_GRAYSCALE)
    equalized = cv.CreateImage((source.width, source.height),
                               source.depth, source.channels)
    cv.EqualizeHist(source, equalized)
    for title in ("SourceImage", "EqualizedImage"):
        cv.NamedWindow(title, 1)
    cv.ShowImage("SourceImage", source)
    cv.ShowImage("EqualizedImage", equalized)
    cv.WaitKey(0)
# Example #15
# 0
def pupilRemove(image, region):
    """ Crops the eye photo to show only the pupil
            and then returns it.

        Args:
            tuple region - the coordinates of the pupil circle in
            the form (centerX, centerY, radius)

        Return:
            photo  - TODO: I'm not sure of the type
        """
    # Converting to (topLeftX, topLeftY, width, length)
    if region[0] - region[2] < 0:
        topLeftX = 0
    else:
        topLeftX = region[0] - region[2]

    if region[1] - region[2] < 0:
        topLeftY = 0
    else:
        topLeftY = region[1] - region[2]

    if region[2] < 0:
        width = 0
    else:
        width = region[2] + region[2]

    if region[2] < 0:
        length = 0
    else:
        length = region[2] + region[2]

    crop = (topLeftX, topLeftY, width, length)
    if DEBUG:
        print "Region passed to pupil remove: " + str(region)
        print "And here's crop: " + str(crop)
        print "Before crop we have type: " + str(type(image))
        print image
        cv.ShowImage("We're cropping", image)
        cv.WaitKey(0)
        cv.DestroyWindow("We're cropping")
    if crop[0] < 0:
        crop[0] = 0
    if crop[1] < 0:
        crop[1] = 0
    if crop[2] < 0:
        crop[2] = abs(crop[2])
    else:
        pupil = cv.GetSubRect(image, crop)
        if DEBUG:
            print "After crop we have type: " + str(type(pupil))
            cv.ShowImage("Cropped", pupil)
            cv.WaitKey(0)
            cv.DestroyWindow("Cropped")
        return pupil
    return None
def medianfiltering():
    """Median-filter (9x9) the image at the global path ``k`` and show the
    original next to the filtered result until a key is pressed."""
    source = cv.LoadImageM(k, cv.CV_LOAD_IMAGE_COLOR)
    filtered = cv.CreateImage((source.width, source.height), 8, source.channels)
    cv.SetZero(filtered)
    for title in ("Median Filtering", "After Filtering"):
        cv.NamedWindow(title, 1)
    cv.Smooth(source, filtered, cv.CV_MEDIAN, 9, 9)
    cv.ShowImage("Median Filtering", source)
    cv.ShowImage("After Filtering", filtered)
    cv.WaitKey(0)
def displayImage(infoList,orig):
    result = False
    if infoList == None:
        print "Control Dot Not Located"
        cv.ShowImage('win',orig)
    else:
        cv.ShowImage('thresh',infoList[1])
        cv.ShowImage('win', infoList[0][2])
        result = True
    return (result)
# Example #18
# 0
 def __init__(self, img0):
     """Set up the segmentation demo: thresholds, windows and trackbars.

     Args:
         img0: source image; two working clones are kept (image0/image1).
     """
     self.thresh1 = 255  # first threshold, driven by trackbar "Thresh1"
     self.thresh2 = 30  # second threshold, driven by trackbar "Thresh2"
     self.level =4  # level parameter used elsewhere -- TODO confirm meaning
     self.storage = cv.CreateMemStorage()
     cv.NamedWindow("Source", 0)
     cv.ShowImage("Source", img0)
     cv.NamedWindow("Segmentation", 0)
     # Trackbar changes invoke the set_thresh1/set_thresh2 callbacks.
     cv.CreateTrackbar("Thresh1", "Segmentation", self.thresh1, 255, self.set_thresh1)
     cv.CreateTrackbar("Thresh2", "Segmentation",  self.thresh2, 255, self.set_thresh2)
     self.image0 = cv.CloneImage(img0)
     self.image1 = cv.CloneImage(img0)
     cv.ShowImage("Segmentation", self.image1)
def Color_callibration(capture):
    """Interactively sample an object's colour; return (mini, maxi) bounds.

    The user holds the object inside the marked circle and presses Esc;
    then 100 frames are sampled at pixel (row 300, col 200) in YCrCb
    space.  Per-channel min/max values, widened by a safety margin and
    clamped to [0, 255], are returned as a (mini, maxi) pair.
    """
    vals = []
    bgr = []
    mini = [255, 255, 255]
    maxi = [0, 0, 0]
    cv.NamedWindow("BGR", 0)
    print 'Please Put Your color in the circular area.Press ESC to start Callibration:'
    # Preview loop: wait for the user to position the object, Esc starts.
    while 1:
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)  # mirror for a natural preview
        cv.Circle(image, (int(200), int(300)), 10, cv.CV_RGB(255, 255, 255), 4)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:
            break
    print 'Starting Callibration...Analyzing the Object...'
    for i in range(0, 100):
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Smooth(image, image, cv.CV_MEDIAN, 3, 0)
        # Despite the variable name, the conversion below is BGR -> YCrCb.
        imagehsv = cv.CreateImage(cv.GetSize(image), 8, 3)
        cv.CvtColor(image, imagehsv, cv.CV_BGR2YCrCb)
        vals = cv.Get2D(imagehsv, 300, 200)  # sample at row 300, col 200
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 1, 0, 2, 8)
        cv.PutText(
            image,
            "  " + str(vals[0]) + "," + str(vals[1]) + "," + str(vals[2]),
            (200, 300), font, (55, 25, 255))
        # Track the running per-channel min/max.
        for j in range(0, 3):
            if (vals[j] < mini[j]): mini[j] = vals[j]
            if (vals[j] > maxi[j]): maxi[j] = vals[j]
        cv.Circle(image, (int(200), int(300)), 10, cv.CV_RGB(255, 255, 255), 4)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:
            break
    print 'Analyzation Completed'
    # Widen the sampled range; first channel gets a larger margin.
    mini[0] -= 35
    mini[1] -= 15
    mini[2] -= 15
    maxi[0] += 35
    maxi[1] += 15
    maxi[2] += 15
    # Clamp bounds into valid pixel range.
    for i in range(0, 3):
        if (mini[i] < 0):
            mini[i] = 0
        if (maxi[i] > 255):
            maxi[i] = 255
    cv.DestroyWindow("BGR")
    bgr = (mini, maxi)
    return bgr
# Example #20
# 0
def test_model(img_kind):
	"""Evaluate the saved SVM model for ``img_kind`` on the face images.

	Positives are data/f_<img_kind>*.jpg; every other data/f_*_*.jpg is
	treated as a negative.  Each image is shown briefly while its features
	are classified; accuracy statistics are printed at the end.
	"""
	subdir = "data/"
	model = svmutil.svm_load_model(subdir + img_kind + '.model')
	print "Finished Loading Model"

	total_count = 0
	correct_count = 0
	wrong_count = 0

	# Split the data set into positives (the_ones) and negatives (the_others).
	the_ones = glob.glob(subdir + "f_" + img_kind + "*.jpg")
	all_of_them = glob.glob(subdir + "f_*_*.jpg")
	the_others = []

	for x in all_of_them:
		total_count += 1
		if the_ones.count(x) < 1:
			the_others.append(x)

	# Positives should be classified as +1.
	for x in the_ones:
		img = cv.LoadImageM(x)
		cv.ShowImage("img", img)
		cv.WaitKey(10)
		img_features = get_image_features(img, True, img_kind)
		predict_input_data = []
		predict_input_data.append(img_features)
		(val, val_2, val_3) = svmutil.svm_predict([1], predict_input_data, model)
		if int(val[0]) == 1:
			print 'correct'
			correct_count += 1
		else:
			wrong_count += 1

	# Negatives should be classified as -1.
	for x in the_others:
		img = cv.LoadImageM(x)
		cv.ShowImage("img", img)
		cv.WaitKey(10)
		img_features = get_image_features(img, True, img_kind)
		predict_input_data = []
		predict_input_data.append(img_features)
		(val, val_2, val_3) = svmutil.svm_predict([1], predict_input_data, model)
		if int(val[0]) == -1:
			correct_count += 1
		else:
			wrong_count += 1

	print "Total Pictures: " + str(total_count)
	print "Correct: " + str(correct_count)
	print "Wrong: " + str(wrong_count)
	print "Accuracy: " + str(correct_count/float(total_count) * 100) + '%'
# Example #21
# 0
def live_test():
	"""Webcam demo: classify the current facial expression on demand.

	Loads one SVM per expression plus the frontal-face Haar cascade, then
	shows the camera feed.  Space runs face detection and classification
	and prints the best-scoring expression; Esc quits.
	"""
	subdir = 'data/'
	img_kinds = ["happy", "anger", "neutral", "surprise"]
	models = {}
	# load all the models
	print "Loading Models"
	for img_kind in img_kinds:
		print 'loading for: ' + img_kind
		models[img_kind] = svmutil.svm_load_model(subdir + img_kind + '.model')
	print "---------------------"

	print "Loading cascade"
	face_cascade = "haarcascades/haarcascade_frontalface_alt.xml"
	hc = cv.Load(face_cascade)
	print "---------------------"

	capture = cv.CaptureFromCAM(0)
	while True:
		img = cv.QueryFrame(capture)
		cv.ShowImage("camera",img)
		key_pressed = cv.WaitKey(50)
		if key_pressed == 27:  # Esc
			break
		elif key_pressed == 32:  # Space
			print '~> KEY PRESSED <~'
			# do face detection
			print 'detecting face'
			returned = face.handel_camera_image(img, hc)
			if returned == None:
				print "No face || more than one face"
				pass
			else:
				(img_o, img_face) = returned
				cv.ShowImage("face",img_face)
				# get features from the face
				results = {}
				for img_kind in img_kinds:
					test_data = get_image_features(img_face, True, img_kind)
					predict_input_data = []
					predict_input_data.append(test_data)

					# do svm query
					(val, val_2, label) = svmutil.svm_predict([1] ,predict_input_data, models[img_kind])
					results[img_kind] = label[0][0]
					print img_kind + str(results[img_kind])

				# Highest score wins: sort by value, take the last entry.
				sorted_results = sorted(results.iteritems(), key=operator.itemgetter(1))
				print sorted_results[len(sorted_results)-1][0]

				print "---------------------"
# Example #22
# 0
# File: lrf.py Project: Mnemonic7/lrf
def show_camera_and_get_images(capture, laser_ctrl, mapx=None, mapy=None):
    """Run the live preview until a key is pressed, then capture and
    display an image pair via get_images.

    Returns the two captured images (img1, img2).
    """
    while True:
        _, key = show_camera_and_wait_for_key(capture, 5, mapx, mapy)
        if key < 0:
            # No key yet -- keep previewing.
            continue
        img1, img2 = get_images(capture, laser_ctrl, mapx, mapy)

        for name in ('img1', 'img2'):
            cv.NamedWindow(name, cv.CV_WINDOW_AUTOSIZE)
        cv.ShowImage('img1', img1)
        cv.ShowImage('img2', img2)

        # Place the two result windows side by side.
        cv.MoveWindow('img1', 10, 10)
        cv.MoveWindow('img2', 660, 10)
        return img1, img2
# Example #23
# 0
 def on_mouse(self, event, x, y, flags, param):
     """Freehand-draw mouse callback: paint while the left button is held.

     Strokes are drawn onto self.image and every channel plane in
     self.chans, then the affected windows are refreshed.
     """
     pt = (x, y)
     if event == cv.CV_EVENT_LBUTTONUP or not (flags
                                               & cv.CV_EVENT_FLAG_LBUTTON):
         # Button released (or not held): end the current stroke.
         self.prev_pt = None
     elif event == cv.CV_EVENT_LBUTTONDOWN:
         self.prev_pt = pt
     elif event == cv.CV_EVENT_MOUSEMOVE and (flags
                                              & cv.CV_EVENT_FLAG_LBUTTON):
         if self.prev_pt:
             # Draw a 5px white segment from the previous point on every plane.
             for im in [self.image] + self.chans:
                 cv.Line(im, self.prev_pt, pt, cv.ScalarAll(255), 5, 8, 0)
         self.prev_pt = pt
         cv.ShowImage("image", self.image)
         cv.ShowImage("LSB", self.chans[0])