def get_image(self):
        """
        Retrieve an image of the correct type from the Kinect, depending on the
        type that was passed to the constructor.

        Since the classes share an OpenNI camera instance, only obtain the image
        at the set update frequency.
        """
        global NI_grabtime
        global NI_camera

        if time.time() > NI_grabtime + self.grab_interval:
            cv.GrabFrame(NI_camera)
            NI_grabtime = time.time()

        if self.img_type == "depth":
            depth = cv.RetrieveFrame(NI_camera, cv.CV_CAP_OPENNI_DEPTH_MAP)
            temp = cv.CreateImage(cv.GetSize(depth), cv.IPL_DEPTH_8U, 1)
            cv.ConvertScale(depth, temp, 0.0625, 0.0)


#            temp = doUsefulConvert8(cv2array(depth))
        elif self.img_type == "rgb":
            temp = cv.RetrieveFrame(NI_camera, cv.CV_CAP_OPENNI_BGR_IMAGE)
        elif self.img_type == "pcl":
            temp = cv.RetrieveFrame(NI_camera,
                                    cv.CV_CAP_OPENNI_POINT_CLOUD_MAP)

        if temp is None:
            raise Exception("Unable to start Kinect, check connection")
        return temp
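For comparison, a minimal sketch of the same grab/retrieve split with the modern cv2 API; the OpenNI-enabled cv2 build, the device constant and the keyword form of retrieve() are assumptions, not part of the original class.

# Hedged sketch, not from the original project: assumes cv2 was built with OpenNI support.
import cv2

cap = cv2.VideoCapture(cv2.CAP_OPENNI2)      # open the first OpenNI device
if cap.grab():                               # a single grab serves every retrieve below
    ok_d, depth = cap.retrieve(flag=cv2.CAP_OPENNI_DEPTH_MAP)  # 16-bit depth map in mm
    ok_c, bgr = cap.retrieve(flag=cv2.CAP_OPENNI_BGR_IMAGE)    # 8-bit BGR image
    if ok_d:
        depth8 = cv2.convertScaleAbs(depth, alpha=0.0625)      # same 1/16 scaling as above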
Example #2
def TrackFile(anim):
    tracks = cvb.Tracks()
    capture = cv.CreateFileCapture(anim)
    cv.GrabFrame(capture)
    img = cv.RetrieveFrame(capture)
    frame = cv.CreateImage(cv.GetSize(img), img.depth, img.nChannels)
    cnt = 1
    while cv.GrabFrame(capture):
        # Capture Frames
        img = cv.RetrieveFrame(capture)
        cv.ResetImageROI(frame)
        cv.ConvertScale(img, frame, 1, 0)
        cv.Threshold(frame, frame, 100, 200, cv.CV_THRESH_BINARY)
        #rct=cv.Rectangle(0, 25, 383, 287)
        cv.SetImageROI(frame, (0, 25, 383, 287))
        chB = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(frame, chB, None, None, None)
        labelImg = cv.CreateImage(cv.GetSize(frame), cvb.IPL_DEPTH_LABEL, 1)

        # Get Blobs and try Update Tracks
        blobs = cvb.Blobs()
        result = cvb.Label(chB, labelImg, blobs)
        cvb.FilterByArea(blobs, 500, 1000)

        # try/except blocks are used here just so the script doesn't crash when no blobs are present
        try:
            print type(blobs.items()[0][1])
        except:
            pass
        cvb.UpdateTracks(blobs, tracks, 5., 10, 0)
        try:
            print type(blobs.items()[0][1])
        except:
            pass
        try:
            print type(tracks.items()[0][1])
        except:
            pass
        imgOut = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
        cv.Zero(imgOut)
        cvb.RenderBlobs(
            labelImg, blobs, frame, imgOut, cvb.CV_BLOB_RENDER_COLOR
            | cvb.CV_BLOB_RENDER_CENTROID | cvb.CV_BLOB_RENDER_BOUNDING_BOX)

        # Save images to see which blobs are being detected.
        cnt = cnt + 1
        print cnt
        cv.SaveImage('blobs' + str(cnt) + '.png', imgOut)

    return tracks, blobs
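A hedged sketch of the same file-capture loop with cv2; cv2.connectedComponentsWithStats is used here as a rough stand-in for cvb.Label/cvb.FilterByArea, and 'video.avi' is a placeholder path.

import cv2

cap = cv2.VideoCapture('video.avi')          # placeholder file name
while cap.grab():
    ok, frame = cap.retrieve()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)
    n, labels, stats, centroids = cv2.connectedComponentsWithStats(binary)
    # keep the components whose area falls in the same 500..1000 pixel window as above
    blobs = [i for i in range(1, n) if 500 <= stats[i, cv2.CC_STAT_AREA] <= 1000]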
Example #3
 def ar_video_for_ui(self):
     cap = cv.CaptureFromCAM(0)
     if not cap:
         sys.stderr.write("failed CaptureFromCAM\n")
     while True:
         if not cv.GrabFrame(cap):
             break
         frame = cv.RetrieveFrame(cap)
         sys.stdout.write(frame.tostring())
Example #4
 def retrieve(self):
     '''
      The returned image also includes a field named orig_frame, which holds
      the original image before rescaling.
     
     @returns: the frame rescaled to a given size.
     '''
     frame = cv.RetrieveFrame(self.cv_capture)
     im = pv.Image(self.resize(frame))
     im.orig_frame = pv.Image(frame)
     return im
Example #5
 def dequeue(self):
     # flush
     cv.GrabFrame(self.cam)
     im = cv.RetrieveFrame(self.cam)
     #cv.Flip(im, None, 1)
     img = cv.CreateImage(cv.GetSize(im), 8, 1)
     cv.CvtColor(im, img, cv.CV_BGR2GRAY)
     im = img
     a = np.fromstring(im.tostring(),
             dtype=self.depth2dtype[im.depth],
             count=im.width*im.height*im.nChannels)
     a.shape = (im.height, im.width, im.nChannels)
     return a[:, :, 0]
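With cv2 the retrieved frame is already a numpy array, so the tostring()/fromstring round trip above is unnecessary; a hedged sketch (camera index 0 assumed):

import cv2

cap = cv2.VideoCapture(0)
cap.grab()                                       # flush one stale frame, as above
ok, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)   # (height, width) uint8 array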
Example #6
def getDVframes(dvfile, output_dir, frame_start, frame_end):
    """
    Snip out the selected frames from the DV file
    """
    capture = cv.CaptureFromFile(dvfile)
    #  print "Dimensions: ", cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH), \
    #        "x", cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT)
    numFrames = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)
    # print "Num frames: ", numFrames

    for i in range(frame_start, frame_end):
        print "Exporting frame", i
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_POS_FRAMES, i)
        img = cv.RetrieveFrame(capture)
        cv.SaveImage(os.path.join(output_dir, 'frame' + str(i)) + '.png', img)
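A hedged cv2 sketch of the same frame export; the function name is illustrative, and frame-accurate seeking via CAP_PROP_POS_FRAMES depends on the container/codec.

import os
import cv2

def get_dv_frames(dvfile, output_dir, frame_start, frame_end):
    cap = cv2.VideoCapture(dvfile)
    for i in range(frame_start, frame_end):
        cap.set(cv2.CAP_PROP_POS_FRAMES, i)      # seek to frame i (approximate for some codecs)
        ok, img = cap.read()
        if ok:
            cv2.imwrite(os.path.join(output_dir, 'frame' + str(i) + '.png'), img)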
Example #7
def camera_capture():

    # /dev/video0
    c = cv.CaptureFromCAM(0)
    #assert type(c) ==  "cv.Capture"

    # or use QueryFrame. It's the same
    cv.GrabFrame(c)
    image = cv.RetrieveFrame(c)
    #image = cv.QueryFrame(c)
    assert image is not None

    dst = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_16S, 3)
    #im = cv.CloneImage(image)
    cv.Laplace(image, dst)  # writes the Laplacian into dst (returns None)
    cv.SaveImage("my-camera.png", dst)

    print cv.GetCaptureProperty(c, cv.CV_CAP_PROP_FRAME_HEIGHT)
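A hedged cv2 sketch of the same Laplacian snapshot: cv2.Laplacian returns the result rather than filling a preallocated 16S image, and convertScaleAbs maps it back to 8-bit for saving.

import cv2

cap = cv2.VideoCapture(0)                    # /dev/video0
ok, image = cap.read()
assert ok and image is not None
lap = cv2.Laplacian(image, cv2.CV_16S)       # signed 16-bit output to avoid clipping
cv2.imwrite("my-camera.png", cv2.convertScaleAbs(lap))
print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))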
Example #8
'''capture.py'''
import cv, sys
cap = cv.CaptureFromCAM(0)  # 0 is for /dev/video0
while True:
    if not cv.GrabFrame(cap): break
    frame = cv.RetrieveFrame(cap)
    sys.stdout.write(frame.tostring())
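A hedged cv2/Python 3 version of capture.py; raw BGR bytes go to sys.stdout.buffer so the stream can still be piped to another process.

import sys
import cv2

cap = cv2.VideoCapture(0)        # 0 is for /dev/video0
while True:
    ok, frame = cap.read()
    if not ok:
        break
    sys.stdout.buffer.write(frame.tobytes())   # raw BGR bytes, like frame.tostring() above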
Example #9
#!/usr/bin/python
import cv

video = cv.CaptureFromFile(
    "rtsp://admin:@192.168.0.19/user=admin&password=&channel=1&stream=0.sdp?")
cv.NamedWindow("IP Camera", cv.CV_WINDOW_AUTOSIZE)
#contador = 1
while True:
    #  img = cv.QueryFrame(video)
    #  cv.SaveImage("/var/www/html/teste.jpg",img)
    cv.GrabFrame(video)
    frame = cv.RetrieveFrame(video)
    cv.ShowImage("IP Camera", frame)
    cv.WaitKey(150)
#   print contador
#   contador = contador + 1
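A hedged cv2 sketch of the same RTSP display loop (the URL is the one above; Esc exits instead of running forever).

import cv2

video = cv2.VideoCapture(
    "rtsp://admin:@192.168.0.19/user=admin&password=&channel=1&stream=0.sdp?")
while True:
    ok, frame = video.read()
    if not ok:
        break
    cv2.imshow("IP Camera", frame)
    if cv2.waitKey(150) & 0xFF == 27:     # Esc quits
        break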
Example #10
#speed = [1, 1]
#black = 0, 0, 0

#print os.path.isfile('/dev/video0')
#print os.path.exists('/dev/video0')
if not os.path.exists('/dev/video0'):
    print "no /dev/video0, exiting"
    sys.exit(2)

try:
    cam = cv.CreateCameraCapture(0)
    cv.SetCaptureProperty(cam, cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    cv.SetCaptureProperty(cam, cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
except:
    print "system error, exiting"
    sys.exit(1)

k = 0
t = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
filename = '%s.jpg' % t
while 1:
    k += 1
    if k > 8: break
    try:
        cv.GrabFrame(cam)
        img = cv.RetrieveFrame(cam)
        cv.SaveImage(filename, img)
    except:
        pass
    cv.WaitKey(1500)
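A hedged cv2 sketch of the same snapshot loop; whether the driver honours the requested 640x480 depends on the camera, and the file is overwritten on each pass exactly as above.

import time
import cv2

cam = cv2.VideoCapture(0)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
filename = '%s.jpg' % time.strftime('%Y%m%d%H%M%S')
for _ in range(8):
    ok, img = cam.read()
    if ok:
        cv2.imwrite(filename, img)
    time.sleep(1.5)                  # roughly the cv.WaitKey(1500) pause above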
Example #11
    def getCoordinates(self, target="ball", debug=False):
        t = time.time()
        """
        This function returns the best coordinates found by thresholding the
        received image with the thresholds chosen for the given target.
        """
        """Get the latest frame from the camera"""
        global cam, lock
        lock.acquire()
        try:
            cv.GrabFrame(cam)
            frame = cv.RetrieveFrame(cam)
        finally:
            lock.release()
        """Initialize the coordinates to -1, which means that the object is not found"""
        x = -1
        y = -1
        """Prepare the image for thresholding"""
        #cv.Smooth(thresholded_frame, thresholded_frame, cv.CV_GAUSSIAN, 5, 5)
        cv.Smooth(frame, frame, cv.CV_BLUR, 3)
        cv.CvtColor(frame, self.hsv_frame, cv.CV_BGR2HSV)
        """Threshold the image according to the chosen thresholds"""
        if target == "ball":
            cv.InRangeS(self.hsv_frame, self.ball_threshold_low,
                        self.ball_threshold_high, self.thresholded_frame)
        elif target == "blue gate":
            cv.InRangeS(self.hsv_frame, self.blue_gate_threshold_low,
                        self.blue_gate_threshold_high, self.thresholded_frame)
        elif target == "yellow gate":
            cv.InRangeS(self.hsv_frame, self.yellow_gate_threshold_low,
                        self.yellow_gate_threshold_high,
                        self.thresholded_frame)
        elif target == "black":
            cv.InRangeS(self.hsv_frame, self.black_threshold_low,
                        self.black_threshold_high, self.thresholded_frame)
        elif target == "white":
            cv.InRangeS(self.hsv_frame, self.white_threshold_low,
                        self.white_threshold_high, self.thresholded_frame)

        cv.InRangeS(self.hsv_frame, self.green_threshold_low,
                    self.green_threshold_high, self.thresholded_field)
        """Now use some function to find the object"""
        blobs_image = SimpleCV.Image(self.thresholded_frame)
        field_image = SimpleCV.Image(self.thresholded_field)

        blobs = blobs_image.findBlobs(minsize=2)
        if blobs:
            if target == "ball":
                for i in range(len(blobs)):
                    i = len(blobs) - 1 - i
                    pos_x = blobs[i].maxX()
                    pos_y = blobs[i].maxY()
                    on_field = False
                    for py in range(0, pos_y):
                        if field_image.getPixel(pos_x, py) == (255, 255, 255):
                            on_field = True
                            break
                    if on_field:
                        x, y = pos_x, pos_y
                        break
            else:
                x, y = blobs[-1].coordinates()
        """Old, openCV using contours
        contours = cv.FindContours(cv.CloneImage(thresholded_frame), cv.CreateMemStorage(),mode=cv.CV_RETR_EXTERNAL)
        
        if len(contours)!=0:
            #determine the objects moments and check that the area is large  
            #enough to be our object 
            moments = cv.Moments(contours,1) 
            moment10 = cv.GetSpatialMoment(moments, 1, 0)
            moment01 = cv.GetSpatialMoment(moments, 0, 1)
            area = cv.GetCentralMoment(moments, 0, 0) 
            
            #there can be noise in the video so ignore objects with small areas 
            if area > 2: 
                #determine the x and y coordinates of the center of the object 
                #we are tracking by dividing the 1, 0 and 0, 1 moments by the area 
                x = moment10/area
                y = moment01/area"""
        if debug:
            cv.ShowImage("Camera", self.thresholded_frame)
            #thresholded_frame=SimpleCV.Image(thresholded_frame)
            #thresholded_frame.show()
        print time.time() - t

        return x, y
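A hedged cv2 sketch of the thresholding step plus a contour-moment centroid, close to the commented-out cv.Moments code above; the HSV bounds are placeholders, not the project's calibrated thresholds, and the two-value findContours return assumes OpenCV >= 4.

import cv2
import numpy as np

def find_target(frame, low=(0, 120, 120), high=(10, 255, 255)):
    blurred = cv2.blur(frame, (3, 3))                 # same 3x3 box blur as cv.Smooth above
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, np.array(low), np.array(high))
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return -1, -1                                  # object not found
    m = cv2.moments(max(contours, key=cv2.contourArea))
    if m['m00'] <= 2:                                  # same small-area noise cutoff as above
        return -1, -1
    return int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])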
Example #12
        else:
            fg = averages[clusters[1][0]]
            bg = averages[clusters[0][0]]
            corner = clusters[1][0]

        if (fg > bg):
            return (corner, "w")
        else:
            return (corner, "b")


#solve_camera(xyz,xy)
print sys.argv[1]
capture = cv.CaptureFromFile(sys.argv[1])
cv.GrabFrame(capture)
img = cv.RetrieveFrame(capture)
img2 = cv.CloneImage(img)
eig_image = cv.CreateMat(img.height, img.width, cv.CV_32FC1)
temp_image = cv.CreateMat(img.height, img.width, cv.CV_32FC1)
img32f = cv.CreateImage((img.width, img.height), cv.IPL_DEPTH_32F, 3)
cv.Convert(img, img32f)
img_gs = cv.CreateImage((img.width, img.height), cv.IPL_DEPTH_32F, 1)
cv.CvtColor(img32f, img_gs, cv.CV_RGB2GRAY)

font = cv.InitFont(cv.CV_FONT_HERSHEY_PLAIN, 1.0, 1.0)


def tand(alpha):
    return tan(alpha * 3.1415926 / 180)

Example #13
        self.running = False


"""Initializing the camera and a lock"""
global _camera, lock
_camera = cv.CreateCameraCapture(0)
lock = threading.RLock()
"""Starting the thread"""
_camera_polling_thread = FrameBufferThread()
_camera_polling_thread.start()

t = time.time()
cv.NamedWindow("Camera", cv.CV_WINDOW_AUTOSIZE)

while 1:
    """Main thread"""
    t = time.time()
    lock.acquire()
    try:
        """Getting the image"""
        img = cv.RetrieveFrame(_camera)
        print time.time() - t, " time spent on retrieving the frame"
    finally:
        lock.release()
    cv.ShowImage("Camera", img)
    if cv.WaitKey(1) == 27:
        break
"""Disabling the thread and closing the window"""
_camera_polling_thread.stop()
cv.DestroyAllWindows()
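A hedged sketch of the same polling pattern with cv2 and threading: a background thread keeps grabbing so the main thread can always retrieve a fresh frame under the lock (camera index and sleep interval are illustrative).

import threading
import time
import cv2

camera = cv2.VideoCapture(0)
lock = threading.RLock()
running = True

def poll():
    while running:
        with lock:
            camera.grab()            # keep discarding stale frames from the driver buffer
        time.sleep(0.01)             # small pause so the grab loop does not spin at 100% CPU

threading.Thread(target=poll, daemon=True).start()
with lock:
    ok, img = camera.retrieve()      # latest grabbed frame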
Example #14
    def getCoordinates(self, target="ball", debug=False):
        """
        This function returns the best coordinates found by thresholding the
        received image with the thresholds chosen for the given target.
        """
        """Get the latest frame from the camera"""
        global cam, lock
        lock.acquire()
        try:
            cv.GrabFrame(cam)
            frame = cv.RetrieveFrame(cam)
        finally:
            lock.release()
        """Initialize the coordinates to -1, which means that the object is not found"""
        x = -1
        y = -1
        """Prepare the image for thresholding"""
        #cv.Smooth(thresholded_frame, thresholded_frame, cv.CV_GAUSSIAN, 5, 5)
        cv.Smooth(frame, frame, cv.CV_BLUR, 3)
        cv.CvtColor(frame, self.hsv_frame, cv.CV_BGR2HSV)
        """Threshold the image according to the chosen thresholds"""
        if target == "ball":
            cv.InRangeS(self.hsv_frame, self.ball_threshold_low,
                        self.ball_threshold_high, self.thresholded_frame)
        elif target == "blue gate":
            cv.InRangeS(self.hsv_frame, self.blue_gate_threshold_low,
                        self.blue_gate_threshold_high, self.thresholded_frame)
        elif target == "yellow gate":
            cv.InRangeS(self.hsv_frame, self.yellow_gate_threshold_low,
                        self.yellow_gate_threshold_high,
                        self.thresholded_frame)
        elif target == "black":
            cv.InRangeS(self.hsv_frame, self.black_threshold_low,
                        self.black_threshold_high, self.thresholded_frame)
        elif target == "white":
            cv.InRangeS(self.hsv_frame, self.white_threshold_low,
                        self.white_threshold_high, self.thresholded_frame)
        else:  #green
            cv.InRangeS(self.hsv_frame, self.green_threshold_low,
                        self.green_threshold_high, self.thresholded_frame)
        """Now use some function to find the object"""
        blobs_image = SimpleCV.Image(self.thresholded_frame)
        blobs = blobs_image.findBlobs()
        if blobs:
            for i in range(len(blobs)):
                line = blobs[i]
                angle = line.angle()
                pos_x, pos_y = line.coordinates()
                if line.maxY() > 100:
                    y = line.maxY()
                    if angle > 0:
                        x = line.maxX()
                    else:
                        x = line.minX()
                    break

        if debug:
            cv.ShowImage("Camera", self.thresholded_frame)
            #thresholded_frame=SimpleCV.Image(thresholded_frame)
            #blobs_image.show()

        return x, y
Example #15
            #if sum < 0:
                #sum = 0
            sum = sum % 255
            npimage[Y][X] = (sum, sum, sum)
            #if sum not in [0, 255]:
                #print npimage[Y][X]
        print Y
    print "iteration completed"
    return npimage


capture = cv.CaptureFromCAM(-1)
#cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320)
#cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
cv.GrabFrame(capture)
image = cv.RetrieveFrame(capture)
size = width, height = image.width, image.height
screen = pygame.display.set_mode(size)
pygame.mouse.set_visible(False)
# have to convert from BGR to RGB
#  see http://www.pygame.org/docs/ref/image.html#pygame.image.tostring
image = pygame.image.fromstring(image.tostring()[::-1], (image.width, image.height), "RGB")
image = pygame.transform.flip(image, True, True)
rect = image.get_rect()
black = 0, 0, 0
square_slices = get_square_slices()
start = time.time()
frames = 0
while 1:
    cv.GrabFrame(capture)
    image = cv.RetrieveFrame(capture)
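A hedged sketch of the BGR-to-RGB hand-off to pygame with cv2 arrays, replacing the reversed-tostring trick above; note that pygame.surfarray expects a (width, height, 3) layout.

import cv2
import pygame

pygame.init()
cap = cv2.VideoCapture(0)
ok, frame = cap.read()
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)                 # explicit channel swap instead of [::-1]
screen = pygame.display.set_mode((frame.shape[1], frame.shape[0]))
surface = pygame.surfarray.make_surface(rgb.swapaxes(0, 1))  # (h, w, 3) -> (w, h, 3)
screen.blit(surface, (0, 0))
pygame.display.flip()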
Example #16
def GetRawDartXY():
    global capture
    global image

    if calibration.debug:
        # the coordinates
        global x_coordinate
        global y_coordinate
        x_coordinate = 0
        y_coordinate = 0

        # use this event to wait for a mouse click
        global mouse_click_down
        mouse_click_down = Event()

        cv.NamedWindow(window_name, 1)
        cv.SetMouseCallback(window_name, on_mouse)

        while not mouse_click_down.is_set():
            if calibration.from_video:
                cv.GrabFrame(capture)
                image = cv.RetrieveFrame(capture)
            cv.ShowImage(window_name, image)
            cv.WaitKey(1)
        mouse_click_down.clear()

        ##        clone = cv.CloneImage(image)
        ##
        ##        cv.Circle(clone,(x_coordinate,y_coordinate),3,cv.CV_RGB(255, 0, 0),2)
        ##        cv.ShowImage(window_name, clone)

        return (x_coordinate, y_coordinate)

    else:
        motion = 0
        ##        capture = 0
        darts_found = list()
        detected_x = 0
        detected_y = 0
        no_dart_prev = True
        no_dart_prev_prev = True
        ##        draw = False

        ##        parser = OptionParser(usage = "usage: %prog [options]")
        ##        parser.add_option("-c", "--cascade", action="store", dest="cascade", type="str", help="Haar cascade file, default %default", default = "default.xml")
        ##        parser.add_option("-f", "--videofile", action="store", dest="videofile", type="str", help="Video capture file, default is to capture from camera", default = None)
        ##
        ##        (options, args) = parser.parse_args()
        ##
        ##        cascade = cv.Load(options.cascade)
        ##        videofile = options.videofile
        ##
        ##        if videofile == None:
        ##            capture = cv.CreateCameraCapture(0)
        ##        else:
        ##            capture = cv.CaptureFromFile(videofile)

        cascade = cv.Load(calibration.cascadefile)

        ##        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 640)
        ##        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT , 480)

        cv.NamedWindow("Motion")
        cv.NamedWindow('Original')

        while True:
            image = cv.QueryFrame(capture)
            ##            cv.GrabFrame(capture)
            ##            image = cv.RetrieveFrame(capture)
            image_clone = cv.CloneImage(image)
            if (image):
                if (not motion):
                    motion = cv.CreateImage((image.width, image.height), 8, 1)
                    cv.Zero(motion)

                update_mhi(image, motion, 30)
                # no darts 2 frames ago
                no_dart_prev_prev = no_dart_prev
                # no darts 1 frame ago
                no_dart_prev = (len(darts_found) == 0)

                # when we transition from having a dart frame to not having a
                # dart frame, then the dart coordinates are stable and we can return
                if (no_dart_prev_prev == False) and (no_dart_prev == True):
                    # we can return here
                    ##                    draw = True
                    ##                    print 'Dart detected at ({0},{1})'.format(detected_x,detected_y)
                    ##                    cv.Circle(image_clone,(detected_x,detected_y),3,cv.RGB(255,0,0),3)
                    ##                    cv.ShowImage('Original',image_clone)

                    ##                    print 'at',cv.GetCaptureProperty(capture,cv.CV_CAP_PROP_POS_FRAMES)
                    return (detected_x, detected_y)

                darts_found = []

                detect(motion, cascade, darts_found)
                # if the number of pixels that changed is outside of the accepted
                # bound, then we say this is not a dart throw
                # the bounds are determined experimentally
                if not (pixels_lo_bound < cv.CountNonZero(motion) <
                        pixels_hi_bound):
                    darts_found = []

                for dart_rec in darts_found:
                    ##                    draw = False
                    x_dart = dart_rec[0]
                    y_dart = dart_rec[1]
                    width_dart = dart_rec[2]
                    height_dart = dart_rec[3]

                    cv.Rectangle(image_clone, (x_dart, y_dart),
                                 (x_dart + width_dart, y_dart + height_dart),
                                 cv.RGB(0, 0, 255), 3, 8, 0)

                    done = False
                    for x in range(x_dart, x_dart + width_dart, 1):
                        if done:
                            break
                        for y in range(y_dart + height_dart, y_dart, -1):
                            # Get2D indexes like a matrix, so we access the
                            # Yth row and the Xth column
                            if cv.Get2D(motion, y, x) != cv.Scalar(0):
                                if non_zero_in_conv_table(
                                        motion, x, y) > dart_tip_threshold:
                                    if no_dart_prev:
                                        detected_x = x
                                        detected_y = y
                                    else:
                                        if x < detected_x:
                                            detected_x = x
                                            detected_y = y
                                    done = True
                                    break
##                if draw:
##                    cv.Circle(image,(detected_x,detected_y),3,cv.RGB(255,0,0),3)
##
                cv.ShowImage('Original', image_clone)
                cv.ShowImage("Motion", motion)
                key = cv.WaitKey(1)
                if (key == 27):
                    break
            else:
                break

        raise Exception('GetDart() failed unexpectedly')
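A hedged, simplified sketch of the motion gating used above: plain frame differencing stands in for the update_mhi() motion-history image, and the pixel-count band mirrors the pixels_lo_bound/pixels_hi_bound check; all thresholds here are illustrative.

import cv2

pixels_lo_bound, pixels_hi_bound = 500, 50000        # illustrative band, tuned experimentally above

cap = cv2.VideoCapture(0)
ok, prev = cap.read()
prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    diff = cv2.absdiff(gray, prev_gray)
    _, motion = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY)
    prev_gray = gray
    if pixels_lo_bound < cv2.countNonZero(motion) < pixels_hi_bound:
        pass                                          # run the cascade / dart-tip search on 'motion' here
    cv2.imshow("Motion", motion)
    if cv2.waitKey(1) == 27:                          # Esc quits
        break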