Example #1
    def prepare(self):
        """Opens the video file for playback."""

        if self.experiment.get(u'canvas_backend') != u'legacy':
            raise osexception( \
             u'The video_player plug-in requires the legacy back-end!')

        item.item.prepare(self)
        path = self.experiment.get_file(self.video_src)
        # Open the video file
        self.video = cv.CreateFileCapture(path)
        # Convert the string to a boolean, for slightly faster evaluations in
        # the run phase
        self._fullscreen = self.fullscreen == u"yes"
        # The dimensions of the video
        self._w = int(
            cv.GetCaptureProperty(self.video, cv.CV_CAP_PROP_FRAME_WIDTH))
        self._h = int(
            cv.GetCaptureProperty(self.video, cv.CV_CAP_PROP_FRAME_HEIGHT))
        if self._fullscreen:
            # In fullscreen mode, the video is always shown in the top-left and the
            # temporary images need to be fullscreen size
            self._x = 0
            self._y = 0
            self.src_tmp = cv.CreateMat(self.experiment.height, \
             self.experiment.width, cv.CV_8UC3)
            self.src_rgb = cv.CreateMat(self.experiment.height, \
             self.experiment.width, cv.CV_8UC3)
        else:
            # Otherwise the location of the video depends on its dimensions and the
            # temporary image is the same size as the video
            self._x = max(0, (self.experiment.width - self._w) / 2)
            self._y = max(0, (self.experiment.height - self._h) / 2)
            self.src_rgb = cv.CreateMat(self._h, self._w, cv.CV_8UC3)
Example #2
def openfile(filepath):
    """open video at filepath and return dict with data"""
    # capture video from file
    video = cv.CaptureFromFile(filepath)

    # extract some information
    width = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FRAME_WIDTH))
    height = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FRAME_HEIGHT))
    fps = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FPS))
    fcount = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FRAME_COUNT))

    # print video data
    print "======"
    print "Opened file: " + filepath
    print "Width: " + str(width)
    print "Height: " + str(height)
    print "FPS: " + str(fps)
    print "Frame count: " + str(fcount)
    print "======"

    # store data in dict
    # TODO: check if necessary
    data = {}
    data["video"] = video
    data["height"] = height
    data["width"] = width
    data["fps"] = fps
    data["fcount"] = fcount

    return data
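All of the snippets on this page use the legacy cv module. For comparison, a minimal sketch of reading the same properties through the newer cv2.VideoCapture interface (assuming OpenCV 3 or later, where the property constants live directly on cv2; the name open_file_cv2 is only illustrative):

import cv2

def open_file_cv2(filepath):
    """Open a video with the cv2 API and return the same property dict."""
    cap = cv2.VideoCapture(filepath)  # replaces cv.CaptureFromFile
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    fcount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    return {"video": cap, "width": width, "height": height,
            "fps": fps, "fcount": fcount}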
Example #3
def saveObjects(objectDict, file, name, capture, frameNumber):
    finalFrame = frameNumber
    width = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)
    height = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT)
    fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
    #attributes are written in alphabetical order
    root = ET.Element(
        'objects', {
            'finalFrame': str(finalFrame),
            'fps': str(fps),
            'video': 'video',
            'videoWidth': str(width),
            'videoHeight': str(height)
        })

    for frameNumber in objectDict.keys():
        frame = ET.SubElement(root, 'frame', {'number': str(frameNumber)})
        for object in objectDict[frameNumber]:
            cObject = ET.SubElement(frame, 'object')
            x = ET.SubElement(cObject, 'x')
            x.text = str(object.x)
            y = ET.SubElement(cObject, 'y')
            y.text = str(object.y)
            w = ET.SubElement(cObject, 'w')
            w.text = str(object.w)
            h = ET.SubElement(cObject, 'h')
            h.text = str(object.h)
    tree = ET.ElementTree(root)
    tree.write(name + ".xml")
    print 'done writing file: ' + name + ".xml"
Example #4
 def run(self):
     print("camera")
     self.capture = cv.CaptureFromCAM(0)
     width = cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_WIDTH)
     height = cv.GetCaptureProperty(self.capture,
                                    cv.CV_CAP_PROP_FRAME_HEIGHT)
     self.writer = cv.CreateVideoWriter("myCamCapture.avi", -1, 30,
                                        (int(width), int(height)), 1)
     self.record()
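Note that a fourcc of -1, as used above, pops up a codec selection dialog on Windows. A minimal sketch of the same capture-and-writer setup with the cv2 API (assuming OpenCV 3 or later; record_cv2 and the XVID codec choice are only illustrative):

import cv2

def record_cv2(out_path="myCamCapture.avi"):
    cap = cv2.VideoCapture(0)  # replaces cv.CaptureFromCAM(0)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*"XVID")  # explicit codec instead of -1
    writer = cv2.VideoWriter(out_path, fourcc, 30.0, (width, height))
    return cap, writer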
Example #5
 def setup(self):
     self.width = int(cv.GetCaptureProperty(
         self.cam, cv.CV_CAP_PROP_FRAME_WIDTH))
     self.height = int(cv.GetCaptureProperty(
         self.cam, cv.CV_CAP_PROP_FRAME_HEIGHT))
     #self.cam.mode = self.mode
     self.max_framerate = 10
     self.add_trait("framerate", Range(1, 10, 1))
     self.add_trait("shutter", Range(-10, 10, 0))
     self.add_trait("gain", Range(0, 1, .5))
Example #6
 def _resolution_changed(self, value):
     width, height = value
     cv.SetCaptureProperty(self._capture, FRAME_WIDTH, width)
     cv.SetCaptureProperty(self._capture, FRAME_HEIGHT, height)
     if cv.GetCaptureProperty(self._capture, FRAME_WIDTH) != width:
         raise CameraError('Width {0} not supported'.format(width),
                           self.camera_number)
     if cv.GetCaptureProperty(self._capture, FRAME_HEIGHT) != height:
         raise CameraError('Height {0} not supported'.format(height),
                           self.camera_number)
Example #7
    def get_total_time(self):
        '''
            Grab the properties for the video
        '''

        self.total_frames = float(
            cv.GetCaptureProperty(self.vidcap, cv.CV_CAP_PROP_FRAME_COUNT))
        self.framerate = float(
            cv.GetCaptureProperty(self.vidcap, cv.CV_CAP_PROP_FPS))
        self.duration = self.total_frames / self.framerate
        self.milliseconds = self.duration * 1000
Example #8
    def duration(self):
        """
        Returns time length of video in milliseconds.

        Returns
        -------
        output : int
            Time length [ms].
        """
        # duration [ms] = frame count / frames per second * 1000
        return cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_COUNT) / \
            cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_FPS) * 1000
Example #9
def read_video(filename, write_to_file=False, color=False):
    """
  Read video from the specified file
  
  parameters:
  filename: The name of the video file to be read.
  write_to_file: True if the video should be written to an h5file.
                 False otherwise.
                 If not specified, will be set to False.
  
  color: True if the ouput video should have 3 channels - RGB
         False if the output video should be grayscale.
         If not specified, will be set to False

  returns: a numpy array representing the video
  """

    if not CV_INSTALLED:
        raise ImportError("Failure to load OpenCV.\n \t " \
                          "read_video requires: OpenCV\n")

    vidFile = cv.CaptureFromFile(filename)
    nFrames = int(cv.GetCaptureProperty(vidFile, cv.CV_CAP_PROP_FRAME_COUNT))
    fps = cv.GetCaptureProperty(vidFile, cv.CV_CAP_PROP_FPS)

    if nFrames == 0:
        raise TypeError("Could not read from %s.\n\t"
                        "Please make sure that the required codec is installed.\n"\
                        % filename)

    frameImg = cv.QueryFrame(vidFile)

    if color:
        size = (nFrames, frameImg.height, frameImg.width, 3)
    else:
        size = (nFrames, frameImg.height, frameImg.width)

    vid_arr = np.zeros(size)

    rgb2k = np.array([0.114, 0.587, 0.299])
    for f in xrange(nFrames):
        arr = cv2array(frameImg)
        if color:
            vid_arr[f, :, :, :] = arr
        else:
            vid_arr[f, :, :] = np.sum(arr * rgb2k, axis=-1) / 255
        frameImg = cv.QueryFrame(vidFile)

    if write_to_file:
        filename = filename.rpartition('.')[0] + ".h5"
        io.write_memory_to_file(vid_arr, filename)

    return vid_arr
Example #10
def myVideo(url):
    flag = 0  # flag for checking if the file is already present in the internal memory.
    search_folder = "."
    videoFile = url.split('/')[-1].split('#')[0].split('?')[
        0]  # extracting the file name from the URL.

    for root, dirs, files in os.walk(
            search_folder):  # using the os.walk module to find the files.
        for name in files:
            """Checking the videofile in the current directory and the sub-directories"""
            if videoFile == os.path.join(
                    name
            ):  #checking if the file is already present in the internal memory.(Traverse through subdirectories as well)
                flag += 1
                print "The file is already present in the internal memory"
                return -1  # Returning the confirmation that the file is present.

    if flag == 0:  # downloading only when the flag is zero (i.e. the file is not in the internal memory).
        print "Downloading the file"
        video = urllib.FancyURLopener()  #downloading the file using urllib.
        video.retrieve(url, videoFile)
        curDir = os.getcwd()  # getting the current working directory.
        fullVideoPath = os.path.join(
            curDir, videoFile)  # Making the full path of the video file.
        """For playing the file using openCV first read the file.
                Find the number of frames and the frame rate.
                Finally use these parameters to display each extracted frame on the screen"""

        vidFile = cv.CaptureFromFile(
            fullVideoPath)  #Video capturing from the file.
        nFrames = int(
            cv.GetCaptureProperty(
                vidFile,
                cv.CV_CAP_PROP_FRAME_COUNT))  #Number of frames in the video.
        fps = cv.GetCaptureProperty(vidFile, cv.CV_CAP_PROP_FPS)  # Frame rate
        waitPerFrame = int(1 / fps * 1000 / 1)  # Wait time between frames.

        for f in xrange(nFrames):
            frameImg = cv.QueryFrame(
                vidFile)  # decoding and returning the grabbed video frame.
            cv2.namedWindow(
                "EPIC", cv2.WND_PROP_FULLSCREEN)  #Making full size display.
            cv2.setWindowProperty(
                'EPIC', cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN
            )  # setting the window property to full screen.
            cv.ShowImage("EPIC", frameImg)  # Showing the frame Image
            cv.WaitKey(waitPerFrame)  # Waiting between the frames.

        cv.DestroyWindow(
            "EPIC")  # Deleting the window once the playing is done.
        return 1  # The file is successfully played.
Example #11
def getDVframes(dvfile, output_dir, frame_start, frame_end):
    """
    Snip out the selected frames from the DV file
    """
    capture = cv.CaptureFromFile(dvfile)
    #  print "Dimensions: ", cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH),\
    "x", cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT)
    numFrames = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)
    # print "Num frames: ", numFrames

    for i in range(frame_start, frame_end):
        print "Exporting frame", i
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_POS_FRAMES, i)
        img = cv.RetrieveFrame(capture)
        cv.SaveImage(os.path.join(output_dir, 'frame' + str(i)) + '.png', img)
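The same frame-accurate export can be sketched with the cv2 API, where cap.set with CAP_PROP_POS_FRAMES replaces cv.SetCaptureProperty (assuming OpenCV 3 or later; get_dv_frames_cv2 is only an illustrative name):

import os
import cv2

def get_dv_frames_cv2(dvfile, output_dir, frame_start, frame_end):
    cap = cv2.VideoCapture(dvfile)
    for i in range(frame_start, frame_end):
        cap.set(cv2.CAP_PROP_POS_FRAMES, i)  # seek to frame i
        ok, img = cap.read()                 # replaces cv.RetrieveFrame
        if not ok:
            break
        cv2.imwrite(os.path.join(output_dir, 'frame%d.png' % i), img)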
Example #12
 def __init__(self,namew,width=None,height=None):
   cv.NamedWindow(namew, 1)
   self.capture = cv.CreateCameraCapture(0)
   self.width = width
   self.height = height
   if width is None:
     self.width = int(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_WIDTH))
   else:
     cv.SetCaptureProperty(self.capture,cv.CV_CAP_PROP_FRAME_WIDTH,width)    
   if height is None:
     self.height = int(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
   else:
     cv.SetCaptureProperty(self.capture,cv.CV_CAP_PROP_FRAME_HEIGHT,height) 
   self.w = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,3) 
   self.avg = 0  # initialize the average
Example #13
def propedit():
    maxprop = 0
    for p in prop:
        if prop[p][1]:
            maxprop = prop[p][1]
            print "%d: %10s = %g (%g)" % (maxprop, p, prop[p][2], prop[p][3])

    print "Enter property number: [1..%d] " % maxprop
    r = cv_readline(cv)
    try:
        idx = string.atoi(r)
        for p in prop:
            if prop[p][1] == idx:
                print "Enter new value %s [%g]" % (p, prop[p][2])
                r = cv_readline(cv)
                try:
                    val = string.atof(r)
                    cv.SetCaptureProperty(cam, prop[p][0], val)
                    val = cv.GetCaptureProperty(cam, prop[p][0])
                    prop[p][2] = val
                    print "prop num %d: %10s = %g" % (idx, p, prop[p][2])
                except:
                    print "value unchanged"
                    pass
    except:
        pass
Example #14
def play_video(path, name, posmsec=0, fps=0):
    capture = cv.CaptureFromFile(path)
    if fps <= 0:
        fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
    interval = int(1000.0 / fps)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_POS_MSEC, posmsec)

    playing = [True]
    cv.NamedWindow(name)

    def on_mouse(event, x, y, flags, param):
        if event == cv.CV_EVENT_RBUTTONDOWN:
            playing[0] = False

    cv.SetMouseCallback(name, on_mouse)

    while playing[0]:
        frame = cv.QueryFrame(capture)
        if frame is None:
            playing[0] = False
        else:
            cv.ShowImage(name, frame)
            cv.WaitKey(interval)

    cv.DestroyWindow(name)
    del capture
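A rough cv2 equivalent of the playback loop above, seeking by milliseconds with CAP_PROP_POS_MSEC and stopping on the Esc key instead of the right-click callback (assuming OpenCV 3 or later; play_video_cv2 is only an illustrative name):

import cv2

def play_video_cv2(path, name, posmsec=0, fps=0):
    cap = cv2.VideoCapture(path)
    if fps <= 0:
        fps = cap.get(cv2.CAP_PROP_FPS) or 25.0  # fall back if FPS is unreported
    interval = int(1000.0 / fps)
    cap.set(cv2.CAP_PROP_POS_MSEC, posmsec)
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        cv2.imshow(name, frame)
        if cv2.waitKey(interval) & 0xFF == 27:  # Esc stops playback
            break
    cap.release()
    cv2.destroyWindow(name)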
Example #15
def do_video(capture):
    global debugger
    fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
    print "framerate is ", fps
    if fps > 0:
        waitframe = 1.0 / fps
    else:
        # fall back to the default rate when the capture reports no FPS
        waitframe = 1.0 / FRAMERATE
    print "waitframe is ", waitframe
    frame = cv.QueryFrame(capture)
    hsv = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
    debugger.create_image(cv.GetSize(frame))
    last_time = time.time()
    while frame:
        do_frame(frame, hsv)
        frame = cv.QueryFrame(capture)
        cur_time = time.time()
        frame_time = cur_time - last_time
        last_time = cur_time
        print "frame time is ", frame_time
        wait_time = waitframe - frame_time
        print "wait_time is ", wait_time
        if wait_time < 0:
            wait_time = 0.001
        debugger.pause(frame, wait_time)
    debugger.pause(frame, 0)
Example #16
def run_extractor(maxTime):
    # Set up the objects that handle the parameters
    evolutionModel = GaussianEvolutionModel(10.0, 10.0, 0.1)
    transformEstimator = AffineTransformEstimator(100, 1e-7, 2.0, 2)
    motionExtractor = MotionExtractor(transformEstimator, evolutionModel, 0.6,
                                      12.0, 2.0, 0.5, 200)

    # Now open the video for input
    #inStream = cv.CaptureFromFile('/data/mdesnoye/fish/media_videos/12-37-08_5.avi')
    inStream = cv.CaptureFromFile(
        '/home/mdesnoyer/data/fish/tank_videos/12-37-08_5.avi')
    fps = cv.GetCaptureProperty(inStream, cv.CV_CAP_PROP_FPS)

    curTime = 0
    cvFrame = cv.QueryFrame(inStream)
    while (cvFrame is not None):

        frame = np.asarray(cv.GetMat(cvFrame)).astype(np.float64) / 255.0
        motionExtractor.AddImage(frame, curTime)

        if (motionExtractor.RetreiveObjects() is not None
                and (curTime - motionExtractor.time() < 5.0 / fps)):
            # Create a new frame showing the objects in red
            outFrame = frame
            outFrame[:,:,2] = outFrame[:,:,2]*0.6 + 0.4 * \
                              motionExtractor.RetreiveObjects().ToBinaryImageMask().astype(np.float64)

            outFrame *= 255.0

        cvFrame = cv.QueryFrame(inStream)
        curTime += 1.0 / fps
        print curTime

        if curTime > maxTime:
            return
Example #17
def grab_images(video_file, frame_inc=100, delay=100):
    """
    Walks through the entire video and save image for each increment
    """
    my_video = init_video(video_file)
    if my_video is not None:
        # Display the video and save an image every frame_inc frames
        cpt = 0
        img = cv.QueryFrame(my_video)

        if img is not None:
            cv.NamedWindow("Vid", cv.CV_WINDOW_AUTOSIZE)
        else:
            return None

        nFrames = int(
            cv.GetCaptureProperty(my_video, cv.CV_CAP_PROP_FRAME_COUNT))
        while cpt < nFrames:
            for ii in range(frame_inc):
                img = cv.QueryFrame(my_video)
                cpt += 1

            cv.ShowImage("Vid", img)
            out_name = "data/output/" + str(cpt) + ".jpg"
            cv.SaveImage(out_name, img)
            print out_name, str(nFrames)
            cv.WaitKey(delay)
    else:
        return None
Example #18
File: ocv.py Project: NidayeCC/Pyocv
    def property(self, key, val=None):
        """Get/Set capture device property"""
        if val is None:
            return cv.GetCaptureProperty(self.capture, key)
        else:
            cv.SetCaptureProperty(self.capture, key, val)

        return None
Example #19
def run_recognition_on_video(video_name, para_path, Labels):
    f = video_name

    # Read From Video
    capture = cv.CaptureFromFile(f)
    print capture
    print cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)
    print cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT)

    # Neuronet Configuration
    resize_row = 20
    resize_width = 20

    weights = loadmat(para_path)
    T1 = weights['Theta1']
    T2 = weights['Theta2']

    # Start Loading Video

    history_prediction = []

    while True:
        frame = cv.QueryFrame(capture)
        process_vector = np.zeros((1, resize_row * resize_width))

        # Processing Video Frames
        if frame:

            color, contour_list = extract_and_draw_countour_above_area_threshold(
                frame, 20000)

            if contour_list:
                color, history_prediction = gesture_recognition_and_labeling(
                    color, contour_list, T1, T2, Labels, history_prediction,
                    True)

            display_cvMat = numpy_to_cvMat(color)
            cv.ShowImage('Real Time Recognition', display_cvMat)
            cv.WaitKey(1)

        # Keyboard interrupt for Exit
        c = cv.WaitKey(2)
        if c == 27:  #Break if user enters 'Esc'.
            break
Example #20
    def prepare(self):
        """
		Opens the video file for playback
		"""

        # Pass the word on to the parent
        item.item.prepare(self)

        # Find the full path to the video file. This will point to some
        # temporary folder where the file pool has been placed
        path = self.experiment.get_file(self.video_src)

        # Open the video file
        self.video = cv.CreateFileCapture(path)

        # Convert the string to a boolean, for slightly faster
        # evaluations in the run phase
        self._fullscreen = self.fullscreen == "yes"

        # The dimensions of the video
        self._w = int(cv.GetCaptureProperty(self.video,
                                            cv.CV_CAP_PROP_FRAME_WIDTH))
        self._h = int(cv.GetCaptureProperty(self.video,
                                            cv.CV_CAP_PROP_FRAME_HEIGHT))

        if self._fullscreen:

            # In fullscreen mode, the video is always shown in the top-left and the
            # temporary images need to be fullscreen size
            self._x = 0
            self._y = 0
            self.src_tmp = cv.CreateMat(self.experiment.height,
                                        self.experiment.width, cv.CV_8UC3)
            self.src_rgb = cv.CreateMat(self.experiment.height,
                                        self.experiment.width, cv.CV_8UC3)
        else:

            # Otherwise the location of the video depends on its dimensions and the
            # temporary image is the same size as the video
            self._x = max(0, (self.experiment.width - self._w) / 2)
            self._y = max(0, (self.experiment.height - self._h) / 2)
            self.src_rgb = cv.CreateMat(self._h, self._w, cv.CV_8UC3)

        # Report success
        return True
Example #21
 def query(self):
     if self.current_frame > 0 and cv.GetCaptureProperty(
             self.cv_capture, cv.CV_CAP_PROP_POS_AVI_RATIO) == 1.0:
         return None
     frame = cv.QueryFrame(self.cv_capture)
     if frame is None:
         raise StopIteration("End of video sequence")
     self.current_frame += 1
     frame = cv.CloneImage(frame)
     return pv.Image(self.resize(frame))
Example #22
File: source.py Project: bhamav/cs365p4
    def getLength(self):
        '''
        Returns the video frame count.
        '''
        # try to get the length from the file
        if self._length == 0:
            self.length = cv.GetCaptureProperty(self._video,
                                                cv.CV_CAP_PROP_FRAME_COUNT)

        return self.length
Example #23
    def frame_count(self):
        """
        Returns frame count of video.

        Returns
        -------
        output : int
            Frame count.
        """
        return cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_COUNT)
Example #24
 def print_info(self):
     for prop in [
             cv.CV_CAP_PROP_POS_MSEC, cv.CV_CAP_PROP_POS_FRAMES,
             cv.CV_CAP_PROP_POS_AVI_RATIO, cv.CV_CAP_PROP_FRAME_WIDTH,
             cv.CV_CAP_PROP_FRAME_HEIGHT, cv.CV_CAP_PROP_FPS,
             cv.CV_CAP_PROP_FOURCC, cv.CV_CAP_PROP_BRIGHTNESS,
             cv.CV_CAP_PROP_CONTRAST, cv.CV_CAP_PROP_SATURATION,
             cv.CV_CAP_PROP_HUE
     ]:
         print cv.GetCaptureProperty(self.capture, prop)
Example #25
def init_video(video_file):
    """
    Given the name of the video, prepares the stream and checks that everything works as intended
    """
    capture = cv.CaptureFromFile(video_file)

    nFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
    fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
    if fps != 0:
        waitPerFrameInMillisec = int(1 / fps * 1000 / 1)

        print 'Num. Frames = ', nFrames
        print 'Frame Rate = ', fps, ' frames per sec'

        print '----'

        return capture
    else:
        return None
Example #26
def main(argv):
    if len(argv) < 3:
        sys.stderr.write("Usage: %s <inVideoFile> <outDataFile>\n" % (argv[0],))
        return 1

    """ parameters"""
    width=100
    height=100
    q=3 # data byte size
    bucket=100# num of pixels for each byte
    no_of_bits=8
    Q= np.power(2,q)

    inFile="out0.avi"
    inFile = sys.argv[1]
    vidIn = cv.CreateFileCapture(inFile)
    numFrames = int(cv.GetCaptureProperty(vidIn, cv.CV_CAP_PROP_FRAME_COUNT))
    fps = cv.GetCaptureProperty(vidIn, cv.CV_CAP_PROP_FPS)
    #print 'Num. Frames = ', nFrames
    #print 'Frame Rate = ', fps, ' frames per sec'

    dataOut=[]
    numBytes=width*height/bucket
    for i in xrange(numFrames):
        img=cv.QueryFrame(vidIn)
        for byte in range(numBytes):
            tmpList=[]
            for i in range(bucket):
                x=np.mod(byte*bucket+i,width)
                y=np.divide(byte*bucket+i,height)
                value=cv.Get2D(img,y,x)
                tmpList.append((value[1]/(np.power(2,no_of_bits)/Q))-0.5)
            dByte = np.binary_repr(np.ceil(np.mean(tmpList[0:bucket-10]))).zfill(3)
            for t in range(q):
                dataOut.append(int(dByte[t]))

    # outputting the data
    np.savetxt(sys.argv[2],dataOut,fmt='%1d')

Example #27
def load_video_clip(video_file, start_frame=0, end_frame=None, verbose=False):
    """Loads frames from a video_clip

    Args:
        video_file: path of the video file
        start_frame: first frame to be loaded
        end_frame: last frame to be loaded

    Returns:
        A (#frames)x(height)x(width)x(#channels) NumPy array containing the
        video clip
    """
    if not os.path.exists(video_file):
        raise IOError, 'File "%s" does not exist!' % video_file
    capture = cv.CaptureFromFile(video_file)
    if not end_frame:
        end_frame = int(
            cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
    else:
        end_frame = int(
            min(end_frame,
                cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)))
    width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
    if verbose:
        print "end_frame: %d" % end_frame
        print "clip has %d frames" % int(
            cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
    for _ in range(start_frame):  # frames start with 1 in annotation files
        cv.GrabFrame(capture)
    frames = np.zeros((end_frame - start_frame - 2, height, width),
                      dtype=np.uint8)
    for i in range(end_frame - start_frame -
                   2):  # end_frame = last action frame
        img = cv.QueryFrame(capture)
        if img is None:
            continue
        tmp = cv.CreateImage(cv.GetSize(img), 8, 1)
        cv.CvtColor(img, tmp, cv.CV_BGR2GRAY)
        frames[i, :] = np.asarray(cv.GetMat(tmp))
    return np.array(frames)
Example #28
    def __init__(self, camera_configuration, opencv_id):
        self.config = camera_configuration
        self.device = opencv_id
        self._set_registers()
        camera.camera.__init__(self, camera_configuration, opencv_id)

        #create capture and related attributes
        #self.capture = cv.CaptureFromCAM(self.device)
        #if not self.capture:
        #    raise RuntimeError("Cannot open camera!\n")
        cur_codec = cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_MODE)
        print "dragonfly2: current codec interpretation is : ", cur_codec
        integ = cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_MODE,
                                      self.config['cv_cap_prop_mode'])

        #self.set_frame_rate(3.75) # set it really low to start out.
        # increase later.

        fps = cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_FPS)
        print "dragonfly2: fps : ", fps
        next_codec = cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_MODE)
        print "dragonfly2: current codec interpretation is : ", next_codec
Example #29
def camera_capture():

    # /dev/video0
    c = cv.CaptureFromCAM(0)
    #assert type(c) ==  "cv.Capture"

    # or use QueryFrame. It's the same
    cv.GrabFrame(c)
    image = cv.RetrieveFrame(c)
    #image = cv.QueryFrame(c)
    assert image is not None

    dst = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_16S, 3)
    #im = cv.CloneImage(image)
    laplace = cv.Laplace(image, dst)
    cv.SaveImage("my-camera.png", dst)

    print cv.GetCaptureProperty(c, cv.CV_CAP_PROP_FRAME_HEIGHT)
Example #30
 def __init__(self, filename, size=None):
     '''
     The basic video class that is used to play back a movie file.
     @param filename: The full path name of the video file including extension. Also, with
     current versions of OpenCV, this can be a url to a network IP camera, but you will need
     to consult your IP camera manufacturer's documentation as url formats vary.
     @note: The following is an example of using the Video class with an IP camera.
     The rtsp url is for a linksys WVC54GCA IP camera. The ip address will need to be changed
     as appropriate for your local network. Other model cameras use different urls. It can take
     a few seconds for the feed to be established.
     cam_url = "rtsp://192.168.2.55/img/video.sav"  
     vid = Video(cam_url) 
     vid.play()
     '''
     self.filename = filename
     self.cv_capture = cv.CaptureFromFile(filename)
     self._numframes = cv.GetCaptureProperty(self.cv_capture,
                                             cv.CV_CAP_PROP_FRAME_COUNT)
     self.size = size
     self.current_frame = 0