Example 1
    def __data_generation(self, seVideo: pd.Series) -> (np.array(float), int, np.array(float), int):
        "Returns frames for 1 video plus a horizontally flipped copy, including normalizing & preprocessing"

        # Get the frames from disk
        ar_nFrames = files2frames(seVideo.sFrameDir)

        # create a horizontally flipped copy of every frame (flip augmentation)
        ar_nFrames_flip = np.array(
            [np.fliplr(ar_nFrames[i]) for i in range(ar_nFrames.shape[0])])

        # only use the first nChannels (typically 3, but maybe 2 for optical flow)
        ar_nFrames = ar_nFrames[..., 0:self.nChannels]
        ar_nFrames_flip = ar_nFrames_flip[..., 0:self.nChannels]

        ar_nFrames = images_normalize(ar_nFrames,
                                      self.nFrames,
                                      self.nHeight,
                                      self.nWidth,
                                      bRescale=True)
        ar_nFrames_flip = images_normalize(ar_nFrames_flip,
                                           self.nFrames,
                                           self.nHeight,
                                           self.nWidth,
                                           bRescale=True)
        # keep a reference to the frames generated last
        self.prv_frame = ar_nFrames
        self.prv_frame_flip = ar_nFrames_flip

        return ar_nFrames, seVideo.nLabel, ar_nFrames_flip, seVideo.nLabel
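
The helper files2frames used above is not shown in this listing. A minimal sketch of what such a loader could look like, assuming each video's frames are stored as individual JPEG files inside sFrameDir and read with OpenCV (the file pattern and sort order are assumptions, not the project's actual implementation):

import glob
import os

import cv2
import numpy as np

def files2frames(sFrameDir: str) -> np.array:
    "Hypothetical sketch: load all frame images of one video, sorted by filename"
    liFiles = sorted(glob.glob(os.path.join(sFrameDir, "*.jpg")))
    if len(liFiles) == 0:
        raise ValueError("No frames found in " + sFrameDir)
    # read each frame and stack into an array of shape (nFrames, nHeight, nWidth, nChannels)
    liFrames = [cv2.imread(sPath) for sPath in liFiles]
    return np.array(liFrames)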
Example 2
    def __data_generation(self, seVideo: pd.Series) -> (np.array(float), int):
        "Returns frames for 1 video, including normalizing & preprocessing"
       
        # Get the frames from disk
        ar_nFrames = files2frames(seVideo.sFrameDir)

        # only use the first nChannels (typically 3, but maybe 2 for optical flow)
        ar_nFrames = ar_nFrames[..., 0:self.nChannels]
        
        ar_fFrames = images_normalize(ar_nFrames, self.nFrames, self.nHeight, self.nWidth, bRescale=True)
        
        return ar_fFrames, seVideo.nLabel
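
images_normalize is also an external helper; the calls above suggest it samples a video to exactly nFrames frames, resizes them to nHeight x nWidth, and optionally rescales the pixel values. A rough sketch under those assumptions (the evenly spaced frame sampling and the [-1, 1] rescaling are guesses, not the project's actual implementation):

import cv2
import numpy as np

def images_normalize(arFrames: np.array, nFrames: int, nHeight: int, nWidth: int,
                     bRescale: bool = True) -> np.array:
    "Hypothetical sketch: sample to nFrames, resize to nHeight x nWidth, rescale pixels"
    # pick nFrames evenly spaced frames (duplicates frames if the video is too short)
    nIndices = np.linspace(0, arFrames.shape[0] - 1, nFrames).astype(int)
    arFrames = arFrames[nIndices]

    # resize every frame to (nHeight, nWidth)
    arFrames = np.array([cv2.resize(arFrame, (nWidth, nHeight)) for arFrame in arFrames])

    arFrames = arFrames.astype(np.float32)
    if bRescale:
        # assumed rescaling from [0, 255] to [-1.0, 1.0]
        arFrames = arFrames / 127.5 - 1.0

    return arFrames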
Example 3
def video_capture(oStream, sColor, sText, tuRectangle = (224, 224), nTimeDuration = 3, bOpticalFlow = False) -> \
    (float, np.array, np.array, int):
    "Capture frames (and optionally optical flow) from oStream for nTimeDuration seconds"

    if bOpticalFlow:
        oOpticalFlow = OpticalFlow(bThirdChannel=False)

    liFrames = []
    liFlows = []
    key = -1  # last key pressed (initialized in case the loop ends before any key was read)
    fTimeStart = time.time()

    # loop over frames from the video file stream
    while True:
        # grab the frame from the threaded video file stream
        (bGrabbed, arFrame) = oStream.read()
        if not bGrabbed: break
        arFrame = rescale_frame(arFrame, 320, 240)
        arFrame = cv2.flip(arFrame, 1)
        liFrames.append(arFrame)

        fTimeElapsed = time.time() - fTimeStart
        s = sText + str(int(fTimeElapsed) + 1) + " sec"

        # paint rectangle & text, show the frame
        arFrameText = rectangle_text(arFrame, sColor, s, "", tuRectangle)
        cv2.imshow("Video", arFrameText)

        # display optical flow
        if bOpticalFlow:
            arFlow = oOpticalFlow.next(image_crop(arFrame, *tuRectangle))
            liFlows.append(arFlow)
            # add an empty third channel so the 2-channel flow can be shown as a color image
            h, w, _ = arFlow.shape
            arZeros = np.zeros((h, w, 1), dtype=np.float32)
            arFlow = np.concatenate((arFlow, arZeros), axis=2)
            cv2.imshow("Optical flow", flow2colorimage(arFlow))

        # stop after nTimeDuration sec
        if fTimeElapsed >= nTimeDuration: break

        # Press 'q' (or 'r' / 'd') for early exit
        key = cv2.waitKey(1)
        if key == ord('q'): break

        if key == ord('r') or key == ord('d'):
            break

    return fTimeElapsed, images_normalize(np.array(liFrames), 40, 224, 224, True), \
        np.array(liFlows), key
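
A minimal usage sketch for video_capture, assuming oStream is an opened cv2.VideoCapture and that the project's helpers (rescale_frame, rectangle_text, image_crop, images_normalize, flow2colorimage, OpticalFlow) are importable; the color string and caption text below are made-up values:

import cv2

# open the default webcam as the frame source
oStream = cv2.VideoCapture(0)

# record roughly 3 seconds while showing a rectangle and a running counter
fElapsed, arFrames, arFlows, nKey = video_capture(
    oStream, "green", "Recording ", tuRectangle=(224, 224),
    nTimeDuration=3, bOpticalFlow=False)
print("Captured %.1f sec, frames array shape %s" % (fElapsed, str(arFrames.shape)))

oStream.release()
cv2.destroyAllWindows()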