Example #1
    def load_video(self, searchDir):

        print("load")

        self.path = searchDir
        (self.videoMetaList, self.labelList,
         self.labelMap) = self.findDataNames(self.path)

        videoMetaIndex = 0
        while videoMetaIndex < len(self.videoMetaList):

            # Get a single batch
            frames = []
            labels = np.zeros((0, 51))
            # Stop early if we run out of videos before filling the batch
            while (len(frames) < self.batchSize
                   and videoMetaIndex < len(self.videoMetaList)):

                # Load a single video
                meta = self.videoMetaList[videoMetaIndex]
                videoFrames, info = self.loadVideo(meta)
                videoLabels = np.zeros((len(videoFrames), 51))
                videoLabels[:, self.labelList[videoMetaIndex]] = 1
                videoMetaIndex += 1

                # Skip unsupported frame types
                if info != FrameInfo(240, 320, 3, ColorSpace.RGB):
                    continue

                # Append onto frames and labels
                frames += videoFrames
                labels = np.append(labels, videoLabels, axis=0)

            yield FrameBatch(frames, info), labels
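
For reference, the label construction above amounts to one-hot encoding each video's class over the hard-coded 51 classes. A minimal, self-contained sketch of just that step (the function name and the class-count constant are illustrative, not part of the project):

import numpy as np

NUM_CLASSES = 51  # matches the hard-coded 51 above

def one_hot_labels(num_video_frames, class_index):
    # One row per frame, with a 1 in the video's class column
    labels = np.zeros((num_video_frames, NUM_CLASSES))
    labels[:, class_index] = 1
    return labels

print(one_hot_labels(3, 7).shape)  # (3, 51)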
Example #2
    def load(self):
        video = cv2.VideoCapture(self.video_metadata.file)
        video_start = self.offset if self.offset else 0
        video.set(cv2.CAP_PROP_POS_FRAMES, video_start)

        LoggingManager().log("Loading frames", LoggingLevel.INFO)

        _, frame = video.read()
        frame_ind = video_start - 1

        info = None
        if frame is not None:
            (height, width, num_channels) = frame.shape
            info = FrameInfo(height, width, num_channels, ColorSpace.BGR)

        frames = []
        while frame is not None:
            frame_ind += 1
            eva_frame = Frame(frame_ind, frame, info)
            if self.skip_frames > 0 and frame_ind % self.skip_frames != 0:
                _, frame = video.read()
                continue

            frames.append(eva_frame)
            if self.limit and frame_ind >= self.limit:
                # Yield (not return) so the final partial batch actually
                # reaches callers iterating this generator
                yield FrameBatch(frames, info)
                return

            if len(frames) % self.batch_size == 0:
                yield FrameBatch(frames, info)
                frames = []

            _, frame = video.read()

        if frames:
            # A bare `return FrameBatch(...)` inside a generator never reaches
            # the caller; yield the leftover frames instead
            yield FrameBatch(frames, info)
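
For comparison, here is a self-contained sketch of the same read-and-batch pattern using only OpenCV; the function name, parameters, and file path are placeholders rather than part of this project's API:

import cv2

def iter_frame_batches(path, batch_size=32, offset=0):
    # Yield lists of decoded frames; the last batch may be smaller
    video = cv2.VideoCapture(path)
    video.set(cv2.CAP_PROP_POS_FRAMES, offset)
    batch = []
    ok, frame = video.read()
    while ok:
        batch.append(frame)
        if len(batch) == batch_size:
            yield batch
            batch = []
        ok, frame = video.read()
    if batch:
        yield batch

# Usage (replace "video.mp4" with a real file):
# for batch in iter_frame_batches("video.mp4", batch_size=8):
#     print(len(batch), batch[0].shape)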
Example #3
 def create_dummy_frames(self, num_frames=NUM_FRAMES, filters=None):
     # Avoid a mutable default argument; fall back to every frame index
     if not filters:
         filters = range(num_frames)
     for i in filters:
         yield Frame(
             i,
             np.array(np.ones((2, 2, 3)) * 0.1 * float(i + 1) * 255,
                      dtype=np.uint8), FrameInfo(2, 2, 3, ColorSpace.BGR))
Example #4
    def _load_frames(self) -> Iterator[Frame]:
        info = None
        with make_reader(self.video_metadata.file_url,
                         shard_count=self.total_shards,
                         cur_shard=self.curr_shard) \
                as reader:
            for frame_ind, row in enumerate(reader):
                if info is None:
                    (height, width, num_channels) = row.frame_data.shape
                    info = FrameInfo(height, width, num_channels,
                                     ColorSpace.BGR)

                yield Frame(row.frame_id, row.frame_data, info)
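
As I read it, the sharding arguments here map directly onto petastorm's make_reader parameters: cur_shard picks which of shard_count slices of the dataset this reader instance consumes, so several loaders can split one dataset without overlapping rows. The frame dimensions are taken lazily from the first decoded row rather than from any stored metadata.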
Example #5
    def test_load_frame_load_frames_using_petastorm(self, mock):
        mock.return_value = self.DummyReader(
            map(lambda i: self.DummyRow(i,
                                        np.ones((2, 2, 3)) * i), range(3)))

        video_info = DataFrameMetadata("dataset_1", 'dummy.avi')

        video_loader = PetastormLoader(video_info,
                                       curr_shard=3,
                                       total_shards=3)
        actual = list(video_loader._load_frames())
        expected = [
            Frame(i,
                  np.ones((2, 2, 3)) * i, FrameInfo(2, 2, 3, ColorSpace.BGR))
            for i in range(3)
        ]

        self.assertEqual(expected, actual)
Example #6
File: video_loader.py  Project: swati21/eva
    def _load_frames(self) -> Iterator[Frame]:
        video = cv2.VideoCapture(self.video_metadata.file_url)
        video_start = self.offset if self.offset else 0
        video.set(cv2.CAP_PROP_POS_FRAMES, video_start)

        LoggingManager().log("Loading frames", LoggingLevel.INFO)

        _, frame = video.read()
        frame_ind = video_start - 1

        info = None
        if frame is not None:
            (height, width, num_channels) = frame.shape
            info = FrameInfo(height, width, num_channels, ColorSpace.BGR)

        while frame is not None:
            frame_ind += 1
            yield Frame(frame_ind, frame, info)
            _, frame = video.read()
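
Unlike the batched variant in Example #2, this loader yields one Frame at a time and leaves any batching to the caller.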
Example #7
    def loadVideo(self, meta):
        video = cv2.VideoCapture(meta.file)
        video.set(cv2.CAP_PROP_POS_FRAMES, 0)

        _, frame = video.read()
        frame_ind = 0

        info = None
        if frame is not None:
            (height, width, channels) = frame.shape
            info = FrameInfo(height, width, channels, ColorSpace.RGB)

        frames = []
        while frame is not None:
            # Save frame
            eva_frame = Frame(frame_ind, frame, info)
            frames.append(eva_frame)

            # Read next frame
            _, frame = video.read()
            frame_ind += 1

        return (frames, info)
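
One caveat worth noting: OpenCV's VideoCapture decodes frames in BGR channel order, while this loader tags them as ColorSpace.RGB (Example #1 filters on the same RGB tag, so the two snippets are at least consistent with each other). If genuinely RGB-ordered pixels were needed, a conversion along these lines would be required; the helper name is illustrative:

import cv2
import numpy as np

def to_rgb(frame: np.ndarray) -> np.ndarray:
    # Reorder an OpenCV BGR frame into RGB channel order
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)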
Example #8
 def test_frame_info_equality(self):
     info1 = FrameInfo(250, 250, 3, ColorSpace.GRAY)
     info2 = FrameInfo(250, 250, color_space=ColorSpace.GRAY)
     self.assertEqual(info1, info2)
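
This equality only holds if FrameInfo defaults num_channels to 3 when it is omitted; the constructor is not shown here, but that default is implied by the assertion passing.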
Example #9
 def input_format(self) -> FrameInfo:
     return FrameInfo(-1, -1, 3, ColorSpace.RGB)
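
The -1 height and width here presumably act as wildcards, signalling that the model accepts frames of any spatial size as long as they have 3 RGB channels.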
Example #10
    def test_load_images(self):

        frame_info = FrameInfo(28, 28, 1, ColorSpace.GRAY)
        f = FrameLoader("mnist", frame_info)

        f.load_images()
Example #11
    def test_frameinfo_information(self):

        frame_info = FrameInfo(2, 2, 3, ColorSpace.BGR)
        f = FrameLoader("appname", frame_info)

        self.assertEqual(f.H, 2)