def test_should_work_if_batch_size_not_in_config(self, get_val_mock):
    get_val_mock.return_value = None
    video_loader = OpenCVReader('dummy.avi')
    batches = list(video_loader.read())
    expected = list(create_dummy_batches())
    self.assertEqual(batches, expected)
    get_val_mock.assert_called_once_with("executor", "batch_size")
def test_should_return_batches_equivalent_to_number_of_frames_2(self):
    video_loader = OpenCVReader(file_url='dummy.avi', batch_size=-1)
    batches = list(video_loader.read())
    expected = list(self.create_dummy_frames())
    self.assertEqual(len(batches), NUM_FRAMES)
    actual = [batch.frames.to_dict('records')[0] for batch in batches]
    self.assertTrue(custom_list_of_dicts_equal(actual, expected))
def test_should_skip_first_two_frames_with_offset_two(self):
    video_loader = OpenCVReader(file_url='dummy.avi', offset=2)
    batches = list(video_loader.read())
    expected = list(
        create_dummy_batches(filters=[i for i in range(2, NUM_FRAMES)]))
    self.assertEqual(batches, expected)
def test_should_start_frame_number_from_two(self):
    video_loader = OpenCVReader(
        file_url='dummy.avi', batch_size=NUM_FRAMES, start_frame_id=2)
    batches = list(video_loader.read())
    expected = list(self.create_dummy_frames(
        filters=[i for i in range(0, NUM_FRAMES)], start_id=2))
    self.assertEqual(1, len(batches))
    actual = batches[0].frames.to_dict('records')
    self.assertTrue(custom_list_of_dicts_equal(actual, expected))
def test_should_start_frame_number_from_two(self):
    video_loader = OpenCVReader(file_url='dummy.avi',
                                batch_size=NUM_FRAMES,
                                start_frame_id=2)
    batches = list(video_loader.read())
    expected = list(
        create_dummy_batches(filters=[i for i in range(0, NUM_FRAMES)],
                             start_id=2))
    self.assertEqual(batches, expected)
def test_should_skip_first_two_frames_and_batch_size_equal_to_no_of_frames(
        self):
    video_loader = OpenCVReader(
        file_url='dummy.avi', batch_size=NUM_FRAMES, offset=2)
    batches = list(video_loader.read())
    expected = list(self.create_dummy_frames(
        filters=[i for i in range(2, NUM_FRAMES)]))
    self.assertEqual(1, len(batches))
    actual = batches[0].frames.to_dict('records')
    self.assertTrue(custom_list_of_dicts_equal(actual, expected))
def test_should_load_and_select_real_video_in_table(self):
    query = """LOAD DATA INFILE 'data/ua_detrac/ua_detrac.mp4' INTO MyVideo;"""
    perform_query(query)

    select_query = "SELECT id,data FROM MyVideo;"
    actual_batch = perform_query(select_query)

    video_reader = OpenCVReader('data/ua_detrac/ua_detrac.mp4')
    expected_batch = Batch(frames=pd.DataFrame())
    for batch in video_reader.read():
        expected_batch += batch
    self.assertEqual(actual_batch, expected_batch)
def exec(self):
    """
    Read the input video using OpenCV and persist the data
    using the storage engine.
    """
    # Videos are persisted using an (id, data) schema, where id is the
    # frame id and data is the frame data. The current logic assumes the
    # frame id starts from 0 when loading a video. To append to an
    # existing store we would have to figure out the correct frame id;
    # it could also be a parameter passed by the user.
    # We currently use create() to empty any existing table.
    StorageEngine.create(self.node.table_metainfo)

    video_reader = OpenCVReader(self.node.file_path)
    for batch in video_reader.read():
        StorageEngine.write(self.node.table_metainfo, batch)
def exec(self):
    """
    Read the input video using OpenCV and persist the data
    using the storage engine.
    """
    # Fetch batch_size from the configuration; fall back to a default.
    batch_size = ConfigurationManager().get_value("executor", "batch_size")
    if batch_size is None:
        batch_size = 50

    # Videos are persisted using an (id, data) schema, where id is the
    # frame id and data is the frame data. The current logic assumes the
    # frame id starts from 0 when loading a video. To append to an
    # existing store we would have to figure out the correct frame id;
    # it could also be a parameter passed by the user.
    video_reader = OpenCVReader(self.node.file_path, batch_size=batch_size)
    for batch in video_reader.read():
        # Hook for the storage engine
        append_rows(self.node.table_metainfo, batch)
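# The (id, data) frame schema described in the comments above can be
# illustrated with a small sketch. The helper below is hypothetical and only
# shows the shape of the persisted rows; the dummy frame contents are
# assumptions for illustration, not the project's actual loader output.
import numpy as np
import pandas as pd


def make_example_batch(start_id: int = 0, num_frames: int = 2) -> Batch:
    """Build a tiny Batch that follows the (id, data) frame schema."""
    rows = [{'id': start_id + i,
             'data': np.zeros((2, 2, 3), dtype=np.uint8)}  # dummy RGB frame
            for i in range(num_frames)]
    return Batch(frames=pd.DataFrame(rows))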
class DiskStorageExecutor(AbstractStorageExecutor):
    """
    A simple disk-based executor. It assumes that frames are read
    directly from the disk (video file).
    Note: For a full-fledged deployment this might be replaced with a
    transaction manager that keeps track of the frames.
    """

    def __init__(self, node: StoragePlan):
        super().__init__(node)
        self.storage = OpenCVReader(node.video,
                                    batch_size=node.batch_size,
                                    offset=node.offset)

    def validate(self):
        pass

    def exec(self) -> Iterator[Batch]:
        for batch in self.storage.read():
            yield batch
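# A minimal usage sketch for DiskStorageExecutor, assuming a StoragePlan node
# exposes the video, batch_size and offset attributes used in __init__ above;
# how such a plan is built elsewhere in the codebase is not shown here.
def count_stored_frames(plan: StoragePlan) -> int:
    """Drain the executor and count the frames it yields."""
    executor = DiskStorageExecutor(plan)
    executor.validate()
    total = 0
    for batch in executor.exec():
        total += len(batch.frames)  # batch.frames is a pandas DataFrame
    return total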
def test_should_return_one_batches_for_negative_size(self):
    video_loader = OpenCVReader(file_url='dummy.avi', batch_size=-1)
    batches = list(video_loader.read())
    expected = list(create_dummy_batches())
    self.assertEqual(batches, expected)
def test_should_return_batches_equivalent_to_number_of_frames(self):
    video_loader = OpenCVReader(file_url='dummy.avi', batch_size=1)
    batches = list(video_loader.read())
    expected = list(create_dummy_batches(batch_size=1))
    self.assertEqual(batches, expected)
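# The tests above rely on helpers such as NUM_FRAMES, create_dummy_batches()
# and custom_list_of_dicts_equal(). A rough sketch of what create_dummy_batches
# could look like is given below; the default batch size and the dummy frame
# contents are assumptions for illustration only.
import numpy as np
import pandas as pd

NUM_FRAMES = 10


def create_dummy_batches(filters=None, batch_size=NUM_FRAMES, start_id=0):
    """Yield Batch objects over dummy frames using the (id, data) schema."""
    if filters is None:
        filters = range(NUM_FRAMES)
    frames = [{'id': start_id + i,
               'data': np.ones((2, 2, 3), dtype=np.uint8) * i}
              for i in filters]
    for start in range(0, len(frames), batch_size):
        yield Batch(frames=pd.DataFrame(frames[start:start + batch_size]))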