def test_generator_processor(tmpdir, bees_image, pipeline_config):
    """Run the same two-image generator through the processor twice.

    Each run must produce its own repository file, and the containers'
    start timestamps must be strictly increasing across files.
    """

    def make_frames():
        # Two copies of the bee image, one second apart, sharing one source.
        base_ts = time.time()
        source = DataSource.new_message(filename='bees.jpeg')
        for offset in range(2):
            yield source, imread(bees_image), base_ts + offset

    repo = Repository(str(tmpdir))
    pipeline = Pipeline([Image, Timestamp], [PipelineResult], **pipeline_config)
    processor = GeneratorProcessor(
        pipeline, lambda: BBBinaryRepoSink(repo, camId=2))

    processor(make_frames())
    processor(make_frames())

    assert len(list(repo.iter_fnames())) == 2

    previous_ts = 0
    for fname in repo.iter_fnames():
        print("{}: {}".format(fname, os.path.getsize(fname)))
        with open(fname, 'rb') as f:
            container = FrameContainer.read(f)
        assert container.dataSources[0].filename == 'bees.jpeg'
        assert previous_ts < container.fromTimestamp
        previous_ts = container.fromTimestamp
def test_generator_processor_threads(tmpdir, bees_video, filelists_path, pipeline_config):
    """Feed one video through three parallel pipelines.

    All results must land in a single repository file holding 3 frames.
    """
    repo = Repository(str(tmpdir))

    # One pipeline instance per worker thread.
    pipelines = []
    for _ in range(3):
        pipelines.append(
            Pipeline([Image, Timestamp], [PipelineResult], **pipeline_config))

    processor = GeneratorProcessor(
        pipelines, lambda: BBBinaryRepoSink(repo, camId=0))
    processor(video_generator(
        bees_video, ts_format="2015", path_filelists=filelists_path))

    assert len(list(repo.iter_fnames())) == 1

    total = 0
    for fname in repo.iter_fnames():
        with open(fname, "rb") as f:
            total += len(list(FrameContainer.read(f).frames))
    assert total == 3
def test_generator_processor_video(tmpdir, bees_video, filelists_path, pipeline_config):
    """Run a single pipeline over a video and validate the resulting repo file."""
    repo = Repository(str(tmpdir))
    pipeline = Pipeline([Image, Timestamp], [PipelineResult], **pipeline_config)
    processor = GeneratorProcessor(
        pipeline, lambda: BBBinaryRepoSink(repo, camId=0))
    processor(video_generator(
        bees_video, ts_format="2015", path_filelists=filelists_path))

    assert len(list(repo.iter_fnames())) == 1

    expected_source = os.path.basename(bees_video)
    prev_ts = 0
    frame_count = 0
    for fname in repo.iter_fnames():
        print("{}: {}".format(fname, os.path.getsize(fname)))
        with open(fname, "rb") as f:
            fc = FrameContainer.read(f)
        frame_count += len(list(fc.frames))
        assert fc.dataSources[0].filename == expected_source
        # Containers must be ordered by strictly increasing start timestamp.
        assert prev_ts < fc.fromTimestamp
        prev_ts = fc.fromTimestamp
    assert frame_count == 3
def get_frame_to_fc_path_dict(frame_fc_map: pd.DataFrame) -> Dict:
    """Map every frame_id in *frame_fc_map* to its parsed FrameContainer.

    Each distinct ``fc_path`` is opened and parsed exactly once; all frames
    sharing that path reference the same container object.
    """
    containers = {}
    for path in np.unique(frame_fc_map.fc_path.values):
        with open(path, 'rb') as f:
            # TODO replace bb_binary
            containers[path] = FrameContainer.read(
                f, traversal_limit_in_words=2**63)

    frame_lookup = {}
    for path, group in frame_fc_map.groupby("fc_path"):
        for frame_id in group.frame_id.values:
            frame_lookup[frame_id] = containers[path]
    return frame_lookup
def check_repo(path, bees_video):
    """Assert the repository at *path* holds exactly 3 frames from *bees_video*,
    with strictly increasing container start timestamps."""
    repo = Repository(path)
    expected_name = os.path.basename(bees_video)
    prev_ts = 0
    total_frames = 0
    for fname in repo.iter_fnames():
        print("{}: {}".format(fname, os.path.getsize(fname)))
        with open(fname, 'rb') as f:
            fc = FrameContainer.read(f)
        total_frames += len(list(fc.frames))
        assert fc.dataSources[0].filename == expected_name
        assert prev_ts < fc.fromTimestamp
        prev_ts = fc.fromTimestamp
    assert total_frames == 3
def check_repo(path, bees_video):
    """Validate repository contents: each container's source filename matches
    the video, start timestamps ascend, and 3 frames exist in total."""
    repo = Repository(path)
    last_start = 0
    frames_seen = 0
    for container_path in repo.iter_fnames():
        size = os.path.getsize(container_path)
        print("{}: {}".format(container_path, size))
        with open(container_path, 'rb') as handle:
            container = FrameContainer.read(handle)
        frames_seen += len(list(container.frames))
        assert container.dataSources[0].filename == os.path.basename(bees_video)
        assert last_start < container.fromTimestamp
        last_start = container.fromTimestamp
    assert frames_seen == 3
def _get_container(self):
    """Build a bb_binary FrameContainer from the frames buffered on this sink.

    Frames are sorted chronologically so fromTimestamp/toTimestamp span the
    container. Each buffered frame is a (data_source_idx, detection,
    timestamp) tuple; detections are split into decoded tags
    ("detectionsDP") and plain bee detections ("detectionsBees").
    """
    # Index of the timestamp within each buffered frame tuple.
    TIMESTAMP_IDX = 2
    self.frames.sort(key=lambda x: x[TIMESTAMP_IDX])
    start_ts = self.frames[0][TIMESTAMP_IDX]
    end_ts = self.frames[-1][TIMESTAMP_IDX]
    fc = FrameContainer.new_message(
        fromTimestamp=start_ts, toTimestamp=end_ts, camId=self.camId, id=unique_id()
    )
    # Copy the recorded data sources into the container, preserving order.
    dataSources = fc.init("dataSources", len(self.data_sources))
    for i, dsource in enumerate(self.data_sources):
        dataSources[i] = dsource
        dataSources[i].idx = int(i)
    frames = fc.init("frames", len(self.frames))
    for i, (data_source_idx, detection, timestamp) in enumerate(self.frames):
        frame = frames[i]
        frame.id = unique_id()
        frame.dataSourceIdx = data_source_idx
        frame.frameIdx = int(i)
        frame.timestamp = timestamp
        # Decoded-tag detections. tag_positions index 1 feeds xpos and
        # index 0 feeds ypos, i.e. positions are stored (row, col).
        detections_builder = frame.init(
            "detectionsDP", len(detection.tag_positions)
        )
        for j, db in enumerate(detections_builder):
            db.idx = j
            db.xpos = int(detection.tag_positions[j, 1])
            db.ypos = int(detection.tag_positions[j, 0])
            db.zRotation = float(detection.orientations[j, 0])
            db.yRotation = float(detection.orientations[j, 1])
            db.xRotation = float(detection.orientations[j, 2])
            db.localizerSaliency = float(detection.tag_saliencies[j])
            # id bits (presumably floats in [0, 1]) scaled to 0..255 bytes.
            decodedId = db.init("decodedId", len(detection.ids[j]))
            for k, bit in enumerate(detection.ids[j]):
                decodedId[k] = int(round(255 * bit))
        # Bee detections without a decoded id: position, saliency and a
        # type label mapped through self.label_map.
        detections_builder = frame.init(
            "detectionsBees", len(detection.bee_positions)
        )
        for j, db in enumerate(detections_builder):
            db.idx = j
            db.xpos = int(detection.bee_positions[j, 1])
            db.ypos = int(detection.bee_positions[j, 0])
            db.localizerSaliency = float(detection.bee_saliencies[j])
            db.type = self.label_map[detection.bee_types[j]]
    return fc
def _get_container(self):
    """Assemble a bb_binary FrameContainer from the frames buffered on this sink.

    Frames are sorted by timestamp so fromTimestamp/toTimestamp cover the
    container chronologically. Each buffered frame is a
    (data_source_idx, detection, timestamp) tuple; every detection is
    written into the frame's "detectionsDP" union branch.
    """
    # Index of the timestamp within each buffered frame tuple.
    TIMESTAMP_IDX = 2
    self.frames.sort(key=lambda x: x[TIMESTAMP_IDX])
    start_ts = self.frames[0][TIMESTAMP_IDX]
    end_ts = self.frames[-1][TIMESTAMP_IDX]
    fc = FrameContainer.new_message(fromTimestamp=start_ts,
                                    toTimestamp=end_ts,
                                    camId=self.camId,
                                    id=unique_id())
    # Copy the recorded data sources into the container, preserving order.
    dataSources = fc.init('dataSources', len(self.data_sources))
    for i, dsource in enumerate(self.data_sources):
        dataSources[i] = dsource
        dataSources[i].idx = int(i)
    frames = fc.init('frames', len(self.frames))
    for i, (data_source_idx, detection, timestamp) in enumerate(self.frames):
        frame = frames[i]
        frame.id = unique_id()
        frame.dataSourceIdx = data_source_idx
        frame.frameIdx = int(i)
        frame.timestamp = timestamp
        detections_builder = frame.detectionsUnion.init(
            'detectionsDP', len(detection.positions))
        for j, db in enumerate(detections_builder):
            db.idx = j
            # positions/hive_positions index 1 feeds xpos and index 0
            # feeds ypos, i.e. coordinates are stored (row, col).
            db.xpos = int(detection.positions[j, 1])
            db.ypos = int(detection.positions[j, 0])
            db.xposHive = int(detection.hive_positions[j, 1])
            db.yposHive = int(detection.hive_positions[j, 0])
            db.zRotation = float(detection.orientations[j, 0])
            db.yRotation = float(detection.orientations[j, 1])
            db.xRotation = float(detection.orientations[j, 2])
            db.localizerSaliency = float(detection.saliencies[j, 0])
            db.radius = float(detection.radii[j])
            # Per-detection feature descriptor, one int per component.
            descriptor = db.init('descriptor', len(detection.descriptors[j]))
            for k, part in enumerate(detection.descriptors[j]):
                descriptor[k] = int(part)
            # id bits (presumably floats in [0, 1]) scaled to 0..255 bytes.
            decodedId = db.init('decodedId', len(detection.ids[j]))
            for k, bit in enumerate(detection.ids[j]):
                decodedId[k] = int(round(255 * bit))
    return fc
def test_generator_processor_threads(tmpdir, bees_video, filelists_path, pipeline_config):
    """Three identical pipelines over one video must still produce exactly
    one repository file containing 3 frames."""
    repo = Repository(str(tmpdir))
    n_workers = 3
    pipelines = [
        Pipeline([Image, Timestamp], [PipelineResult], **pipeline_config)
        for _ in range(n_workers)
    ]
    processor = GeneratorProcessor(
        pipelines, lambda: BBBinaryRepoSink(repo, camId=0))
    processor(video_generator(bees_video, ts_format='2015',
                              path_filelists=filelists_path))

    filenames = list(repo.iter_fnames())
    assert len(filenames) == 1

    counted = 0
    for fname in filenames:
        with open(fname, 'rb') as f:
            counted += len(list(FrameContainer.read(f).frames))
    assert counted == 3
def test_no_detection(tmpdir, pipeline_config):
    """A blank image must yield one container whose single frame carries
    zero DP detections."""
    repo = Repository(str(tmpdir))
    sink = BBBinaryRepoSink(repo, camId=0)
    pipeline = Pipeline([Image, Timestamp], [PipelineResult], **pipeline_config)

    # All-zero frame: nothing for the localizer/decoder to find.
    blank = np.zeros((3000, 4000), dtype=np.uint8)
    results = pipeline([blank, 0])
    sink.add_frame(DataSource.new_message(filename='source'), results, 0)
    sink.finish()

    fnames = list(repo.iter_fnames())
    assert len(fnames) == 1
    for fname in fnames:
        with open(fname, 'rb') as f:
            fc = FrameContainer.read(f)
        assert len(fc.frames) == 1
        assert fc.dataSources[0].filename == 'source'
        only_frame = fc.frames[0]
        assert len(only_frame.detectionsUnion.detectionsDP) == 0
def test_generator_processor_video(tmpdir, bees_video, filelists_path, pipeline_config):
    """End-to-end video run: one repo file, ascending container timestamps,
    3 frames in total."""
    repo = Repository(str(tmpdir))
    pipeline = Pipeline([Image, Timestamp], [PipelineResult], **pipeline_config)
    gen_processor = GeneratorProcessor(
        pipeline, lambda: BBBinaryRepoSink(repo, camId=0))
    frames = video_generator(bees_video, ts_format='2015',
                             path_filelists=filelists_path)
    gen_processor(frames)

    assert len(list(repo.iter_fnames())) == 1

    ts_floor = 0
    n_seen = 0
    for fname in repo.iter_fnames():
        print("{}: {}".format(fname, os.path.getsize(fname)))
        with open(fname, 'rb') as f:
            container = FrameContainer.read(f)
        n_seen += len(list(container.frames))
        assert container.dataSources[0].filename == os.path.basename(bees_video)
        assert ts_floor < container.fromTimestamp
        ts_floor = container.fromTimestamp
    assert n_seen == 3