Example #1
    def open(self):
        remote_params_url = BASE_URL_RETINAFACE + 'R50-0000.params'
        remote_config_url = BASE_URL_RETINAFACE + 'R50-symbol.json'

        path_to_params_file = get_file('R50-0000.params', remote_params_url)
        path_to_config_file = get_file('R50-symbol.json', remote_config_url)
        # MXNet loads checkpoints from a 'prefix-epoch' pair, so strip the
        # '-0000.params' suffix to recover the prefix.
        prefix_idx = path_to_params_file.find('-0000.params')
        prefix = path_to_params_file[0:prefix_idx]

        if self.device_type == CPU:
            ctx_id = -1
        elif self.device_type == GPU:
            ctx_id = 0
        else:
            ctx_id = -1  # fall back to the CPU context, mirroring the other nodes
        self._detector = RetinaFace(prefix, 0, ctx_id, 'net3')
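Once opened, detection is a single call on the wrapped model. A minimal usage sketch, assuming the insightface RetinaFace interface (detect() takes a BGR image and returns face boxes with scores plus five-point landmarks); the helper name, threshold, and scales are illustrative:

    def _detect_faces(self, img):
        # Assumed insightface API: faces is an N x 5 array of
        # [x1, y1, x2, y2, score]; landmarks is an N x 5 x 2 array.
        faces, landmarks = self._detector.detect(img,
                                                 threshold=0.8,
                                                 scales=[1.0],
                                                 do_flip=False)
        return faces, landmarks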
Example #2
    def open(self):
        '''
        Creates a TensorFlow session and loads the frozen human-encoder graph.
        '''
        if self.device_type == CPU:
            device_id = 'cpu'
        elif self.device_type == GPU:
            device_id = 'gpu'
        else:
            device_id = 'cpu'

        if self._path_to_pb_file is None:
            remote_url = 'https://github.com/videoflow/videoflow-contrib/releases/download/models/humanencoder_mars_128.pb'
            self._path_to_pb_file = get_file('human_encoder.pb', remote_url)

        with tf.device(device_id):
            self._model_graph = tf.Graph()
            with self._model_graph.as_default():
                graph_def = tf.GraphDef()
                with tf.gfile.GFile(self._path_to_pb_file, 'rb') as fid:
                    serialized_graph = fid.read()
                    graph_def.ParseFromString(serialized_graph)
                    tf.import_graph_def(graph_def, name='')

        self._session = tf.Session(graph=self._model_graph)
        self._input_var = self._model_graph.get_tensor_by_name('images:0')
        self._output_var = self._model_graph.get_tensor_by_name('features:0')
        self._feature_dim = self._output_var.get_shape().as_list()[-1]
        self._image_shape = self._input_var.get_shape().as_list()[1:]
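With the graph loaded, encoding a batch of person crops is a single session.run over the two named tensors. A minimal sketch under the assumption that the crops were already resized to self._image_shape; the helper name is ours:

    def _encode(self, crops):
        # crops: float array of shape [batch] + self._image_shape; the run
        # returns a [batch, self._feature_dim] array of appearance embeddings.
        return self._session.run(self._output_var,
                                 feed_dict={self._input_var: crops})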
Example #3
def main():
    input_file = get_file(VIDEO_NAME, URL_VIDEO)
    output_file = "output.avi"
    reader = VideofileReader(input_file)
    frame = FrameIndexSplitter()(reader)
    detector = TensorflowObjectDetector()(frame)
    annotator = BoundingBoxAnnotator()(frame, detector)
    writer = VideofileWriter(output_file, fps=30)(annotator)
    fl = flow.Flow([reader], [writer], flow_type=BATCH)
    fl.run()
    fl.join()
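VideofileReader emits (index, frame) pairs, so FrameIndexSplitter exists only to drop the index before frames reach the detector. Its full definition appears in Example #14; the essential part is a one-method ProcessorNode:

class FrameIndexSplitter(videoflow.core.node.ProcessorNode):
    def process(self, data):
        # data is the (index, frame) tuple produced by VideofileReader;
        # only the raw frame is passed downstream.
        index, frame = data
        return frame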
Example #4
    def open(self):
        #1. Load detection model
        detection_model_path = get_file('detection.pkl', URL_DETECTION_MODEL)
        obj_detect = FRCNN_FPN(num_classes=2)
        obj_detect.load_state_dict(
            torch.load(detection_model_path,
                       map_location=lambda storage, loc: storage))
        obj_detect.eval()
        obj_detect.cuda()

        #2. Load re-identification model
        reid_model_path = get_file('reid.pkl', URL_REID_MODEL)
        reid_network = resnet50(pretrained=False, **{'output_dim': 128})
        reid_network.load_state_dict(
            torch.load(reid_model_path,
                       map_location=lambda storage, loc: storage))
        reid_network.eval()
        reid_network.cuda()

        #3. Create tracker
        self._tracker = Tracker(obj_detect,
                                reid_network,
                                detection_person_thresh=0.5,
                                regression_person_thresh=0.5,
                                detection_nms_thresh=0.3,
                                regression_nms_thresh=0.6,
                                public_detections=False,
                                inactive_patience=10,
                                do_reid=True,
                                max_features_num=10,
                                reid_sim_threshold=2.0,
                                reid_iou_threshold=0.2,
                                motion_model_cfg={
                                    'enabled': False,
                                    'n_steps': 1,
                                    'center_only': True
                                },
                                warp_mode='cv2.MOTION_EUCLIDEAN',
                                number_of_iterations=100,
                                termination_eps=0.00001,
                                do_align=False)
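After open(), per-frame tracking would go through the tracker's step/results interface. A hedged sketch, assuming the tracking_wo_bnw Tracker API (step() consuming a blob dict with an image tensor, get_results() returning per-track box histories); the blob layout and helper name are assumptions:

    def _track_frame(self, frame_tensor):
        # Assumed interface: step() ingests a dict holding a [1, C, H, W]
        # image tensor; get_results() returns
        # {track_id: {frame_number: [x1, y1, x2, y2, score]}}.
        blob = {'img': frame_tensor}
        self._tracker.step(blob)
        return self._tracker.get_results()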
Example #5
def main():
    output_file = sys.argv[1]
    input_file = get_file(VIDEO_NAME, URL_VIDEO)
    reader = VideofileReader(input_file)
    frame = FrameIndexSplitter()(reader)
    tracks = TracktorFromFrames()(frame)
    tracks_to_annotator = TracksToAnnotator()(tracks)
    annotator = TrackerAnnotator()(frame, tracks_to_annotator)
    writer = VideofileWriter(output_file, fps=30)(annotator)
    fl = flow.Flow([reader], [writer], flow_type=BATCH)
    fl.run()
    fl.join()
Example #6
def main():
    input_file = get_file(VIDEO_NAME, URL_VIDEO)
    output_file = "output.avi"
    
    reader = VideofileReader(input_file)
    frame = FrameIndexSplitter()(reader)
    detector = TensorflowObjectDetector(num_classes=2,
                                        architecture='fasterrcnn-resnet101',
                                        dataset='kitti')(frame)
    # keeps only automobile classes: autos, buses, cycles, etc.
    tracker = KalmanFilterBoundingBoxTracker()(detector)
    annotator = TrackerAnnotator()(frame, tracker)
    writer = VideofileWriter(output_file, fps=30)(annotator)
    fl = flow.Flow([reader], [writer], flow_type=BATCH)
    fl.run()
    fl.join()
Example #7
    def open(self):
        cfg = get_cfg()
        if self.device_type == CPU:
            cfg.MODEL.DEVICE = 'cpu'
        elif self.device_type == GPU:
            cfg.MODEL.DEVICE = 'cuda'
        if self._path_to_model_file is None:
            remote_url = BASE_URL_DETECTRON2 + self._remote_model_file_name
            self._path_to_model_file = get_file('detectron2_model.pkl', remote_url)
            current_folder = os.path.abspath(os.path.dirname(__file__))
            self._path_to_model_config = os.path.join(current_folder, 'configs', f'{self._architecture}.yaml')
        cfg.merge_from_file(self._path_to_model_config)
        cfg.MODEL.WEIGHTS = self._path_to_model_file

        self._predictor = DefaultPredictor(cfg)
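Once the predictor exists, inference is a single call. A short sketch using Detectron2's DefaultPredictor interface (one BGR image in, a dict whose 'instances' field carries boxes, scores, and classes out); the helper name is ours:

    def _predict(self, bgr_image):
        # DefaultPredictor handles resizing and normalization internally;
        # 'instances' holds pred_boxes, scores, and pred_classes.
        outputs = self._predictor(bgr_image)
        return outputs['instances'].to('cpu')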
Example #8
def main():
    input_file = get_file(VIDEO_NAME, URL_VIDEO)
    output_file = "output.avi"

    reader = VideofileReader(input_file)
    frame = FrameIndexSplitter()(reader)
    detector = TensorflowObjectDetector()(frame)
    # keeps only automobile classes: autos, buses, cycles, etc.
    filter_ = BoundingBoxesFilter([1, 2, 3, 4, 6, 8, 10, 13])(detector)
    tracker = KalmanFilterBoundingBoxTracker()(filter_)
    annotator = TrackerAnnotator()(frame, tracker)
    writer = VideofileWriter(output_file, fps=30)(annotator)
    fl = flow.Flow([reader], [writer], flow_type=BATCH)
    fl.run()
    fl.join()
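BoundingBoxesFilter keeps only detections whose class index appears in the supplied list; the indices are interpreted against the label map of whatever dataset the upstream detector was trained on (presumably COCO for the default TensorflowObjectDetector), so the same list selects different classes under a different detector.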
Example #9
def track_humans():
    input_file_path = get_file(VIDEO_NAME, URL_VIDEO)
    output_file = 'annotated_video.avi'
    reader = VideofileReader(input_file_path)
    frame = FrameIndexSplitter()(reader)
    results = Detectron2HumanPose(architecture='R50_FPN_3x',
                                  device_type='cpu')(frame)
    keypoints = KeypointsExtractor()(results)
    bounding_boxes = BoundingBoxesExtractor()(results)
    annotated_keypoints = HumanPoseAnnotator()(frame, keypoints)
    cropped_humans = CropBoundingBoxes()(frame, bounding_boxes)
    human_features = HumanEncoder()(cropped_humans)
    tracker_input = AppendFeaturesToBoundingBoxes()(bounding_boxes,
                                                    human_features)
    tracks = DeepSort()(tracker_input)
    tracks_annotator_input = ConvertTracksForAnotation()(tracks)
    annotated_tracks = TrackerAnnotator()(annotated_keypoints,
                                          tracks_annotator_input)
    writer = VideofileWriter(output_file)(annotated_tracks)
    fl = flow.Flow([reader], [writer], flow_type=BATCH)
    fl.run()
    fl.join()
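Note how this flow branches and rejoins: the same frame feeds both the pose model and the crop node, DeepSort consumes bounding boxes enriched with appearance features from HumanEncoder, and the final TrackerAnnotator draws track boxes on top of frames that already carry the keypoint annotations.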
Example #10
    def open(self):
        '''
        Creates a TensorFlow session and loads the frozen segmentation model graph.
        '''
        if self.device_type == CPU:
            device_id = 'cpu'
        elif self.device_type == GPU:
            device_id = 'gpu'
        else:
            device_id = 'cpu'

        if self._path_to_pb_file is None:
            remote_url = BASE_URL_SEGMENTATION + self._remote_model_file_name
            self._path_to_pb_file = get_file(self._remote_model_file_name,
                                             remote_url)

        with tf.device(device_id):
            self._model_graph = tf.Graph()
            with self._model_graph.as_default():
                graph_def = tf.GraphDef()
                with tf.gfile.GFile(self._path_to_pb_file, 'rb') as fid:
                    serialized_graph = fid.read()
                    graph_def.ParseFromString(serialized_graph)
                    tf.import_graph_def(graph_def, name='')
        self._session = tf.Session(graph=self._model_graph)
        self._detection_boxes = self._model_graph.get_tensor_by_name(
            'detection_boxes:0')
        self._detection_masks = self._model_graph.get_tensor_by_name(
            'detection_masks:0')
        self._num_detections = self._model_graph.get_tensor_by_name(
            'num_detections:0')
        self._detection_scores = self._model_graph.get_tensor_by_name(
            'detection_scores:0')
        self._detection_classes = self._model_graph.get_tensor_by_name(
            'detection_classes:0')
        self._image_tensor = self._model_graph.get_tensor_by_name(
            'image_tensor:0')
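With all six tensors resolved by name, per-frame segmentation is one session.run. A minimal sketch following the standard TensorFlow object-detection-API convention of a uint8 [1, H, W, 3] input batch; the helper name is ours and numpy is assumed imported as np:

    def _run_inference(self, image):
        # image: uint8 [height, width, 3] array; the graph expects a batch.
        boxes, masks, scores, classes, num = self._session.run(
            [self._detection_boxes, self._detection_masks,
             self._detection_scores, self._detection_classes,
             self._num_detections],
            feed_dict={self._image_tensor: np.expand_dims(image, axis=0)})
        return boxes, masks, scores, classes, num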
Example #11
def test_segmenter_resources():
    for modelid in TensorflowSegmenter.supported_models:
        filename = f'{modelid}.pb'
        url_path = BASE_URL_SEGMENTATION + filename
        get_file(filename, url_path)
Example #12
def test_detector_resources():
    for modelid in TensorflowObjectDetector.supported_models:
        filename = f'{modelid}.pb'
        url_path = BASE_URL_DETECTION + filename
        get_file(filename, url_path)
Example #13
def test_bboxannotator_resources():
    for datasetid in BoundingBoxAnnotator.supported_datasets:
        filename = f'labels_{datasetid}.pbtxt'
        url_path = BASE_URL_DETECTION + filename
        get_file(filename, url_path)
Example #14
class FrameIndexSplitter(videoflow.core.node.ProcessorNode):
    def __init__(self):
        super(FrameIndexSplitter, self).__init__()

    def process(self, data):
        index, frame = data
        return frame


class KeypointsExtractor(videoflow.core.node.ProcessorNode):
    def __init__(self):
        super(KeypointsExtractor, self).__init__()

    def process(self, data):
        keypoints, bounding_boxes = data
        return keypoints


def annotate_video(video_filepath):
    reader = VideofileReader(video_filepath)
    frame = FrameIndexSplitter()(reader)
    results = Detectron2HumanPose(architecture="R50_FPN_3x",
                                  device_type="cpu")(frame)
    keypoints = KeypointsExtractor()(results)
    annotated_frame = HumanPoseAnnotator()(frame, keypoints)
    writer = VideofileWriter("pose.avi")(annotated_frame)
    fl = flow.Flow([reader], [writer], flow_type=BATCH)
    fl.run()
    fl.join()


if __name__ == '__main__':
    video_filepath = get_file(VIDEO_NAME, URL_VIDEO)
    annotate_video(video_filepath)