Example 1
def prepare_model(self, input_type):
    """
    prepares Object_Detection model
    input_type: must be 'image', 'video', or 'ros'
    """
    assert input_type in ['image', 'video', 'ros'], "only 'image', 'video' and 'ros' input possible"
    super(ObjectDetectionModel, self).prepare_model()
    self.input_type = input_type
    # Tracker
    if self.config.USE_TRACKER:
        self.prepare_tracker()
    print("> Building Graph")
    with self.detection_graph.as_default():
        with tf.Session(graph=self.detection_graph, config=self._tf_config) as self._sess:
            # Prepare Input Stream
            self.prepare_input_stream()
            # Define Input and Output tensors
            self._tensor_dict = self.get_tensor_dict(['num_detections', 'detection_boxes',
                                                      'detection_scores', 'detection_classes', 'detection_masks'])
            self._image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
            # Mask Transformations
            if 'detection_masks' in self._tensor_dict:
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                detection_boxes = tf.squeeze(self._tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(self._tensor_dict['detection_masks'], [0])
                real_num_detection = tf.cast(self._tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = reframe_box_masks_to_image_masks(detection_masks, detection_boxes,
                                                                            self.stream_height, self.stream_width)
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                self._tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            if self.config.SPLIT_MODEL:
                self._score_out = self.detection_graph.get_tensor_by_name('{}:0'.format(self.config.SPLIT_NODES[0]))
                self._expand_out = self.detection_graph.get_tensor_by_name('{}:0'.format(self.config.SPLIT_NODES[1]))
                self._score_in = self.detection_graph.get_tensor_by_name('{}_1:0'.format(self.config.SPLIT_NODES[0]))
                self._expand_in = self.detection_graph.get_tensor_by_name('{}_1:0'.format(self.config.SPLIT_NODES[1]))
                # Threading
                self._gpu_worker = SessionWorker("GPU", self.detection_graph, self._tf_config)
                self._cpu_worker = SessionWorker("CPU", self.detection_graph, self._tf_config)
                self._gpu_opts = [self._score_out, self._expand_out]
                self._cpu_opts = [self._tensor_dict['detection_boxes'],
                                  self._tensor_dict['detection_scores'],
                                  self._tensor_dict['detection_classes'],
                                  self._tensor_dict['num_detections']]
        return self
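
The mask branch above relies on reframe_box_masks_to_image_masks (the TensorFlow Object Detection API helper in object_detection/utils/ops.py) to turn per-box masks into full-image masks before thresholding. As a rough illustration of its semantics only (the real helper works on tensors via tf.image.crop_and_resize), a hypothetical NumPy equivalent:

import numpy as np
import cv2

def reframe_box_masks_to_image_masks_np(box_masks, boxes, image_height, image_width):
    """Illustration only: resize each (mask_h, mask_w) box mask and paste it at
    its normalized box location inside a zeroed full-size image mask."""
    image_masks = np.zeros((box_masks.shape[0], image_height, image_width), dtype=np.float32)
    for i, (mask, box) in enumerate(zip(box_masks, boxes)):
        ymin, xmin, ymax, xmax = box  # normalized [0, 1] coordinates
        y0, y1 = int(ymin * image_height), int(ymax * image_height)
        x0, x1 = int(xmin * image_width), int(xmax * image_width)
        if y1 > y0 and x1 > x0:
            image_masks[i, y0:y1, x0:x1] = cv2.resize(mask, (x1 - x0, y1 - y0))
    return image_masks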
Example 2
class ObjectDetectionModel(Model):
    """
    object_detection model class
    """
    def __init__(self,config):
        super(ObjectDetectionModel, self).__init__(config)

    def prepare_input_stream(self):
        """
        prepares Input Stream
        stream types: 'video','image','ros'
        gets called by prepare model
        """
        if self.input_type == 'video':
            self._is_videoD = True
            self._input_stream = VideoStream(self.config.VIDEO_INPUT,self.config.WIDTH,
                                                    self.config.HEIGHT).start()
            self.stream_height = self._input_stream.real_height
            self.stream_width = self._input_stream.real_width
        elif self.input_type == 'image':
            self._is_imageD = True
            self._input_stream = ImageStream(self.config.IMAGE_PATH,self.config.LIMIT_IMAGES,
                                            (self.config.WIDTH,self.config.HEIGHT)).start()
            self.stream_height = self.config.HEIGHT
            self.stream_width = self.config.WIDTH
        elif self.input_type == 'ros':
            self.prepare_ros('detection_node')
        # Timeliner for image detection
        if self.config.WRITE_TIMELINE:
            self.prepare_timeliner()


    def prepare_model(self,input_type):
        """
        prepares Object_Detection model
        input_type: must be 'image', 'video', or 'ros'
        """
        assert input_type in ['image','video','ros'], "only 'image','video' and 'ros' input possible"
        super(ObjectDetectionModel, self).prepare_model()
        self.input_type = input_type
        # Tracker
        if self.config.USE_TRACKER:
            self.prepare_tracker()
        print("> Building Graph")
        with self.detection_graph.as_default():
            with tf.Session(graph=self.detection_graph,config=self._tf_config) as self._sess:
                # Prepare Input Stream
                self.prepare_input_stream()
                # Define Input and Output tensors
                self._tensor_dict = self.get_tensor_dict(['num_detections', 'detection_boxes',
                                                        'detection_scores','detection_classes', 'detection_masks'])
                self._image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
                # Mask Transformations
                if 'detection_masks' in self._tensor_dict:
                    # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                    detection_boxes = tf.squeeze(self._tensor_dict['detection_boxes'], [0])
                    detection_masks = tf.squeeze(self._tensor_dict['detection_masks'], [0])
                    real_num_detection = tf.cast(self._tensor_dict['num_detections'][0], tf.int32)
                    detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                    detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                    detection_masks_reframed = reframe_box_masks_to_image_masks(detection_masks, detection_boxes,self.stream_height,self.stream_width)
                    detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                    self._tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
                if self.config.SPLIT_MODEL:
                    self._score_out = self.detection_graph.get_tensor_by_name('{}:0'.format(self.config.SPLIT_NODES[0]))
                    self._expand_out = self.detection_graph.get_tensor_by_name('{}:0'.format(self.config.SPLIT_NODES[1]))
                    self._score_in = self.detection_graph.get_tensor_by_name('{}_1:0'.format(self.config.SPLIT_NODES[0]))
                    self._expand_in = self.detection_graph.get_tensor_by_name('{}_1:0'.format(self.config.SPLIT_NODES[1]))
                    # Threading
                    self._gpu_worker = SessionWorker("GPU",self.detection_graph,self._tf_config)
                    self._cpu_worker = SessionWorker("CPU",self.detection_graph,self._tf_config)
                    self._gpu_opts = [self._score_out,self._expand_out]
                    self._cpu_opts = [self._tensor_dict['detection_boxes'],
                                    self._tensor_dict['detection_scores'],
                                    self._tensor_dict['detection_classes'],
                                    self._tensor_dict['num_detections']]
            return self

    def run_default_sess(self):
        """
        runs default session
        """
        # default session
        self.frame = self._input_stream.read()
        output_dict = self._sess.run(self._tensor_dict,
                                    feed_dict={self._image_tensor:
                                    self._visualizer.expand_and_convertRGB_image(self.frame)},
                                    options=self._run_options, run_metadata=self._run_metadata)
        self.num = output_dict['num_detections'][0]
        self.classes = output_dict['detection_classes'][0]
        self.boxes = output_dict['detection_boxes'][0]
        self.scores = output_dict['detection_scores'][0]
        if 'detection_masks' in output_dict:
            self.masks = output_dict['detection_masks'][0]

    def run_thread_sess(self):
        """
        runs separate gpu and cpu session threads
        """
        if self._gpu_worker.is_sess_empty():
            # put new queue
            self.frame = self._input_stream.read()
            gpu_feeds = {self._image_tensor: self._visualizer.expand_and_convertRGB_image(self.frame)}
            if self.config.VISUALIZE:
                gpu_extras = self.frame # for visualization frame
            else:
                gpu_extras = None
            self._gpu_worker.put_sess_queue(self._gpu_opts,gpu_feeds,gpu_extras)
        g = self._gpu_worker.get_result_queue()
        if g is None:
            # gpu thread has no output queue. ok skip, let's check cpu thread.
            pass
        else:
            # gpu thread has output queue.
            score,expand,self._frame = g["results"][0],g["results"][1],g["extras"]
            if self._cpu_worker.is_sess_empty():
                # When cpu thread has no next queue, put new queue.
                # else, drop gpu queue.
                cpu_feeds = {self._score_in: score, self._expand_in: expand}
                cpu_extras = self._frame  # forward the frame that produced score/expand
                self._cpu_worker.put_sess_queue(self._cpu_opts,cpu_feeds,cpu_extras)
        c = self._cpu_worker.get_result_queue()
        if c is None:
            # cpu thread has no output queue. ok, nothing to do. continue
            self._wait_thread = True
            return # If CPU RESULT has not been set yet, no fps update
        else:
            self._wait_thread = False
            self.boxes,self.scores,self.classes,self.num,self.frame = c["results"][0],c["results"][1],c["results"][2],c["results"][3],c["extras"]

    def run_split_sess(self):
        """
        runs split session WITHOUT threading
        optional: timeline writer
        """
        self.frame = self._input_stream.read()
        score, expand = self._sess.run(self._gpu_opts,feed_dict={self._image_tensor:
                                        self._visualizer.expand_and_convertRGB_image(self.frame)},
                                        options=self._run_options, run_metadata=self._run_metadata)
        if self.config.WRITE_TIMELINE:
            self.timeliner.write_timeline(self._run_metadata.step_stats,
                                        '{}/timeline_{}_SM1.json'.format(
                                        self.config.RESULT_PATH,self.config.DISPLAY_NAME))
        # CPU Session
        self.boxes,self.scores,self.classes,self.num = self._sess.run(self._cpu_opts,
                                                                    feed_dict={self._score_in:score,
                                                                    self._expand_in: expand},
                                                                    options=self._run_options,
                                                                    run_metadata=self._run_metadata)
        if self.config.WRITE_TIMELINE:
            self.timeliner.write_timeline(self._run_metadata.step_stats,
                                        '{}/timeline_{}_SM2.json'.format(
                                        self.config.RESULT_PATH,self.config.DISPLAY_NAME))


    def reformat_detection(self):
        """
        reformats detection
        """
        self.num = int(self.num)
        self.boxes = np.squeeze(self.boxes)
        self.classes = np.squeeze(self.classes).astype(np.uint8)
        self.scores = np.squeeze(self.scores)

    def detect(self):
        """
        Object_Detection Detection function
        optional: multi threading split session, timeline writer
        """
        if not (self.config.USE_TRACKER and self._track):
            if self.config.SPLIT_MODEL:
                if self.config.MULTI_THREADING:
                    self.run_thread_sess()
                    if self._wait_thread: # checks if thread has output
                        return
                else:
                    self.run_split_sess()
            else:
                self.run_default_sess()
                if self.config.WRITE_TIMELINE:
                    self.timeliner.write_timeline(self._run_metadata.step_stats,
                                            '{}/timeline_{}.json'.format(
                                            self.config.RESULT_PATH,self.config.DISPLAY_NAME))
            self.reformat_detection()
            # Activate Tracker
            if self.config.USE_TRACKER and not self._is_imageD:
                self.activate_tracker()
        # Tracking
        else:
            self.run_tracker()

        # Publish ROS Message
        if self._is_rosD:
            self._ros_publisher.publish(self.boxes,self.scores,self.classes,self.num,self.category_index,self.frame.shape,self.masks,self.fps.fps_local())
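
The class above is meant to be driven by a small outer loop: prepare_model() returns self, so preparation and detection chain together. A minimal sketch of such a driver; the import paths and the stream's isActive()/stop() methods are assumptions, not part of this listing:

# Hypothetical driver; module paths and the stream API are assumptions.
from rod.config import Config
from rod.model import ObjectDetectionModel

config = Config()
model = ObjectDetectionModel(config).prepare_model('video')  # or 'image' / 'ros'
while model._input_stream.isActive():  # isActive() assumed, cf. WebcamVideoStream below
    model.detect()
model._input_stream.stop()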
Example 3
def detection(model,config):
    # Tracker
    if config.USE_TRACKER:
        import sys
        sys.path.append(os.getcwd()+'/stuff/kcf')
        import KCF
        tracker = KCF.kcftracker(False, True, False, False)
        tracker_counter = 0
        track = False

    print("> Building Graph")
    # tf Session Config
    tf_config = model.tf_config
    detection_graph = model.detection_graph
    category_index = model.category_index
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph,config=tf_config) as sess:
            # start Videostream
            vs = WebcamVideoStream(config.VIDEO_INPUT,config.WIDTH,config.HEIGHT).start()
            # Define Input and Output tensors
            tensor_dict = model.get_tensordict(['num_detections', 'detection_boxes', 'detection_scores','detection_classes', 'detection_masks'])
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Mask Transformations
            if 'detection_masks' in tensor_dict:
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                        detection_masks, detection_boxes, vs.real_height, vs.real_width)
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            if config.SPLIT_MODEL:
                score_out = detection_graph.get_tensor_by_name('Postprocessor/convert_scores:0')
                expand_out = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1:0')
                score_in = detection_graph.get_tensor_by_name('Postprocessor/convert_scores_1:0')
                expand_in = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1_1:0')
                # Threading
                score = model.score
                expand = model.expand
                gpu_worker = SessionWorker("GPU",detection_graph,tf_config)
                cpu_worker = SessionWorker("CPU",detection_graph,tf_config)
                gpu_opts = [score_out, expand_out]
                cpu_opts = [tensor_dict['detection_boxes'], tensor_dict['detection_scores'], tensor_dict['detection_classes'], tensor_dict['num_detections']]
                gpu_counter = 0
                cpu_counter = 0

            fps = FPS(config.FPS_INTERVAL).start()
            print('> Starting Detection')
            while vs.isActive():
                # Detection
                if not (config.USE_TRACKER and track):
                    if config.SPLIT_MODEL:
                        # split model in separate gpu and cpu session threads
                        masks = None # No Mask Detection possible yet
                        if gpu_worker.is_sess_empty():
                            # read video frame, expand dimensions and convert to rgb
                            frame = vs.read()
                            # put new queue
                            gpu_feeds = {image_tensor: vs.expanded()}
                            if config.VISUALIZE:
                                gpu_extras = frame # for visualization frame
                            else:
                                gpu_extras = None
                            gpu_worker.put_sess_queue(gpu_opts,gpu_feeds,gpu_extras)
                        g = gpu_worker.get_result_queue()
                        if g is None:
                            # gpu thread has no output queue. ok skip, let's check cpu thread.
                            gpu_counter += 1
                        else:
                            # gpu thread has output queue.
                            gpu_counter = 0
                            score,expand,frame = g["results"][0],g["results"][1],g["extras"]

                            if cpu_worker.is_sess_empty():
                                # When cpu thread has no next queue, put new queue.
                                # else, drop gpu queue.
                                cpu_feeds = {score_in: score, expand_in: expand}
                                cpu_extras = frame
                                cpu_worker.put_sess_queue(cpu_opts,cpu_feeds,cpu_extras)
                        c = cpu_worker.get_result_queue()
                        if c is None:
                            # cpu thread has no output queue. ok, nothing to do. continue
                            cpu_counter += 1
                            continue # If CPU RESULT has not been set yet, no fps update
                        else:
                            cpu_counter = 0
                            boxes, scores, classes, num, frame = c["results"][0],c["results"][1],c["results"][2],c["results"][3],c["extras"]
                    else:
                        # default session
                        frame = vs.read()
                        output_dict = sess.run(tensor_dict, feed_dict={image_tensor: vs.expanded()})
                        num = output_dict['num_detections'][0]
                        classes = output_dict['detection_classes'][0]
                        boxes = output_dict['detection_boxes'][0]
                        scores = output_dict['detection_scores'][0]
                        if 'detection_masks' in output_dict:
                            masks = output_dict['detection_masks'][0]
                        else:
                            masks = None

                    # reformat detection
                    num = int(num)
                    boxes = np.squeeze(boxes)
                    classes = np.squeeze(classes).astype(np.uint8)
                    scores = np.squeeze(scores)

                    # Visualization
                    vis = vis_detection(frame, boxes, classes, scores, masks, category_index, fps.fps_local(),
                                        config.VISUALIZE, config.DET_INTERVAL, config.DET_TH, config.MAX_FRAMES,
                                        fps._glob_numFrames, config.OD_MODEL_NAME)
                    if not vis:
                        break

                    # Activate Tracker
                    if config.USE_TRACKER and num <= config.NUM_TRACKERS:
                        tracker_frame = frame
                        track = True
                        first_track = True

                # Tracking
                else:
                    frame = vs.read()
                    if first_track:
                        trackers = []
                        tracker_boxes = boxes
                        for box in boxes[~np.all(boxes == 0, axis=1)]:
                                tracker.init(conv_detect2track(box,vs.real_width, vs.real_height), tracker_frame)
                                trackers.append(tracker)
                        first_track = False

                    for idx,tracker in enumerate(trackers):
                        tracker_box = tracker.update(frame)
                        tracker_boxes[idx,:] = conv_track2detect(tracker_box, vs.real_width, vs.real_height)
                    vis = vis_detection(frame, tracker_boxes, classes, scores, masks, category_index, fps.fps_local(),
                                        config.VISUALIZE, config.DET_INTERVAL, config.DET_TH, config.MAX_FRAMES,
                                        fps._glob_numFrames, config.OD_MODEL_NAME)
                    if not vis:
                        break

                    tracker_counter += 1
                    #tracker_frame = frame
                    if tracker_counter >= config.TRACKER_FRAMES:
                        track = False
                        tracker_counter = 0

                fps.update()

    # End everything
    vs.stop()
    fps.stop()
    if config.SPLIT_MODEL:
        gpu_worker.stop()
        cpu_worker.stop()
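
This example converts boxes with conv_detect2track and conv_track2detect, which are not shown in the listing. Judging from the call sites, they translate between the detector's normalized [ymin, xmin, ymax, xmax] format and the pixel [x, y, w, h] region KCF expects; a plausible sketch under that assumption:

import numpy as np

def conv_detect2track(box, width, height):
    """Normalized [ymin, xmin, ymax, xmax] -> pixel [x, y, w, h] for tracker.init()."""
    ymin, xmin, ymax, xmax = box
    return [int(xmin * width), int(ymin * height),
            int((xmax - xmin) * width), int((ymax - ymin) * height)]

def conv_track2detect(box, width, height):
    """Pixel [x, y, w, h] from tracker.update() -> normalized [ymin, xmin, ymax, xmax]."""
    x, y, w, h = box
    return np.array([y / float(height), x / float(width),
                     (y + h) / float(height), (x + w) / float(width)])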
Example 4
def prepare_model(self, input_type):
    """
    prepares Object_Detection model
    input_type: must be 'image' or 'video'
    """
    assert input_type in ['image', 'video'], "only 'image' or 'video' input possible"
    super(ObjectDetectionModel, self).prepare_model()
    self.input_type = input_type
    # Tracker
    if self.config.USE_TRACKER:
        sys.path.append(os.getcwd() + '/rod/kcf')
        import KCF
        self._tracker = KCF.kcftracker(False, True, False, False)
        self._tracker_counter = 0
        self._track = False
    print("> Building Graph")
    with self.detection_graph.as_default():
        with tf.Session(graph=self.detection_graph,
                        config=self._tf_config) as self._sess:
            # Input Configuration
            if self.input_type == 'video':
                self._input_stream = VideoStream(
                    self.config.VIDEO_INPUT, self.config.WIDTH,
                    self.config.HEIGHT).start()
                height = self._input_stream.real_height
                width = self._input_stream.real_width
            elif self.input_type == 'image':
                self._input_stream = ImageStream(
                    self.config.IMAGE_PATH, self.config.LIMIT_IMAGES,
                    (self.config.WIDTH, self.config.HEIGHT)).start()
                height = self.config.HEIGHT
                width = self.config.WIDTH
                # Timeliner for image detection
                if self.config.WRITE_TIMELINE:
                    self._run_options = tf.RunOptions(
                        trace_level=tf.RunOptions.FULL_TRACE)
                    self._run_metadata = tf.RunMetadata()
                    self.timeliner = TimeLiner()
            # Define Input and Output tensors
            self._tensor_dict = self.get_tensordict([
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ])
            self._image_tensor = self.detection_graph.get_tensor_by_name(
                'image_tensor:0')
            # Mask Transformations
            if 'detection_masks' in self._tensor_dict:
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                detection_boxes = tf.squeeze(
                    self._tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(
                    self._tensor_dict['detection_masks'], [0])
                real_num_detection = tf.cast(
                    self._tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0],
                                           [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0],
                                           [real_num_detection, -1, -1])
                detection_masks_reframed = reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, height, width)
                detection_masks_reframed = tf.cast(
                    tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                self._tensor_dict['detection_masks'] = tf.expand_dims(
                    detection_masks_reframed, 0)
            if self.config.SPLIT_MODEL:
                self._score_out = self.detection_graph.get_tensor_by_name(
                    '{}:0'.format(self.config.SPLIT_NODES[0]))
                self._expand_out = self.detection_graph.get_tensor_by_name(
                    '{}:0'.format(self.config.SPLIT_NODES[1]))
                self._score_in = self.detection_graph.get_tensor_by_name(
                    '{}_1:0'.format(self.config.SPLIT_NODES[0]))
                self._expand_in = self.detection_graph.get_tensor_by_name(
                    '{}_1:0'.format(self.config.SPLIT_NODES[1]))
                # Threading
                self._gpu_worker = SessionWorker("GPU",
                                                 self.detection_graph,
                                                 self._tf_config)
                self._cpu_worker = SessionWorker("CPU",
                                                 self.detection_graph,
                                                 self._tf_config)
                self._gpu_opts = [self._score_out, self._expand_out]
                self._cpu_opts = [
                    self._tensor_dict['detection_boxes'],
                    self._tensor_dict['detection_scores'],
                    self._tensor_dict['detection_classes'],
                    self._tensor_dict['num_detections']
                ]
        return self
Example 5
class ObjectDetectionModel(Model):
    """
    object_detection model class
    """
    def __init__(self, config):
        super(ObjectDetectionModel, self).__init__(config)

    def prepare_model(self, input_type):
        """
        prepares Object_Detection model
        input_type: must be 'image' or 'video'
        """
        assert input_type in ['image', 'video'], "only 'image' or 'video' input possible"
        super(ObjectDetectionModel, self).prepare_model()
        self.input_type = input_type
        # Tracker
        if self.config.USE_TRACKER:
            sys.path.append(os.getcwd() + '/rod/kcf')
            import KCF
            self._tracker = KCF.kcftracker(False, True, False, False)
            self._tracker_counter = 0
            self._track = False
        print("> Building Graph")
        with self.detection_graph.as_default():
            with tf.Session(graph=self.detection_graph,
                            config=self._tf_config) as self._sess:
                # Input Configuration
                if self.input_type == 'video':
                    self._input_stream = VideoStream(
                        self.config.VIDEO_INPUT, self.config.WIDTH,
                        self.config.HEIGHT).start()
                    height = self._input_stream.real_height
                    width = self._input_stream.real_width
                elif self.input_type == 'image':
                    self._input_stream = ImageStream(
                        self.config.IMAGE_PATH, self.config.LIMIT_IMAGES,
                        (self.config.WIDTH, self.config.HEIGHT)).start()
                    height = self.config.HEIGHT
                    width = self.config.WIDTH
                    # Timeliner for image detection
                    if self.config.WRITE_TIMELINE:
                        self._run_options = tf.RunOptions(
                            trace_level=tf.RunOptions.FULL_TRACE)
                        self._run_metadata = tf.RunMetadata()
                        self.timeliner = TimeLiner()
                # Define Input and Output tensors
                self._tensor_dict = self.get_tensordict([
                    'num_detections', 'detection_boxes', 'detection_scores',
                    'detection_classes', 'detection_masks'
                ])
                self._image_tensor = self.detection_graph.get_tensor_by_name(
                    'image_tensor:0')
                # Mask Transformations
                if 'detection_masks' in self._tensor_dict:
                    # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                    detection_boxes = tf.squeeze(
                        self._tensor_dict['detection_boxes'], [0])
                    detection_masks = tf.squeeze(
                        self._tensor_dict['detection_masks'], [0])
                    real_num_detection = tf.cast(
                        self._tensor_dict['num_detections'][0], tf.int32)
                    detection_boxes = tf.slice(detection_boxes, [0, 0],
                                               [real_num_detection, -1])
                    detection_masks = tf.slice(detection_masks, [0, 0, 0],
                                               [real_num_detection, -1, -1])
                    detection_masks_reframed = reframe_box_masks_to_image_masks(
                        detection_masks, detection_boxes, height, width)
                    detection_masks_reframed = tf.cast(
                        tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                    self._tensor_dict['detection_masks'] = tf.expand_dims(
                        detection_masks_reframed, 0)
                if self.config.SPLIT_MODEL:
                    self._score_out = self.detection_graph.get_tensor_by_name(
                        '{}:0'.format(self.config.SPLIT_NODES[0]))
                    self._expand_out = self.detection_graph.get_tensor_by_name(
                        '{}:0'.format(self.config.SPLIT_NODES[1]))
                    self._score_in = self.detection_graph.get_tensor_by_name(
                        '{}_1:0'.format(self.config.SPLIT_NODES[0]))
                    self._expand_in = self.detection_graph.get_tensor_by_name(
                        '{}_1:0'.format(self.config.SPLIT_NODES[1]))
                    # Threading
                    self._gpu_worker = SessionWorker("GPU",
                                                     self.detection_graph,
                                                     self._tf_config)
                    self._cpu_worker = SessionWorker("CPU",
                                                     self.detection_graph,
                                                     self._tf_config)
                    self._gpu_opts = [self._score_out, self._expand_out]
                    self._cpu_opts = [
                        self._tensor_dict['detection_boxes'],
                        self._tensor_dict['detection_scores'],
                        self._tensor_dict['detection_classes'],
                        self._tensor_dict['num_detections']
                    ]
            return self

    def run_default_sess(self):
        """
        runs default session
        """
        # default session
        self.frame = self._input_stream.read()
        output_dict = self._sess.run(
            self._tensor_dict,
            feed_dict={
                self._image_tensor:
                self._visualizer.expand_and_convertRGB_image(self.frame)
            },
            options=self._run_options,
            run_metadata=self._run_metadata)
        self.num = output_dict['num_detections'][0]
        self.classes = output_dict['detection_classes'][0]
        self.boxes = output_dict['detection_boxes'][0]
        self.scores = output_dict['detection_scores'][0]
        if 'detection_masks' in output_dict:
            self.masks = output_dict['detection_masks'][0]

    def run_thread_sess(self):
        """
        runs separate gpu and cpu session threads
        """
        if self._gpu_worker.is_sess_empty():
            # put new queue
            self.frame = self._input_stream.read()
            gpu_feeds = {
                self._image_tensor:
                self._visualizer.expand_and_convertRGB_image(self.frame)
            }
            if self.config.VISUALIZE:
                gpu_extras = self.frame  # for visualization frame
            else:
                gpu_extras = None
            self._gpu_worker.put_sess_queue(self._gpu_opts, gpu_feeds,
                                            gpu_extras)
        g = self._gpu_worker.get_result_queue()
        if g is None:
            # gpu thread has no output queue. ok skip, let's check cpu thread.
            pass
        else:
            # gpu thread has output queue.
            score, expand, self._frame = g["results"][0], g["results"][1], g[
                "extras"]
            if self._cpu_worker.is_sess_empty():
                # When cpu thread has no next queue, put new queue.
                # else, drop gpu queue.
                cpu_feeds = {self._score_in: score, self._expand_in: expand}
                cpu_extras = self._frame  # forward the frame that produced score/expand
                self._cpu_worker.put_sess_queue(self._cpu_opts, cpu_feeds,
                                                cpu_extras)
        c = self._cpu_worker.get_result_queue()
        if c is None:
            # cpu thread has no output queue. ok, nothing to do. continue
            self._wait_thread = True
            return  # If CPU RESULT has not been set yet, no fps update
        else:
            self._wait_thread = False
            self.boxes, self.scores, self.classes, self.num, self.frame = c[
                "results"][0], c["results"][1], c["results"][2], c["results"][
                    3], c["extras"]

    def run_split_sess(self):
        """
        runs split session WITHOUT threading
        optional: timeline writer
        """
        self.frame = self._input_stream.read()
        score, expand = self._sess.run(
            self._gpu_opts,
            feed_dict={
                self._image_tensor:
                self._visualizer.expand_and_convertRGB_image(self.frame)
            },
            options=self._run_options,
            run_metadata=self._run_metadata)
        if self.config.WRITE_TIMELINE:
            self.timeliner.write_timeline(
                self._run_metadata.step_stats,
                '{}/timeline_{}_SM1.json'.format(self.config.RESULT_PATH,
                                                 self.config.DISPLAY_NAME))
        # CPU Session
        self.boxes, self.scores, self.classes, self.num = self._sess.run(
            self._cpu_opts,
            feed_dict={
                self._score_in: score,
                self._expand_in: expand
            },
            options=self._run_options,
            run_metadata=self._run_metadata)
        if self.config.WRITE_TIMELINE:
            self.timeliner.write_timeline(
                self._run_metadata.step_stats,
                '{}/timeline_{}_SM2.json'.format(self.config.RESULT_PATH,
                                                 self.config.DISPLAY_NAME))

    def run_tracker(self):
        """
        runs KCF tracker on videoStream frame
        !does not work on images, obviously!
        """
        self.frame = self._input_stream.read()
        if self._first_track:
            self._trackers = []
            self._tracker_boxes = self.boxes
            for box in self.boxes[~np.all(self.boxes == 0, axis=1)]:
                self._tracker.init(
                    conv_detect2track(box, self._input_stream.real_width,
                                      self._input_stream.real_height),
                    self.tracker_frame)
                self._trackers.append(self._tracker)
            self._first_track = False

        for idx, self._tracker in enumerate(self._trackers):
            tracker_box = self._tracker.update(self.frame)
            self._tracker_boxes[idx, :] = conv_track2detect(
                tracker_box, self._input_stream.real_width,
                self._input_stream.real_height)
        self._tracker_counter += 1
        self.boxes = self._tracker_boxes
        # Deactivate Tracker
        if self._tracker_counter >= self.config.TRACKER_FRAMES:
            self._track = False
            self._tracker_counter = 0

    def reformat_detection(self):
        """
        reformats detection
        """
        self.num = int(self.num)
        self.boxes = np.squeeze(self.boxes)
        self.classes = np.squeeze(self.classes).astype(np.uint8)
        self.scores = np.squeeze(self.scores)

    def detect(self):
        """
        Object_Detection Detection function
        optional: multi threading split session, timeline writer
        """
        if not (self.config.USE_TRACKER and self._track):
            if self.config.SPLIT_MODEL:
                if self.config.MULTI_THREADING:
                    self.run_thread_sess()
                    if self._wait_thread:  # checks if thread has output
                        return
                else:
                    self.run_split_sess()
            else:
                self.run_default_sess()
                if self.config.WRITE_TIMELINE and self.input_type == 'image':
                    self.timeliner.write_timeline(
                        self._run_metadata.step_stats,
                        '{}/timeline_{}.json'.format(self.config.RESULT_PATH,
                                                     self.config.DISPLAY_NAME))
            self.reformat_detection()
            # Activate Tracker
            if self.config.USE_TRACKER and self.num <= self.config.NUM_TRACKERS and self.input_type == 'video':
                self.tracker_frame = self.frame
                self._track = True
                self._first_track = True
        # Tracking
        else:
            self.run_tracker()
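
The TimeLiner object used by run_split_sess and the WRITE_TIMELINE branches is not defined in this listing. A minimal sketch of what such a class could look like, built on TF 1.x's tensorflow.python.client.timeline; the merging behavior is an assumption:

import json
from tensorflow.python.client import timeline

class TimeLiner(object):
    """Accumulates step_stats from successive sess.run calls into one Chrome trace."""
    def __init__(self):
        self._timeline_dict = None

    def write_timeline(self, step_stats, file_name):
        chrome_trace = timeline.Timeline(step_stats).generate_chrome_trace_format()
        trace = json.loads(chrome_trace)
        if self._timeline_dict is None:
            self._timeline_dict = trace
        else:
            # keep the first trace's metadata, append later timed events
            self._timeline_dict['traceEvents'] += [
                event for event in trace['traceEvents'] if 'ts' in event]
        with open(file_name, 'w') as f:
            json.dump(self._timeline_dict, f)

The JSON written this way can be inspected in chrome://tracing.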
Example 6
def detection(model,config):
    # Tracker
    if config.USE_TRACKER:
        import sys
        sys.path.append(os.getcwd()+'/rod/kcf')
        import KCF
        tracker = KCF.kcftracker(False, True, False, False)
        tracker_counter = 0
        track = False

    print("> Building Graph")
    # tf Session Config
    tf_config = model.tf_config
    detection_graph = model.detection_graph
    category_index = model.category_index
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph,config=tf_config) as sess:
            # start Videostream
            vs = WebcamVideoStream(config.VIDEO_INPUT,config.WIDTH,config.HEIGHT).start()
            # Define Input and Output tensors
            tensor_dict = model.get_tensordict(['num_detections', 'detection_boxes', 'detection_scores','detection_classes', 'detection_masks'])
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Mask Transformations
            if 'detection_masks' in tensor_dict:
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = reframe_box_masks_to_image_masks(
                                            detection_masks, detection_boxes, vs.real_height, vs.real_width)
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            if config.SPLIT_MODEL:
                score_out = detection_graph.get_tensor_by_name('Postprocessor/convert_scores:0')
                expand_out = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1:0')
                score_in = detection_graph.get_tensor_by_name('Postprocessor/convert_scores_1:0')
                expand_in = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1_1:0')
                # Threading
                score = model.score
                expand = model.expand
                gpu_worker = SessionWorker("GPU",detection_graph,tf_config)
                cpu_worker = SessionWorker("CPU",detection_graph,tf_config)
                gpu_opts = [score_out, expand_out]
                cpu_opts = [tensor_dict['detection_boxes'], tensor_dict['detection_scores'], tensor_dict['detection_classes'], tensor_dict['num_detections']]

            fps = FPS(config.FPS_INTERVAL).start()
            masks = None
            print('> Starting Detection')
            while vs.isActive():
                # Detection
                if not (config.USE_TRACKER and track):
                    if config.SPLIT_MODEL:
                        # split model in separate gpu and cpu session threads
                        if gpu_worker.is_sess_empty():
                            # read video frame, expand dimensions and convert to rgb
                            frame = vs.read()
                            # put new queue
                            gpu_feeds = {image_tensor: vs.expanded()}
                            if config.VISUALIZE:
                                gpu_extras = frame # for visualization frame
                            else:
                                gpu_extras = None
                            gpu_worker.put_sess_queue(gpu_opts,gpu_feeds,gpu_extras)
                        g = gpu_worker.get_result_queue()
                        if g is None:
                            # gpu thread has no output queue. ok skip, let's check cpu thread.
                            pass
                        else:
                            # gpu thread has output queue.
                            score,expand,frame = g["results"][0],g["results"][1],g["extras"]

                            if cpu_worker.is_sess_empty():
                                # When cpu thread has no next queue, put new queue.
                                # else, drop gpu queue.
                                cpu_feeds = {score_in: score, expand_in: expand}
                                cpu_extras = frame
                                cpu_worker.put_sess_queue(cpu_opts,cpu_feeds,cpu_extras)
                        c = cpu_worker.get_result_queue()
                        if c is None:
                            # cpu thread has no output queue. ok, nothing to do. continue
                            continue # If CPU RESULT has not been set yet, no fps update
                        else:
                            boxes, scores, classes, num, frame = c["results"][0],c["results"][1],c["results"][2],c["results"][3],c["extras"]
                    else:
                        # default session
                        frame = vs.read()
                        output_dict = sess.run(tensor_dict, feed_dict={image_tensor: vs.expanded()})
                        num = output_dict['num_detections'][0]
                        classes = output_dict['detection_classes'][0]
                        boxes = output_dict['detection_boxes'][0]
                        scores = output_dict['detection_scores'][0]
                        if 'detection_masks' in output_dict:
                            masks = output_dict['detection_masks'][0]

                    # reformat detection
                    num = int(num)
                    boxes = np.squeeze(boxes)
                    classes = np.squeeze(classes).astype(np.uint8)
                    scores = np.squeeze(scores)

                    # Visualization
                    vis = visualize_objectdetection(frame,boxes,classes,scores,masks,category_index,fps._glob_numFrames,
                                                    config.MAX_FRAMES,fps.fps_local(),config.PRINT_INTERVAL,config.PRINT_TH,
                                                    config.OD_DISPLAY_NAME,config.VISUALIZE,config.VIS_FPS,config.DISCO_MODE,config.ALPHA)
                    if not vis:
                        break

                    # Activate Tracker
                    if config.USE_TRACKER and num <= config.NUM_TRACKERS:
                        tracker_frame = frame
                        track = True
                        first_track = True

                # Tracking
                else:
                    frame = vs.read()
                    if first_track:
                        trackers = []
                        tracker_boxes = boxes
                        for box in boxes[~np.all(boxes == 0, axis=1)]:
                                tracker.init(conv_detect2track(box,vs.real_width, vs.real_height), tracker_frame)
                                trackers.append(tracker)
                        first_track = False

                    for idx,tracker in enumerate(trackers):
                        tracker_box = tracker.update(frame)
                        tracker_boxes[idx,:] = conv_track2detect(tracker_box, vs.real_width, vs.real_height)
                    vis = visualize_objectdetection(frame,tracker_boxes,classes,scores,masks,category_index,fps._glob_numFrames,
                                                    config.MAX_FRAMES,fps.fps_local(),config.PRINT_INTERVAL,config.PRINT_TH,
                                                    config.OD_DISPLAY_NAME,config.VISUALIZE,config.VIS_FPS,config.DISCO_MODE,config.ALPHA)
                    if not vis:
                        break

                    tracker_counter += 1
                    #tracker_frame = frame
                    if tracker_counter >= config.TRACKER_FRAMES:
                        track = False
                        tracker_counter = 0

                fps.update()

    # End everything
    vs.stop()
    fps.stop()
    if config.SPLIT_MODEL:
        gpu_worker.stop()
        cpu_worker.stop()
Example 7
def detection(model, config):

    print("> Building Graph")
    # tf Session Config
    tf_config = tf.ConfigProto(allow_soft_placement=True)
    tf_config.gpu_options.allow_growth = True
    tf_config.gpu_options.per_process_gpu_memory_fraction = 0.1
    detection_graph = model.detection_graph
    category_index = model.category_index
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=tf_config) as sess:
            # start Videostream
            # vs = WebcamVideoStream(config.VIDEO_INPUT,config.WIDTH,config.HEIGHT).start()
            vs = MultiImagesMemmap(mode="r",
                                   name="main_stream",
                                   memmap_path=os.getenv(
                                       "MEMMAP_PATH", "/tmp"))
            vs.wait_until_available()  # initialize and find video data
            # Define Input and Output tensors
            tensor_dict = model.get_tensordict([
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ])
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

            score_out = detection_graph.get_tensor_by_name(
                'Postprocessor/convert_scores:0')
            expand_out = detection_graph.get_tensor_by_name(
                'Postprocessor/ExpandDims_1:0')
            score_in = detection_graph.get_tensor_by_name(
                'Postprocessor/convert_scores_1:0')
            expand_in = detection_graph.get_tensor_by_name(
                'Postprocessor/ExpandDims_1_1:0')
            # Threading
            score = model.score
            expand = model.expand
            gpu_worker = SessionWorker("GPU", detection_graph, tf_config)
            cpu_worker = SessionWorker("CPU", detection_graph, tf_config)
            gpu_opts = [score_out, expand_out]
            cpu_opts = [
                tensor_dict['detection_boxes'],
                tensor_dict['detection_scores'],
                tensor_dict['detection_classes'], tensor_dict['num_detections']
            ]
            gpu_counter = 0
            cpu_counter = 0

            fps = FPS2(config.FPS_INTERVAL).start()
            print('> Starting Detection')
            frame = vs.read("C")
            # frame = vs.read()
            h, w, _ = frame.shape
            vs.real_width, vs.real_height = w, h
            while True:
                # Detection

                # split model in separate gpu and cpu session threads
                masks = None  # No Mask Detection possible yet
                if gpu_worker.is_sess_empty():
                    # read video frame, expand dimensions and convert to rgb
                    frame = vs.read("C")
                    # frame = vs.read()
                    # put new queue
                    image_expanded = np.expand_dims(cv2.cvtColor(
                        frame, cv2.COLOR_BGR2RGB),
                                                    axis=0)
                    gpu_feeds = {image_tensor: image_expanded}
                    if config.VISUALIZE:
                        gpu_extras = frame  # for visualization frame
                    else:
                        gpu_extras = None
                    gpu_worker.put_sess_queue(gpu_opts, gpu_feeds, gpu_extras)
                g = gpu_worker.get_result_queue()
                if g is None:
                    # gpu thread has no output queue. ok skip, let's check cpu thread.
                    gpu_counter += 1
                else:
                    # gpu thread has output queue.
                    gpu_counter = 0
                    score, expand, frame = g["results"][0], g["results"][1], g[
                        "extras"]

                    if cpu_worker.is_sess_empty():
                        # When cpu thread has no next queue, put new queue.
                        # else, drop gpu queue.
                        cpu_feeds = {score_in: score, expand_in: expand}
                        cpu_extras = frame
                        cpu_worker.put_sess_queue(cpu_opts, cpu_feeds,
                                                  cpu_extras)
                c = cpu_worker.get_result_queue()
                if c is None:
                    # cpu thread has no output queue. ok, nothing to do. continue
                    cpu_counter += 1
                    continue  # If CPU RESULT has not been set yet, no fps update
                else:
                    cpu_counter = 0
                    boxes, scores, classes, num, frame = c["results"][0], c[
                        "results"][1], c["results"][2], c["results"][3], c[
                            "extras"]

                    # reformat detection
                    num = int(num)
                    boxes = np.squeeze(boxes)
                    classes = np.squeeze(classes).astype(np.uint8)
                    scores = np.squeeze(scores)

                    # Visualization
                    # print frame.shape
                    if frame is not None:
                        vis = vis_detection(frame.copy(), boxes, classes,
                                            scores, masks, category_index,
                                            fps.fps_local(), config.VISUALIZE,
                                            config.DET_INTERVAL, config.DET_TH,
                                            config.MAX_FRAMES,
                                            fps._glob_numFrames,
                                            config.OD_MODEL_NAME)
                        if not vis:
                            break

                fps.update()

    # End everything
    # vs.stop()
    fps.stop()
    if config.SPLIT_MODEL:
        gpu_worker.stop()
        cpu_worker.stop()
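
Every split-model variant above assumes the same SessionWorker contract: a worker thread owns one session pipeline, put_sess_queue() enqueues an (ops, feeds, extras) job, get_result_queue() returns {"results": ..., "extras": ...} or None, and is_sess_empty() reports whether a new job may be queued. A bare-bones sketch of that contract follows; the real class presumably also pins ops to the named device and forwards run options, so treat this as an assumption:

import threading
import queue

import tensorflow as tf

class SessionWorker(object):
    """Runs sess.run jobs on a background thread; single-slot in/out queues."""
    def __init__(self, tag, graph, config):
        self.sess = tf.Session(graph=graph, config=config)
        self.sess_queue = queue.Queue(maxsize=1)
        self.result_queue = queue.Queue(maxsize=1)
        self.is_running = True
        self.thread = threading.Thread(target=self._loop, name=tag)
        self.thread.start()

    def is_sess_empty(self):
        # True when the worker is ready to accept a new job
        return self.sess_queue.empty()

    def put_sess_queue(self, opts, feeds, extras=None):
        self.sess_queue.put({"opts": opts, "feeds": feeds, "extras": extras})

    def get_result_queue(self):
        # Non-blocking: None signals "no finished job yet"
        return None if self.result_queue.empty() else self.result_queue.get()

    def _loop(self):
        while self.is_running:
            try:
                job = self.sess_queue.get(timeout=1)
            except queue.Empty:
                continue
            results = self.sess.run(job["opts"], feed_dict=job["feeds"])
            self.result_queue.put({"results": results, "extras": job["extras"]})

    def stop(self):
        self.is_running = False
        self.thread.join()
        self.sess.close()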