Example #1
 def prepare_model(self, input_type):
     """
     prepares DeepLab model
     input_type: must be 'image' or 'video'
     """
     assert input_type in ['image',
                           'video'], "only image or video input possible"
     super(DeepLabModel, self).prepare_model()
     self.input_type = input_type
     # fixed input sizes as model needs resize either way
     # Input configurations
     self.category_index = None
     if self.input_type == 'video':
         self._input_stream = VideoStream(self.config.VIDEO_INPUT,
                                          self.config.WIDTH,
                                          self.config.HEIGHT).start()
     elif self.input_type == 'image':
         self._input_stream = ImageStream(self.config.IMAGE_PATH,
                                          self.config.LIMIT_IMAGES).start()
         if self.config.WRITE_TIMELINE:
             self._run_options = tf.RunOptions(
                 trace_level=tf.RunOptions.FULL_TRACE)
             self._run_metadata = tf.RunMetadata()
             self._timeliner = TimeLiner()
     print("> Building Graph")
     with self.detection_graph.as_default():
         with tf.Session(graph=self.detection_graph,
                         config=self._tf_config) as self._sess:
             return self
Example #2
def segmentation(model, config):
    images = load_images(config.IMAGE_PATH,config.LIMIT_IMAGES)
    # Tf Session + Timeliner
    tf_config = model.tf_config
    detection_graph = model.detection_graph
    if config.WRITE_TIMELINE:
        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        timeliner = TimeLiner()
    else:
        options = tf.RunOptions(trace_level=tf.RunOptions.NO_TRACE)
        run_metadata = None
    timer = Timer().start()
    print("> Starting Segmentaion")
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=tf_config) as sess:
            for image in images:
                # input
                frame = cv2.imread(image)
                height, width, channels = frame.shape
                resize_ratio = 1.0 * 513 / max(width,height)
                target_size = (int(resize_ratio * width), int(resize_ratio * height))
                frame = cv2.resize(frame, target_size)
                timer.tic()
                batch_seg_map = sess.run('SemanticPredictions:0',
                				feed_dict={'ImageTensor:0': [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)]},
                				options=options, run_metadata=run_metadata)
                timer.toc()
                if config.WRITE_TIMELINE:
                    timeliner.write_timeline(run_metadata.step_stats,
                                            '{}/timeline_{}.json'.format(
                                            config.RESULT_PATH,config.DL_DISPLAY_NAME))
                seg_map = batch_seg_map[0]
                #boxes = []
                #labels = []
                #ids = []
                map_labeled = measure.label(seg_map, connectivity=1)
                for region in measure.regionprops(map_labeled):
                    if region.area > config.MINAREA:
                        box = region.bbox
                        id = seg_map[tuple(region.coords[0])]
                        label = config.LABEL_NAMES[id]
                        #boxes.append(box)
                        #labels.append(label)
                        #ids.append(id)
                        if config.VISUALIZE:
                            draw_single_box_on_image(frame,box,label,id,config.DISCO_MODE)

                vis = visualize_deeplab(frame,seg_map,timer.get_frame(),config.MAX_FRAMES,timer.get_fps(),
                                        config.PRINT_INTERVAL,config.PRINT_TH,config.DL_DISPLAY_NAME,
                                        config.VISUALIZE,config.VIS_FPS,config.DISCO_MODE,config.ALPHA)
                if not vis:
                    break
                if config.SAVE_RESULT:
                    cv2.imwrite('{}/{}_{}.jpg'.format(config.RESULT_PATH,timer.get_frame(),config.DL_DISPLAY_NAME),frame)

    cv2.destroyAllWindows()
    timer.stop()
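The resize rule above recurs in every DeepLab example on this page; here is a minimal self-contained sketch of that preprocessing step (the 513-pixel constant comes from the code above, the helper name is ours):

import cv2
import numpy as np

INPUT_SIZE = 513  # the exported DeepLab graph expects the longest side at 513 px

def preprocess_for_deeplab(bgr_frame):
    # resize so the longest side is INPUT_SIZE, keeping aspect ratio
    height, width = bgr_frame.shape[:2]
    resize_ratio = float(INPUT_SIZE) / max(width, height)
    target_size = (int(resize_ratio * width), int(resize_ratio * height))
    resized = cv2.resize(bgr_frame, target_size)
    # OpenCV reads BGR; the graph's 'ImageTensor:0' expects an RGB batch
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    return resized, np.expand_dims(rgb, axis=0)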
Example #3
def segmentation(model, config):
    images = load_images(config.IMAGE_PATH,config.LIMIT_IMAGES)
    # Tf Session + Timeliner
    tf_config = tf.ConfigProto(allow_soft_placement=True)
    tf_config.gpu_options.allow_growth=True
    detection_graph = model.detection_graph
    if config.WRITE_TIMELINE:
        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        timeliner = TimeLiner()
    else:
        options = tf.RunOptions(trace_level=tf.RunOptions.NO_TRACE)
        run_metadata = None
    timer = Timer().start()
    print("> Starting Segmentaion")
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=tf_config) as sess:
            for image in images:
                # input
                frame = cv2.imread(image)
                height, width, channels = frame.shape
                resize_ratio = 1.0 * 513 / max(width,height)
                target_size = (int(resize_ratio * width), int(resize_ratio * height))
                frame = cv2.resize(frame, target_size)
                timer.tic()
                batch_seg_map = sess.run('SemanticPredictions:0',
                				feed_dict={'ImageTensor:0': [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)]},
                				options=options, run_metadata=run_metadata)
                timer.toc()
                if config.WRITE_TIMELINE:
                    timeliner.write_timeline(run_metadata.step_stats,
                                            'test_results/timeline_{}{}.json'.format(
                                            config.OD_MODEL_NAME,config.DEVICE))
                # visualization
                if config.VISUALIZE:
                    seg_map = batch_seg_map[0]
                    seg_image = create_colormap(seg_map).astype(np.uint8)
                    cv2.addWeighted(seg_image,config.ALPHA,frame,1-config.ALPHA,0,frame)
                    vis_text(frame,"fps: {}".format(timer.get_fps()),(10,30))
                    # boxes (ymin, xmin, ymax, xmax)
                    if config.BBOX:
                        map_labeled = measure.label(seg_map, connectivity=1)
                        for region in measure.regionprops(map_labeled):
                            if region.area > config.MINAREA:
                                box = region.bbox
                                p1 = (box[1], box[0])
                                p2 = (box[3], box[2])
                                cv2.rectangle(frame, p1, p2, (77,255,9), 2)
                                vis_text(frame,config.LABEL_NAMES[seg_map[tuple(region.coords[0])]],(p1[0],p1[1]-10))
                    cv2.imshow('segmentation',frame)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
    cv2.destroyAllWindows()
    timer.stop()
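create_colormap is not defined on this page; a plausible stand-in (an assumption, not necessarily this repo's version) is the standard PASCAL VOC colormap used by the DeepLab demo:

import numpy as np

def create_colormap(seg_map):
    # build the 256-entry PASCAL VOC colormap by bit manipulation
    colormap = np.zeros((256, 3), dtype=np.uint8)
    ind = np.arange(256, dtype=np.uint8)
    for shift in reversed(range(8)):
        for channel in range(3):
            colormap[:, channel] |= ((ind >> channel) & 1) << shift
        ind >>= 3
    # look up a color for every class id in the segmentation map
    return colormap[seg_map]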
Example #4
 def prepare_timeliner(self):
     """
     prepares timeliner and sets tf Run options
     """
     self._run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
     self._run_metadata = tf.RunMetadata()
     self.timeliner = TimeLiner()
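The TimeLiner class itself is not shown in these examples. A minimal stand-in consistent with the write_timeline(step_stats, filename) calls on this page might look like this (an assumption; it merges per-step Chrome traces via TensorFlow's timeline module):

import json
from tensorflow.python.client import timeline

class TimeLiner:
    def __init__(self):
        self._trace = None

    def write_timeline(self, step_stats, f_name):
        chrome_trace = timeline.Timeline(step_stats).generate_chrome_trace_format()
        step_trace = json.loads(chrome_trace)
        if self._trace is None:
            self._trace = step_trace
        else:
            # keep metadata events from the first step, append timed events
            self._trace['traceEvents'] += [
                e for e in step_trace['traceEvents'] if 'ts' in e]
        with open(f_name, 'w') as f:
            json.dump(self._trace, f)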
Example #5
def detection(model, config):
    # Tf Session
    tf_config = model.tf_config
    detection_graph = model.detection_graph
    category_index = model.category_index
    print("> Building Graph")
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=tf_config) as sess:
            # start Videostream
            # Define Input and Output tensors
            tensor_dict = model.get_tensordict([
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ])
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Mask Transformations
            if 'detection_masks' in tensor_dict:
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'],
                                             [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'],
                                             [0])
                real_num_detection = tf.cast(tensor_dict['num_detections'][0],
                                             tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0],
                                           [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0],
                                           [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, config.HEIGHT,
                    config.WIDTH)
                detection_masks_reframed = tf.cast(
                    tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(
                    detection_masks_reframed, 0)
            if config.SPLIT_MODEL:
                score_out = detection_graph.get_tensor_by_name(
                    'Postprocessor/convert_scores:0')
                expand_out = detection_graph.get_tensor_by_name(
                    'Postprocessor/ExpandDims_1:0')
                score_in = detection_graph.get_tensor_by_name(
                    'Postprocessor/convert_scores_1:0')
                expand_in = detection_graph.get_tensor_by_name(
                    'Postprocessor/ExpandDims_1_1:0')
                # Threading
                score = model.score
                expand = model.expand

            # Timeliner
            if config.WRITE_TIMELINE:
                options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                timeliner = TimeLiner()
            else:
                options = tf.RunOptions(trace_level=tf.RunOptions.NO_TRACE)
                run_metadata = None

            images = load_images(config.IMAGE_PATH, config.LIMIT_IMAGES)
            timer = Timer().start()
            print('> Starting Detection')
            for image in images:
                if config.SPLIT_MODEL:
                    # split model into separate gpu and cpu session threads
                    masks = None  # No Mask Detection possible yet
                    frame = cv2.resize(cv2.imread(image),
                                       (config.WIDTH, config.HEIGHT))
                    frame_expanded = np.expand_dims(cv2.cvtColor(
                        frame, cv2.COLOR_BGR2RGB),
                                                    axis=0)
                    timer.tic()
                    # GPU Session
                    score, expand = sess.run(
                        [score_out, expand_out],
                        feed_dict={image_tensor: frame_expanded},
                        options=options,
                        run_metadata=run_metadata)
                    timer.tictic()
                    if config.WRITE_TIMELINE:
                        timeliner.write_timeline(
                            run_metadata.step_stats,
                            'test_results/timeline_{}{}{}{}.json'.format(
                                config.OD_MODEL_NAME, '_SM1', config._DEV,
                                config._OPT))
                    timer.tic()
                    # CPU Session
                    boxes, scores, classes, num = sess.run(
                        [
                            tensor_dict['detection_boxes'],
                            tensor_dict['detection_scores'],
                            tensor_dict['detection_classes'],
                            tensor_dict['num_detections']
                        ],
                        feed_dict={
                            score_in: score,
                            expand_in: expand
                        },
                        options=options,
                        run_metadata=run_metadata)
                    timer.toc()
                    if config.WRITE_TIMELINE:
                        timeliner.write_timeline(
                            run_metadata.step_stats,
                            'test_results/timeline_{}{}{}{}.json'.format(
                                config.OD_MODEL_NAME, '_SM2', config._DEV,
                                config._OPT))
                else:
                    # default session
                    frame = cv2.resize(cv2.imread(image),
                                       (config.WIDTH, config.HEIGHT))
                    frame_expanded = np.expand_dims(cv2.cvtColor(
                        frame, cv2.COLOR_BGR2RGB),
                                                    axis=0)
                    timer.tic()
                    output_dict = sess.run(
                        tensor_dict,
                        feed_dict={image_tensor: frame_expanded},
                        options=options,
                        run_metadata=run_metadata)
                    timer.toc()
                    if config.WRITE_TIMELINE:
                        timeliner.write_timeline(
                            run_metadata.step_stats,
                            'test_results/timeline_{}{}{}.json'.format(
                                config.OD_MODEL_NAME, config._DEV,
                                config._OPT))
                    num = output_dict['num_detections'][0]
                    classes = output_dict['detection_classes'][0]
                    boxes = output_dict['detection_boxes'][0]
                    scores = output_dict['detection_scores'][0]
                    if 'detection_masks' in output_dict:
                        masks = output_dict['detection_masks'][0]
                    else:
                        masks = None

                # reformat detection
                num = int(num)
                boxes = np.squeeze(boxes)
                classes = np.squeeze(classes).astype(np.uint8)
                scores = np.squeeze(scores)

                # Visualization
                vis = vis_detection(frame, boxes, classes,
                                    scores, masks, category_index,
                                    timer.get_fps(), config.VISUALIZE,
                                    config.DET_INTERVAL, config.DET_TH,
                                    config.MAX_FRAMES, None,
                                    config.OD_MODEL_NAME + config._OPT)
                if not vis:
                    break

    cv2.destroyAllWindows()
    timer.stop()
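Timer is another repo-specific helper; a minimal stand-in matching the calls above (start, tic/toc around a run, tictic between the two halves of a split run, get_frame, get_fps, stop) could be the following (an assumption, not the repo's implementation):

import time

class Timer:
    def __init__(self):
        self._start = None
        self._tic = None
        self._elapsed = 0.0
        self._frames = 0

    def start(self):
        self._start = time.time()
        return self

    def tic(self):
        self._tic = time.time()

    def tictic(self):
        # accumulate elapsed time without counting a completed frame
        self._elapsed += time.time() - self._tic

    def toc(self):
        self._elapsed += time.time() - self._tic
        self._frames += 1

    def get_frame(self):
        return self._frames

    def get_fps(self):
        return self._frames / self._elapsed if self._elapsed else 0.0

    def stop(self):
        print("> {} frames in {:.2f}s".format(self._frames,
                                              time.time() - self._start))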
Example #6
class DeepLabModel(Model):
    def __init__(self, config):
        super(DeepLabModel, self).__init__(config)

    def prepare_model(self, input_type):
        """
        prepares DeepLab model
        input_type: must be 'image' or 'video'
        """
        assert input_type in ['image',
                              'video'], "only image or video input possible"
        super(DeepLabModel, self).prepare_model()
        self.input_type = input_type
        # fixed input sizes as model needs resize either way
        # Input configurations
        self.category_index = None
        if self.input_type == 'video':
            self._input_stream = VideoStream(self.config.VIDEO_INPUT,
                                             self.config.WIDTH,
                                             self.config.HEIGHT).start()
        elif self.input_type == 'image':
            self._input_stream = ImageStream(self.config.IMAGE_PATH,
                                             self.config.LIMIT_IMAGES).start()
            if self.config.WRITE_TIMELINE:
                self._run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                self._run_metadata = tf.RunMetadata()
                self._timeliner = TimeLiner()
        print("> Building Graph")
        with self.detection_graph.as_default():
            with tf.Session(graph=self.detection_graph,
                            config=self._tf_config) as self._sess:
                return self

    def detect(self):
        """
        DeepLab Detection function
        """
        self.frame = self._input_stream.read()
        height, width, _ = self.frame.shape
        resize_ratio = 1.0 * 513 / max(self._input_stream.real_width,
                                       self._input_stream.real_height)
        target_size = (int(resize_ratio * self._input_stream.real_width),
                       int(resize_ratio * self._input_stream.real_height)
                       )  # e.g. (513, 342) or (513, 384)
        self.frame = self._visualizer.resize_image(self.frame, target_size)
        batch_seg_map = self._sess.run(
            'SemanticPredictions:0',
            feed_dict={
                'ImageTensor:0':
                [self._visualizer.convertRGB_image(self.frame)]
            },
            options=self._run_options,
            run_metadata=self._run_metadata)
        if self.config.WRITE_TIMELINE and self.input_type == 'image':
            self._timeliner.write_timeline(
                self._run_metadata.step_stats,
                '{}/timeline_{}.json'.format(self.config.RESULT_PATH,
                                             self.config.DISPLAY_NAME))
        seg_map = batch_seg_map[0]
        self.boxes = []
        self.labels = []
        self.ids = []
        if self.config.BBOX:
            map_labeled = measure.label(seg_map, connectivity=1)
            for region in measure.regionprops(map_labeled):
                if region.area > self.config.MINAREA:
                    box = region.bbox
                    id = seg_map[tuple(region.coords[0])]
                    label = self.config.LABEL_NAMES[id]
                    self.boxes.append(box)
                    self.labels.append(label)
                    self.ids.append(id)
        # workaround
        self.num = len(self.boxes)
        self.classes = self.ids
        self.scores = self.labels
        self.masks = seg_map
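The connected-component step inside detect() is worth isolating; a self-contained sketch of the same technique (the function name and min_area default are ours):

from skimage import measure

def seg_map_to_boxes(seg_map, min_area=500):
    boxes, class_ids = [], []
    # label 4-connected regions of the segmentation map
    map_labeled = measure.label(seg_map, connectivity=1)
    for region in measure.regionprops(map_labeled):
        if region.area > min_area:
            boxes.append(region.bbox)  # (ymin, xmin, ymax, xmax)
            # class id of the region's first pixel in the raw seg map
            class_ids.append(int(seg_map[tuple(region.coords[0])]))
    return boxes, class_ids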
Example #7
 def prepare_model(self, input_type):
     """
     prepares Object_Detection model
     input_type: must be 'image' or 'video'
     """
     assert input_type in ['image', 'video'
                           ], "only 'image' or 'video' input possible"
     super(ObjectDetectionModel, self).prepare_model()
     self.input_type = input_type
     # Tracker
     if self.config.USE_TRACKER:
         sys.path.append(os.getcwd() + '/rod/kcf')
         import KCF
         self._tracker = KCF.kcftracker(False, True, False, False)
         self._tracker_counter = 0
         self._track = False
     print("> Building Graph")
     with self.detection_graph.as_default():
         with tf.Session(graph=self.detection_graph,
                         config=self._tf_config) as self._sess:
             # Input Configuration
             if self.input_type == 'video':
                 self._input_stream = VideoStream(
                     self.config.VIDEO_INPUT, self.config.WIDTH,
                     self.config.HEIGHT).start()
                 height = self._input_stream.real_height
                 width = self._input_stream.real_width
             elif self.input_type == 'image':
                 self._input_stream = ImageStream(
                     self.config.IMAGE_PATH, self.config.LIMIT_IMAGES,
                     (self.config.WIDTH, self.config.HEIGHT)).start()
                 height = self.config.HEIGHT
                 width = self.config.WIDTH
                 # Timeliner for image detection
                 if self.config.WRITE_TIMELINE:
                     self._run_options = tf.RunOptions(
                         trace_level=tf.RunOptions.FULL_TRACE)
                     self._run_metadata = tf.RunMetadata()
                     self.timeliner = TimeLiner()
             # Define Input and Output tensors
             self._tensor_dict = self.get_tensordict([
                 'num_detections', 'detection_boxes', 'detection_scores',
                 'detection_classes', 'detection_masks'
             ])
             self._image_tensor = self.detection_graph.get_tensor_by_name(
                 'image_tensor:0')
             # Mask Transformations
             if 'detection_masks' in self._tensor_dict:
                 # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                 detection_boxes = tf.squeeze(
                     self._tensor_dict['detection_boxes'], [0])
                 detection_masks = tf.squeeze(
                     self._tensor_dict['detection_masks'], [0])
                 real_num_detection = tf.cast(
                     self._tensor_dict['num_detections'][0], tf.int32)
                 detection_boxes = tf.slice(detection_boxes, [0, 0],
                                            [real_num_detection, -1])
                 detection_masks = tf.slice(detection_masks, [0, 0, 0],
                                            [real_num_detection, -1, -1])
                 detection_masks_reframed = reframe_box_masks_to_image_masks(
                     detection_masks, detection_boxes, height, width)
                 detection_masks_reframed = tf.cast(
                     tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                 self._tensor_dict['detection_masks'] = tf.expand_dims(
                     detection_masks_reframed, 0)
             if self.config.SPLIT_MODEL:
                 self._score_out = self.detection_graph.get_tensor_by_name(
                     '{}:0'.format(self.config.SPLIT_NODES[0]))
                 self._expand_out = self.detection_graph.get_tensor_by_name(
                     '{}:0'.format(self.config.SPLIT_NODES[1]))
                 self._score_in = self.detection_graph.get_tensor_by_name(
                     '{}_1:0'.format(self.config.SPLIT_NODES[0]))
                 self._expand_in = self.detection_graph.get_tensor_by_name(
                     '{}_1:0'.format(self.config.SPLIT_NODES[1]))
                 # Threading
                 self._gpu_worker = SessionWorker("GPU",
                                                  self.detection_graph,
                                                  self._tf_config)
                 self._cpu_worker = SessionWorker("CPU",
                                                  self.detection_graph,
                                                  self._tf_config)
                 self._gpu_opts = [self._score_out, self._expand_out]
                 self._cpu_opts = [
                     self._tensor_dict['detection_boxes'],
                     self._tensor_dict['detection_scores'],
                     self._tensor_dict['detection_classes'],
                     self._tensor_dict['num_detections']
                 ]
         return self
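SessionWorker is not shown on this page. A minimal stand-in matching the calls used above (is_sess_empty, put_sess_queue, get_result_queue) would run sess.run in a background thread fed by a one-slot queue, so the GPU half of the split model can overlap with the CPU half (a sketch under those assumptions, not the repo's implementation):

import threading
import tensorflow as tf
try:
    import queue            # Python 3
except ImportError:
    import Queue as queue   # Python 2

class SessionWorker(object):
    def __init__(self, tag, graph, tf_config):
        self.sess = tf.Session(graph=graph, config=tf_config)
        self._in_queue = queue.Queue(maxsize=1)
        self._out_queue = queue.Queue(maxsize=1)
        worker = threading.Thread(target=self._loop, name=tag)
        worker.daemon = True
        worker.start()

    def _loop(self):
        while True:
            opts, feeds, extras = self._in_queue.get()
            results = self.sess.run(opts, feed_dict=feeds)
            self._out_queue.put({"results": results, "extras": extras})

    def is_sess_empty(self):
        return self._in_queue.empty()

    def put_sess_queue(self, opts, feeds, extras=None):
        self._in_queue.put((opts, feeds, extras))

    def get_result_queue(self):
        return None if self._out_queue.empty() else self._out_queue.get()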
Example #8
class ObjectDetectionModel(Model):
    """
    object_detection model class
    """
    def __init__(self, config):
        super(ObjectDetectionModel, self).__init__(config)

    def prepare_model(self, input_type):
        """
        prepares Object_Detection model
        input_type: must be 'image' or 'video'
        """
        assert input_type in ['image', 'video'
                              ], "only 'image' or 'video' input possible"
        super(ObjectDetectionModel, self).prepare_model()
        self.input_type = input_type
        # Tracker
        if self.config.USE_TRACKER:
            sys.path.append(os.getcwd() + '/rod/kcf')
            import KCF
            self._tracker = KCF.kcftracker(False, True, False, False)
            self._tracker_counter = 0
            self._track = False
        print("> Building Graph")
        with self.detection_graph.as_default():
            with tf.Session(graph=self.detection_graph,
                            config=self._tf_config) as self._sess:
                # Input Configuration
                if self.input_type == 'video':
                    self._input_stream = VideoStream(
                        self.config.VIDEO_INPUT, self.config.WIDTH,
                        self.config.HEIGHT).start()
                    height = self._input_stream.real_height
                    width = self._input_stream.real_width
                elif self.input_type == 'image':
                    self._input_stream = ImageStream(
                        self.config.IMAGE_PATH, self.config.LIMIT_IMAGES,
                        (self.config.WIDTH, self.config.HEIGHT)).start()
                    height = self.config.HEIGHT
                    width = self.config.WIDTH
                    # Timeliner for image detection
                    if self.config.WRITE_TIMELINE:
                        self._run_options = tf.RunOptions(
                            trace_level=tf.RunOptions.FULL_TRACE)
                        self._run_metadata = tf.RunMetadata()
                        self.timeliner = TimeLiner()
                # Define Input and Output tensors
                self._tensor_dict = self.get_tensordict([
                    'num_detections', 'detection_boxes', 'detection_scores',
                    'detection_classes', 'detection_masks'
                ])
                self._image_tensor = self.detection_graph.get_tensor_by_name(
                    'image_tensor:0')
                # Mask Transformations
                if 'detection_masks' in self._tensor_dict:
                    # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                    detection_boxes = tf.squeeze(
                        self._tensor_dict['detection_boxes'], [0])
                    detection_masks = tf.squeeze(
                        self._tensor_dict['detection_masks'], [0])
                    real_num_detection = tf.cast(
                        self._tensor_dict['num_detections'][0], tf.int32)
                    detection_boxes = tf.slice(detection_boxes, [0, 0],
                                               [real_num_detection, -1])
                    detection_masks = tf.slice(detection_masks, [0, 0, 0],
                                               [real_num_detection, -1, -1])
                    detection_masks_reframed = reframe_box_masks_to_image_masks(
                        detection_masks, detection_boxes, height, width)
                    detection_masks_reframed = tf.cast(
                        tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                    self._tensor_dict['detection_masks'] = tf.expand_dims(
                        detection_masks_reframed, 0)
                if self.config.SPLIT_MODEL:
                    self._score_out = self.detection_graph.get_tensor_by_name(
                        '{}:0'.format(self.config.SPLIT_NODES[0]))
                    self._expand_out = self.detection_graph.get_tensor_by_name(
                        '{}:0'.format(self.config.SPLIT_NODES[1]))
                    self._score_in = self.detection_graph.get_tensor_by_name(
                        '{}_1:0'.format(self.config.SPLIT_NODES[0]))
                    self._expand_in = self.detection_graph.get_tensor_by_name(
                        '{}_1:0'.format(self.config.SPLIT_NODES[1]))
                    # Threading
                    self._gpu_worker = SessionWorker("GPU",
                                                     self.detection_graph,
                                                     self._tf_config)
                    self._cpu_worker = SessionWorker("CPU",
                                                     self.detection_graph,
                                                     self._tf_config)
                    self._gpu_opts = [self._score_out, self._expand_out]
                    self._cpu_opts = [
                        self._tensor_dict['detection_boxes'],
                        self._tensor_dict['detection_scores'],
                        self._tensor_dict['detection_classes'],
                        self._tensor_dict['num_detections']
                    ]
            return self

    def run_default_sess(self):
        """
        runs default session
        """
        # default session
        self.frame = self._input_stream.read()
        output_dict = self._sess.run(
            self._tensor_dict,
            feed_dict={
                self._image_tensor:
                self._visualizer.expand_and_convertRGB_image(self.frame)
            },
            options=self._run_options,
            run_metadata=self._run_metadata)
        self.num = output_dict['num_detections'][0]
        self.classes = output_dict['detection_classes'][0]
        self.boxes = output_dict['detection_boxes'][0]
        self.scores = output_dict['detection_scores'][0]
        if 'detection_masks' in output_dict:
            self.masks = output_dict['detection_masks'][0]

    def run_thread_sess(self):
        """
        runs separate gpu and cpu session threads
        """
        if self._gpu_worker.is_sess_empty():
            # put new queue
            self.frame = self._input_stream.read()
            gpu_feeds = {
                self._image_tensor:
                self._visualizer.expand_and_convertRGB_image(self.frame)
            }
            if self.config.VISUALIZE:
                gpu_extras = self.frame  # for visualization frame
            else:
                gpu_extras = None
            self._gpu_worker.put_sess_queue(self._gpu_opts, gpu_feeds,
                                            gpu_extras)
        g = self._gpu_worker.get_result_queue()
        if g is None:
            # gpu thread has no output queue. ok skip, let's check cpu thread.
            pass
        else:
            # gpu thread has output queue.
            score, expand, self._frame = g["results"][0], g["results"][1], g[
                "extras"]
            if self._cpu_worker.is_sess_empty():
                # When cpu thread has no next queue, put new queue.
                # else, drop gpu queue.
                cpu_feeds = {self._score_in: score, self._expand_in: expand}
                cpu_extras = self.frame
                self._cpu_worker.put_sess_queue(self._cpu_opts, cpu_feeds,
                                                cpu_extras)
        c = self._cpu_worker.get_result_queue()
        if c is None:
            # cpu thread has no output queue. ok, nothing to do. continue
            self._wait_thread = True
            return  # If CPU RESULT has not been set yet, no fps update
        else:
            self._wait_thread = False
            self.boxes, self.scores, self.classes, self.num, self.frame = c[
                "results"][0], c["results"][1], c["results"][2], c["results"][
                    3], c["extras"]

    def run_split_sess(self):
        """
        runs split session WITHOUT threading
        optional: timeline writer
        """
        self.frame = self._input_stream.read()
        score, expand = self._sess.run(
            self._gpu_opts,
            feed_dict={
                self._image_tensor:
                self._visualizer.expand_and_convertRGB_image(self.frame)
            },
            options=self._run_options,
            run_metadata=self._run_metadata)
        if self.config.WRITE_TIMELINE:
            self.timeliner.write_timeline(
                self._run_metadata.step_stats,
                '{}/timeline_{}_SM1.json'.format(self.config.RESULT_PATH,
                                                 self.config.DISPLAY_NAME))
        # CPU Session
        self.boxes, self.scores, self.classes, self.num = self._sess.run(
            self._cpu_opts,
            feed_dict={
                self._score_in: score,
                self._expand_in: expand
            },
            options=self._run_options,
            run_metadata=self._run_metadata)
        if self.config.WRITE_TIMELINE:
            self.timeliner.write_timeline(
                self._run_metadata.step_stats,
                '{}/timeline_{}_SM2.json'.format(self.config.RESULT_PATH,
                                                 self.config.DISPLAY_NAME))

    def run_tracker(self):
        """
        runs KCF tracker on videoStream frame
        !does not work on images, obviously!
        """
        self.frame = self._input_stream.read()
        if self._first_track:
            self._trackers = []
            self._tracker_boxes = self.boxes
            for box in self.boxes[~np.all(self.boxes == 0, axis=1)]:
                self._tracker.init(
                    conv_detect2track(box, self._input_stream.real_width,
                                      self._input_stream.real_height),
                    self.tracker_frame)
                self._trackers.append(self._tracker)
            self._first_track = False

        for idx, self._tracker in enumerate(self._trackers):
            tracker_box = self._tracker.update(self.frame)
            self._tracker_boxes[idx, :] = conv_track2detect(
                tracker_box, self._input_stream.real_width,
                self._input_stream.real_height)
        self._tracker_counter += 1
        self.boxes = self._tracker_boxes
        # Deactivate Tracker
        if self._tracker_counter >= self.config.TRACKER_FRAMES:
            self._track = False
            self._tracker_counter = 0

    def reformat_detection(self):
        """
        reformats detection
        """
        self.num = int(self.num)
        self.boxes = np.squeeze(self.boxes)
        self.classes = np.squeeze(self.classes).astype(np.uint8)
        self.scores = np.squeeze(self.scores)

    def detect(self):
        """
        Object_Detection Detection function
        optional: multi-threaded split session, timeline writer
        """
        if not (self.config.USE_TRACKER and self._track):
            if self.config.SPLIT_MODEL:
                if self.config.MULTI_THREADING:
                    self.run_thread_sess()
                    if self._wait_thread:  # checks if thread has output
                        return
                else:
                    self.run_split_sess()
            else:
                self.run_default_sess()
                if self.config.WRITE_TIMELINE and self.input_type == 'image':
                    self.timeliner.write_timeline(
                        self._run_metadata.step_stats,
                        '{}/timeline_{}.json'.format(self.config.RESULT_PATH,
                                                     self.config.DISPLAY_NAME))
            self.reformat_detection()
            # Activate Tracker
            if self.config.USE_TRACKER and self.num <= self.config.NUM_TRACKERS and self.input_type == 'video':
                self.tracker_frame = self.frame
                self._track = True
                self._first_track = True
        # Tracking
        else:
            self.run_tracker()
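conv_detect2track and conv_track2detect are repo helpers not shown here; hypothetical versions consistent with how run_tracker() uses them (detection boxes are normalized (ymin, xmin, ymax, xmax), while KCF works on a pixel-space (x, y, w, h) rectangle):

import numpy as np

def conv_detect2track(box, width, height):
    # normalized corners -> pixel-space (x, y, w, h) for the tracker
    ymin, xmin, ymax, xmax = box
    return [int(xmin * width), int(ymin * height),
            int((xmax - xmin) * width), int((ymax - ymin) * height)]

def conv_track2detect(box, width, height):
    # pixel-space (x, y, w, h) -> normalized (ymin, xmin, ymax, xmax)
    x, y, w, h = box
    return np.array([y / float(height), x / float(width),
                     (y + h) / float(height), (x + w) / float(width)])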
Example #9
def segmentation(model, config):
    images = load_images(config.IMAGE_PATH, config.LIMIT_IMAGES)
    # Tf Session + Timeliner
    tf_config = model.tf_config
    detection_graph = model.detection_graph
    if config.WRITE_TIMELINE:
        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        timeliner = TimeLiner()
    else:
        options = tf.RunOptions(trace_level=tf.RunOptions.NO_TRACE)
        run_metadata = None
    timer = Timer().start()
    print("> Starting Segmentaion")
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=tf_config) as sess:
            for image in images:
                # input
                frame = cv2.imread(image)
                height, width, channels = frame.shape
                resize_ratio = 1.0 * 513 / max(width, height)
                target_size = (int(resize_ratio * width),
                               int(resize_ratio * height))
                frame = cv2.resize(frame, target_size)
                timer.tic()
                batch_seg_map = sess.run(
                    'SemanticPredictions:0',
                    feed_dict={
                        'ImageTensor:0':
                        [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)]
                    },
                    options=options,
                    run_metadata=run_metadata)
                timer.toc()
                if config.WRITE_TIMELINE:
                    timeliner.write_timeline(
                        run_metadata.step_stats,
                        '{}/timeline_{}.json'.format(config.RESULT_PATH,
                                                     config.DL_DISPLAY_NAME))
                seg_map = batch_seg_map[0]
                #boxes = []
                #labels = []
                #ids = []
                map_labeled = measure.label(seg_map, connectivity=1)
                for region in measure.regionprops(map_labeled):
                    if region.area > config.MINAREA:
                        box = region.bbox
                        id = seg_map[tuple(region.coords[0])]
                        label = config.LABEL_NAMES[id]
                        #boxes.append(box)
                        #labels.append(label)
                        #ids.append(id)
                        if config.VISUALIZE:
                            draw_single_box_on_image(frame, box, label, id,
                                                     config.DISCO_MODE)

                vis = visualize_deeplab(frame, seg_map,
                                        timer.get_frame(), config.MAX_FRAMES,
                                        timer.get_fps(), config.PRINT_INTERVAL,
                                        config.PRINT_TH,
                                        config.DL_DISPLAY_NAME,
                                        config.VISUALIZE, config.VIS_FPS,
                                        config.DISCO_MODE, config.ALPHA)
                if not vis:
                    break
                if config.SAVE_RESULT:
                    cv2.imwrite(
                        '{}/{}_{}.jpg'.format(config.RESULT_PATH,
                                              timer.get_frame(),
                                              config.DL_DISPLAY_NAME), frame)

    cv2.destroyAllWindows()
    timer.stop()
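load_images is also repo-specific; a plausible stand-in (an assumption) that returns a sorted, optionally bounded list of image paths:

import glob
import os

def load_images(image_path, limit_images=None):
    # gather common image extensions, deterministically ordered
    images = sorted(glob.glob(os.path.join(image_path, '*.jpg')) +
                    glob.glob(os.path.join(image_path, '*.png')))
    return images[:limit_images] if limit_images else images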
Example #10
def detection(model, config):
    # Tf Session
    tf_config = model.tf_config
    detection_graph = model.detection_graph
    category_index = model.category_index
    print("> Building Graph")
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph,config=tf_config) as sess:
            # start Videostream
            # Define Input and Output tensors
            tensor_dict = model.get_tensordict(['num_detections', 'detection_boxes', 'detection_scores','detection_classes', 'detection_masks'])
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Mask Transformations
            if 'detection_masks' in tensor_dict:
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = reframe_box_masks_to_image_masks(
                        detection_masks, detection_boxes, config.HEIGHT, config.WIDTH)
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            if config.SPLIT_MODEL:
                score_out = detection_graph.get_tensor_by_name('Postprocessor/convert_scores:0')
                expand_out = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1:0')
                score_in = detection_graph.get_tensor_by_name('Postprocessor/convert_scores_1:0')
                expand_in = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1_1:0')
                # Threading
                score = model.score
                expand = model.expand

            # Timeliner
            if config.WRITE_TIMELINE:
                options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                timeliner = TimeLiner()
            else:
                options = tf.RunOptions(trace_level=tf.RunOptions.NO_TRACE)
                run_metadata = None

            images = load_images(config.IMAGE_PATH,config.LIMIT_IMAGES)
            timer = Timer().start()
            print('> Starting Detection')
            for image in images:
                if config.SPLIT_MODEL:
                    # split model into separate gpu and cpu session threads
                    masks = None # No Mask Detection possible yet
                    frame = cv2.resize(cv2.imread(image),(config.WIDTH,config.HEIGHT))
                    frame_expanded = np.expand_dims(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), axis=0)
                    timer.tic()
                    # GPU Session
                    score, expand = sess.run([score_out, expand_out],
                            feed_dict={image_tensor: frame_expanded},
                            options=options, run_metadata=run_metadata)
                    timer.tictic()
                    if config.WRITE_TIMELINE:
                        timeliner.write_timeline(run_metadata.step_stats,
                                                '{}/timeline_{}_SM1.json'.format(
                                                config.RESULT_PATH,config.OD_DISPLAY_NAME))
                    timer.tic()
                    # CPU Session
                    boxes, scores, classes, num = sess.run(
                            [tensor_dict['detection_boxes'], tensor_dict['detection_scores'], tensor_dict['detection_classes'], tensor_dict['num_detections']],
                            feed_dict={score_in:score, expand_in: expand},
                            options=options, run_metadata=run_metadata)
                    timer.toc()
                    if config.WRITE_TIMELINE:
                        timeliner.write_timeline(run_metadata.step_stats,
                                                '{}/timeline_{}_SM2.json'.format(
                                                config.RESULT_PATH,config.OD_DISPLAY_NAME))
                else:
                    # default session
                    frame = cv2.resize(cv2.imread(image),(config.WIDTH,config.HEIGHT))
                    frame_expanded = np.expand_dims(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), axis=0)
                    timer.tic()
                    output_dict = sess.run(tensor_dict,
                            feed_dict={image_tensor: frame_expanded},
                            options=options, run_metadata=run_metadata)
                    timer.toc()
                    if config.WRITE_TIMELINE:
                        timeliner.write_timeline(run_metadata.step_stats,
                                                '{}/timeline_{}.json'.format(
                                                config.RESULT_PATH,config.OD_DISPLAY_NAME))
                    num = output_dict['num_detections'][0]
                    classes = output_dict['detection_classes'][0]
                    boxes = output_dict['detection_boxes'][0]
                    scores = output_dict['detection_scores'][0]
                    if 'detection_masks' in output_dict:
                        masks = output_dict['detection_masks'][0]
                    else:
                        masks = None

                # reformat detection
                num = int(num)
                boxes = np.squeeze(boxes)
                classes = np.squeeze(classes).astype(np.uint8)
                scores = np.squeeze(scores)

                # Visualization
                vis = visualize_objectdetection(frame, boxes, classes, scores, masks, category_index, timer.get_frame(),
                                                config.MAX_FRAMES, timer.get_fps(), config.PRINT_INTERVAL, config.PRINT_TH,
                                                config.OD_DISPLAY_NAME, config.VISUALIZE, config.VIS_FPS, config.DISCO_MODE, config.ALPHA)
                if not vis:
                    break
                if config.SAVE_RESULT:
                    cv2.imwrite('{}/{}_{}.jpg'.format(config.RESULT_PATH,timer.get_frame(),config.OD_DISPLAY_NAME),frame)

    cv2.destroyAllWindows()
    timer.stop()