Code example #1
    def prepare_model(self):
        """
        first step prepare model
        needs to be called by subclass in re-write process

        Necessary: subclass needs to init
        self._input_stream
        """
        if self.config.MODEL_TYPE is 'od':
            self.download_model()
            self.load_frozen_graph()
            self.load_category_index()
        elif self.config.MODEL_TYPE is 'dl':
            self.download_model()
            self.load_frozen_graph()
        self.fps = FPS(self.config.FPS_INTERVAL).start()
        self._visualizer = Visualizer(self.config).start()
        return self
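
A minimal sketch of how a subclass might satisfy the docstring's contract; ObjectDetectionModel and the stream wiring here are illustrative assumptions, not part of the original code:

class ObjectDetectionModel(Model):
    def prepare(self):
        # prepare_model() downloads the model and loads the graph (plus labels for 'od')
        self.prepare_model()
        # per the docstring, the subclass must init self._input_stream itself
        self._input_stream = WebcamVideoStream(
            self.config.VIDEO_INPUT, self.config.WIDTH, self.config.HEIGHT).start()
        return self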
Code example #2
def segmentation(model,config):
    detection_graph = model.detection_graph
    # fixed input sizes as model needs resize either way
    vs = WebcamVideoStream(config.VIDEO_INPUT,640,480).start()
    resize_ratio = 1.0 * 513 / max(vs.real_width,vs.real_height)
    target_size = (int(resize_ratio * vs.real_width), int(resize_ratio * vs.real_height)) #(513, 384)
    tf_config = model.tf_config
    fps = FPS(config.FPS_INTERVAL).start()
    print("> Starting Segmentaion")
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph,config=tf_config) as sess:
            while vs.isActive():
                frame = vs.resized(target_size)
                batch_seg_map = sess.run('SemanticPredictions:0',
                                        feed_dict={'ImageTensor:0':
                                        [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)]})
                seg_map = batch_seg_map[0]
                #boxes = []
                #labels = []
                #ids = []
                map_labeled = measure.label(seg_map, connectivity=1)
                for region in measure.regionprops(map_labeled):
                    if region.area > config.MINAREA:
                        box = region.bbox
                        class_id = seg_map[tuple(region.coords[0])]  # avoid shadowing builtin id()
                        label = config.LABEL_NAMES[class_id]
                        #boxes.append(box)
                        #labels.append(label)
                        #ids.append(class_id)
                        if config.VISUALIZE:
                            draw_single_box_on_image(frame, box, label, class_id, config.DISCO_MODE)

                vis = visualize_deeplab(frame,seg_map,fps._glob_numFrames,config.MAX_FRAMES,fps.fps_local(),
                                        config.PRINT_INTERVAL,config.PRINT_TH,config.DL_DISPLAY_NAME,
                                        config.VISUALIZE,config.VIS_FPS,config.DISCO_MODE,config.ALPHA)
                if not vis:
                    break
                fps.update()
    fps.stop()
    vs.stop()
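
draw_single_box_on_image() is not shown in the source, and its signature differs between examples (five arguments here, three in example #5 below). A hedged sketch that accepts both call shapes, assuming boxes in skimage's (min_row, min_col, max_row, max_col) pixel format:

import cv2
import numpy as np

def draw_single_box_on_image(image, box, label, class_id=None, disco=False):
    # box comes from skimage regionprops: (min_row, min_col, max_row, max_col)
    ymin, xmin, ymax, xmax = [int(v) for v in box]
    color = (77, 255, 9)
    if disco and class_id is not None:
        # purely illustrative: derive a per-class pseudo-random color
        color = tuple(int(c) for c in np.random.RandomState(int(class_id)).randint(0, 255, 3))
    cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)
    cv2.putText(image, label, (xmin, max(ymin - 10, 0)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)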
Code example #3
def segmentation(model, config):
    detection_graph = model.detection_graph
    # fixed input sizes as model needs resize either way
    vs = WebcamVideoStream(config.VIDEO_INPUT, 640, 480).start()
    resize_ratio = 1.0 * 513 / max(vs.real_width, vs.real_height)
    target_size = (int(resize_ratio * vs.real_width),
                   int(resize_ratio * vs.real_height))  #(513, 384)
    tf_config = tf.ConfigProto(allow_soft_placement=True)
    tf_config.gpu_options.allow_growth = True
    fps = FPS(config.FPS_INTERVAL).start()
    print("> Starting Segmentaion")
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=tf_config) as sess:
            while vs.isActive():
                frame = vs.resized(target_size)
                batch_seg_map = sess.run(
                    'SemanticPredictions:0',
                    feed_dict={
                        'ImageTensor:0':
                        [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)]
                    })
                # visualization
                if config.VISUALIZE:
                    seg_map = batch_seg_map[0]
                    seg_image = create_colormap(seg_map).astype(np.uint8)
                    cv2.addWeighted(seg_image, config.ALPHA, frame,
                                    1 - config.ALPHA, 0, frame)
                    vis_text(frame, "fps: {}".format(fps.fps_local()),
                             (10, 30))
                    # boxes (ymin, xmin, ymax, xmax)
                    if config.BBOX:
                        map_labeled = measure.label(seg_map, connectivity=1)
                        for region in measure.regionprops(map_labeled):
                            if region.area > config.MINAREA:
                                box = region.bbox
                                p1 = (box[1], box[0])
                                p2 = (box[3], box[2])
                                cv2.rectangle(frame, p1, p2, (77, 255, 9), 2)
                                vis_text(
                                    frame, config.LABEL_NAMES[seg_map[tuple(
                                        region.coords[0])]],
                                    (p1[0], p1[1] - 10))
                    cv2.imshow(config.DL_MODEL_NAME, frame)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                fps.update()
    fps.stop()
    vs.stop()
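
create_colormap() turns the class-id map into an RGB image. A sketch assuming the standard PASCAL VOC colormap used by the DeepLab demo; the actual implementation behind create_colormap may differ:

import numpy as np

def create_colormap(seg_map):
    # build the 256-entry PASCAL VOC colormap via bit reversal,
    # then index it with the segmentation map
    colormap = np.zeros((256, 3), dtype=np.uint8)
    ind = np.arange(256, dtype=np.int32)
    for shift in reversed(range(8)):
        for channel in range(3):
            colormap[:, channel] |= ((ind >> channel) & 1).astype(np.uint8) << shift
        ind >>= 3
    return colormap[seg_map]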
Code example #4
def detection(model,config):

    print("> Building Graph")
    # tf Session Config
    tf_config = tf.ConfigProto(allow_soft_placement=True)
    tf_config.gpu_options.allow_growth=True
    detection_graph = model.detection_graph
    category_index = model.category_index
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph,config=tf_config) as sess:
            # start Videostream
            vs = WebcamVideoStream(config.VIDEO_INPUT,config.WIDTH,config.HEIGHT).start()
            # Define Input and Output tensors
            tensor_dict = model.get_tensordict(['num_detections', 'detection_boxes', 'detection_scores','detection_classes', 'detection_masks'])
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # (mask transformations omitted in this excerpt)

            fps = FPS(config.FPS_INTERVAL).start()
            print('> Starting Detection')
            while vs.isActive():
                # Detection
                if not config.USE_TRACKER:
                    # default session
                    frame = vs.read()
                    output_dict = sess.run(tensor_dict, feed_dict={image_tensor: vs.expanded()})
                    num = output_dict['num_detections'][0]
                    classes = output_dict['detection_classes'][0]
                    boxes = output_dict['detection_boxes'][0]
                    scores = output_dict['detection_scores'][0]
                    if 'detection_masks' in output_dict:
                        masks = output_dict['detection_masks'][0]
                    else:
                        masks = None

                    # reformat detection
                    num = int(num)
                    boxes = np.squeeze(boxes)
                    classes = np.squeeze(classes).astype(np.uint8)
                    scores = np.squeeze(scores)

                    # Visualization
                    vis = vis_detection(frame, boxes, classes, scores, masks, category_index, fps.fps_local(),
                                        config.VISUALIZE, config.DET_INTERVAL, config.DET_TH, config.MAX_FRAMES,
                                        fps._glob_numFrames, config.OD_MODEL_NAME)
                    if not vis:
                        break

                fps.update()

    # End everything
    vs.stop()
    fps.stop()
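
FPS is used by every example but never shown. A minimal sketch consistent with the calls above (start(), update(), stop(), fps_local() over an FPS_INTERVAL window, and the _glob_numFrames counter); all internals are assumptions:

import datetime

class FPS(object):
    def __init__(self, interval):
        self._interval = interval   # seconds per local measurement window
        self._glob_start = None
        self._glob_numFrames = 0
        self._local_start = None
        self._local_numFrames = 0

    def start(self):
        self._glob_start = self._local_start = datetime.datetime.now()
        return self

    def update(self):
        self._glob_numFrames += 1
        self._local_numFrames += 1

    def fps_local(self):
        # frames per second within the current local window
        elapsed = (datetime.datetime.now() - self._local_start).total_seconds()
        fps = self._local_numFrames / elapsed if elapsed > 0 else 0.0
        # reset the window once the interval has elapsed
        if elapsed > self._interval:
            self._local_start = datetime.datetime.now()
            self._local_numFrames = 0
        return round(fps, 1)

    def stop(self):
        elapsed = (datetime.datetime.now() - self._glob_start).total_seconds()
        print('> [INFO] elapsed: {:.2f}s, approx. global fps: {:.2f}'.format(
            elapsed, self._glob_numFrames / elapsed if elapsed > 0 else 0.0))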
Code example #5
def segmentation(model, config):
    detection_graph = model.detection_graph
    # fixed input sizes as model needs resize either way
    vs = WebcamVideoStream(config.VIDEO_INPUT, 640, 480).start()
    resize_ratio = 1.0 * 513 / max(vs.real_width, vs.real_height)
    target_size = (int(resize_ratio * vs.real_width),
                   int(resize_ratio * vs.real_height))  #(513, 384)
    tf_config = model.tf_config
    fps = FPS(config.FPS_INTERVAL).start()
    print("> Starting Segmentaion")
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=tf_config) as sess:
            while vs.isActive():
                frame = vs.resized(target_size)
                batch_seg_map = sess.run(
                    'SemanticPredictions:0',
                    feed_dict={
                        'ImageTensor:0':
                        [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)]
                    })
                seg_map = batch_seg_map[0]
                #boxes = []
                #labels = []
                map_labeled = measure.label(seg_map, connectivity=1)
                for region in measure.regionprops(map_labeled):
                    if region.area > config.MINAREA:
                        box = region.bbox
                        label = config.LABEL_NAMES[seg_map[tuple(
                            region.coords[0])]]
                        #boxes.append(box)
                        #labels.append(label)
                        if config.VISUALIZE:
                            draw_single_box_on_image(frame, box, label)

                vis = visualize_deeplab(
                    frame, seg_map, fps._glob_numFrames, config.MAX_FRAMES,
                    fps.fps_local(), config.PRINT_INTERVAL, config.PRINT_TH,
                    config.OD_MODEL_NAME + config._DEV + config._OPT,
                    config.VISUALIZE)
                if not vis:
                    break
                fps.update()
    fps.stop()
    vs.stop()
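
WebcamVideoStream appears in every pipeline above. A sketch of a threaded cv2.VideoCapture wrapper covering the interface the examples rely on (read, resized, expanded, isActive, stop, real_width/real_height); the implementation details are assumptions:

import threading
import cv2
import numpy as np

class WebcamVideoStream(object):
    def __init__(self, src, width, height):
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        # the camera may ignore the request, so query the actual size
        self.real_width = int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.real_height = int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.grabbed, self.frame = self.stream.read()
        self.stopped = False

    def start(self):
        t = threading.Thread(target=self._update)
        t.daemon = True
        t.start()
        return self

    def _update(self):
        # keep grabbing the latest frame until stopped
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()
        self.stream.release()

    def read(self):
        return self.frame

    def resized(self, target_size):
        return cv2.resize(self.frame, target_size)

    def expanded(self):
        # batch dimension + BGR->RGB, as image_tensor expects
        return np.expand_dims(cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB), axis=0)

    def isActive(self):
        return self.grabbed and not self.stopped

    def stop(self):
        self.stopped = True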
Code example #6
class Model(object):
    """
    Base Tensorflow Inference Model Class
    """
    def __init__(self,config):
        self.config = config
        self.detection_graph = tf.Graph()
        self.category_index = None
        self.masks = None
        #self._tf_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
        self._tf_config = tf.ConfigProto(allow_soft_placement=True)
        self._tf_config.gpu_options.allow_growth=True
        #self._tf_config.gpu_options.force_gpu_compatible=True
        #self._tf_config.gpu_options.per_process_gpu_memory_fraction = 0.01
        self._run_options = tf.RunOptions(trace_level=tf.RunOptions.NO_TRACE)
        self._run_metadata = False
        self._wait_thread = False
        self._is_imageD = False
        self._is_videoD = False
        self._is_rosD = False
        print('> Model: {}'.format(self.config.MODEL_PATH))

    def download_model(self):
        """
        downloads model from the model zoo
        """
        if self.config.MODEL_TYPE == 'dl':
            download_base = 'http://download.tensorflow.org/models/'
        elif self.config.MODEL_TYPE == 'od':
            download_base = 'http://download.tensorflow.org/models/object_detection/'
        model_file = self.config.MODEL_NAME + '.tar.gz'
        if not os.path.isfile(self.config.MODEL_PATH) and self.config.DOWNLOAD_MODEL:
            print('> Model not found. Downloading it now.')
            opener = urllib.request.URLopener()
            opener.retrieve(download_base + model_file, model_file)
            tar_file = tarfile.open(model_file)
            for file in tar_file.getmembers():
                file_name = os.path.basename(file.name)
                if 'frozen_inference_graph.pb' in file_name:
                    tar_file.extract(file, os.getcwd() + '/models/')
            os.remove(os.getcwd() + '/' + model_file)
        else:
            print('> Model found. Proceed.')

    def node_name(self,n):
        if n.startswith("^"):
            return n[1:]
        else:
            return n.split(":")[0]

    def load_frozen_graph(self):
        """
        loads graph from frozen model file
        """
        print('> Loading frozen model into memory')
        if (self.config.MODEL_TYPE == 'od' and self.config.SPLIT_MODEL):
            # load a frozen Model and split it into GPU and CPU graphs
            # Hardcoded split points for ssd_mobilenet
            tf.reset_default_graph()
            if self.config.SSD_SHAPE == 600:
                shape = 7326
            else:
                shape = 1917
            self.score = tf.placeholder(tf.float32, shape=(None, shape, self.config.NUM_CLASSES), name=self.config.SPLIT_NODES[0])
            self.expand = tf.placeholder(tf.float32, shape=(None, shape, 1, 4), name=self.config.SPLIT_NODES[1])
            #self.tofloat = tf.placeholder(tf.float32, shape=(None), name=self.config.SPLIT_NODES[2])
            for node in tf.get_default_graph().as_graph_def().node:
                if node.name == self.config.SPLIT_NODES[0]:
                    score_def = node
                if node.name == self.config.SPLIT_NODES[1]:
                    expand_def = node
                #if node.name == self.config.SPLIT_NODES[2]:
                #    tofloat_def = node

            with self.detection_graph.as_default():
                graph_def = tf.GraphDef()
                with tf.gfile.GFile(self.config.MODEL_PATH, 'rb') as fid:
                    serialized_graph = fid.read()
                    graph_def.ParseFromString(serialized_graph)

                    edges = {}
                    name_to_node_map = {}
                    node_seq = {}
                    seq = 0
                    for node in graph_def.node:
                        n = self.node_name(node.name)
                        name_to_node_map[n] = node
                        edges[n] = [self.node_name(x) for x in node.input]
                        node_seq[n] = seq
                        seq += 1
                    for d in self.config.SPLIT_NODES:
                        assert d in name_to_node_map, "%s is not in graph" % d

                    nodes_to_keep = set()
                    next_to_visit = self.config.SPLIT_NODES[:]

                    while next_to_visit:
                        n = next_to_visit[0]
                        del next_to_visit[0]
                        if n in nodes_to_keep: continue
                        nodes_to_keep.add(n)
                        next_to_visit += edges[n]

                    nodes_to_keep_list = sorted(list(nodes_to_keep), key=lambda n: node_seq[n])
                    nodes_to_remove = set()

                    for n in node_seq:
                        if n in nodes_to_keep_list: continue
                        nodes_to_remove.add(n)
                    nodes_to_remove_list = sorted(list(nodes_to_remove), key=lambda n: node_seq[n])

                    keep = graph_pb2.GraphDef()
                    for n in nodes_to_keep_list:
                        keep.node.extend([copy.deepcopy(name_to_node_map[n])])

                    remove = graph_pb2.GraphDef()
                    remove.node.extend([score_def])
                    remove.node.extend([expand_def])
                    for n in nodes_to_remove_list:
                        remove.node.extend([copy.deepcopy(name_to_node_map[n])])

                    with tf.device('/gpu:0'):
                        tf.import_graph_def(keep, name='')
                    with tf.device('/cpu:0'):
                        tf.import_graph_def(remove, name='')
        else:
            # default model loading procedure
            with self.detection_graph.as_default():
                graph_def = tf.GraphDef()
                with tf.gfile.GFile(self.config.MODEL_PATH, 'rb') as fid:
                    serialized_graph = fid.read()
                    graph_def.ParseFromString(serialized_graph)
                    tf.import_graph_def(graph_def, name='')

    def load_category_index(self):
        """
        creates category_index from label_map
        """
        print('> Loading label map')
        label_map = tf_utils.load_labelmap(self.config.LABEL_PATH)
        categories = tf_utils.convert_label_map_to_categories(label_map, max_num_classes=self.config.NUM_CLASSES, use_display_name=True)
        self.category_index = tf_utils.create_category_index(categories)

    def get_tensor_dict(self, outputs):
        """
        returns tensordict for given tensornames list
        """
        ops = self.detection_graph.get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        self.tensor_dict = {}
        for key in outputs:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                self.tensor_dict[key] = self.detection_graph.get_tensor_by_name(tensor_name)
        return self.tensor_dict

    def prepare_model(self):
        """
        first step prepare model
        needs to be called by subclass in re-write process

        Necessary: subclass needs to init
        self._input_stream
        """
        if self.config.MODEL_TYPE is 'od':
            self.download_model()
            self.load_frozen_graph()
            self.load_category_index()
        elif self.config.MODEL_TYPE is 'dl':
            self.download_model()
            self.load_frozen_graph()
        self.fps = FPS(self.config.FPS_INTERVAL).start()
        self._visualizer = Visualizer(self.config).start()
        return self

    def isActive(self):
        """
        checks if stream and visualizer are active
        """
        return self._input_stream.isActive() and self._visualizer.isActive()

    def stop(self):
        """
        stops all Model sub classes
        """
        self._input_stream.stop()
        self._visualizer.stop()
        self.fps.stop()
        if self.config.SPLIT_MODEL and self.config.MODEL_TYPE == 'od':
            self._gpu_worker.stop()
            self._cpu_worker.stop()

    def detect(self):
        """
        needs to be written by subclass
        """
        self.detection = None

    def run(self):
        """
        runs detection loop on video or image
        listens on isActive()
        """
        print("> starting detection")
        self.start()
        while self.isActive():
            # detection
            self.detect()
            # Visualization
            if not self._wait_thread:
                self.visualize_detection()
                self.fps.update()
        self.stop()

    def start(self):
        """
        starts fps and visualizer class
        """
        self.fps.start()
        self._visualizer = Visualizer(self.config).start()

    def visualize_detection(self):
        self.detection = self._visualizer.visualize_detection(self.frame,self.boxes,
                                                            self.classes,self.scores,
                                                            self.masks,self.fps.fps_local(),
                                                            self.category_index,self._is_imageD)

    def prepare_ros(self,node):
        """
        prepares ROS node and ROS input stream
        only usable in the ros branch due to ROS-related packages
        """
        assert node in ['detection_node','deeplab_node'], "only 'detection_node' and 'deeplab_node' supported"
        import rospy
        from ros import ROSStream, DetectionPublisher, SegmentationPublisher
        self._is_rosD = True
        rospy.init_node(node)
        self._input_stream = ROSStream(self.config.ROS_INPUT)
        if node == 'detection_node':
            self._ros_publisher = DetectionPublisher()
        if node == 'deeplab_node':
            self._ros_publisher = SegmentationPublisher()
        # check for frame
        while True:
            self.frame = self._input_stream.read()
            time.sleep(1)
            print("...waiting for ROS image")
            if self.frame is not None:
                self.stream_height,self.stream_width = self.frame.shape[0:2]
                break

    def prepare_timeliner(self):
        """
        prepares timeliner and sets tf Run options
        """
        self._run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        self._run_metadata = tf.RunMetadata()
        self.timeliner = TimeLiner()

    def prepare_tracker(self):
        """
        prepares KCF tracker
        """
        sys.path.append(os.getcwd() + '/rod/kcf')
        import KCF
        self._KCF = KCF  # keep a module handle so run_tracker can create fresh trackers
        self._tracker = KCF.kcftracker(False, True, False, False)
        self._tracker_counter = 0
        self._track = False

    def run_tracker(self):
        """
        runs KCF tracker on videoStream frame
        !does not work on images, obviously!
        """
        self.frame = self._input_stream.read()
        if self._first_track:
            self._trackers = []
            self._tracker_boxes = self.boxes
            num_tracked = 0
            for box in self.boxes[~np.all(self.boxes == 0, axis=1)]:
                # create a fresh tracker per box; a single shared instance
                # would only track the last initialized box
                self._tracker = self._KCF.kcftracker(False, True, False, False)
                self._tracker.init(conv_detect2track(box, self._input_stream.real_width,
                                                     self._input_stream.real_height), self.tracker_frame)
                self._trackers.append(self._tracker)
                num_tracked += 1
                if num_tracked >= self.config.NUM_TRACKERS:
                    break
            self._first_track = False

        for idx, tracker in enumerate(self._trackers):
            tracker_box = tracker.update(self.frame)
            self._tracker_boxes[idx,:] = conv_track2detect(tracker_box,
                                                    self._input_stream.real_width,
                                                    self._input_stream.real_height)
        self._tracker_counter += 1
        self.boxes = self._tracker_boxes
        # Deactivate Tracker
        if self._tracker_counter >= self.config.TRACKER_FRAMES:
            self._track = False
            self._tracker_counter = 0

    def activate_tracker(self):
        """
        activates KCF tracker
        deactivates mask detection
        """
        #self.masks = None
        self.tracker_frame = self.frame
        self._track = True
        self._first_track = True
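
conv_detect2track() and conv_track2detect() are assumed to translate between the detector's normalized (ymin, xmin, ymax, xmax) boxes and the pixel-space (x, y, w, h) boxes the KCF tracker works with; a sketch under that assumption:

import numpy as np

def conv_detect2track(box, width, height):
    # normalized (ymin, xmin, ymax, xmax) -> pixel (x, y, w, h)
    ymin, xmin, ymax, xmax = box
    return [int(xmin * width), int(ymin * height),
            int((xmax - xmin) * width), int((ymax - ymin) * height)]

def conv_track2detect(box, width, height):
    # pixel (x, y, w, h) -> normalized (ymin, xmin, ymax, xmax)
    x, y, w, h = box
    return np.array([y / float(height), x / float(width),
                     (y + h) / float(height), (x + w) / float(width)], dtype=np.float32)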
Code example #7
def detection(model,config):
    # Tracker
    if config.USE_TRACKER:
        import sys
        sys.path.append(os.getcwd()+'/stuff/kcf')
        import KCF
        tracker = KCF.kcftracker(False, True, False, False)
        tracker_counter = 0
        track = False

    print("> Building Graph")
    # tf Session Config
    tf_config = model.tf_config
    detection_graph = model.detection_graph
    category_index = model.category_index
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph,config=tf_config) as sess:
            # start Videostream
            vs = WebcamVideoStream(config.VIDEO_INPUT,config.WIDTH,config.HEIGHT).start()
            # Define Input and Output tensors
            tensor_dict = model.get_tensordict(['num_detections', 'detection_boxes', 'detection_scores','detection_classes', 'detection_masks'])
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Mask Transformations
            if 'detection_masks' in tensor_dict:
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                        detection_masks, detection_boxes, vs.real_height, vs.real_width)
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            if config.SPLIT_MODEL:
                score_out = detection_graph.get_tensor_by_name('Postprocessor/convert_scores:0')
                expand_out = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1:0')
                score_in = detection_graph.get_tensor_by_name('Postprocessor/convert_scores_1:0')
                expand_in = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1_1:0')
                # Threading
                score = model.score
                expand = model.expand
                gpu_worker = SessionWorker("GPU",detection_graph,tf_config)
                cpu_worker = SessionWorker("CPU",detection_graph,tf_config)
                gpu_opts = [score_out, expand_out]
                cpu_opts = [tensor_dict['detection_boxes'], tensor_dict['detection_scores'], tensor_dict['detection_classes'], tensor_dict['num_detections']]
                gpu_counter = 0
                cpu_counter = 0

            fps = FPS(config.FPS_INTERVAL).start()
            print('> Starting Detection')
            while vs.isActive():
                # Detection
                if not (config.USE_TRACKER and track):
                    if config.SPLIT_MODEL:
                        # split model into separate gpu and cpu session threads
                        masks = None # No Mask Detection possible yet
                        if gpu_worker.is_sess_empty():
                            # read video frame, expand dimensions and convert to rgb
                            frame = vs.read()
                            # put new queue
                            gpu_feeds = {image_tensor: vs.expanded()}
                            if config.VISUALIZE:
                                gpu_extras = frame # for visualization frame
                            else:
                                gpu_extras = None
                            gpu_worker.put_sess_queue(gpu_opts,gpu_feeds,gpu_extras)
                        g = gpu_worker.get_result_queue()
                        if g is None:
                            # gpu thread has no output queue. ok skip, let's check cpu thread.
                            gpu_counter += 1
                        else:
                            # gpu thread has output queue.
                            gpu_counter = 0
                            score,expand,frame = g["results"][0],g["results"][1],g["extras"]

                            if cpu_worker.is_sess_empty():
                                # When cpu thread has no next queue, put new queue.
                                # else, drop gpu queue.
                                cpu_feeds = {score_in: score, expand_in: expand}
                                cpu_extras = frame
                                cpu_worker.put_sess_queue(cpu_opts,cpu_feeds,cpu_extras)
                        c = cpu_worker.get_result_queue()
                        if c is None:
                            # cpu thread has no output queue. ok, nothing to do. continue
                            cpu_counter += 1
                            continue # If CPU RESULT has not been set yet, no fps update
                        else:
                            cpu_counter = 0
                            boxes, scores, classes, num, frame = c["results"][0],c["results"][1],c["results"][2],c["results"][3],c["extras"]
                    else:
                        # default session
                        frame = vs.read()
                        output_dict = sess.run(tensor_dict, feed_dict={image_tensor: vs.expanded()})
                        num = output_dict['num_detections'][0]
                        classes = output_dict['detection_classes'][0]
                        boxes = output_dict['detection_boxes'][0]
                        scores = output_dict['detection_scores'][0]
                        if 'detection_masks' in output_dict:
                            masks = output_dict['detection_masks'][0]
                        else:
                            masks = None

                    # reformat detection
                    num = int(num)
                    boxes = np.squeeze(boxes)
                    classes = np.squeeze(classes).astype(np.uint8)
                    scores = np.squeeze(scores)

                    # Visualization
                    vis = vis_detection(frame, boxes, classes, scores, masks, category_index, fps.fps_local(),
                                        config.VISUALIZE, config.DET_INTERVAL, config.DET_TH, config.MAX_FRAMES,
                                        fps._glob_numFrames, config.OD_MODEL_NAME)
                    if not vis:
                        break

                    # Activate Tracker
                    if config.USE_TRACKER and num <= config.NUM_TRACKERS:
                        tracker_frame = frame
                        track = True
                        first_track = True

                # Tracking
                else:
                    frame = vs.read()
                    if first_track:
                        trackers = []
                        tracker_boxes = boxes
                        for box in boxes[~np.all(boxes == 0, axis=1)]:
                            # fresh tracker per box; a single shared instance
                            # would only track the last initialized box
                            tracker = KCF.kcftracker(False, True, False, False)
                            tracker.init(conv_detect2track(box, vs.real_width, vs.real_height), tracker_frame)
                            trackers.append(tracker)
                        first_track = False

                    for idx,tracker in enumerate(trackers):
                        tracker_box = tracker.update(frame)
                        tracker_boxes[idx,:] = conv_track2detect(tracker_box, vs.real_width, vs.real_height)
                    vis = vis_detection(frame, tracker_boxes, classes, scores, masks, category_index, fps.fps_local(),
                                        config.VISUALIZE, config.DET_INTERVAL, config.DET_TH, config.MAX_FRAMES,
                                        fps._glob_numFrames, config.OD_MODEL_NAME)
                    if not vis:
                        break

                    tracker_counter += 1
                    #tracker_frame = frame
                    if tracker_counter >= config.TRACKER_FRAMES:
                        track = False
                        tracker_counter = 0

                fps.update()

    # End everything
    vs.stop()
    fps.stop()
    if config.SPLIT_MODEL:
        gpu_worker.stop()
        cpu_worker.stop()
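
SessionWorker is the threaded session wrapper the split-model path relies on. A minimal sketch of the interface used above (is_sess_empty, put_sess_queue, get_result_queue, stop); the queue handshake and internals are assumptions:

import threading
try:
    import queue  # Python 3
except ImportError:
    import Queue as queue  # Python 2
import tensorflow as tf

class SessionWorker(object):
    def __init__(self, tag, graph, tf_config):
        self.sess_queue = queue.Queue(maxsize=1)
        self.result_queue = queue.Queue(maxsize=1)
        self.stopped = False
        t = threading.Thread(target=self._run, args=(tag, graph, tf_config))
        t.daemon = True
        t.start()

    def _run(self, tag, graph, tf_config):
        # each worker owns its session; GPU/CPU placement comes from the
        # device-pinned subgraphs imported in load_frozen_graph()
        with tf.Session(graph=graph, config=tf_config) as sess:
            while not self.stopped:
                try:
                    q = self.sess_queue.get(timeout=1)
                except queue.Empty:
                    continue
                results = sess.run(q["opts"], feed_dict=q["feeds"])
                self.result_queue.put({"results": results, "extras": q["extras"]})
                self.sess_queue.task_done()

    def is_sess_empty(self):
        return self.sess_queue.empty()

    def put_sess_queue(self, opts, feeds, extras=None):
        if not self.sess_queue.full():
            self.sess_queue.put({"opts": opts, "feeds": feeds, "extras": extras})

    def get_result_queue(self):
        return None if self.result_queue.empty() else self.result_queue.get()

    def stop(self):
        self.stopped = True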
Code example #8
class Model(object):
    """
    Base Tensorflow Inference Model Class
    """
    def __init__(self, config):
        self.config = config
        self.detection_graph = tf.Graph()
        self.category_index = None
        self.masks = None
        self._tf_config = tf.ConfigProto(allow_soft_placement=True)
        self._tf_config.gpu_options.allow_growth = True
        self._run_options = tf.RunOptions(trace_level=tf.RunOptions.NO_TRACE)
        self._run_metadata = False
        self._wait_thread = False
        print('> Model: {}'.format(self.config.MODEL_PATH))

    def download_model(self):
        """
        downloads model from the model zoo
        """
        if self.config.MODEL_TYPE == 'dl':
            download_base = 'http://download.tensorflow.org/models/'
        elif self.config.MODEL_TYPE == 'od':
            download_base = 'http://download.tensorflow.org/models/object_detection/'
        model_file = self.config.MODEL_NAME + '.tar.gz'
        if not os.path.isfile(
                self.config.MODEL_PATH) and self.config.DOWNLOAD_MODEL:
            print('> Model not found. Downloading it now.')
            opener = urllib.request.URLopener()
            opener.retrieve(download_base + model_file, model_file)
            tar_file = tarfile.open(model_file)
            for file in tar_file.getmembers():
                file_name = os.path.basename(file.name)
                if 'frozen_inference_graph.pb' in file_name:
                    tar_file.extract(file, os.getcwd() + '/models/')
            os.remove(os.getcwd() + '/' + model_file)
        else:
            print('> Model found. Proceed.')

    def _node_name(self, n):
        if n.startswith("^"):
            return n[1:]
        else:
            return n.split(":")[0]

    def load_frozenmodel(self):
        """
        loads graph from frozen model file
        """
        print('> Loading frozen model into memory')
        if (self.config.MODEL_TYPE == 'od' and self.config.SPLIT_MODEL):
            # load a frozen Model and split it into GPU and CPU graphs
            # Hardcoded split points for ssd_mobilenet
            input_graph = tf.Graph()
            with tf.Session(graph=input_graph, config=self._tf_config):
                if self.config.SSD_SHAPE == 600:
                    shape = 7326
                else:
                    shape = 1917
                self.score = tf.placeholder(tf.float32,
                                            shape=(None, shape,
                                                   self.config.NUM_CLASSES),
                                            name=self.config.SPLIT_NODES[0])
                self.expand = tf.placeholder(tf.float32,
                                             shape=(None, shape, 1, 4),
                                             name=self.config.SPLIT_NODES[1])
                for node in input_graph.as_graph_def().node:
                    if node.name == self.config.SPLIT_NODES[0]:
                        score_def = node
                    if node.name == self.config.SPLIT_NODES[1]:
                        expand_def = node

            with self.detection_graph.as_default():
                od_graph_def = tf.GraphDef()
                with tf.gfile.GFile(self.config.MODEL_PATH, 'rb') as fid:
                    serialized_graph = fid.read()
                    od_graph_def.ParseFromString(serialized_graph)

                    edges = {}
                    name_to_node_map = {}
                    node_seq = {}
                    seq = 0
                    for node in od_graph_def.node:
                        n = self._node_name(node.name)
                        name_to_node_map[n] = node
                        edges[n] = [self._node_name(x) for x in node.input]
                        node_seq[n] = seq
                        seq += 1
                    for d in self.config.SPLIT_NODES:
                        assert d in name_to_node_map, "%s is not in graph" % d

                    nodes_to_keep = set()
                    next_to_visit = self.config.SPLIT_NODES[:]

                    while next_to_visit:
                        n = next_to_visit[0]
                        del next_to_visit[0]
                        if n in nodes_to_keep: continue
                        nodes_to_keep.add(n)
                        next_to_visit += edges[n]

                    nodes_to_keep_list = sorted(list(nodes_to_keep),
                                                key=lambda n: node_seq[n])
                    nodes_to_remove = set()

                    for n in node_seq:
                        if n in nodes_to_keep_list: continue
                        nodes_to_remove.add(n)
                    nodes_to_remove_list = sorted(list(nodes_to_remove),
                                                  key=lambda n: node_seq[n])

                    keep = graph_pb2.GraphDef()
                    for n in nodes_to_keep_list:
                        keep.node.extend([copy.deepcopy(name_to_node_map[n])])

                    remove = graph_pb2.GraphDef()
                    remove.node.extend([score_def])
                    remove.node.extend([expand_def])
                    for n in nodes_to_remove_list:
                        remove.node.extend(
                            [copy.deepcopy(name_to_node_map[n])])

                    with tf.device('/gpu:0'):
                        tf.import_graph_def(keep, name='')
                    with tf.device('/cpu:0'):
                        tf.import_graph_def(remove, name='')
        else:
            # default model loading procedure
            with self.detection_graph.as_default():
                od_graph_def = tf.GraphDef()
                with tf.gfile.GFile(self.config.MODEL_PATH, 'rb') as fid:
                    serialized_graph = fid.read()
                    od_graph_def.ParseFromString(serialized_graph)
                    tf.import_graph_def(od_graph_def, name='')

    def load_labelmap(self):
        """
        creates category_index from label_map
        """
        print('> Loading label map')
        label_map = tf_utils.load_labelmap(self.config.LABEL_PATH)
        categories = tf_utils.convert_label_map_to_categories(
            label_map,
            max_num_classes=self.config.NUM_CLASSES,
            use_display_name=True)
        self.category_index = tf_utils.create_category_index(categories)

    def get_tensordict(self, outputs):
        """
        returns tensordict for given tensornames list
        """
        ops = self.detection_graph.get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        self.tensor_dict = {}
        for key in outputs:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                self.tensor_dict[
                    key] = self.detection_graph.get_tensor_by_name(tensor_name)
        return self.tensor_dict

    def prepare_model(self):
        """
        first step prepare model
        needs to be called by subclass in re-write process

        Necessary: subclass needs to init
        self._input_stream
        """
        if self.config.MODEL_TYPE is 'od':
            self.download_model()
            self.load_frozenmodel()
            self.load_labelmap()
        elif self.config.MODEL_TYPE is 'dl':
            self.download_model()
            self.load_frozenmodel()
        self.fps = FPS(self.config.FPS_INTERVAL).start()
        self._visualizer = Visualizer(self.config).start()
        return self

    def isActive(self):
        """
        checks if stream and visualizer are active
        """
        return self._input_stream.isActive() and self._visualizer.isActive()

    def stop(self):
        """
        stops all sub classes
        """
        self._input_stream.stop()
        self._visualizer.stop()
        self.fps.stop()
        if self.config.SPLIT_MODEL and self.config.MODEL_TYPE == 'od':
            self._gpu_worker.stop()
            self._cpu_worker.stop()

    def detect(self):
        """
        needs to be written by subclass
        """
        self.detection = None

    def run(self):
        """
        runs detection loop on video or image
        listens on isActive()
        """
        print("> starting detection")
        self.start()
        while self.isActive():
            # detection
            self.detect()
            # Visualization
            if not self._wait_thread:
                self.visualize_detection()
                self.fps.update()
        self.stop()

    def start(self):
        """
        starts fps and visualizer class
        """
        self.fps.start()
        self._visualizer = Visualizer(self.config).start()

    def visualize_detection(self):
        self.detection = self._visualizer.visualize_detection(
            self.frame, self.boxes, self.classes, self.scores, self.masks,
            self.fps.fps_local(), self.category_index)
Code example #9
def detection(model,config):
    # Tracker
    if config.USE_TRACKER:
        import sys
        sys.path.append(os.getcwd()+'/rod/kcf')
        import KCF
        tracker = KCF.kcftracker(False, True, False, False)
        tracker_counter = 0
        track = False

    print("> Building Graph")
    # tf Session Config
    tf_config = model.tf_config
    detection_graph = model.detection_graph
    category_index = model.category_index
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph,config=tf_config) as sess:
            # start Videostream
            vs = WebcamVideoStream(config.VIDEO_INPUT,config.WIDTH,config.HEIGHT).start()
            # Define Input and Output tensors
            tensor_dict = model.get_tensordict(['num_detections', 'detection_boxes', 'detection_scores','detection_classes', 'detection_masks'])
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Mask Transformations
            if 'detection_masks' in tensor_dict:
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = reframe_box_masks_to_image_masks(
                                            detection_masks, detection_boxes, vs.real_height, vs.real_width)
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            if config.SPLIT_MODEL:
                score_out = detection_graph.get_tensor_by_name('Postprocessor/convert_scores:0')
                expand_out = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1:0')
                score_in = detection_graph.get_tensor_by_name('Postprocessor/convert_scores_1:0')
                expand_in = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1_1:0')
                # Threading
                score = model.score
                expand = model.expand
                gpu_worker = SessionWorker("GPU",detection_graph,tf_config)
                cpu_worker = SessionWorker("CPU",detection_graph,tf_config)
                gpu_opts = [score_out, expand_out]
                cpu_opts = [tensor_dict['detection_boxes'], tensor_dict['detection_scores'], tensor_dict['detection_classes'], tensor_dict['num_detections']]

            fps = FPS(config.FPS_INTERVAL).start()
            masks = None
            print('> Starting Detection')
            while vs.isActive():
                # Detection
                if not (config.USE_TRACKER and track):
                    if config.SPLIT_MODEL:
                        # split model into separate gpu and cpu session threads
                        if gpu_worker.is_sess_empty():
                            # read video frame, expand dimensions and convert to rgb
                            frame = vs.read()
                            # put new queue
                            gpu_feeds = {image_tensor: vs.expanded()}
                            if config.VISUALIZE:
                                gpu_extras = frame # for visualization frame
                            else:
                                gpu_extras = None
                            gpu_worker.put_sess_queue(gpu_opts,gpu_feeds,gpu_extras)
                        g = gpu_worker.get_result_queue()
                        if g is None:
                            # gpu thread has no output queue. ok skip, let's check cpu thread.
                            pass
                        else:
                            # gpu thread has output queue.
                            score,expand,frame = g["results"][0],g["results"][1],g["extras"]

                            if cpu_worker.is_sess_empty():
                                # When cpu thread has no next queue, put new queue.
                                # else, drop gpu queue.
                                cpu_feeds = {score_in: score, expand_in: expand}
                                cpu_extras = frame
                                cpu_worker.put_sess_queue(cpu_opts,cpu_feeds,cpu_extras)
                        c = cpu_worker.get_result_queue()
                        if c is None:
                            # cpu thread has no output queue. ok, nothing to do. continue
                            continue # If CPU RESULT has not been set yet, no fps update
                        else:
                            boxes, scores, classes, num, frame = c["results"][0],c["results"][1],c["results"][2],c["results"][3],c["extras"]
                    else:
                        # default session
                        frame = vs.read()
                        output_dict = sess.run(tensor_dict, feed_dict={image_tensor: vs.expanded()})
                        num = output_dict['num_detections'][0]
                        classes = output_dict['detection_classes'][0]
                        boxes = output_dict['detection_boxes'][0]
                        scores = output_dict['detection_scores'][0]
                        if 'detection_masks' in output_dict:
                            masks = output_dict['detection_masks'][0]

                    # reformat detection
                    num = int(num)
                    boxes = np.squeeze(boxes)
                    classes = np.squeeze(classes).astype(np.uint8)
                    scores = np.squeeze(scores)

                    # Visualization
                    vis = visualize_objectdetection(frame,boxes,classes,scores,masks,category_index,fps._glob_numFrames,
                                                    config.MAX_FRAMES,fps.fps_local(),config.PRINT_INTERVAL,config.PRINT_TH,
                                                    config.OD_DISPLAY_NAME,config.VISUALIZE,config.VIS_FPS,config.DISCO_MODE,config.ALPHA)
                    if not vis:
                        break

                    # Activate Tracker
                    if config.USE_TRACKER and num <= config.NUM_TRACKERS:
                        tracker_frame = frame
                        track = True
                        first_track = True

                # Tracking
                else:
                    frame = vs.read()
                    if first_track:
                        trackers = []
                        tracker_boxes = boxes
                    for box in boxes[~np.all(boxes == 0, axis=1)]:
                        # fresh tracker per box; a single shared instance
                        # would only track the last initialized box
                        tracker = KCF.kcftracker(False, True, False, False)
                        tracker.init(conv_detect2track(box, vs.real_width, vs.real_height), tracker_frame)
                        trackers.append(tracker)
                        first_track = False

                    for idx,tracker in enumerate(trackers):
                        tracker_box = tracker.update(frame)
                        tracker_boxes[idx,:] = conv_track2detect(tracker_box, vs.real_width, vs.real_height)
                    vis = visualize_objectdetection(frame,tracker_boxes,classes,scores,masks,category_index,fps._glob_numFrames,
                                                    config.MAX_FRAMES,fps.fps_local(),config.PRINT_INTERVAL,config.PRINT_TH,
                                                    config.OD_DISPLAY_NAME,config.VISUALIZE,config.VIS_FPS,config.DISCO_MODE,config.ALPHA)
                    if not vis:
                        break

                    tracker_counter += 1
                    #tracker_frame = frame
                    if tracker_counter >= config.TRACKER_FRAMES:
                        track = False
                        tracker_counter = 0

                fps.update()

    # End everything
    vs.stop()
    fps.stop()
    if config.SPLIT_MODEL:
        gpu_worker.stop()
        cpu_worker.stop()
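
A hedged sketch of how these pieces might be wired together at script level; load_config() is a hypothetical stand-in for however the repo actually builds its config object. Note that the detection()/segmentation() variants above read model.tf_config, so a Model like the one in example #8 would need to expose its private _tf_config under that name:

if __name__ == '__main__':
    config = load_config()  # assumed helper, not shown in the source
    model = Model(config)
    model.download_model()
    model.load_frozenmodel()
    if config.MODEL_TYPE == 'od':
        model.load_labelmap()
        detection(model, config)
    else:
        segmentation(model, config)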