def segmentation(model,config):
    detection_graph = model.detection_graph
    # fixed input size, since the model resizes internally anyway
    vs = WebcamVideoStream(config.VIDEO_INPUT,640,480).start()
    resize_ratio = 1.0 * 513 / max(vs.real_width,vs.real_height)
    target_size = (int(resize_ratio * vs.real_width), int(resize_ratio * vs.real_height)) #(513, 384)
    tf_config = model.tf_config
    fps = FPS(config.FPS_INTERVAL).start()
    print("> Starting Segmentaion")
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph,config=tf_config) as sess:
            while vs.isActive():
                frame = vs.resized(target_size)
                batch_seg_map = sess.run('SemanticPredictions:0',
                                        feed_dict={'ImageTensor:0':
                                        [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)]})
                seg_map = batch_seg_map[0]
                #boxes = []
                #labels = []
                #ids = []
                map_labeled = measure.label(seg_map, connectivity=1)
                for region in measure.regionprops(map_labeled):
                    if region.area > config.MINAREA:
                        box = region.bbox
                        class_id = seg_map[tuple(region.coords[0])]  # avoid shadowing the builtin `id`
                        label = config.LABEL_NAMES[class_id]
                        #boxes.append(box)
                        #labels.append(label)
                        #ids.append(class_id)
                        if config.VISUALIZE:
                            draw_single_box_on_image(frame,box,label,class_id,config.DISCO_MODE)

                vis = visualize_deeplab(frame,seg_map,fps._glob_numFrames,config.MAX_FRAMES,fps.fps_local(),
                                        config.PRINT_INTERVAL,config.PRINT_TH,config.DL_DISPLAY_NAME,
                                        config.VISUALIZE,config.VIS_FPS,config.DISCO_MODE,config.ALPHA)
                if not vis:
                    break
                fps.update()
    fps.stop()
    vs.stop()
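
# The connected-component pass above (measure.label plus regionprops) is what
# turns the dense class map into per-object boxes. Below is a minimal,
# self-contained sketch of the same idea; LABEL_NAMES and MINAREA are
# stand-ins for the config values used above.
import numpy as np
from skimage import measure

seg_map_demo = np.zeros((8, 8), dtype=np.uint8)   # toy 8x8 class map
seg_map_demo[1:4, 1:4] = 15                       # first "person" blob
seg_map_demo[5:8, 4:8] = 15                       # second, disjoint blob

LABEL_NAMES = {0: "background", 15: "person"}
MINAREA = 4

# label connected regions of equal class id, then filter small ones and
# read the class id from one pixel of each surviving region
map_labeled = measure.label(seg_map_demo, connectivity=1)
for region in measure.regionprops(map_labeled):
    if region.area > MINAREA:
        ymin, xmin, ymax, xmax = region.bbox
        class_id = seg_map_demo[tuple(region.coords[0])]
        print(LABEL_NAMES[class_id], (ymin, xmin, ymax, xmax))
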
def detection(model,config):

    print("> Building Graph")
    # tf Session Config
    tf_config = tf.ConfigProto(allow_soft_placement=True)
    tf_config.gpu_options.allow_growth=True
    detection_graph = model.detection_graph
    category_index = model.category_index
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph,config=tf_config) as sess:
            # start Videostream
            vs = WebcamVideoStream(config.VIDEO_INPUT,config.WIDTH,config.HEIGHT).start()
            # Define Input and Output tensors
            tensor_dict = model.get_tensordict(['num_detections', 'detection_boxes', 'detection_scores','detection_classes', 'detection_masks'])
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Mask Transformations
            if 'detection_masks' in tensor_dict:
                # Reframing is required to translate the masks from box coordinates to image coordinates and fit the image size.
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                        detection_masks, detection_boxes, vs.real_height, vs.real_width)
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)

            fps = FPS(config.FPS_INTERVAL).start()
            print('> Starting Detection')
            while vs.isActive():
                # Detection
                if not (config.USE_TRACKER):
                    # default session
                    frame = vs.read()
                    output_dict = sess.run(tensor_dict, feed_dict={image_tensor: vs.expanded()})
                    num = output_dict['num_detections'][0]
                    classes = output_dict['detection_classes'][0]
                    boxes = output_dict['detection_boxes'][0]
                    scores = output_dict['detection_scores'][0]
                    if 'detection_masks' in output_dict:
                        masks = output_dict['detection_masks'][0]
                    else:
                        masks = None

                    # reformat detection
                    num = int(num)
                    boxes = np.squeeze(boxes)
                    classes = np.squeeze(classes).astype(np.uint8)
                    scores = np.squeeze(scores)

                    # Visualization
                    vis = vis_detection(frame, boxes, classes, scores, masks, category_index, fps.fps_local(),
                                        config.VISUALIZE, config.DET_INTERVAL, config.DET_TH, config.MAX_FRAMES,
                                        fps._glob_numFrames, config.OD_MODEL_NAME)
                    if not vis:
                        break

                fps.update()

    # End everything
    vs.stop()
    fps.stop()
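
# The default-session branch above boils down to one sess.run over a frozen
# TF Object Detection API graph. A minimal single-image sketch of that
# pattern, assuming `detection_graph` was loaded from such a frozen model
# (the tensor names are the API's standard export names):
import numpy as np
import tensorflow as tf

def detect_single_image(detection_graph, image_rgb):
    # image_rgb: HxWx3 uint8 array; the model expects a batch dimension
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            fetches = {name: detection_graph.get_tensor_by_name(name + ':0')
                       for name in ('num_detections', 'detection_boxes',
                                    'detection_scores', 'detection_classes')}
            out = sess.run(fetches,
                           feed_dict={image_tensor: np.expand_dims(image_rgb, 0)})
    # strip the batch dimension, exactly as the loop above does
    return (int(out['num_detections'][0]),
            np.squeeze(out['detection_boxes']),
            np.squeeze(out['detection_classes']).astype(np.uint8),
            np.squeeze(out['detection_scores']))
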
def segmentation(model, config):
    detection_graph = model.detection_graph
    # fixed input size, since the model resizes internally anyway
    vs = WebcamVideoStream(config.VIDEO_INPUT, 640, 480).start()
    resize_ratio = 1.0 * 513 / max(vs.real_width, vs.real_height)
    target_size = (int(resize_ratio * vs.real_width),
                   int(resize_ratio * vs.real_height))  #(513, 384)
    tf_config = tf.ConfigProto(allow_soft_placement=True)
    tf_config.gpu_options.allow_growth = True
    fps = FPS(config.FPS_INTERVAL).start()
    print("> Starting Segmentaion")
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=tf_config) as sess:
            while vs.isActive():
                frame = vs.resized(target_size)
                batch_seg_map = sess.run(
                    'SemanticPredictions:0',
                    feed_dict={
                        'ImageTensor:0':
                        [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)]
                    })
                # visualization
                if config.VISUALIZE:
                    seg_map = batch_seg_map[0]
                    seg_image = create_colormap(seg_map).astype(np.uint8)
                    cv2.addWeighted(seg_image, config.ALPHA, frame,
                                    1 - config.ALPHA, 0, frame)
                    vis_text(frame, "fps: {}".format(fps.fps_local()),
                             (10, 30))
                    # boxes (ymin, xmin, ymax, xmax)
                    if config.BBOX:
                        map_labeled = measure.label(seg_map, connectivity=1)
                        for region in measure.regionprops(map_labeled):
                            if region.area > config.MINAREA:
                                box = region.bbox
                                p1 = (box[1], box[0])
                                p2 = (box[3], box[2])
                                cv2.rectangle(frame, p1, p2, (77, 255, 9), 2)
                                vis_text(
                                    frame, config.LABEL_NAMES[seg_map[tuple(
                                        region.coords[0])]],
                                    (p1[0], p1[1] - 10))
                    cv2.imshow(config.DL_MODEL_NAME, frame)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                fps.update()
    fps.stop()
    vs.stop()
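
# The visualization branch above blends a color-mapped segmentation image
# into the frame with cv2.addWeighted. A minimal sketch of that overlay;
# `colorize` is a hypothetical stand-in for create_colormap, and frame and
# seg_map are assumed to share the same height and width:
import cv2
import numpy as np

def overlay_segmentation(frame, seg_map, alpha=0.3):
    def colorize(seg_map):
        # toy colormap: spread class ids over the 0-255 gray range, then
        # expand to three channels so shapes match the BGR frame
        gray = (seg_map.astype(np.float32) / max(int(seg_map.max()), 1) * 255).astype(np.uint8)
        return cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)

    seg_image = colorize(seg_map)
    # passing frame as dst blends in place, as the loop above does
    cv2.addWeighted(seg_image, alpha, frame, 1 - alpha, 0, frame)
    return frame
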
def segmentation(model, config):
    detection_graph = model.detection_graph
    # fixed input size, since the model resizes internally anyway
    vs = WebcamVideoStream(config.VIDEO_INPUT, 640, 480).start()
    resize_ratio = 1.0 * 513 / max(vs.real_width, vs.real_height)
    target_size = (int(resize_ratio * vs.real_width),
                   int(resize_ratio * vs.real_height))  #(513, 384)
    tf_config = model.tf_config
    fps = FPS(config.FPS_INTERVAL).start()
    print("> Starting Segmentaion")
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=tf_config) as sess:
            while vs.isActive():
                frame = vs.resized(target_size)
                batch_seg_map = sess.run(
                    'SemanticPredictions:0',
                    feed_dict={
                        'ImageTensor:0':
                        [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)]
                    })
                seg_map = batch_seg_map[0]
                #boxes = []
                #labels = []
                map_labeled = measure.label(seg_map, connectivity=1)
                for region in measure.regionprops(map_labeled):
                    if region.area > config.MINAREA:
                        box = region.bbox
                        label = config.LABEL_NAMES[seg_map[tuple(
                            region.coords[0])]]
                        #boxes.append(box)
                        #labels.append(label)
                        if config.VISUALIZE:
                            draw_single_box_on_image(frame, box, label)

                vis = visualize_deeplab(
                    frame, seg_map, fps._glob_numFrames, config.MAX_FRAMES,
                    fps.fps_local(), config.PRINT_INTERVAL, config.PRINT_TH,
                    config.OD_MODEL_NAME + config._DEV + config._OPT,
                    config.VISUALIZE)
                if not vis:
                    break
                fps.update()
    fps.stop()
    vs.stop()
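
# The FPS helper used throughout these examples is external to the snippets.
# A minimal sketch of a compatible counter, assuming fps_local() reports the
# rate over the last FPS_INTERVAL frames and _glob_numFrames counts all frames:
import datetime

class SimpleFPS:
    def __init__(self, interval):
        self._interval = interval      # frames per local measurement window
        self._glob_numFrames = 0
        self._local_numFrames = 0
        self._local_fps = 0.0

    def start(self):
        self._local_start = datetime.datetime.now()
        return self

    def update(self):
        self._glob_numFrames += 1
        self._local_numFrames += 1
        if self._local_numFrames >= self._interval:
            elapsed = (datetime.datetime.now() - self._local_start).total_seconds()
            self._local_fps = self._local_numFrames / elapsed
            self._local_numFrames = 0
            self._local_start = datetime.datetime.now()

    def fps_local(self):
        return round(self._local_fps, 1)

    def stop(self):
        pass  # nothing to clean up in this sketch
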
def detection(model,config):
    # Tracker
    if config.USE_TRACKER:
        import sys
        sys.path.append(os.getcwd()+'/stuff/kcf')
        import KCF
        tracker = KCF.kcftracker(False, True, False, False)
        tracker_counter = 0
        track = False

    print("> Building Graph")
    # tf Session Config
    tf_config = model.tf_config
    detection_graph = model.detection_graph
    category_index = model.category_index
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph,config=tf_config) as sess:
            # start Videostream
            vs = WebcamVideoStream(config.VIDEO_INPUT,config.WIDTH,config.HEIGHT).start()
            # Define Input and Output tensors
            tensor_dict = model.get_tensordict(['num_detections', 'detection_boxes', 'detection_scores','detection_classes', 'detection_masks'])
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Mask Transformations
            if 'detection_masks' in tensor_dict:
                # Reframing is required to translate the masks from box coordinates to image coordinates and fit the image size.
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                        detection_masks, detection_boxes, vs.real_height, vs.real_width)
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            if config.SPLIT_MODEL:
                score_out = detection_graph.get_tensor_by_name('Postprocessor/convert_scores:0')
                expand_out = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1:0')
                score_in = detection_graph.get_tensor_by_name('Postprocessor/convert_scores_1:0')
                expand_in = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1_1:0')
                # Threading
                score = model.score
                expand = model.expand
                gpu_worker = SessionWorker("GPU",detection_graph,tf_config)
                cpu_worker = SessionWorker("CPU",detection_graph,tf_config)
                gpu_opts = [score_out, expand_out]
                cpu_opts = [tensor_dict['detection_boxes'], tensor_dict['detection_scores'], tensor_dict['detection_classes'], tensor_dict['num_detections']]
                gpu_counter = 0
                cpu_counter = 0

            fps = FPS(config.FPS_INTERVAL).start()
            print('> Starting Detection')
            while vs.isActive():
                # Detection
                if not (config.USE_TRACKER and track):
                    if config.SPLIT_MODEL:
                        # run the split model in separate GPU and CPU session threads
                        masks = None # No Mask Detection possible yet
                        if gpu_worker.is_sess_empty():
                            # read video frame, expand dimensions and convert to rgb
                            frame = vs.read()
                            # put new queue
                            gpu_feeds = {image_tensor: vs.expanded()}
                            if config.VISUALIZE:
                                gpu_extras = frame # for visualization frame
                            else:
                                gpu_extras = None
                            gpu_worker.put_sess_queue(gpu_opts,gpu_feeds,gpu_extras)
                        g = gpu_worker.get_result_queue()
                        if g is None:
                            # GPU thread has no output yet; skip and check the CPU thread.
                            gpu_counter += 1
                        else:
                            # gpu thread has output queue.
                            gpu_counter = 0
                            score,expand,frame = g["results"][0],g["results"][1],g["extras"]

                            if cpu_worker.is_sess_empty():
                                # If the CPU thread's input queue is empty, enqueue the GPU results;
                                # otherwise this GPU output is dropped.
                                cpu_feeds = {score_in: score, expand_in: expand}
                                cpu_extras = frame
                                cpu_worker.put_sess_queue(cpu_opts,cpu_feeds,cpu_extras)
                        c = cpu_worker.get_result_queue()
                        if c is None:
                            # CPU thread has no output yet; nothing to do this iteration.
                            cpu_counter += 1
                            continue # no CPU result yet, so skip the fps update
                        else:
                            cpu_counter = 0
                            boxes, scores, classes, num, frame = c["results"][0],c["results"][1],c["results"][2],c["results"][3],c["extras"]
                    else:
                        # default session
                        frame = vs.read()
                        output_dict = sess.run(tensor_dict, feed_dict={image_tensor: vs.expanded()})
                        num = output_dict['num_detections'][0]
                        classes = output_dict['detection_classes'][0]
                        boxes = output_dict['detection_boxes'][0]
                        scores = output_dict['detection_scores'][0]
                        if 'detection_masks' in output_dict:
                            masks = output_dict['detection_masks'][0]
                        else:
                            masks = None

                    # reformat detection
                    num = int(num)
                    boxes = np.squeeze(boxes)
                    classes = np.squeeze(classes).astype(np.uint8)
                    scores = np.squeeze(scores)

                    # Visualization
                    vis = vis_detection(frame, boxes, classes, scores, masks, category_index, fps.fps_local(),
                                        config.VISUALIZE, config.DET_INTERVAL, config.DET_TH, config.MAX_FRAMES,
                                        fps._glob_numFrames, config.OD_MODEL_NAME)
                    if not vis:
                        break

                    # Activate Tracker
                    if config.USE_TRACKER and num <= config.NUM_TRACKERS:
                        tracker_frame = frame
                        track = True
                        first_track = True

                # Tracking
                else:
                    frame = vs.read()
                    if first_track:
                        trackers = []
                        tracker_boxes = boxes
                        # create a fresh KCF tracker per non-empty box; re-using one
                        # instance would make every list entry the same tracker
                        for box in boxes[~np.all(boxes == 0, axis=1)]:
                            tracker = KCF.kcftracker(False, True, False, False)
                            tracker.init(conv_detect2track(box,vs.real_width, vs.real_height), tracker_frame)
                            trackers.append(tracker)
                        first_track = False

                    for idx,tracker in enumerate(trackers):
                        tracker_box = tracker.update(frame)
                        tracker_boxes[idx,:] = conv_track2detect(tracker_box, vs.real_width, vs.real_height)
                    vis = vis_detection(frame, tracker_boxes, classes, scores, masks, category_index, fps.fps_local(),
                                        config.VISUALIZE, config.DET_INTERVAL, config.DET_TH, config.MAX_FRAMES,
                                        fps._glob_numFrames, config.OD_MODEL_NAME)
                    if not vis:
                        break

                    tracker_counter += 1
                    #tracker_frame = frame
                    if tracker_counter >= config.TRACKER_FRAMES:
                        track = False
                        tracker_counter = 0

                fps.update()

    # End everything
    vs.stop()
    fps.stop()
    if config.SPLIT_MODEL:
        gpu_worker.stop()
        cpu_worker.stop()
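
# SessionWorker is also external. The split-model branch above relies on it to
# run the GPU half and the CPU half of the graph concurrently. A minimal
# sketch of that producer/consumer pattern, with single-slot queues so a slow
# consumer applies back-pressure rather than buffering stale frames:
import threading
import queue
import tensorflow as tf

class SimpleSessionWorker:
    def __init__(self, tag, graph, tf_config):
        self._in = queue.Queue(maxsize=1)
        self._out = queue.Queue(maxsize=1)
        self._sess = tf.Session(graph=graph, config=tf_config)
        self._running = True
        self._thread = threading.Thread(target=self._run, name=tag)
        self._thread.daemon = True
        self._thread.start()

    def _run(self):
        # worker loop: pull (fetches, feeds, extras), run the session,
        # and hand the results back through the output queue
        while self._running:
            try:
                opts, feeds, extras = self._in.get(timeout=1)
            except queue.Empty:
                continue
            results = self._sess.run(opts, feed_dict=feeds)
            self._out.put({"results": results, "extras": extras})

    def is_sess_empty(self):
        return self._in.empty()

    def put_sess_queue(self, opts, feeds, extras=None):
        self._in.put((opts, feeds, extras))

    def get_result_queue(self):
        return None if self._out.empty() else self._out.get()

    def stop(self):
        self._running = False
        self._thread.join()
        self._sess.close()
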
def detection(model,config):
    # Tracker
    if config.USE_TRACKER:
        import sys
        sys.path.append(os.getcwd()+'/rod/kcf')
        import KCF
        tracker = KCF.kcftracker(False, True, False, False)
        tracker_counter = 0
        track = False

    print("> Building Graph")
    # tf Session Config
    tf_config = model.tf_config
    detection_graph = model.detection_graph
    category_index = model.category_index
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph,config=tf_config) as sess:
            # start Videostream
            vs = WebcamVideoStream(config.VIDEO_INPUT,config.WIDTH,config.HEIGHT).start()
            # Define Input and Output tensors
            tensor_dict = model.get_tensordict(['num_detections', 'detection_boxes', 'detection_scores','detection_classes', 'detection_masks'])
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Mask Transformations
            if 'detection_masks' in tensor_dict:
                # Reframing is required to translate the masks from box coordinates to image coordinates and fit the image size.
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = reframe_box_masks_to_image_masks(
                                            detection_masks, detection_boxes, vs.real_height, vs.real_width)
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            if config.SPLIT_MODEL:
                score_out = detection_graph.get_tensor_by_name('Postprocessor/convert_scores:0')
                expand_out = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1:0')
                score_in = detection_graph.get_tensor_by_name('Postprocessor/convert_scores_1:0')
                expand_in = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1_1:0')
                # Threading
                score = model.score
                expand = model.expand
                gpu_worker = SessionWorker("GPU",detection_graph,tf_config)
                cpu_worker = SessionWorker("CPU",detection_graph,tf_config)
                gpu_opts = [score_out, expand_out]
                cpu_opts = [tensor_dict['detection_boxes'], tensor_dict['detection_scores'], tensor_dict['detection_classes'], tensor_dict['num_detections']]

            fps = FPS(config.FPS_INTERVAL).start()
            masks = None
            print('> Starting Detection')
            while vs.isActive():
                # Detection
                if not (config.USE_TRACKER and track):
                    if config.SPLIT_MODEL:
                        # run the split model in separate GPU and CPU session threads
                        if gpu_worker.is_sess_empty():
                            # read video frame, expand dimensions and convert to rgb
                            frame = vs.read()
                            # put new queue
                            gpu_feeds = {image_tensor: vs.expanded()}
                            if config.VISUALIZE:
                                gpu_extras = frame # for visualization frame
                            else:
                                gpu_extras = None
                            gpu_worker.put_sess_queue(gpu_opts,gpu_feeds,gpu_extras)
                        g = gpu_worker.get_result_queue()
                        if g is None:
                            # GPU thread has no output yet; skip and check the CPU thread.
                            pass
                        else:
                            # gpu thread has output queue.
                            score,expand,frame = g["results"][0],g["results"][1],g["extras"]

                            if cpu_worker.is_sess_empty():
                                # If the CPU thread's input queue is empty, enqueue the GPU results;
                                # otherwise this GPU output is dropped.
                                cpu_feeds = {score_in: score, expand_in: expand}
                                cpu_extras = frame
                                cpu_worker.put_sess_queue(cpu_opts,cpu_feeds,cpu_extras)
                        c = cpu_worker.get_result_queue()
                        if c is None:
                            # CPU thread has no output yet; nothing to do this iteration.
                            continue # no CPU result yet, so skip the fps update
                        else:
                            boxes, scores, classes, num, frame = c["results"][0],c["results"][1],c["results"][2],c["results"][3],c["extras"]
                    else:
                        # default session
                        frame = vs.read()
                        output_dict = sess.run(tensor_dict, feed_dict={image_tensor: vs.expanded()})
                        num = output_dict['num_detections'][0]
                        classes = output_dict['detection_classes'][0]
                        boxes = output_dict['detection_boxes'][0]
                        scores = output_dict['detection_scores'][0]
                        if 'detection_masks' in output_dict:
                            masks = output_dict['detection_masks'][0]

                    # reformat detection
                    num = int(num)
                    boxes = np.squeeze(boxes)
                    classes = np.squeeze(classes).astype(np.uint8)
                    scores = np.squeeze(scores)

                    # Visualization
                    vis = visualize_objectdetection(frame,boxes,classes,scores,masks,category_index,fps._glob_numFrames,
                                                    config.MAX_FRAMES,fps.fps_local(),config.PRINT_INTERVAL,config.PRINT_TH,
                                                    config.OD_DISPLAY_NAME,config.VISUALIZE,config.VIS_FPS,config.DISCO_MODE,config.ALPHA)
                    if not vis:
                        break

                    # Activate Tracker
                    if config.USE_TRACKER and num <= config.NUM_TRACKERS:
                        tracker_frame = frame
                        track = True
                        first_track = True

                # Tracking
                else:
                    frame = vs.read()
                    if first_track:
                        trackers = []
                        tracker_boxes = boxes
                        # create a fresh KCF tracker per non-empty box; re-using one
                        # instance would make every list entry the same tracker
                        for box in boxes[~np.all(boxes == 0, axis=1)]:
                            tracker = KCF.kcftracker(False, True, False, False)
                            tracker.init(conv_detect2track(box,vs.real_width, vs.real_height), tracker_frame)
                            trackers.append(tracker)
                        first_track = False

                    for idx,tracker in enumerate(trackers):
                        tracker_box = tracker.update(frame)
                        tracker_boxes[idx,:] = conv_track2detect(tracker_box, vs.real_width, vs.real_height)
                    vis = visualize_objectdetection(frame,tracker_boxes,classes,scores,masks,category_index,fps._glob_numFrames,
                                                    config.MAX_FRAMES,fps.fps_local(),config.PRINT_INTERVAL,config.PRINT_TH,
                                                    config.OD_DISPLAY_NAME,config.VISUALIZE,config.VIS_FPS,config.DISCO_MODE,config.ALPHA)
                    if not vis:
                        break

                    tracker_counter += 1
                    #tracker_frame = frame
                    if tracker_counter >= config.TRACKER_FRAMES:
                        track = False
                        tracker_counter = 0

                fps.update()

    # End everything
    vs.stop()
    fps.stop()
    if config.SPLIT_MODEL:
        gpu_worker.stop()
        cpu_worker.stop()
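
# conv_detect2track and conv_track2detect are external helpers that bridge the
# two box formats involved in tracking. A plausible minimal sketch, assuming
# the detector emits normalized (ymin, xmin, ymax, xmax) boxes and the KCF
# tracker works with pixel (x, y, w, h) rectangles:
import numpy as np

def conv_detect2track_sketch(box, width, height):
    ymin, xmin, ymax, xmax = box
    return [int(xmin * width), int(ymin * height),
            int((xmax - xmin) * width), int((ymax - ymin) * height)]

def conv_track2detect_sketch(rect, width, height):
    x, y, w, h = rect
    return np.array([y / float(height), x / float(width),
                     (y + h) / float(height), (x + w) / float(width)])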