Code example #1
0
File: demo.py  Project: yzhang123/stairs_detection
def demo(sess, net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""

    # Read the demo image from the configured demo directory.
    im = cv2.imread(os.path.join(cfg.DATA_DIR, 'demo', image_name))

    # One forward pass over the whole image, timed.
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(
        timer.total_time, boxes.shape[0]))

    # Per-class post-processing: NMS, then draw surviving detections.
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for offset, cls in enumerate(CLASSES[1:], start=1):
        # offset starts at 1 to skip the background class at column 0.
        per_cls_boxes = boxes[:, 4 * offset:4 * (offset + 1)]
        per_cls_scores = scores[:, offset]
        dets = np.hstack((per_cls_boxes, per_cls_scores[:, np.newaxis]))
        dets = dets.astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        vis_detections(im, cls, dets[keep, :], thresh=CONF_THRESH)
Code example #2
0
def start_classify():
    """Poll the Redis image queue, run detection on every queued image,
    and write each result back under its request ID.

    Runs forever; sleeps SERVER_SLEEP seconds between polls.
    """
    model = load_model()
    print("Done LOADING!")

    while True:
        # Grab up to BATCH_SIZE pending requests from the queue.
        queue = db.lrange(settings.IMAGE_QUEUE, 0, settings.BATCH_SIZE - 1)
        imageIDs = []
        images = []
        batch = None

        for q in queue:
            q = json.loads(q.decode("utf-8"))
            image = base64_decode_image(q["image"], settings.IMAGE_DTYPE)
            # Keep each decoded image so every request gets its own
            # detection; the stacked batch is kept only for the size report.
            images.append(image)
            batch = image if batch is None else np.vstack([batch, image])
            imageIDs.append(q["id"])

        if imageIDs:
            print("* Batch size: {}".format(batch.shape))
            # BUG FIX: the original ran im_detect on the leftover loop
            # variable `image` (only the LAST decoded image) and built a
            # one-element results list, so zip() silently dropped results
            # for every other queued ID while ltrim still removed them.
            # Detect each queued image so results aligns with imageIDs.
            results = [im_detect(model, img) for img in images]

            for (imageID, ans) in zip(imageIDs, results):
                scores = ans[0]
                boxes = ans[1]
                output = [{"labels": scores.tolist(), "boxes": boxes.tolist()}]
                db.set(imageID, json.dumps(output))
            # Drop the processed entries from the queue.
            db.ltrim(settings.IMAGE_QUEUE, len(imageIDs), -1)

        time.sleep(settings.SERVER_SLEEP)
Code example #3
0
def demo(sess, net, image_name, path_list):
    """Detect object classes in an image using pre-computed object proposals."""
    # Decode via np.fromfile + imdecode (handles non-ASCII paths on Windows).
    im = cv2.imdecode(np.fromfile(image_name, dtype=np.uint8), -1)

    # Single timed forward pass over the image.
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()

    CONF_THRESH = 0.5
    NMS_THRESH = 0.3

    # Accumulators threaded through vis_detections for each class.
    result = np.zeros((46, 147, 3), dtype=np.uint8)
    outout = np.zeros((224, 224, 3), dtype=np.uint8)
    color = 'blue plate'

    for idx, cls in enumerate(CLASSES[1:], start=1):
        # idx starts at 1 to skip the background class at column 0.
        dets = np.hstack((boxes[:, 4 * idx:4 * (idx + 1)],
                          scores[:, idx][:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        result, outout, color = vis_detections(
            im, cls, dets, path_list, result, outout, color,
            thresh=CONF_THRESH)

    return result, outout, color
Code example #4
0
    cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, i)

    while (cap.isOpened()):
        ret, frame = cap.read()

        if i % 5 == 0:
            cv2.setTrackbarPos('frame', 'stair detection', int(cap.get(1)))
            """Detect object classes in an image using pre-computed object proposals."""

            copy = frame.copy()
            frame = cv2.resize(copy, (640, 480))

            # Detect all object classes and regress object bounds
            timer = Timer()
            timer.tic()
            scores, boxes = im_detect(sess, net, frame)
            timer.toc()
            print('Detection took {:.3f}s for {:d} object proposals'.format(
                timer.total_time, boxes.shape[0]))

            # Visualize detections for each class
            CONF_THRESH = 0.8
            NMS_THRESH = 0.3

            for cls_ind, cls in enumerate(CLASSES[1:]):
                cls_ind += 1  # because we skipped background
                cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
                cls_scores = scores[:, cls_ind]
                dets = np.hstack(
                    (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
                keep = nms(dets, NMS_THRESH)