Example #1
def demo(sess, net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    im = cv2.imread(image_name)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    print('Detection took {:.3f}s for '
          '{:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')

    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        vis_detections(im, cls, dets, ax, thresh=CONF_THRESH)
Example #2
def time_analyse(matlab, cmd, image_filepath, par1, par2):
    timer = Timer()
    timer.tic()

    obj_proposals = ROI_boxes(matlab, image_filepath, cmd, par1, par2)

    timer.toc()
    elapsed = timer.total_time
    box_number = len(obj_proposals)

    return elapsed, box_number, obj_proposals
Example #3
def ctpn(sess, net, image_name):
    timer = Timer()
    timer.tic()

    img = cv2.imread(image_name)
    img, scale = resize_im(img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE)
    scores, boxes = test_ctpn(sess, net, img)

    textdetector = TextDetector()
    boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
    draw_boxes(img, image_name, boxes, scale)
    timer.toc()
    print(('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))
Example #4
def demo(net, matlab, image_filepath, classes, args):
    """Detect object classes in an image using pre-computed object proposals."""
    timer = Timer()
    timer.tic()
    # Load pre-computed Selective Search object proposals
    obj_proposals = ROI_boxes(matlab, image_filepath, args.OP_method)
    if len(obj_proposals) == 0:
        return

    # Load the demo image
    im = cv2.imread(image_filepath)

    # Detect all object classes and regress object bounds

    scores, boxes = im_detect(net, im, obj_proposals)
    timer.toc()
    print('Detection took {:.3f}s for '
          '{:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls in classes:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        cls_boxes = cls_boxes[keep, :]
        cls_scores = cls_scores[keep]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        if len(dets) == 0:
            global count
            count += 1
            print('{} No Ear detected'.format(count))
        # print 'All {} detections with p({} | box) >= {:.1f}'.format(cls, cls,
        #                                                             CONF_THRESH)
        if args.video_mode:
            visualise(im, cls, dets, thresh=CONF_THRESH)
        elif args.image_path is not None:
            vis_detections(im, cls, dets, thresh=CONF_THRESH)
Example #5
def demo(net, image_name, classes):

    """Detect object classes in an image using pre-computed object proposals."""

    # Load pre-computed Selective Search object proposals
    box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',
                            image_name + '_boxes.mat')
    obj_proposals = sio.loadmat(box_file)['boxes']

    # Load the demo image
    im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im, obj_proposals)
    timer.toc()
    print('Detection took {:.3f}s for '
          '{:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls in classes:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        cls_boxes = cls_boxes[keep, :]
        cls_scores = cls_scores[keep]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        print('All {} detections with p({} | box) >= {:.1f}'.format(
            cls, cls, CONF_THRESH))
        vis_detections(im, cls, dets, thresh=CONF_THRESH)
Example #6
def ctpn(sess, net, image_name):
    timer = Timer()
    timer.tic()

    img = cv2.imread(image_name)
    img, scale = resize_im(img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE)
    scores, boxes = test_ctpn(sess, net, img)
    # boxes produced by the network

    textdetector = TextDetector()
    # run NMS on the boxes, then join them into text lines with the text-line construction algorithm
    boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
    draw_boxes(img, image_name, boxes, scale)
    timer.toc()
    print(('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))
Example #7
def ctpn(sess, net, image_name):
    timer = Timer()
    timer.tic()

    img = cv2.imread(image_name)
    img_w = img.shape[1]
    img_h = img.shape[0]
    if img_w >= 200 and img_h >= 200:
        img, scale = resize_im(img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE)
        scores, boxes = test_ctpn(sess, net, img)

        textdetector = TextDetector()
        boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
        # draw_boxes(img, image_name, boxes, scale)
        draw_yolo_boxes(img, image_name, boxes, scale)
        timer.toc()
Example #8
def ctpn(img):
    timer = Timer()
    timer.tic()

    img, scale = resize_im(img,
                           scale=TextLineCfg.SCALE,
                           max_scale=TextLineCfg.MAX_SCALE)
    scores, boxes = test_ctpn(sess, net, img)

    textdetector = TextDetector()
    boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
    timer.toc()
    print(('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))

    return scores, boxes, img, scale
Example #9
def initModel():
    global flag, basemodel
    if flag:
        flag = False
        timer = Timer()
        timer.tic()
        reload(densenet)
        input = Input(shape=(32, None, 1), name='the_input')
        y_pred = densenet.dense_cnn(input, nclass)
        basemodel = Model(inputs=input, outputs=y_pred)
        modelPath = os.path.join(
            os.getcwd(), 'densenet/models/weights_densenet.h5')
        if os.path.exists(modelPath):
            basemodel.load_weights(modelPath)
        timer.toc()
        print("\n----------------------------------------------")
        print('Loading the model took {:.3f}s'.format(timer.total_time))
Example #10
def ctpn(sess, net, image_name):
    img = cv2.imread(image_name)
    im = check_img(img)
    timer = Timer()
    timer.tic()
    scores, boxes = test_ctpn(sess, net, im)
    timer.toc()
    CONF_THRESH = 0.9
    NMS_THRESH = 0.3
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32)
    keep = nms(dets, NMS_THRESH)
    dets = dets[keep, :]

    keep = np.where(dets[:, 4] >= 0.7)[0]
    dets = dets[keep, :]
    line = connect_proposal(dets[:, 0:4], dets[:, 4], im.shape)
    save_results(image_name, im, line, thresh=0.9)
Example #11
def demo(sess, net, im, image_name, out_path):
    #im, im_ref,im_path
    """Detect object classes in an image using pre-computed object proposals."""
    #path_to_imgs = "/Users/dwaithe/Documents/collaborators/WaitheD/micro_vision/acquisitions/zstacks/test3/pos1_resize/"
    # Load the demo image
    #im_file = os.path.join(cfg.FLAGS2["data_dir"], path_to_imgs, image_name)
    #print(im_file)
    #im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(
        timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.7
    NMS_THRESH = 0.7
    out_name = os.path.join(cfg.FLAGS2["data_dir"], out_path,
                            str(image_name) + str('.txt'))
    f = open(out_name, 'w')
    for cls_ind, cls in enumerate(cfg.FLAGS2["CLASSES"][1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]

        cls_scores = scores[:, cls_ind]
        dets = np.hstack(
            (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        inds = np.where(dets[:, -1] >= CONF_THRESH)[0]

        if len(inds) > 0:

            for i in inds:
                bbox = dets[i, :4]
                score = dets[i, -1]
                out_str = cls + "\t" + str(score) + "\t" + str(
                    bbox[0]) + "\t" + str(bbox[1]) + "\t" + str(
                        bbox[2]) + "\t" + str(bbox[3]) + "\n"
                f.write(out_str)

        #vis_detections(im, cls, dets, thresh=CONF_THRESH)
    f.close()
Example #12
def demo(sess, net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    im_file = os.path.join(cfg.FLAGS2["data_dir"], 'demo', image_name)
    im = cv2.imread(im_file)
    # im = cv2.imread("G:/Python Projects/py3/Faster-RCNN-TensorFlow-Python3.5-master/data/demo/000456.jpg")

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(
        timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    # Score threshold, needed when drawing the candidate boxes at the end:
    # only boxes with score > thresh are drawn.
    CONF_THRESH = 0.4
    # Many high-scoring boxes are produced; NMS then merges the overlapping
    # ones, which is what gives the clean final detections.
    # NMS_THRESH is the non-maximum-suppression threshold: the smaller the
    # value, the less overlap is allowed between boxes (0.0 allows none).
    NMS_THRESH = 0.1

    # OpenCV loads images as [h, w, channel] arrays with BGR channel order,
    # while matplotlib draws in RGB, so convert here before plotting.
    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')

    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack(
            (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        # apply non-maximum suppression and keep the surviving dets
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        # vis_detections(im, cls, dets, thresh=CONF_THRESH)
        # draw the boxes
        vis_detections(im, cls, dets, ax, thresh=CONF_THRESH)

    plt.axis('off')
    plt.tight_layout()
    plt.draw()
Example #13
def ctpn(sess, training_flag, net, image_name, save_all_dir):
    timer = Timer()
    timer.tic()

    img = cv2.imread(image_name)
    img, scale = resize_im(img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE)
    scores, boxes = test_ctpn(sess, training_flag, net, img)

    textdetector = TextDetector()
    boxes1, boxes2, boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
    # img1 = img.copy()
    # draw_middle_boxes(img1, boxes1, scale)
    # img2 = img.copy()
    # draw_middle_boxes(img2, boxes2, scale)
    draw_boxes(img, image_name, boxes, scale, save_all_dir)
    timer.toc()
    print(('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))
Example #14
    def train_model(self, max_iters):
        """Network training loop."""
        last_snapshot_iter = -1
        timer = Timer()
        while self.solver.iter < max_iters:
            # Make one SGD update
            timer.tic()
            self.solver.step(1)
            timer.toc()
            if self.solver.iter % (10 * self.solver_param.display) == 0:
                print('speed: {:.3f}s / iter'.format(timer.average_time))

            if self.solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = self.solver.iter
                self.snapshot()

        if last_snapshot_iter != self.solver.iter:
            self.snapshot()
Example #15
def ctpn(sess, net, image_name):
    global detect_graph
    timer = Timer()
    timer.tic()

    img = cv2.imread(image_name)
    img, scale = resize_im(img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE)
    with detect_graph.as_default():
        scores, boxes = test_ctpn(sess, net, img)

        textdetector = TextDetector()
        boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
        draw_boxes(img, image_name, boxes, scale)
        timer.toc()
        print(('Detection took {:.3f}s for '
               '{:d} object proposals').format(timer.total_time, boxes.shape[0]))

        return boxes, img, scale
Example #16
def ctpn(sess, net, image_name):
    timer = Timer()
    timer.tic()
    img = cv2.imread(image_name)
    img, scale = resize_im(img,
                           scale=TextLineCfg.SCALE,
                           max_scale=TextLineCfg.MAX_SCALE)
    scores, boxes = test_ctpn(sess, net, img)
    textdetector = TextDetector()
    boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
    sort_index = np.argsort(boxes[:, -1])[::-1]
    boxes = boxes[sort_index]
    # print(boxes)
    texts = draw_boxes(img, image_name, boxes, scale)
    timer.toc()
    print(('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))
    return texts
Example #17
    def demo(self, image_root, image_name):

        # Load the demo image
        image = readimage(image_root)

        timer = Timer()
        timer.tic()
        prob = self.classify(image)
        timer.toc()
        print('Classification took {:.3f}s'.format(timer.total_time))

        save_classify_image_dir = os.path.join(self.output_dir, self._ind_to_class[prob])
        if not os.path.exists(save_classify_image_dir):
            os.makedirs(save_classify_image_dir)

        image = Image.fromarray(np.array(image))
        save_classify_image_root = os.path.join(save_classify_image_dir, image_name)
        image.save(save_classify_image_root)
Example #18
def ctpn(sess, net, image_name):
    timer = Timer()
    timer.tic()

    img = cv2.imread(image_name)
    img = docRot(img)
    # cv2.imwrite(os.path.join("./data", image_name.split(os.path.sep)[-1]), img)
    img, scale = resize_im(img,
                           scale=TextLineCfg.SCALE,
                           max_scale=TextLineCfg.MAX_SCALE)
    scores, boxes = test_ctpn(sess, net, img)

    textdetector = TextDetector()
    boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
    draw_boxes(img, image_name, boxes, scale)
    timer.toc()
    print(('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))
Example #19
def demo(sess, net):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    # im_file = os.path.join(cfg.FLAGS2["data_dir"], 'demo', image_name)
    CONF_THRESH = 0.6
    NMS_THRESH = 0.1  # non-maximum suppression threshold
    # server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # server.bind(("localhost", 8888))
    # server.listen(1)  # size of the TCP connection backlog
    im_names = ['39.jpg', '40.jpg', '41.jpg', '42.jpg',
                '43.jpg', '44.jpg']
    im = cv2.imread("init.jpg")  # warm-up: initialize internal state so the first real detection is not slow
    scores, boxes = im_detect(sess, net, im)

    # connection, address = server.accept()   # blocks, waiting for a connection
    # print(connection, address)
    recv_str = im_names[0]

    print(recv_str)
    im_name = recv_str  # alternatively im_names[int(recv_str)]
    # saveImgPath+=im_name
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print('Demo for {}'.format(im_name))
    timer = Timer()
    timer.tic()
    im = cv2.imread(im_name)
    if im is None:
        print('failed to read ' + im_name)
        return
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))
    result = getResult(scores, boxes, CONF_THRESH, NMS_THRESH)


    # save the image and related operations
    # timer2 = Timer()
    # timer2.tic()

    drawDefect(im, scores, boxes, CONF_THRESH, NMS_THRESH)
    cv2.imwrite(saveImgPath, im)
Example #20
def ctpn(sess, net, image_name, pdf_file_name, coord_folder, debug):
    timer = Timer()
    timer.tic()

    img = cv2.imread(image_name)
    img, scale = resize_im(img,
                           scale=TextLineCfg.SCALE,
                           max_scale=TextLineCfg.MAX_SCALE)
    scores, boxes = test_ctpn(sess, net, img)

    textdetector = TextDetector()  # the author's text-line algorithm
    boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
    draw_ctpn_boxes(img, image_name, boxes, scale, pdf_file_name, coord_folder,
                    debug)
    timer.toc()
    if debug:
        print(('Detection took {:.3f}s for '
               '{:d} object proposals').format(timer.total_time,
                                               boxes.shape[0]))
Example #21
def demo(net, matlab, image_filepath, classes, args):
    """Detect object classes in an image using pre-computed object proposals."""
    timer = Timer()
    timer.tic()
    # Load pre-computed Selective Search object proposals
    obj_proposals = ROI_boxes(matlab, image_filepath, args.OP_method)
    if len(obj_proposals) == 0:
        return

    # Load the demo image
    im = cv2.imread(image_filepath)

    # Detect all object classes and regress object bounds

    scores, boxes = im_detect(net, im, obj_proposals)
    timer.toc()
    print('Detection took {:.3f}s for '
          '{:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls in classes:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        cls_boxes = cls_boxes[keep, :]
        cls_scores = cls_scores[keep]
        dets = np.hstack(
            (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        if len(dets) == 0:
            global count
            count += 1
            print('{} No Ear detected'.format(count))
        # print 'All {} detections with p({} | box) >= {:.1f}'.format(cls, cls,
        #                                                             CONF_THRESH)
        if args.video_mode:
            visualise(im, cls, dets, thresh=CONF_THRESH)
        elif args.image_path is not None:
            vis_detections(im, cls, dets, thresh=CONF_THRESH)
Example #22
def demo(sess, net, image_name, output_path):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    image_path = image_path_from_index(image_name)
    im = cv2.imread(image_path)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    # boxes here have already been refined by bbox_pred; for every predicted class there is one predicted box
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(
        timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.1
    NMS_THRESH = 0.1
    # draw once for each class
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack(
            (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        # NMS: from the 300 proposals, drop any whose IoU with a higher-scoring proposal exceeds 0.1
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
        dets = dets[inds, :]

        output_file = os.path.join(output_path,
                                   "comp3_det_test_{:s}.txt".format(cls))

        with open(output_file, 'a') as f:
            for i in range(len(dets)):
                bbox = dets[i, :4]
                score = dets[i, -1]
                bbox_result = "%s\t%f\t%f\t%f\t%f\t%f\n" % (
                    image_name, score, bbox[0], bbox[1], bbox[2], bbox[3])
                f.write(bbox_result)
Example #23
    def demo(self, image_name, is_init=True):
        """Detect object classes in an image using pre-computed object proposals."""

        # Detect all object classes and regress object bounds
        timer = Timer()
        timer.tic()
        if is_init:
            raw_scores, raw_boxes, self.feature_map, self.rpn_boxes, self.rpn_scores, self.im_scales = im_detect(
                self.sess, self.net, image_name, is_part=False)
            CONF_THRESH = self.score_thresh
            NMS_THRESH = self.nms_thresh
            self.objects = []
            for cls_ind, cls in enumerate(CLASSES[1:]):
                cls_ind += 1  # because we skipped background
                cls_boxes = raw_boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
                cls_scores = raw_scores[:, cls_ind]
                dets = np.hstack(
                    (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
                keep = nms(dets, NMS_THRESH)
                dets = dets[keep, :]

                inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
                if len(inds) > 0:
                    for i in inds:
                        bbox = dets[i, :4]
                        score = dets[i, -1]
                        box_height = bbox[3] - bbox[1]
                        box_width = bbox[2] - bbox[0]
                        c_x = np.round(bbox[0] + box_width / 2.0)
                        c_y = np.round(bbox[1] + box_height / 2.0)
                        if cls == 'stawberry':  # fix a typo in the class list
                            cls = 'strawberry'
                        object_coordinates = {
                            'name': cls,
                            'score': score,
                            'boxes': list([c_x, c_y, box_width, box_height])
                        }
                        self.objects.append(object_coordinates)
        else:
            _, _, self.feature_map, self.rpn_boxes, self.rpn_scores, self.im_scales = im_detect(
                self.sess, self.net, image_name, is_part=True)
        timer.toc()
Example #24
def ctpn(sess, net, image_name, result_file, img_type):
    timer = Timer()
    timer.tic()

    img = cv2.imread(image_name)
    resized_img, scale = resize_im(img,
                                   scale=TextLineCfg.SCALE,
                                   max_scale=TextLineCfg.MAX_SCALE)
    # detect single text
    scores, boxes = test_ctpn(sess, net, resized_img)
    # connect text into lines
    textdetector = TextDetector()
    boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
    # add post process to filter unrelated boxes
    post_process(boxes, resized_img, img_type, scale)
    # draw boxes and write to file
    draw_boxes(resized_img, image_name, boxes, scale)
    timer.toc()
    print(('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))
Example #25
def ctpn(sess, net, image_name):
    timer = Timer()
    timer.tic()

    img = cv2.imread(image_name)
    if img is None:
        print("No File")
        exit()
    img, scale = resize_im(img,
                           scale=TextLineCfg.SCALE,
                           max_scale=TextLineCfg.MAX_SCALE)
    scores, boxes = test_ctpn(sess, net, img)

    textdetector = TextDetector()
    boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
    draw_boxes(img, image_name, boxes, scale)
    timer.toc()
    #    print(('Detection took {:.3f}s for '
    #           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
Example #26
def ctpn(sess, net, image_name, save_path1, save_path2):
    timer = Timer()
    timer.tic()

    # read the image
    img = cv2.imread(image_name)
    img, scale = resize_im(img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE)
    # grayscale the image, then convert back to 3 channels
    img2 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2RGB)

    scores, boxes = test_ctpn(sess, net, img)

    textdetector = TextDetector()
    boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
    draw_boxes2(img, boxes, image_name, save_path2, scale)
    draw_boxes(img, boxes, image_name, save_path1, scale)
    timer.toc()
    print(('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))
Example #27
def ctpn(sess, net, image_name):
    print('CTPN - start')
    timer = Timer()
    timer.tic()

    img = cv2.imread(image_name)
    img, scale = resize_im(img,
                           scale=TextLineCfg.SCALE,
                           max_scale=TextLineCfg.MAX_SCALE)
    print('1')
    scores, boxes = test_ctpn(sess, net, img)
    print('2')
    textdetector = TextDetector()
    boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
    draw_boxes(img, image_name, boxes, scale)
    timer.toc()
    print(('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))

    return np.int32(boxes / scale)
Example #28
def demo(image_name, out_file, sess):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    im = cv2.imread(image_name)

    blobs, im_scales = _get_blobs(im)
    assert len(im_scales) == 1, "Only single-image batch implemented"

    im_blob = blobs['data']
    blobs['im_info'] = np.array(
        [im_blob.shape[1], im_blob.shape[2], im_scales[0]], dtype=np.float32)
    print(blobs["im_info"])

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    print("====freeze_graph_test()-> blobs====\n", blobs)
    scores, boxes = freeze_graph_test(sess, blobs)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(
        timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.5
    NMS_THRESH = 0.3

    im = im[:, :, (2, 1, 0)]

    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack(
            (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        vis_detections(im, cls, dets, out_file, thresh=CONF_THRESH)

    cv2.imencode('.jpg', im)[1].tofile(out_file)
Example #29
class ProgressBar():
    def __init__(self, epoch_count, one_batch_count, pattern):
        self.total_count = one_batch_count
        self.current_index = 0
        self.current_epoch = 1
        self.epoch_count = epoch_count
        self.train_timer = Timer()
        self.pattern = pattern

    def show(self, currentEpoch, *args):
        self.current_index += 1
        if self.current_index == 1:
            self.train_timer.tic()
        self.current_epoch = currentEpoch
        perCount = int(self.total_count / 100)
        perCount = 1 if perCount == 0 else perCount
        percent = int(self.current_index / perCount)

        dotcount = int(self.total_count / perCount)

        s1 = "\rEpoch:%d / %d [%s%s] %d / %d " % (
            self.current_epoch, self.epoch_count, "*" * (int(percent)), " " *
            (dotcount - int(percent)), self.current_index, self.total_count)

        s2 = self.pattern % tuple([float("{:.3f}".format(x)) for x in args])

        s3 = "%s,%s,remain=%s" % (s1, s2,
                                  self.train_timer.remain(
                                      self.current_index, self.total_count))
        sys.stdout.write(s3)
        sys.stdout.flush()
        if self.current_index == self.total_count:
            self.train_timer.toc()
            s3 = "%s,%s,total=%s" % (s1, s2, self.train_timer.averageTostr())
            sys.stdout.write(s3)
            sys.stdout.flush()
            self.current_index = 0
            print("\r")
Example #30
def ctpn(sess, net, image_name):
    img = cv2.imread(image_name)
    im = check_img(img)
    timer = Timer()
    timer.tic()
    scores, boxes = test_ctpn(sess, net, im)
    timer.toc()
    print(('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.9
    NMS_THRESH = 0.3
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32)
    keep = nms(dets, NMS_THRESH)
    dets = dets[keep, :]
    
    keep = np.where(dets[:, 4] >= 0.7)[0]
    dets = dets[keep, :]
    line = connect_proposal(dets[:, 0:4], dets[:, 4], im.shape)
    save_results(image_name, im, line, thresh=0.9)
Example #31
def demo(sess, net, image_id, image_name):
    im_file = os.path.join(cfg.FLAGS2["data_dir"], 'image', image_name)
    im = cv2.imread(im_file)

    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    # print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    CONF_THRESH = 0.1
    NMS_THRESH = 0.1
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack(
            (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        vis_detections(im, cls, dets, image_id, image_name, thresh=CONF_THRESH)
Example #32
    def detect(self, image_name):
        """Detect object classes in an image using pre-computed object proposals."""

        # Load the demo image
        # Detect all object classes and regress object bounds
        image = readimage(image_name)
        image = image_transform_1_3(image)
        timer = Timer()
        timer.tic()
        scores, boxes = self.im_detect(image)
        timer.toc()
        # print('kkk', np.argmax(scores, axis=1))
        # print('rois--------------', scores)
        print('Detection took {:.3f}s for '
              '{:d} object proposals'.format(timer.total_time, boxes.shape[0]))

        CONF_THRESH = 0.7
        NMS_THRESH = 0.1
        dets_list = []
        for cls_ind, cls in enumerate(self.classes_detect[1:]):
            inds = np.where(scores[:, cls_ind] > CONF_THRESH)[0]
            cls_ind += 1  # because we skipped background
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[:, cls_ind]
            dets = np.hstack(
                (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
            keep = nms(dets[inds, :], NMS_THRESH)
            dets = dets[inds, :][keep, :]  # keep indexes the thresholded subset, not the full array
            inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
            cls_ind_list = np.empty((len(inds), 1), np.int32)
            cls_ind_list.fill(cls_ind)
            dets = np.hstack((dets[inds, :-1], cls_ind_list))
            dets_list.append(dets)
        dets = np.vstack(dets_list)
        dets[:, 0:2] = np.floor(dets[:, 0:2])
        dets[:, 2:] = np.ceil(dets[:, 2:])
        dets = dets.astype(np.int32)
        print('detections:', dets)
        self.vis(image, image_name, dets)
        return dets
Example #33
    def test_image(self, sess, image, im_info):
        feed_dict = {self._image: image, self._im_info: im_info}
        # cls_score, cls_prob, bbox_pred, rois = sess.run([self._predictions["cls_score"],
        #                                                  self._predictions['cls_prob'],
        #                                                  self._predictions['bbox_pred'],
        #                                                  self._predictions['rois']],
        #                                                 feed_dict=feed_dict)
        timer = Timer()
        timer.tic()
        predictions = sess.run(self._predictions, feed_dict=feed_dict)
        timer.toc()
        print('Prediction took {:.3f}s'.format(timer.total_time))

        # keep M1 M2 M3 branch to detect small/medium/large faces
        if 'M1' in self._feat_branches:
            cls_prob = np.concatenate((predictions["M1"]["rois_scores"],
                                       predictions["M2"]["rois_scores"],
                                       predictions["M3"]["rois_scores"]),
                                      axis=0)
            rois = np.concatenate(
                (predictions["M1"]["rois"], predictions["M2"]["rois"],
                 predictions["M3"]["rois"]),
                axis=0)
            kpoints = np.concatenate(
                (predictions["M1"]["kpoints"], predictions["M2"]["kpoints"],
                 predictions["M3"]["kpoints"]),
                axis=0)

        # discard M1 branch, only keep M2 and M3 branches to detect medium and large faces
        else:
            print('does not contain M1 branch!')
            cls_prob = np.concatenate((predictions["M2"]["rois_scores"],
                                       predictions["M3"]["rois_scores"]),
                                      axis=0)
            rois = np.concatenate(
                (predictions["M2"]["rois"], predictions["M3"]["rois"]), axis=0)
            kpoints = np.concatenate(
                (predictions["M2"]["kpoints"], predictions["M3"]["kpoints"]),
                axis=0)
        return cls_prob, rois, kpoints
Example #34
def ctpn(sess, net, image_name):
    timer = Timer()
    timer.tic()

    # copy the input image from S3
    awss3.getInputImage(image_name)
    image_name = './data/demo/' + image_name

    img = cv2.imread(image_name)
    img, scale = resize_im(img,
                           scale=TextLineCfg.SCALE,
                           max_scale=TextLineCfg.MAX_SCALE)
    scores, boxes = test_ctpn(sess, net, img)

    textdetector = TextDetector()
    boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
    draw_boxes(img, image_name, boxes, scale)
    timer.toc()
    result = ('Detection took {:.3f}s for '
              '{:d} object proposals').format(timer.total_time, boxes.shape[0])
    print(result)
    return result
Example #35
def demo(sess, net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    # G:\PyCharm\PyCharmSpaceWork\Faster-RCNN-TensorFlow2-Python3\data\demo\000001.jpg
    im_file = os.path.join(cfg.FLAGS2["data_dir"], 'demo', image_name)
    # print("==================im_file===========", im_file)
    # read the image with OpenCV
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()

    # call im_detect() in lib/model/test.py; it returns the scores and the detected boxes
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.1
    NMS_THRESH = 0.1
    for cls_ind, cls in enumerate(CLASSES[1:]):
        # cls is the class label, e.g. 'roses'
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        # e.g. [[ 0.         7.872568  63.        55.951324   0.7933805]]
        # the last value is the confidence score
        dets = dets[keep, :]
        vis_detections(im, cls, dets, image_name=image_name, thresh=CONF_THRESH)

    # save the annotated image
    if not os.path.exists(SAVA_DIR):
        os.makedirs(SAVA_DIR)
    plt.savefig(os.path.join(SAVA_DIR, image_name))
Example #36
    def validation(self, index, mode):
        #####################################
        # Preparation
        #####################################
        #-------------------------------
        # metric
        #-------------------------------
        mAP_RPN = Evaluate_metric(1, overlap_threshold=cfg.MAP_THRESH)
        mAP_CLASSIFICATION = Evaluate_metric(cfg.NUM_CLASSES, ignore_class=[0], overlap_threshold=cfg.MAP_THRESH)
        mAP_MASK = Evaluate_metric(cfg.NUM_CLASSES, ignore_class=[0], overlap_threshold=cfg.MAP_THRESH)
        if mode == 'val':
            data_loader = self.dataloader_val
            data_logger = self.logger_val
        elif mode == 'trainval':
            data_loader = self.dataloader_trainval
            data_logger = self.logger_trainval

        ####################################
        # Accumulate data
        ####################################
        timer = Timer()
        timer.tic()
        print('starting validation....')
        for iter, blobs in enumerate(tqdm(data_loader)):
            # if no box: skip
            if len(blobs['gt_box']) == 0:
                continue

            if cfg.USE_IMAGES:
                grid_shape = blobs['data'].shape[-3:]
                projection_helper = ProjectionHelper(cfg.INTRINSIC, cfg.PROJ_DEPTH_MIN, cfg.PROJ_DEPTH_MAX, cfg.DEPTH_SHAPE, grid_shape, cfg.VOXEL_SIZE)
                proj_mapping = [projection_helper.compute_projection(d.cuda(), c.cuda(), t.cuda()) for d, c, t in zip(blobs['nearest_images']['depths'][0], blobs['nearest_images']['poses'][0], blobs['nearest_images']['world2grid'][0])]

                if None in proj_mapping: #invalid sample
                    continue
                
                blobs['proj_ind_3d'] = []
                blobs['proj_ind_2d'] = []
                proj_mapping0, proj_mapping1 = zip(*proj_mapping)
                blobs['proj_ind_3d'].append(torch.stack(proj_mapping0))
                blobs['proj_ind_2d'].append(torch.stack(proj_mapping1))

            self.net.forward(blobs, 'TEST', [])
            #--------------------------------------
            # RPN: loss, metric 
            #--------------------------------------
            if cfg.USE_RPN:
                # (n, 6)
                gt_box = blobs['gt_box'][0].numpy()[:, 0:6]
                gt_box_label = np.zeros(gt_box.shape[0])

                try:
                    pred_box_num = (self.net._predictions['roi_scores'][0][:, 0] > cfg.ROI_THRESH).nonzero().size(0)
                    pred_box = self.net._predictions['rois'][0].cpu().numpy()[:pred_box_num]
                    pred_box_label = np.zeros(pred_box_num) 
                    pred_box_score = self.net._predictions['roi_scores'][0].cpu().numpy()[:pred_box_num, 0]
                except:
                    pred_box = self.net._predictions['rois'][0].cpu().numpy()[:1]
                    pred_box_label = np.zeros(1)
                    pred_box_score = self.net._predictions['roi_scores'][0].cpu().numpy()[:1, 0]

                #evaluation metric 
                mAP_RPN.evaluate(pred_box,
                                 pred_box_label,
                                 pred_box_score,
                                 gt_box,
                                 gt_box_label)

            #--------------------------------------
            # Classification: loss, metric 
            #--------------------------------------
            if cfg.USE_CLASS:
                # groundtruth
                gt_box = blobs['gt_box'][0].numpy()[:, 0:6]
                gt_class = blobs['gt_box'][0][:, 6].numpy()

                # predictions
                pred_class = self.net._predictions['cls_pred'].data.cpu().numpy()

                # only predictions['rois'] is list and is Tensor / others are no list and Variable
                rois = self.net._predictions['rois'][0].cpu()
                box_reg_pre = self.net._predictions["bbox_pred"].data.cpu().numpy()
                box_reg = np.zeros((box_reg_pre.shape[0], 6))
                pred_conf_pre = self.net._predictions['cls_prob'].data.cpu().numpy()
                pred_conf = np.zeros((pred_conf_pre.shape[0]))


                for pred_ind in range(pred_class.shape[0]):
                    box_reg[pred_ind, :] = box_reg_pre[pred_ind, pred_class[pred_ind]*6:(pred_class[pred_ind]+1)*6]
                    pred_conf[pred_ind] = pred_conf_pre[pred_ind, pred_class[pred_ind]]

                pred_box = bbox_transform_inv(rois, torch.from_numpy(box_reg).float())
                pred_box = clip_boxes(pred_box, self.net._scene_info[:3]).numpy()

                # pickup
                sort_index = []
                for conf_index in range(pred_conf.shape[0]):
                    if pred_conf[conf_index] > cfg.CLASS_THRESH:
                        sort_index.append(True)
                    else:
                        sort_index.append(False)

                # eliminate bad box
                for idx, box in enumerate(pred_box):
                    if round(box[0]) >= round(box[3]) or round(box[1]) >= round(box[4]) or round(box[2]) >= round(box[5]):
                        sort_index[idx] = False

                if len(pred_box[sort_index]) == 0:
                    print('no pred box')

                if iter < cfg.VAL_NUM:
                    os.makedirs('{}/{}'.format(cfg.VAL_SAVE_DIR, blobs['id'][0].split('/')[-1][:12]), exist_ok=True)
                    np.save('{}/{}/pred_class'.format(cfg.VAL_SAVE_DIR, blobs['id'][0].split('/')[-1][:12]), pred_class)
                    np.save('{}/{}/pred_conf'.format(cfg.VAL_SAVE_DIR, blobs['id'][0].split('/')[-1][:12]), pred_conf)
                    np.save('{}/{}/pred_box'.format(cfg.VAL_SAVE_DIR, blobs['id'][0].split('/')[-1][:12]), pred_box)
                    np.save('{}/{}/scene'.format(cfg.VAL_SAVE_DIR, blobs['id'][0].split('/')[-1][:12]), np.where(blobs['data'][0,0].numpy() <= 1, 1, 0))
                    np.save('{}/{}/gt_class'.format(cfg.VAL_SAVE_DIR, blobs['id'][0].split('/')[-1][:12]), gt_class)
                    np.save('{}/{}/gt_box'.format(cfg.VAL_SAVE_DIR, blobs['id'][0].split('/')[-1][:12]), gt_box)

                mAP_CLASSIFICATION.evaluate(
                        pred_box[sort_index],
                        pred_class[sort_index],
                        pred_conf[sort_index],
                        gt_box,
                        gt_class)

            #--------------------------------------
            # MASK: loss, metric 
            #--------------------------------------
            if cfg.USE_MASK:
                # gt data
                gt_box = blobs['gt_box'][0].numpy()[:, 0:6]
                gt_class = blobs['gt_box'][0][:, 6].numpy()
                gt_mask = blobs['gt_mask'][0]

                pred_class = self.net._predictions['cls_pred'].data.cpu().numpy()
                pred_conf = np.zeros((pred_class.shape[0]))
                for pred_ind in range(pred_class.shape[0]):
                    pred_conf[pred_ind] = self.net._predictions['cls_prob'].data.cpu().numpy()[pred_ind, pred_class.data[pred_ind]]

                # pickup
                sort_index = pred_conf > cfg.CLASS_THRESH

                # eliminate bad box
                for idx, box in enumerate(pred_box):
                    if round(box[0]) >= round(box[3]) or round(box[1]) >= round(box[4]) or round(box[2]) >= round(box[5]):
                        sort_index[idx] = False

                pred_mask = []
                mask_ind = 0
                for ind, cls in enumerate(pred_class):
                    if sort_index[ind]:
                        mask = self.net._predictions['mask_pred'][0][mask_ind][0][cls].data.cpu().numpy()
                        mask = np.where(mask >=cfg.MASK_THRESH, 1, 0).astype(np.float32)
                        pred_mask.append(mask)
                        mask_ind += 1

                if iter < cfg.VAL_NUM: 
                    pickle.dump(pred_mask, open('{}/{}/pred_mask'.format(cfg.VAL_SAVE_DIR, blobs['id'][0].split('/')[-1][:12]), 'wb'))
                    pickle.dump(sort_index, open('{}/{}/pred_mask_index'.format(cfg.VAL_SAVE_DIR, blobs['id'][0].split('/')[-1][:12]), 'wb'))
                    pickle.dump(gt_mask, open('{}/{}/gt_mask'.format(cfg.VAL_SAVE_DIR, blobs['id'][0].split('/')[-1][:12]), 'wb'))

                mAP_MASK.evaluate_mask(
                        pred_box[sort_index],
                        pred_class[sort_index],
                        pred_conf[sort_index],
                        pred_mask,
                        gt_box,
                        gt_class, 
                        gt_mask, 
                        self.net._scene_info)

            self.net.delete_intermediate_states()
        timer.toc()
        print('It took {:.3f}s for Validation on chunks'.format(timer.total_time()))

        ###################################
        # Summary
        ###################################
        if cfg.USE_RPN:
            mAP_RPN.finalize()
            print('AP of RPN: {}'.format(mAP_RPN.mAP()))
            data_logger.scalar_summary('AP_ROI', mAP_RPN.mAP(), index)

        if cfg.USE_CLASS:
            mAP_CLASSIFICATION.finalize()
            print('mAP of CLASSIFICATION: {}'.format(mAP_CLASSIFICATION.mAP()))
            for class_ind in range(cfg.NUM_CLASSES):
                if class_ind not in mAP_CLASSIFICATION.ignore_class:
                    print('class {}: {}'.format(class_ind, mAP_CLASSIFICATION.AP(class_ind)))
            data_logger.scalar_summary('mAP_CLASSIFICATION', mAP_CLASSIFICATION.mAP(), index)

        if cfg.USE_MASK:
            mAP_MASK.finalize()
            print('mAP of mask: {}'.format(mAP_MASK.mAP()))
            for class_ind in range(cfg.NUM_CLASSES):
                if class_ind not in mAP_MASK.ignore_class:
                    print('class {}: {}'.format(class_ind, mAP_MASK.AP(class_ind)))
            data_logger.scalar_summary('mAP_MASK', mAP_MASK.mAP(), index)
Example #37
    def train_model(self, epochs):
        #1. construct the computation graph
        self.net.init_modules()

        #save net structure to data folder
        net_f = open(os.path.join(self.output_dir, 'nn.txt'), 'w')
        net_f.write(str(self.net))
        net_f.close()

        #find previous snapshot 
        lsf, nfiles, sfiles = self.find_previous()

        #2. restore weights
        if lsf == 0:
            lr, last_iter, stepsizes, self.np_paths, self.ss_paths = self.initialize()
        else:
            lr, last_iter, stepsizes, self.np_paths, self.ss_paths = self.restore(str(sfiles[-1]),
                                                                                 str(nfiles[-1]))
        #3. fix weights and eval mode
        self.fix_eval_parts()

        # construct optimizer
        self.construct_optimizer(lr)

        if len(stepsizes) != 0:
            next_stepsize = stepsizes.pop(0)
        else:
            next_stepsize = -1

        train_timer = Timer()
        current_snapshot_epoch = int(last_iter / len(self.dataloader_train))
        for epoch in range(current_snapshot_epoch, epochs):
            print("start epoch {}".format(epoch))
            with output(initial_len=9, interval=0) as content:
                for iter, blobs in enumerate(tqdm(self.dataloader_train)):
                    last_iter += 1
                    # adjust learning rate
                    if last_iter == next_stepsize:
                        lr *= cfg.GAMMA
                        self.scale_lr(self.optimizer, lr)
                        if len(stepsizes) != 0:
                            next_stepsize = stepsizes.pop(0)

                    batch_size = blobs['data'].shape[0]
                    if len(blobs['gt_box']) < batch_size: #invalid sample
                        continue
                    train_timer.tic()
                    # IMAGE PART
                    if cfg.USE_IMAGES:
                        grid_shape = blobs['data'].shape[-3:]
                        projection_helper = ProjectionHelper(cfg.INTRINSIC, cfg.PROJ_DEPTH_MIN, cfg.PROJ_DEPTH_MAX, cfg.DEPTH_SHAPE, grid_shape, cfg.VOXEL_SIZE)
                        proj_mapping = [[projection_helper.compute_projection(d.cuda(), c.cuda(), t.cuda()) for d, c, t in zip(blobs['nearest_images']['depths'][i], blobs['nearest_images']['poses'][i], blobs['nearest_images']['world2grid'][i])] for i in range(batch_size)]

                        jump_flag = False
                        for i in range(batch_size):
                            if None in proj_mapping[i]: #invalid sample
                                jump_flag = True
                                break
                        if jump_flag:
                            continue
                        
                        blobs['proj_ind_3d'] = []
                        blobs['proj_ind_2d'] = []
                        for i in range(batch_size):
                            proj_mapping0, proj_mapping1 = zip(*proj_mapping[i])
                            blobs['proj_ind_3d'].append(torch.stack(proj_mapping0))
                            blobs['proj_ind_2d'].append(torch.stack(proj_mapping1))

                        
                    self.net.forward(blobs)
                    self.optimizer.zero_grad()
                    self.net._losses["total_loss"].backward()
                    self.optimizer.step()

                    train_timer.toc()

                    # Display training information
                    if iter % (cfg.DISPLAY) == 0:
                        self.log_print(epoch*len(self.dataloader_train)+iter, lr, content, train_timer.average_time())
                    self.net.delete_intermediate_states()

                    # validate if satisfying the time criterion
                    if train_timer.total_time() / 3600 >= cfg.VAL_TIME:
                        print('------------------------VALIDATION------------------------------')
                        self.validation(last_iter, 'val')
                        print('------------------------TRAINVAL--------------------------------')
                        self.validation(last_iter, 'trainval')

                        # snapshot
                        if cfg.VAL_TIME > 0.0:
                            ss_path, np_path = self.snapshot(last_iter)
                            self.np_paths.append(np_path)
                            self.ss_paths.append(ss_path)

                            #remove old snapshots if too many
                            if len(self.np_paths) > cfg.SNAPSHOT_KEPT and cfg.SNAPSHOT_KEPT:
                                self.remove_snapshot()

                        train_timer.clean_total_time()
Example #38
    def train_model(self, sess, max_iters, restore=False):
        """Network training loop."""
        data_layer = get_data_layer(self.roidb, self.imdb.num_classes)
        total_loss, model_loss, rpn_cross_entropy, rpn_loss_box = self.net.build_loss(ohem=cfg.TRAIN.OHEM)
        # scalar summary
        tf.summary.scalar('rpn_reg_loss', rpn_loss_box)
        tf.summary.scalar('rpn_cls_loss', rpn_cross_entropy)
        tf.summary.scalar('model_loss', model_loss)
        tf.summary.scalar('total_loss',total_loss)
        summary_op = tf.summary.merge_all()

        log_image, log_image_data, log_image_name =\
            self.build_image_summary()

        # optimizer
        lr = tf.Variable(cfg.TRAIN.LEARNING_RATE, trainable=False)
        if cfg.TRAIN.SOLVER == 'Adam':
            opt = tf.train.AdamOptimizer(cfg.TRAIN.LEARNING_RATE)
        elif cfg.TRAIN.SOLVER == 'RMS':
            opt = tf.train.RMSPropOptimizer(cfg.TRAIN.LEARNING_RATE)
        else:
            # lr = tf.Variable(0.0, trainable=False)
            momentum = cfg.TRAIN.MOMENTUM
            opt = tf.train.MomentumOptimizer(lr, momentum)

        global_step = tf.Variable(0, trainable=False)
        with_clip = True
        if with_clip:
            tvars = tf.trainable_variables()
            grads, norm = tf.clip_by_global_norm(tf.gradients(total_loss, tvars), 10.0)
            train_op = opt.apply_gradients(list(zip(grads, tvars)), global_step=global_step)
        else:
            train_op = opt.minimize(total_loss, global_step=global_step)

        # initialize variables
        sess.run(tf.global_variables_initializer())
        restore_iter = 0

        # load vgg16
        if self.pretrained_model is not None and not restore:
            try:
                print(('Loading pretrained model '
                   'weights from {:s}').format(self.pretrained_model))
                self.net.load(self.pretrained_model, sess, True)
            except:
                raise Exception('Check your pretrained model {:s}'.format(self.pretrained_model))

        # resuming a trainer
        if restore:
            try:
                ckpt = tf.train.get_checkpoint_state(self.output_dir)
                print('Restoring from {}...'.format(ckpt.model_checkpoint_path), end=' ')
                self.saver.restore(sess, ckpt.model_checkpoint_path)
                stem = os.path.splitext(os.path.basename(ckpt.model_checkpoint_path))[0]
                restore_iter = int(stem.split('_')[-1])
                sess.run(global_step.assign(restore_iter))
                print('done')
            except:
                raise Exception('Check your pretrained model {:s}'.format(ckpt.model_checkpoint_path))

        last_snapshot_iter = -1
        timer = Timer()
        for iter in range(restore_iter, max_iters):
            timer.tic()
            # learning rate
            if iter != 0 and iter % cfg.TRAIN.STEPSIZE == 0:
                sess.run(tf.assign(lr, lr.eval() * cfg.TRAIN.GAMMA))
                print(lr)

            # get one batch
            blobs = data_layer.forward()

            feed_dict={
                self.net.data: blobs['data'],
                self.net.im_info: blobs['im_info'],
                self.net.keep_prob: 0.5,
                self.net.gt_boxes: blobs['gt_boxes'],
                self.net.gt_ishard: blobs['gt_ishard'],
                self.net.dontcare_areas: blobs['dontcare_areas']
            }
            res_fetches=[]
            fetch_list = [total_loss,model_loss, rpn_cross_entropy, rpn_loss_box,
                          summary_op,
                          train_op] + res_fetches

            total_loss_val,model_loss_val, rpn_loss_cls_val, rpn_loss_box_val, \
                summary_str, _ = sess.run(fetches=fetch_list, feed_dict=feed_dict)

            self.writer.add_summary(summary=summary_str, global_step=global_step.eval())

            _diff_time = timer.toc(average=False)


            if (iter) % (cfg.TRAIN.DISPLAY) == 0:
                print('iter: %d / %d, total loss: %.4f, model loss: %.4f, rpn_loss_cls: %.4f, rpn_loss_box: %.4f, lr: %f'%\
                        (iter, max_iters, total_loss_val,model_loss_val,rpn_loss_cls_val,rpn_loss_box_val,lr.eval()))
                print('speed: {:.3f}s / iter'.format(_diff_time))

            if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = iter
                self.snapshot(sess, iter)

        if last_snapshot_iter != iter:
            self.snapshot(sess, iter)
Example #39
    def test(net, data_loader, data_logger):
        #####################################
        # Preparation
        #####################################
        os.makedirs(cfg.TEST_SAVE_DIR, exist_ok=True)
        mAP_CLASSIFICATION = Evaluate_metric(cfg.NUM_CLASSES, ignore_class=[0], overlap_threshold=cfg.MAP_THRESH)
        mAP_MASK = Evaluate_metric(cfg.NUM_CLASSES, ignore_class=[0], overlap_threshold=cfg.MAP_THRESH)

        ####################################
        # Accumulate data
        ####################################
        pred_all = {}
        gt_all = {}

        timer = Timer()
        timer.tic()
        print('Starting test on whole scans...')
        for iter, blobs in enumerate(tqdm(data_loader)):

            # skip scans with no usable ground-truth boxes
            try:
                gt_box = blobs['gt_box'][0].numpy()[:, 0:6]
                gt_class = blobs['gt_box'][0][:, 6].numpy()
            except Exception:
                continue

            # color projection: compute 2D-3D projection indices for each nearby view
            killing_inds = None
            if cfg.USE_IMAGES:
                grid_shape = blobs['data'].shape[-3:]
                projection_helper = ProjectionHelper(cfg.INTRINSIC, cfg.PROJ_DEPTH_MIN, cfg.PROJ_DEPTH_MAX, cfg.DEPTH_SHAPE, grid_shape, cfg.VOXEL_SIZE)
                if grid_shape[0] * grid_shape[1] * grid_shape[2] > cfg.MAX_VOLUME \
                        or blobs['nearest_images']['depths'][0].shape[0] > cfg.MAX_IMAGE:
                    # volume or view count too large for the GPU: project on the CPU
                    proj_mapping = [projection_helper.compute_projection(d, c, t)
                                    for d, c, t in zip(blobs['nearest_images']['depths'][0],
                                                       blobs['nearest_images']['poses'][0],
                                                       blobs['nearest_images']['world2grid'][0])]
                else:
                    proj_mapping = [projection_helper.compute_projection(d.cuda(), c.cuda(), t.cuda())
                                    for d, c, t in zip(blobs['nearest_images']['depths'][0],
                                                       blobs['nearest_images']['poses'][0],
                                                       blobs['nearest_images']['world2grid'][0])]

                killing_inds = []
                real_proj_mapping = []
                if None in proj_mapping:  # some views produced no valid projection
                    for killing_ind, killing_item in enumerate(proj_mapping):
                        if killing_item is None:
                            killing_inds.append(killing_ind)
                        else:
                            real_proj_mapping.append(killing_item)
                    print('{}: invalid sample (no valid projection for some views)'.format(blobs['id']))
                else:
                    real_proj_mapping = proj_mapping
                blobs['proj_ind_3d'] = []
                blobs['proj_ind_2d'] = []
                proj_mapping0, proj_mapping1 = zip(*real_proj_mapping)
                blobs['proj_ind_3d'].append(torch.stack(proj_mapping0))
                blobs['proj_ind_2d'].append(torch.stack(proj_mapping1))

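            # forward pass in TEST mode; killing_inds marks views that had no valid projection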
            net.forward(blobs, 'TEST', killing_inds)

            # test with detection pipeline
            pred_class = net._predictions['cls_pred'].data.cpu().numpy()
            rois = net._predictions['rois'][0].cpu()
            box_reg_pre = net._predictions["bbox_pred"].data.cpu().numpy()
            box_reg = np.zeros((box_reg_pre.shape[0], 6))
            pred_conf_pre = net._predictions['cls_prob'].data.cpu().numpy()
            pred_conf = np.zeros((pred_conf_pre.shape[0]))

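            # for each ROI, pick the 6-DoF box offsets and the confidence of its predicted class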
            for pred_ind in range(pred_class.shape[0]):
                box_reg[pred_ind, :] = box_reg_pre[pred_ind, pred_class[pred_ind]*6:(pred_class[pred_ind]+1)*6]
                pred_conf[pred_ind] = pred_conf_pre[pred_ind, pred_class[pred_ind]]

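            # apply the offsets to the ROIs and clip the resulting boxes to the scene volume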
            pred_box = bbox_transform_inv(rois, torch.from_numpy(box_reg).float())
            pred_box = clip_boxes(pred_box, net._scene_info[:3]).numpy()

            # save per-scene predictions under a directory named by the scene id
            scene_id = blobs['id'][0].split('/')[-1][:12]
            scene_dir = '{}/{}'.format(cfg.TEST_SAVE_DIR, scene_id)
            os.makedirs(scene_dir, exist_ok=True)
            np.save('{}/pred_class'.format(scene_dir), pred_class)
            np.save('{}/pred_conf'.format(scene_dir), pred_conf)
            np.save('{}/pred_box'.format(scene_dir), pred_box)
            np.save('{}/scene'.format(scene_dir), np.where(blobs['data'][0, 0].numpy() <= 1, 1, 0))
            np.save('{}/gt_class'.format(scene_dir), gt_class)
            np.save('{}/gt_box'.format(scene_dir), gt_box)

            # keep only detections whose confidence exceeds CLASS_THRESH
            sort_index = [conf > cfg.CLASS_THRESH for conf in pred_conf]

            # drop degenerate boxes whose rounded extent is empty along any axis
            for idx, box in enumerate(pred_box):
                if round(box[0]) >= round(box[3]) or round(box[1]) >= round(box[4]) or round(box[2]) >= round(box[5]):
                    sort_index[idx] = False

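            # accumulate this scan's kept detections into the classification mAP metric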
            mAP_CLASSIFICATION.evaluate(
                    pred_box[sort_index],
                    pred_class[sort_index],
                    pred_conf[sort_index],
                    gt_box,
                    gt_class)

            if cfg.USE_MASK:
                gt_mask = blobs['gt_mask'][0]
                # recompute the keep mask for the mask pipeline (same criterion as above)
                sort_index = [conf > cfg.CLASS_THRESH for conf in pred_conf]

                # drop degenerate boxes whose rounded extent is empty along any axis
                for idx, box in enumerate(pred_box):
                    if round(box[0]) >= round(box[3]) or round(box[1]) >= round(box[4]) or round(box[2]) >= round(box[5]):
                        sort_index[idx] = False

                # test with mask pipeline: run the mask backbone on each kept box's cropped volume
                net.mask_backbone.eval()
                net.mask_backbone.cuda()
                mask_pred_batch = []
                for net_i in range(1):  # batch size is fixed to 1 at test time
                    mask_pred = []
                    for pred_box_ind, pred_box_item in enumerate(pred_box):
                        if sort_index[pred_box_ind]:
                            mask_pred.append(net.mask_backbone(Variable(blobs['data'].cuda())[net_i:net_i+1, :, 
                                                                            int(round(pred_box_item[0])):int(round(pred_box_item[3])),
                                                                            int(round(pred_box_item[1])):int(round(pred_box_item[4])), 
                                                                            int(round(pred_box_item[2])):int(round(pred_box_item[5]))
                                                                            ], [] if cfg.USE_IMAGES else None))

                    mask_pred_batch.append(mask_pred)
                net._predictions['mask_pred'] = mask_pred_batch

                # binarize each kept detection's predicted mask and save the results
                pred_mask = []
                mask_ind = 0
                for ind, cls in enumerate(pred_class):
                    if sort_index[ind]:
                        mask = net._predictions['mask_pred'][0][mask_ind][0][cls].data.cpu().numpy()
                        mask = np.where(mask >= cfg.MASK_THRESH, 1, 0).astype(np.float32)
                        pred_mask.append(mask)
                        mask_ind += 1

                pickle.dump(pred_mask, open('{}/pred_mask'.format(scene_dir), 'wb'))
                pickle.dump(sort_index, open('{}/pred_mask_index'.format(scene_dir), 'wb'))
                pickle.dump(gt_mask, open('{}/gt_mask'.format(scene_dir), 'wb'))

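                # accumulate the kept masks into the mask mAP metric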
                mAP_MASK.evaluate_mask(
                        pred_box[sort_index],
                        pred_class[sort_index],
                        pred_conf[sort_index],
                        pred_mask,
                        gt_box,
                        gt_class, 
                        gt_mask, 
                        net._scene_info)

        timer.toc()
        print('Test on whole scans took {:.3f}s'.format(timer.total_time()))

        ###################################
        # Summary
        ###################################
        if cfg.USE_CLASS:
            mAP_CLASSIFICATION.finalize()
            print('mAP of CLASSIFICATION: {}'.format(mAP_CLASSIFICATION.mAP()))
            for class_ind in range(cfg.NUM_CLASSES):
                if class_ind not in mAP_CLASSIFICATION.ignore_class:
                    print('class {}: {}'.format(class_ind, mAP_CLASSIFICATION.AP(class_ind)))

        if cfg.USE_MASK:
            mAP_MASK.finalize()
            print('mAP of mask: {}'.format(mAP_MASK.mAP()))
            for class_ind in range(cfg.NUM_CLASSES):
                if class_ind not in mAP_MASK.ignore_class:
                    print('class {}: {}'.format(class_ind, mAP_MASK.AP(class_ind)))
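
For reference, a minimal sketch of the axis-aligned 3D IoU that an overlap test like Evaluate_metric's overlap_threshold could rely on, for boxes stored as [xmin, ymin, zmin, xmax, ymax, zmax]; this is an assumption about the metric's internals, not code taken from the source:

import numpy as np

def iou_3d(box_a, box_b):
    # Intersection over union of two axis-aligned 3D boxes
    # given as [xmin, ymin, zmin, xmax, ymax, zmax].
    inter_min = np.maximum(box_a[:3], box_b[:3])
    inter_max = np.minimum(box_a[3:6], box_b[3:6])
    inter = np.clip(inter_max - inter_min, 0, None).prod()
    vol_a = np.prod(box_a[3:6] - box_a[:3])
    vol_b = np.prod(box_b[3:6] - box_b[:3])
    return inter / (vol_a + vol_b - inter)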