Code Example #1
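All eight snippets below are demo/test scripts written against the FasterRCNN detector from a faster_rcnn_pytorch-style codebase (the absolute paths in Example #7 point at a faster_rcnn_pytorch-master checkout). Only the function bodies are shown; they rely on module-level imports roughly like the following sketch. The exact module paths are an assumption based on that project's usual layout, not something taken from the excerpts:

import cv2
import numpy as np

from faster_rcnn import network
from faster_rcnn.faster_rcnn import FasterRCNN
from faster_rcnn.utils.timer import Timer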
def test():
    import os
    im_file = 'demo/00001.jpg'
    # im_file = 'data/VOCdevkit2007/VOC2007/JPEGImages/009036.jpg'
    # im_file = '/media/longc/Data/data/2DMOT2015/test/ETH-Crossing/img1/000100.jpg'
    image = cv2.imread(im_file)

    model_file = './model/VGGnet_fast_rcnn_iter_70000.h5'
    # model_file = '/media/longc/Data/models/faster_rcnn_pytorch3/faster_rcnn_100000.h5'
    # model_file = '/media/longc/Data/models/faster_rcnn_pytorch2/faster_rcnn_2000.h5'
    detector = FasterRCNN()
    network.load_net(model_file, detector)
    detector.cuda()
    detector.eval()
    print('load model successfully!')

    # network.save_net(r'/media/longc/Data/models/VGGnet_fast_rcnn_iter_70000.h5', detector)
    # print('save model succ')

    t = Timer()
    t.tic()
    # image = np.zeros(shape=[600, 800, 3], dtype=np.uint8) + 255
    dets, scores, classes = detector.detect(image, 0.7)
    runtime = t.toc()
    print('total spend: {}s'.format(runtime))

    im2show = np.copy(image)
    for i, det in enumerate(dets):
        det = tuple(int(x) for x in det)
        cv2.rectangle(im2show, det[0:2], det[2:4], (255, 205, 51), 2)
        cv2.putText(im2show, '%s: %.3f' % (classes[i], scores[i]), (det[0], det[1] + 15), cv2.FONT_HERSHEY_PLAIN,
                    1.0, (0, 0, 255), thickness=1)
    cv2.imwrite(os.path.join('demo', 'out.jpg'), im2show)
Code Example #2
def test():
    import os
    im_file = 'demo/004545.jpg'
    image = cv2.imread(im_file)

    detector = FasterRCNN()
    network.load_net('/media/longc/Data/models/VGGnet_fast_rcnn_iter_70000.h5',
                     detector)
    detector.cuda()
    detector.eval()  # as in the other examples: switch off Dropout's training behaviour before inference
    print('load model successfully!')

    # network.save_net(r'/media/longc/Data/models/VGGnet_fast_rcnn_iter_70000.h5', detector)
    # print('save model succ')

    t = Timer()
    t.tic()
    # image = np.zeros(shape=[600, 800, 3], dtype=np.uint8) + 255
    dets, scores, classes = detector.detect(image, 0.3)
    runtime = t.toc()
    print('total spend: {}s'.format(runtime))

    im2show = np.copy(image)
    for i, det in enumerate(dets):
        if scores[i] < 0.3:
            continue
        det = tuple(int(x) for x in det)
        cv2.rectangle(im2show, det[0:2], det[2:4], (255, 205, 51), 2)
        cv2.putText(im2show,
                    '%s: %.3f' % (classes[i], scores[i]),
                    (det[0], det[1] + 15),
                    cv2.FONT_HERSHEY_PLAIN,
                    1.0, (0, 0, 255),
                    thickness=1)
    cv2.imwrite(os.path.join('demo', 'out.jpg'), im2show)
Code Example #3
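In addition to the common imports sketched above, Example #3 draws the boxes with matplotlib, so it also assumes something like:

import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.patches as patches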
def test():
    import os
    im_file = 'demo/004545.jpg'
    # im_file = 'data/VOCdevkit2007/VOC2007/JPEGImages/009036.jpg'
    # im_file = '/media/longc/Data/data/2DMOT2015/test/ETH-Crossing/img1/000100.jpg'
    image = cv2.imread(im_file)

    # model_file = './VGGnet_fast_rcnn_iter_70000.h5'
    # model_file = '/media/longc/Data/models/faster_rcnn_pytorch3/faster_rcnn_100000.h5'
    # model_file = '/media/longc/Data/models/faster_rcnn_pytorch2/faster_rcnn_2000.h5'
    model_file = './models/saved_model_max/faster_rcnn_100000.h5'
    detector = FasterRCNN()
    network.load_net(model_file, detector)
    detector.cuda()
    detector.eval()
    print('load model successfully!')

    # network.save_net(r'/media/longc/Data/models/VGGnet_fast_rcnn_iter_70000.h5', detector)
    # print('save model succ')

    t = Timer()
    t.tic()
    # image = np.zeros(shape=[600, 800, 3], dtype=np.uint8) + 255
    dets, scores, classes = detector.detect(image, 0.7)
    runtime = t.toc()
    print('total spend: {}s'.format(runtime))

    im2show = np.copy(image)
    img = mpimg.imread(im_file)
    # Create figure and axes
    fig, ax = plt.subplots(1)

    # Display the image
    ax.imshow(img)
    # Create a Rectangle patch
    for i, det in enumerate(dets):
        w = det[2] - det[0]
        h = det[3] - det[1]
        rect = patches.Rectangle(det[0:2],
                                 w,
                                 h,
                                 linewidth=1,
                                 edgecolor='r',
                                 facecolor='none')
        # text
        plt.text(det[0], det[1], '%s: %.3f' % (classes[i], scores[i]))

        # Add the patch to the Axes
        ax.add_patch(rect)

    plt.show()
    print('aa')
Code Example #4
def test(visualize=False):
    import os
    im_file = 'data/cervix/train/Type_2/1381.jpg'
    im_name = im_file.split('/')[-1]
    image = cv2.imread(im_file)

    # model_file = 'models/VGGnet_fast_rcnn_iter_70000.h5'
    model_file = 'models/saved_model3/faster_rcnn_100000.h5'
    expm = model_file.split('/')[-1].split('.')[0]
    expm_dir = os.path.join('demo', expm)
    if not os.path.exists(expm_dir):
        os.makedirs(expm_dir)

    detector = FasterRCNN()
    network.load_net(model_file, detector)
    detector.cuda()
    detector.eval()  # set model in evaluation mode; affects Dropout and BatchNorm. Use train() to switch back to training mode.
    print('load model successfully!')

    # network.save_net(r'/media/longc/Data/models/VGGnet_fast_rcnn_iter_70000.h5', detector)
    # print('save model succ')

    t = Timer()
    t.tic()
    # image = np.zeros(shape=[600, 800, 3], dtype=np.uint8) + 255
    dets, scores, classes = detector.detect(image, 0.7)
    runtime = t.toc()
    print('total spend: {}s'.format(runtime))

    im2show = np.copy(image)
    for i, det in enumerate(dets):
        det = tuple(int(x) for x in det)
        cv2.rectangle(im2show, det[0:2], det[2:4], (255, 205, 51), 4)
        cv2.putText(im2show,
                    '%s: %.3f' % (classes[i], scores[i]),
                    (det[0], det[1] + 15),
                    cv2.FONT_HERSHEY_PLAIN,
                    1.0, (0, 0, 255),
                    thickness=1)
    cv2.imwrite(os.path.join('demo', expm, im_name), im2show)

    if visualize:
        im2show = cv2.resize(im2show,
                             None,
                             None,
                             fx=0.15,
                             fy=0.15,
                             interpolation=cv2.INTER_LINEAR)
        cv2.imshow('demo', im2show)
        cv2.waitKey(0)
Code Example #5
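Example #5 is a method on a GUI class (note the self.statusbar_stringvar and self.view_table attributes): it runs the detector over the frames in self.image_list, writes annotated frames to result_dir, and collects per-frame detection metadata into a dictionary. Like the other snippets it assumes os, numpy (np), cv2, network and FasterRCNN are available at module level.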
def analysis_video(self, result_dir):

    self.statusbar_stringvar.set('Analysis..Please wait..')
    model_file = 'model.h5'
    detector = FasterRCNN()
    network.load_net(model_file, detector)
    detector.cuda()
    detector.eval()
    print('load model successfully!')

    info_dict = {}
    info_dict['pictures'] = []
    for index in range(len(self.image_list)):
        accuracy = 0.
        pic_info = {}
        pic_info['objects'] = []
        dets, scores, classes = detector.detect(self.image_list[index], 0.8)
        im2show = np.copy(self.image_list[index])
        for i, det in enumerate(dets):
            object_info = {}
            det = tuple(int(x) for x in det)
            cv2.rectangle(im2show, det[0:2], det[2:4], (255, 205, 51), 2)
            cv2.putText(im2show, '%s: %.3f' % (classes[i], scores[i]), (det[0], det[1] + 15), cv2.FONT_HERSHEY_PLAIN,
                        1.0, (0, 0, 255), thickness=1)
            accuracy += scores[i]
            # fill in the per-object info
            object_info['name'] = classes[i]
            object_info['accuracy'] = scores[i]
            object_info['bbox'] = det
            pic_info['objects'].append(object_info)

        # fill in the per-frame info
        pic_info['filename'] = os.path.basename(self.video_path).split('.')[0] + '_' + str(index + 1) + '.jpg'
        pic_info['size'] = im2show.shape
        info_dict['pictures'].append(pic_info)

        cv2.imwrite(os.path.join(result_dir, pic_info['filename']), im2show)
        self.view_table.update(index + 1, **{
            'name': pic_info['filename'],
            'accuracy': accuracy / max(len(classes), 1),  # avoid ZeroDivisionError on frames with no detections
            'state': 'yes'
        })
    self.statusbar_stringvar.set('Analysis done!')
    return info_dict
Code Example #6
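Example #6 tracks an object through a DAVIS video sequence by feeding the previous frame's box back into the RPN proposals. It relies on several project-specific pieces that are not reproduced in the excerpt (DAVIS_seq_dataloader, UNet, merge_rois, a _CLASSES label list, plus the usual torch, torch.autograd.Variable, torch.nn.functional as F and ipdb imports). The bbox helper called on the first frame's segmentation mask is also not shown; a minimal sketch consistent with how it is used, purely as an assumption, might look like this:

def bbox(mask):
    # Hypothetical helper: row/column extent (rmin, rmax, cmin, cmax) of the non-zero region of a 2-D mask.
    rows = np.any(mask, axis=1)
    cols = np.any(mask, axis=0)
    rmin, rmax = np.where(rows)[0][[0, -1]]
    cmin, cmax = np.where(cols)[0][[0, -1]]
    return rmin, rmax, cmin, cmax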
def test():
    # Set up dataloader
    data_loader = DAVIS_seq_dataloader(split='val')

    model_file = './model/VGGnet_fast_rcnn_iter_70000.h5'
    detector = FasterRCNN()
    network.load_net(model_file, detector)
    detector.cuda()
    detector.eval()
    print('Load Faster R-CNN model successfully!')

    # unet_model = './model/vgg_unet_1e-4_500.h5'
    # unet = UNet()
    # network.load_net(unet_model, unet)
    # unet.cuda()
    # network.weights_normal_init(unet, dev=0.01)
    # unet.load_from_faster_rcnn_h5(h5py.File(model_file))
    criterion_bce = torch.nn.BCELoss().cuda()
    weight_decay = 5e-5
    # optimizer = torch.optim.SGD(list(unet.parameters())[26:], lr=1e-4, weight_decay=weight_decay)
    # print('Load U-Net model successfully!')

    crop_set = []
    # Iterate
    for i in range(data_loader.num_seq):
        # Get the first frame info
        seq = data_loader.seq_list[data_loader.out_pointer]
        seq_len = data_loader.seq_len[seq]
        img_blobs, seg_blobs = data_loader.get_next_minibatch()
        img = img_blobs[0,:,:,:]
        im_data, im_scales = detector.get_image_blob(img)
        im_info = np.array([[im_data.shape[1], im_data.shape[2], im_scales[0]]], dtype=np.float32)
        # Get the category of the object in the first frame
        rmin, rmax, cmin, cmax = bbox(seg_blobs[0,:,:,0])
        features, rois = detector(im_data, im_info, rpn_only=True)
        new_rois_np = np.array([[0, cmin, rmin, cmax, rmax]], dtype=np.float32)
        new_rois_t = torch.from_numpy(new_rois_np).cuda()
        new_rois = Variable(new_rois_t, requires_grad=False)
        pooled_features = detector.roi_pool(features, new_rois)
        x = pooled_features.view(pooled_features.size()[0], -1)
        x = detector.fc6(x)
        x = detector.fc7(x)
        cls_score = detector.score_fc(x)
        cls_prob = F.softmax(cls_score)
        bbox_pred = detector.bbox_fc(x)
        cls_prob_np = cls_prob.cpu().data.numpy()
        bbox_pred_np = bbox_pred.cpu().data.numpy()
        cls_idx = cls_prob_np.argmax()
        cls_conf = cls_prob_np.max()

        # Overfit U-Net with the first frame
        # for i in range(100):
        #     unet.train()
        #     img_t = torch.from_numpy(img_blobs).permute(0,3,1,2).float().cuda()
        #     img_v = Variable(img_t, requires_grad=False)
        #     seg_t = torch.from_numpy(seg_blobs).permute(0,3,1,2).float().cuda()
        #     seg_v = Variable(seg_t, requires_grad=False)
        #     pred = unet(img_v)
            # loss = criterion_bce(pred, seg_v)
        #     pred_view = pred.view(-1, 1)
        #     seg_view = seg_v.view(-1, 1)    
        #     EPS = 1e-6
        #     loss = 0.6 * seg_view.mul(torch.log(pred_view+EPS)) + 0.4 * seg_view.mul(-1).add(1).mul(torch.log(1-pred+EPS))
        #     loss = -torch.mean(loss)
        #     loss_val = loss.data[0]
        #     optimizer.zero_grad()
        #     loss.backward()
        #     optimizer.step()
        #     print('{}/100: {}'.format(i, loss_val))
        # unet.eval()

        # Merge region proposals overlapping with last frame proposal
        for j in range(1, seq_len):
            img_blobs, _ = data_loader.get_next_minibatch()
            img = img_blobs[0,:,:,:]
            im_data, im_scales = detector.get_image_blob(img)
            # rois: 300 x 5 as [batch_idx, x1, y1, x2, y2]; the batch index column is not used here
            features, rois = detector(im_data, im_info, rpn_only=True)
            x1, y1, x2, y2 = merge_rois((rmin, rmax, cmin, cmax), rois.cpu().data.numpy(), thres=0.75)

            # Have overlapping proposals
            if x1 is not None:
                # Send to following layers to refine the bbox
                new_rois_np = np.array([[0, x1, y1, x2, y2]], dtype=np.float32)
                new_rois_t = torch.from_numpy(new_rois_np).cuda()
                new_rois = Variable(new_rois_t, requires_grad=False)
                pooled_features = detector.roi_pool(features, new_rois)
                x = pooled_features.view(pooled_features.size()[0], -1)
                x = detector.fc6(x)
                x = detector.fc7(x)
                cls_score = detector.score_fc(x)
                cls_prob = F.softmax(cls_score)
                bbox_pred = detector.bbox_fc(x)
                cls_prob_np = cls_prob.cpu().data.numpy()
                bbox_pred_np = bbox_pred.cpu().data.numpy()

                # Only regress bbox when confidence is greater than 0.8
                if cls_prob_np.max() > 0.8 and cls_prob_np.argmax() != 0:
                    keep = cls_prob_np.argmax()
                    pred_boxes, scores, classes = detector.interpret_faster_rcnn(cls_prob, bbox_pred, new_rois, im_info, im_data.shape, 0.8)

                    cx = (x1 + x2) / 2
                    cy = (y1 + y2) / 2
                    width = x2 - x1 + 1
                    height = y2 - y1 + 1
                    dx = bbox_pred_np[0,keep*4+0]
                    dy = bbox_pred_np[0,keep*4+1]
                    dw = bbox_pred_np[0,keep*4+2]
                    dh = bbox_pred_np[0,keep*4+3]
            
                    pred_x = dx * width + cx
                    pred_y = dy * height + cy
                    pred_w = np.exp(dw) * width
                    pred_h = np.exp(dh) * height

                    x1 = pred_x - pred_w / 2
                    x2 = pred_x + pred_w / 2
                    y1 = pred_y - pred_h / 2
                    y2 = pred_y + pred_h / 2

            # No overlapping proposals
            if x1 is None:
                # Using Faster R-CNN again to find potential objects
                dets, scores, classes = detector.detect(img, 0.6)
                # Cannot find any salient object
                if dets.shape[0] == 0:
                    x1, y1, x2, y2 = cmin, rmin, cmax, rmax
                else:
                    x1 = dets[:,0]
                    y1 = dets[:,1]
                    x2 = dets[:,2]
                    y2 = dets[:,3]
                    pred_area = (x2 - x1 + 1) * (y2 - y1 + 1)
                    init_area = (cmax - cmin + 1) * (rmax - rmin + 1)
                    xx1 = np.maximum(x1, cmin)
                    xx2 = np.minimum(x2, cmax)
                    yy1 = np.maximum(y1, rmin)
                    yy2 = np.minimum(y2, rmax)
                    inter = (xx2 - xx1 + 1) * (yy2 - yy1 + 1)
                    ovr = inter / (pred_area + init_area - inter)
                    # If there is overlapping, choose the largest IoU bbox
                    try:
                        ovr = ovr[ovr > 0.3]
                        ovr_idx = np.argsort(ovr)[-1]
                        x1 = dets[ovr_idx,0]
                        y1 = dets[ovr_idx,1]
                        x2 = dets[ovr_idx,2]
                        y2 = dets[ovr_idx,3]
                    # Else, choose the highest objectness score one
                    except:
                        if cls_idx == 0:
                            temp_idx = scores.argmax()
                            x1 = dets[temp_idx,0]
                            y1 = dets[temp_idx,1]
                            x2 = dets[temp_idx,2]
                            y2 = dets[temp_idx,3]
                        else:
                            cx = (x1 + x2) / 2
                            cy = (y1 + y2) / 2
                            cc = (cmin + cmax) / 2
                            cr = (rmin + rmax) / 2
                            dist = np.sqrt(np.square(cx-cc) + np.square(cy-cr))
                            dist_idx = np.argsort(dist)
                            for di in dist_idx:
                                if classes[di] == _CLASSES[cls_idx]:
                                    x1 = dets[di,0]
                                    y1 = dets[di,1]
                                    x2 = dets[di,2]
                                    y2 = dets[di,3]

            # Crop the region and send it to U-Net
            try:
                x1 = int(max(x1, 0))
                x2 = int(min(x2, im_data.shape[2]))
                y1 = int(max(y1, 0))
                y2 = int(min(y2, im_data.shape[1]))
            except:
                x1 = int(max(x1[0], 0))
                x2 = int(min(x2[0], im_data.shape[2]))
                y1 = int(max(y1[0], 0))
                y2 = int(min(y2[0], im_data.shape[1]))

            # MEAN_PIXEL = np.array([103.939, 116.779, 123.68])
            # crop = img_blobs[:, y1:y2+1, x1:x2+1, :] - MEAN_PIXEL
            # crop = img_blobs[:,:,:,:] - MEAN_PIXEL
            # crop_v = Variable(torch.from_numpy(crop).permute(0, 3, 1, 2).cuda(), requires_grad=False)
            # pred = unet(crop_v)
            # pred_np = pred.cpu().data.numpy()[0,0,:,:]
            # pred_np[pred_np < 0.5] = 0
            # pred_np[pred_np >= 0.5] = 1
            # pred_np = pred_np * 255
            # res = pred_np.astype(int)
            # cv2.imwrite('test.png', res)

            if y2 - y1 <= 1 or x2 - x1 <= 1:
                ipdb.set_trace()
            cv2.imwrite(os.path.join('demo', 'crop_{}_{}.png'.format(i, j)), img[y1:y2+1,x1:x2+1,:])

            rmin = y1
            rmax = y2
            cmin = x1
            cmax = x2

            im2show = np.copy(img)
            cv2.rectangle(im2show, (int(x1),int(y1)), (int(x2),int(y2)), (0, 255, 0), 2)
            cv2.imwrite(os.path.join('demo', '{}_{}.jpg'.format(i, j)), im2show)
            temp = [i, j, x1, y1, x2, y2]
            crop_set.append(temp)

    # Save
    crop_set = np.array(crop_set)
    np.save('crop', crop_set)
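The merge_rois call above is also project-specific and not included in the excerpt. Judging only from how it is called, it takes the previous box as (rmin, rmax, cmin, cmax), the 300 x 5 proposal array and an IoU threshold, and returns either a merged (x1, y1, x2, y2) or four Nones when nothing overlaps. One hypothetical implementation along those lines (a guess, not the project's actual code):

def merge_rois(prev_box, rois, thres=0.75):
    # Hypothetical reconstruction: union of all proposals whose IoU with the
    # previous box exceeds `thres`; returns (None, None, None, None) otherwise.
    rmin, rmax, cmin, cmax = prev_box
    px1, py1, px2, py2 = cmin, rmin, cmax, rmax
    x1, y1, x2, y2 = rois[:, 1], rois[:, 2], rois[:, 3], rois[:, 4]
    xx1 = np.maximum(x1, px1)
    yy1 = np.maximum(y1, py1)
    xx2 = np.minimum(x2, px2)
    yy2 = np.minimum(y2, py2)
    inter = np.maximum(xx2 - xx1 + 1, 0) * np.maximum(yy2 - yy1 + 1, 0)
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    prev_area = (px2 - px1 + 1) * (py2 - py1 + 1)
    iou = inter / (areas + prev_area - inter)
    keep = iou > thres
    if not np.any(keep):
        return None, None, None, None
    return x1[keep].min(), y1[keep].min(), x2[keep].max(), y2[keep].max()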
Code Example #7
File: listDemo.py  Project: Yuexiaoxi10/CLASP-Project
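Example #7 iterates over a directory of video frames and dumps the per-class detections into a .mat file. Besides the detector imports it assumes roughly:

import glob
import numpy as np
import scipy.io as sio
from PIL import Image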
def test():
    import os
    # im_file = 'demo/004545.jpg'
    # im_file = 'data/VOCdevkit2007/VOC2007/JPEGImages/009036.jpg'
    # im_file = '/media/longc/Data/data/2DMOT2015/test/ETH-Crossing/img1/000100.jpg'

    matName = 'exp5bC9.mat'
    model_file = '/home/dong/PycharmProjects/fasterRCNN/faster_rcnn_pytorch-master/model/CLASP_m_rotation_withNoRot_More/faster_rcnn_20000.h5'
    # model_file = '/media/longc/Data/models/faster_rcnn_pytorch3/faster_rcnn_100000.h5'
    # model_file = '/media/longc/Data/models/faster_rcnn_pytorch2/faster_rcnn_2000.h5'
    #CLASP_class = np.asarray(['__background__',  # always index 0
    #                          'person', 'bin'])
    UCF_class = np.asarray(['__background__', 'person', 'bin'])
    label = UCF_class[1:]  #CLASP_class[1:]
    detector = FasterRCNN(UCF_class)  #CLASP_class
    network.load_net(model_file, detector)
    detector.cuda()
    detector.eval()
    print('load model successfully!')

    #filename = "/home/dong/PycharmProjects/fasterRCNN/faster_rcnn_pytorch-master/CLASP/video/07212017_EXPERIMENT_9A_Aug7/mp4s/Camera_9.mp4"
    #vid = imageio.get_reader(filename, 'ffmpeg')
    imgPath = "/home/dong/PycharmProjects/fasterRCNN/faster_rcnn_pytorch-master/CLASP/exp5bC9/exp5bC9/"  #"/home/dong/PycharmProjects/fasterRCNN/faster_rcnn_pytorch-master/CLASP/C11_50_selected/"
    imgType = '*.jpg'
    image_list = []
    for filename in glob.glob(imgPath + imgType):  # assuming jpg
        #im = Image.open(filename)
        image_list.append(filename)
        #im.close()

    spliter = 'Frame'  #'Frame'
    result = {x: np.zeros([1, 5]) for x in label}

    for i, name in enumerate(image_list):
        ele = Image.open(name)
        image = np.asarray(ele)
        frame_str = ele.filename.split(spliter)[1].split('.')[0]  # avoid shadowing the built-in str
        ind = int(frame_str)
        t = Timer()
        t.tic()
        dets, scores, classes = detector.detect(image, 0.7)
        runtime = t.toc()
        for j, cls_name in enumerate(classes):  # use a distinct name so the label array above is not clobbered
            tmp = np.empty([1, 5])
            tmp[0][0:4] = dets[j]
            tmp[0][4] = ind
            if result[cls_name].max() == 0:
                result[cls_name][0] = tmp
            else:
                result[cls_name] = np.append(result[cls_name], tmp, axis=0)

        print('Progress: {a:8.2f}%'.format(a=i * 100.0 / len(image_list)))
        print('total spend: {}s'.format(runtime))
        ele.close()

    sio.savemat(matName, result)  #result_9AC11_selected.mat
    #for im in enumerate(vid):
    #image = np.asarray(im)

    # network.save_net(r'/media/longc/Data/models/VGGnet_fast_rcnn_iter_70000.h5', detector)
    # print('save model succ')

    # image = np.zeros(shape=[600, 800, 3], dtype=np.uint8) + 255
Code Example #8
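Example #8 is a fragment from the middle of a preprocessing routine: for every image under a set of class directories it keeps the highest-scoring detection, crops it (falling back to the full image when nothing is detected), optionally resizes it, and writes it into a mirrored output tree. The surrounding setup is not part of the excerpt; the fragment expects names such as directories, dest_dir, dest_type, img_size and an already-loaded detector to be defined by the enclosing function (inferred from how the fragment uses them, not taken from the original file).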
    t = Timer()

    raw_img_types = set()
    num_total = 0
    for directory in directories:
        num_total_in_class = 0
        dest_subdir = os.path.join(dest_dir, os.path.basename(directory))
        for filename in os.listdir(directory):
            t.tic()
            suffix = filename.split('.')[-1]
            raw_img_types.add(suffix)
            path = os.path.join(directory, filename)
            try:
                img = cv2.imread(path)
                img_name = filename[:-len(suffix)] + dest_type
                dets, scores, classes = detector.detect(img, 0.7)
                if len(dets) == 0:
                    img_roi = img
                else:
                    x1, y1, x2, y2 = dets[0].astype(int)  # choose the highest score
                    img_roi = img[y1:y2, x1:x2]

                if img_size:
                    img_roi = cv2.resize(img_roi, img_size)
                path = os.path.join(dest_subdir, img_name)
                cv2.imwrite(path, img_roi)
                num_total_in_class += 1

                # # save all dets with different names
                # for i, det in enumerate(dets):
                #     det = tuple(int(x) for x in det)