Example #1
0
def plot_gt(img, bboxes, im_path, mode='xyxyxyxy'):
    """Draw ground-truth boxes on `img` and save the result under `temp/`.

    Args:
        img: BGR image (numpy array) to draw on.
        bboxes: array of boxes; 8-value quads, or rotated boxes when
            `mode` selects a rotated encoding.
        im_path: source image path; only the basename is used to build
            the output filename.
        mode: box encoding; 'xywha' and 'xyxya' are converted to quads
            via `rbox_2_quad`, anything else is assumed to already be quads.
    """
    if not os.path.exists('temp'):
        os.mkdir('temp')
    # Rotated-box encodings are converted to 4-corner quads before drawing.
    if mode in ('xywha', 'xyxya'):
        bboxes = rbox_2_quad(bboxes, mode=mode)
    for box in bboxes:
        img = cv2.polylines(cv2.UMat(img),
                            [box.reshape(-1, 2).astype(np.int32)], True,
                            (0, 0, 255), 2)
    # Write once after all boxes are drawn; the original called imwrite
    # inside the loop, re-encoding the image len(bboxes) times.  The
    # length guard preserves the original "no boxes -> no file" behavior.
    if len(bboxes):
        cv2.imwrite(
            os.path.join('temp', 'augment_%s' % (os.path.split(im_path)[1])),
            img)
    print('Check augmentation results in `temp` folder!!!')
Example #2
0
def icdar_evaluate(model,
                   target_size,
                   test_path,
                   dataset):
    """Run detection on an ICDAR test set and score it with the official
    evaluation script.

    Args:
        model: detector passed to `im_detect`.
        target_size: inference resize target(s) forwarded to `im_detect`.
        test_path: directory containing the test images.
        dataset: 'IC15' (quad outputs) or 'IC13' (axis-aligned outputs).

    Returns:
        (precision, recall, mAP, hmean); mAP is always 0 here, kept only
        so the return shape matches the other evaluate functions.

    Raises:
        NotImplementedError: for datasets other than IC13/IC15.
    """
    if dataset == 'IC15':
        output = './datasets/IC_eval/icdar15'
    elif dataset == 'IC13':
        output = './datasets/IC_eval/icdar13'
    else:
        raise NotImplementedError

    ims_dir = test_path
    out_dir = './temp'
    os.makedirs(out_dir, exist_ok=True)

    ims_list = [x for x in os.listdir(ims_dir) if is_image(x)]
    s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', 'Hmean')
    nt = 0  # total number of detections across all images
    for idx, im_name in enumerate(tqdm(ims_list, desc=s)):
        im_path = os.path.join(ims_dir, im_name)
        im = cv2.cvtColor(cv2.imread(im_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
        dets = im_detect(model, im, target_sizes=target_size)
        nt += len(dets)
        out_file = os.path.join(out_dir, 'res_' + im_name[:im_name.rindex('.')] + '.txt')
        with codecs.open(out_file, 'w', 'utf-8') as f:
            if dets.shape[0] == 0:
                continue  # leave an empty result file; `with` closes it
            if dataset == 'IC15':
                # IC15 format: 8 corner coordinates per line.
                res = sort_corners(rbox_2_quad(dets[:, 2:]))
                for k in range(dets.shape[0]):
                    f.write('{:.0f},{:.0f},{:.0f},{:.0f},{:.0f},{:.0f},{:.0f},{:.0f}\n'.format(
                        res[k, 0], res[k, 1], res[k, 2], res[k, 3],
                        res[k, 4], res[k, 5], res[k, 6], res[k, 7])
                    )
            elif dataset == 'IC13':
                # IC13 format: axis-aligned xmin,ymin,xmax,ymax per line.
                res = rbox_2_aabb(dets[:, 2:])
                for k in range(dets.shape[0]):
                    f.write('{:.0f},{:.0f},{:.0f},{:.0f}\n'.format(
                        res[k, 0], res[k, 1], res[k, 2], res[k, 3])
                    )

    zip_name = 'submit.zip'
    make_zip(out_dir, zip_name)
    shutil.move(os.path.join('./', zip_name), os.path.join(output, zip_name))
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    result = os.popen('cd {0} && python script.py -g=gt.zip -s=submit.zip '.format(output)).read()
    # The script prints comma-separated "name: value" pairs; split on ':'
    # and parse the numbers with float() instead of eval() (safer, and
    # avoids shadowing builtins).
    sep = result.split(':')
    p = float(sep[1][:sep[1].find(',')].strip())
    r = float(sep[2][:sep[2].find(',')].strip())
    hmean = float(sep[3][:sep[3].find(',')].strip())
    mAP = 0  # not computed for the ICDAR protocol; placeholder only
    # display result
    pf = '%20s' + '%10.3g' * 6  # print format
    print(pf % ('all', len(ims_list), nt, p, r, 0, hmean))
    return p, r, mAP, hmean
Example #3
0
def demo(args):
    """Run STELA on every image in `args.ims_dir` and show the detections
    in an OpenCV window (press any key to advance)."""
    model = STELA(backbone=args.backbone, num_classes=2)
    model.load_state_dict(torch.load(args.weights))
    model.eval()

    image_names = [name for name in os.listdir(args.ims_dir) if is_image(name)]

    for name in image_names:
        path = os.path.join(args.ims_dir, name)
        canvas = cv2.imread(path, cv2.IMREAD_COLOR)
        rgb = cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB)
        detections = im_detect(model, rgb, target_sizes=args.target_size)
        for det in detections:
            score = det[1]
            box = det[2:]
            if len(box) == 4:
                # Axis-aligned box: draw a red rectangle with its score.
                draw_caption(canvas, box, '{:1.3f}'.format(score))
                top_left = (int(box[0]), int(box[1]))
                bottom_right = (int(box[2]), int(box[3]))
                cv2.rectangle(canvas, top_left, bottom_right,
                              color=(0, 0, 255),
                              thickness=2)
            else:
                # Rotated box: convert to a 4-corner quad, draw in green.
                corners = np.array([rbox_2_quad(box[:5]).reshape((4, 2))],
                                   dtype=np.int32)
                cv2.drawContours(canvas, corners, 0,
                                 color=(0, 255, 0), thickness=2)
        # resize for better shown
        shown = cv2.resize(canvas, (800, 800), interpolation=cv2.INTER_LINEAR)
        cv2.imshow('Detection Results', shown)
        cv2.waitKey(0)
Example #4
0
def data_evaluate(model, target_size, test_path, conf=0.01, dataset=None):
    """Evaluate `model` on a test list and report mAP via `eval_mAP`.

    Args:
        model: detector passed to `im_detect`.
        target_size: inference resize target(s).
        test_path: path to the test list file (for VOC this is
            .../ImageSets/Main/test.txt; otherwise a file of image paths,
            one per line).
        conf: confidence threshold forwarded to `im_detect`.
        dataset: key into the `DATASETS` registry (e.g. 'VOC').

    Returns:
        (0, 0, mAP, 0) so callers can unpack the same shape as the other
        evaluate functions.
    """
    root_dir = 'datasets/evaluate'
    out_dir = os.path.join(root_dir, 'detection-results')
    # Start from a clean detection-results directory.
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    ds = DATASETS[dataset]()

    with open(test_path, 'r') as f:
        if dataset == 'VOC':
            # VOC lists bare image ids; rebuild the full JPEG paths.
            im_dir = test_path.replace('/ImageSets/Main/test.txt',
                                       '/JPEGImages')
            ims_list = [
                os.path.join(im_dir,
                             x.strip('\n') + '.jpg') for x in f.readlines()
            ]
        else:
            ims_list = [
                x.strip('\n') for x in f.readlines() if is_image(x.strip('\n'))
            ]

    # Fail fast if the ground-truth annotations are missing.  Checked once,
    # before the loop — the original re-ran this assert every iteration.
    # NOTE(review): hard-coded HRSC2016 path kept from the original; the
    # commented-out generic check used os.path.join(root_dir, 'ground-truth').
    assert len(os.listdir("./HRSC2016/Test/Annotations")
               ) != 0, 'No labels found in test/ground-truth!! '

    s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R',
                                 '[email protected]', 'Hmean')
    nt = 0  # total number of detections across all images
    for im_path in tqdm(ims_list, desc=s):
        im_name = os.path.split(im_path)[1]
        im = cv2.cvtColor(cv2.imread(im_path, cv2.IMREAD_COLOR),
                          cv2.COLOR_BGR2RGB)
        dets = im_detect(model, im, target_sizes=target_size, conf=conf)
        nt += len(dets)
        out_file = os.path.join(out_dir,
                                im_name[:im_name.rindex('.')] + '.txt')
        with codecs.open(out_file, 'w', 'utf-8') as f:
            if dets.shape[0] == 0:
                continue  # `with` closes the (empty) file
            res = sort_corners(rbox_2_quad(dets[:, 2:]))
            for k in range(dets.shape[0]):
                # class score x1 y1 x2 y2 x3 y3 x4 y4
                f.write(
                    '{} {:.2f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f}\n'
                    .format(ds.return_class(dets[k, 0]), dets[k, 1], res[k, 0],
                            res[k, 1], res[k, 2], res[k, 3], res[k, 4],
                            res[k, 5], res[k, 6], res[k, 7]))
    mAP = eval_mAP(root_dir, use_07_metric=False)
    # display result
    pf = '%20s' + '%10.3g' * 6  # print format
    print(pf % ('all', len(ims_list), nt, 0, 0, mAP, 0))
    return 0, 0, mAP, 0
Example #5
0
def dota_evaluate(model,
                  target_size,
                  test_path,
                  conf=0.01):
    """Evaluate on DOTA: detect on the pre-split image crops, merge the
    per-crop results back to full images, and compute task-1 mAP.

    Args:
        model: detector passed to `im_detect`.
        target_size: inference resize target(s).
        test_path: path to the original (un-split) evaluation data; the
            crops are expected in a sibling '<name>split/images' directory.
        conf: confidence threshold forwarded to `im_detect`.

    Returns:
        (0, 0, mAP, 0) to match the other evaluate functions.
    """
    root_data, evaldata = os.path.split(test_path)
    splitdata = evaldata + 'split'
    ims_dir = os.path.join(root_data, splitdata, 'images')
    root_dir = 'outputs'
    res_dir = os.path.join(root_dir, 'detections')          # per-crop detection results
    integrated_dir = os.path.join(root_dir, 'integrated')   # crop results merged into per-class txt files
    merged_dir = os.path.join(root_dir, 'merged')           # integrated results after NMS

    # Rebuild the output tree from scratch.  Removing root_dir also removes
    # the three sub-directories, so (unlike the original) no per-directory
    # existence checks are needed afterwards.
    if os.path.exists(root_dir):
        shutil.rmtree(root_dir)
    for d in (res_dir, integrated_dir, merged_dir):
        os.makedirs(d)

    ds = DOTADataset()
    ims_list = [x for x in os.listdir(ims_dir) if is_image(x)]
    s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', 'Hmean')
    nt = 0  # total number of detections across all crops
    for idx, im_name in enumerate(tqdm(ims_list, desc=s)):
        im_path = os.path.join(ims_dir, im_name)
        im = cv2.cvtColor(cv2.imread(im_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
        dets = im_detect(model, im, target_sizes=target_size, conf=conf)
        nt += len(dets)
        out_file = os.path.join(res_dir, im_name[:im_name.rindex('.')] + '.txt')
        with codecs.open(out_file, 'w', 'utf-8') as f:
            if dets.shape[0] == 0:
                continue  # `with` closes the (empty) file
            res = sort_corners(rbox_2_quad(dets[:, 2:]))
            for k in range(dets.shape[0]):
                # x1 y1 x2 y2 x3 y3 x4 y4 class crop-name score
                f.write('{:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {} {} {:.2f}\n'.format(
                    res[k, 0], res[k, 1], res[k, 2], res[k, 3],
                    res[k, 4], res[k, 5], res[k, 6], res[k, 7],
                    ds.return_class(dets[k, 0]), im_name[:-4], dets[k, 1],)
                )
    ResultMerge(res_dir, integrated_dir, merged_dir)
    # calc mAP over the merged full-image results
    mAP, classaps = task1_eval(merged_dir, test_path)
    # display result
    pf = '%20s' + '%10.3g' * 6  # print format
    print(pf % ('all', len(ims_list), nt, 0, 0, mAP, 0))
    return 0, 0, mAP, 0
Example #6
0
    def __call__(self, img, labels, mode=None):
        """Apply a random affine transform with probability `self.p`.

        For 'xywha' rotated-box labels, the boxes are converted to corner
        quads for the affine warp and converted back afterwards; any other
        mode feeds the labels straight through `random_affine`.
        """
        # Guard clause: skip augmentation (1 - p) of the time.
        if random.random() >= self.p:
            return img, labels
        if mode == 'xywha':
            quads = rbox_2_quad(labels, mode='xywha')
            img, quads = random_affine(img, quads,
                                       degree=self.degree,
                                       translate=self.translate,
                                       scale=self.scale,
                                       shear=self.shear)
            labels = quad_2_rbox(quads, mode='xywha')
        else:
            img, labels = random_affine(img, labels,
                                        degree=self.degree,
                                        translate=self.translate,
                                        scale=self.scale,
                                        shear=self.shear)
        return img, labels
Example #7
0
def demo(args):
    """Run the detection demo: draw and save results for ordinary
    datasets, or run the full split/merge pipeline for DOTA.

    Args:
        args: namespace with `hyp`, `dataset`, `backbone`, `weight`,
            `ims_dir` and `target_size` attributes.
    """
    hyps = hyp_parse(args.hyp)
    ds = DATASETS[args.dataset](level=1)
    model = RetinaNet(backbone=args.backbone, hyps=hyps)
    if args.weight.endswith('.pth'):
        chkpt = torch.load(args.weight)
        # Checkpoints may wrap the state dict under a 'model' key.
        if 'model' in chkpt.keys():
            model.load_state_dict(chkpt['model'])
        else:
            model.load_state_dict(chkpt)
        print('load weight from: {}'.format(args.weight))
    model.eval()

    t0 = time.time()
    if not args.dataset == 'DOTA':
        # Annotated images are written to 'outputs'; create it up front,
        # otherwise cv2.imwrite below fails silently.
        if not os.path.exists('outputs'):
            os.makedirs('outputs')
        ims_list = [x for x in os.listdir(args.ims_dir) if is_image(x)]
        for idx, im_name in enumerate(ims_list):
            s = ''
            t = time.time()
            im_path = os.path.join(args.ims_dir, im_name)
            # 1-based progress counter (the original printed 0/N..N-1/N).
            s += 'image %g/%g %s: ' % (idx + 1, len(ims_list), im_path)
            src = cv2.imread(im_path, cv2.IMREAD_COLOR)
            im = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
            cls_dets = im_detect(model, im, target_sizes=args.target_size)
            for j in range(len(cls_dets)):
                cls, scores = cls_dets[j, 0], cls_dets[j, 1]
                bbox = cls_dets[j, 2:]
                if len(bbox) == 4:
                    # Axis-aligned box: x1, y1, x2, y2.
                    draw_caption(src, bbox, '{:1.3f}'.format(scores))
                    cv2.rectangle(src, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])),
                                  color=(0, 0, 255),
                                  thickness=2)
                else:
                    # Rotated box: convert to a 4-corner quad and draw it.
                    pts = np.array([rbox_2_quad(bbox[:5]).reshape((4, 2))],
                                   dtype=np.int32)
                    cv2.drawContours(src,
                                     pts,
                                     0,
                                     color=(0, 255, 0),
                                     thickness=2)
                    put_label = True
                    if put_label:
                        # Filled label background plus class/score text
                        # just above the box's first corner.
                        label = ds.return_class(cls) + str(' %.2f' % scores)
                        fontScale = 0.7
                        font = cv2.FONT_HERSHEY_COMPLEX
                        thickness = 1
                        t_size = cv2.getTextSize(label,
                                                 font,
                                                 fontScale=fontScale,
                                                 thickness=thickness)[0]
                        c1 = tuple(bbox[:2].astype('int'))
                        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 5
                        cv2.rectangle(src, c1, c2, [0, 255, 0], -1)  # filled
                        cv2.putText(src,
                                    label, (c1[0], c1[1] - 5),
                                    font,
                                    fontScale, [0, 0, 0],
                                    thickness=thickness,
                                    lineType=cv2.LINE_AA)

            print('%sDone. (%.3fs) %d objs' %
                  (s, time.time() - t, len(cls_dets)))
            # save annotated image
            out_path = os.path.join('outputs', os.path.split(im_path)[1])
            cv2.imwrite(out_path, src)
    ## DOTA: detect on large images via the split/merge pipeline
    else:
        evaluate(args.target_size,
                 args.ims_dir,
                 'DOTA',
                 args.backbone,
                 args.weight,
                 hyps=hyps,
                 conf=0.05)
        if os.path.exists('outputs/dota_out'):
            shutil.rmtree('outputs/dota_out')
        os.mkdir('outputs/dota_out')
        # Merge per-crop detections into full-image results FIRST, then
        # remove the intermediate directories.  The original passed a
        # shell command string to Python's exec() (a guaranteed runtime
        # SyntaxError) and, had it worked, would have deleted
        # ResultMerge's inputs before merging.
        ResultMerge('outputs/detections', 'outputs/integrated',
                    'outputs/merged', 'outputs/dota_out')
        for d in ('outputs/detections', 'outputs/integrated',
                  'outputs/merged'):
            if os.path.exists(d):
                shutil.rmtree(d)
        img_path = os.path.join(args.ims_dir, 'images')
        label_path = 'outputs/dota_out'
        save_imgs = False
        if save_imgs:
            show_dota_results(img_path, label_path)
    print('Done. (%.3fs)' % (time.time() - t0))
Example #8
0
def evaluate(model, args):
    """Detect on an ICDAR/COCO test set, write per-image result files,
    zip them, and (for IC13/IC15 only) run the official evaluation script.

    Args:
        model: detector passed to `im_detect`.
        args: namespace with `dataset`, `test_dir`, `eval_dir` and
            `target_size` attributes.

    Raises:
        NotImplementedError: for an unknown dataset, or one that can
            only be evaluated online (ICDAR 2017 / COCO).
    """
    # Resolve the image directory and where the submission zip should go.
    if args.dataset == 'ICDAR 2013':
        ims_dir = os.path.join(args.test_dir, args.dataset, 'Task 1/Test/IMS')
        eval_dir = os.path.join(args.eval_dir, 'icdar13')
    elif args.dataset == 'ICDAR 2015':
        ims_dir = os.path.join(args.test_dir, args.dataset, 'Task 1/Test/IMS')
        eval_dir = os.path.join(args.eval_dir, 'icdar15')
    elif args.dataset == 'ICDAR 2017':
        ims_dir = os.path.join(args.test_dir, args.dataset, 'Task 1/Test/IMS')
        eval_dir = os.path.join(args.eval_dir, 'icdar17')
    elif args.dataset == 'COCO':
        ims_dir = os.path.join(args.test_dir, args.dataset, 'Test/IMS')
        eval_dir = os.path.join(args.eval_dir, 'coco')
    else:
        raise NotImplementedError
    #
    out_dir = './temp'
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    #
    ims_list = [x for x in os.listdir(ims_dir) if is_image(x)]
    for idx, im_name in enumerate(ims_list):
        im_path = os.path.join(ims_dir, im_name)
        im = cv2.cvtColor(cv2.imread(im_path, cv2.IMREAD_COLOR),
                          cv2.COLOR_BGR2RGB)
        _t.tic()
        dets = im_detect(model, im, target_sizes=args.target_size)
        print('\rim_detect: {:d}/{:d}, time: {:.3f}, '.format(
            idx + 1, len(ims_list), _t.toc()),
              end='')
        # Each protocol has its own result-filename convention.
        if args.dataset == 'ICDAR 2017':
            out_file = os.path.join(out_dir,
                                    im_name[:im_name.rindex('.')] + '.txt')
            out_file = out_file.replace('ts', 'res')
        elif args.dataset == 'COCO':
            # COCO names end in a numeric index; strip leading zeros.
            im_index = im_name[im_name.rindex('_') + 1:im_name.rindex('.')]
            out_file = os.path.join(out_dir,
                                    'res_' + str(int(im_index)) + '.txt')
        else:
            out_file = os.path.join(
                out_dir, 'res_' + im_name[:im_name.rindex('.')] + '.txt')
        with codecs.open(out_file, 'w', 'utf-8') as f:
            if dets.shape[0] == 0:
                continue  # leave an empty result file; `with` closes it
            if args.dataset == 'ICDAR 2013':
                # Axis-aligned: xmin,ymin,xmax,ymax.
                res = rbox_2_aabb(dets[:, 2:])
                for k in range(dets.shape[0]):
                    f.write('{:.0f},{:.0f},{:.0f},{:.0f}\n'.format(
                        res[k, 0], res[k, 1], res[k, 2], res[k, 3]))
            elif args.dataset == 'ICDAR 2015':
                # Quad: 8 corner coordinates.
                res = sort_corners(rbox_2_quad(dets[:, 2:]))
                for k in range(dets.shape[0]):
                    f.write(
                        '{:.0f},{:.0f},{:.0f},{:.0f},{:.0f},{:.0f},{:.0f},{:.0f}\n'
                        .format(res[k, 0], res[k, 1], res[k, 2], res[k, 3],
                                res[k, 4], res[k, 5], res[k, 6], res[k, 7]))
            elif args.dataset == 'ICDAR 2017':
                # Quad plus confidence score.
                res = sort_corners(rbox_2_quad(dets[:, 2:]))
                for k in range(dets.shape[0]):
                    f.write(
                        '{:.0f},{:.0f},{:.0f},{:.0f},{:.0f},{:.0f},{:.0f},{:.0f},{:.2f}\n'
                        .format(res[k, 0], res[k, 1], res[k, 2], res[k, 3],
                                res[k, 4], res[k, 5], res[k, 6], res[k, 7],
                                dets[k, 1]))
            elif args.dataset == 'COCO':
                # Axis-aligned plus confidence score.  (Made `elif` for
                # consistency with the mutually-exclusive branches above.)
                res = rbox_2_aabb(dets[:, 2:])
                for k in range(dets.shape[0]):
                    f.write('{:.0f},{:.0f},{:.0f},{:.0f},{:.2f}\n'.format(
                        res[k, 0], res[k, 1], res[k, 2], res[k, 3], dets[k,
                                                                         1]))
    # Package the per-image files into the protocol's submission zip.
    zip_name = 'submit.zip'
    make_zip(out_dir, zip_name)
    shutil.move(os.path.join('./', zip_name), os.path.join(eval_dir, zip_name))
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    if args.dataset == 'ICDAR 2013' or args.dataset == 'ICDAR 2015':
        # The official scorers for IC13/IC15 are Python 2 scripts.
        os.system(
            'cd {0} && python2 script.py -g=gt.zip -s=submit.zip '.format(
                eval_dir))
        print()
    else:
        # evaluated online
        raise NotImplementedError
Example #9
0
def demo(backbone='eb2',
         weights='weights/deploy_eb_ship_15.pth',
         ims_dir='sample',
         target_size=768):
    """Detect rotated boxes with STELA, classify each detected crop with
    an EfficientNet classifier, and write per-image result txt files.

    Args:
        backbone: STELA backbone name.
        weights: path to the detector checkpoint.
        ims_dir: directory of input images.
        target_size: inference resize target.

    Returns:
        (Recall, Precision, mAP) from `compute_acc` over the outputs.
    """
    model = STELA(backbone=backbone, num_classes=2)
    model.load_state_dict(torch.load(weights))
    # model.eval()

    # Crop classifier: EfficientNet with its head resized to class_num.
    classifier = EfficientNet.from_name(net_name)
    num_ftrs = classifier._fc.in_features
    classifier._fc = nn.Linear(num_ftrs, class_num)

    classifier = classifier.cuda()
    best_model_wts = 'dataset/weismoke/model/efficientnet-b0.pth'
    classifier.load_state_dict(torch.load(best_model_wts))

    ims_list = [x for x in os.listdir(ims_dir) if is_image(x)]
    import shutil
    # Start with an empty output directory (the original rmtree crashed
    # when 'output/' did not exist yet).
    if os.path.exists('output/'):
        shutil.rmtree('output/')
    os.mkdir('output/')
    for im_name in ims_list:
        im_path = os.path.join(ims_dir, im_name)
        src = cv2.imread(im_path, cv2.IMREAD_COLOR)
        im = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
        cls_dets = im_detect(model, im, target_sizes=target_size)
        # Mode 'w' truncates on open; the original opened in 'a' and
        # called truncate() at the end position, which was a no-op and
        # accumulated results across runs.
        with open('output/' + im_name[:-4] + '.txt', 'w') as fw:
            for j in range(len(cls_dets)):
                cls, scores = cls_dets[j, 0], cls_dets[j, 1]
                bbox = cls_dets[j, 2:]
                if len(bbox) == 4:
                    # Axis-aligned box: draw only; there is no quad/class
                    # to write for it.
                    draw_caption(src, bbox, '{:1.3f}'.format(scores))
                    cv2.rectangle(src, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])),
                                  color=(0, 0, 255),
                                  thickness=2)
                else:
                    pts = np.array([rbox_2_quad(bbox[:5]).reshape((4, 2))],
                                   dtype=np.int32)
                    cv2.drawContours(src, pts, 0, color=(0, 255, 0),
                                     thickness=2)
                    patch = crop_image(im, pts)
                    pred = classify(classifier, patch)
                    # Write inside this branch only: `pts` and `pred` are
                    # defined here.  The original wrote unconditionally and
                    # could hit a NameError (or reuse stale values) when a
                    # 4-point detection came first.
                    flat = pts.flatten()
                    fw.write(
                        str(flat[0]) + ' ' + str(flat[1]) + ' ' +
                        str(flat[2]) + ' ' + str(flat[3]) + ' ' +
                        str(flat[4]) + ' ' + str(flat[5]) + ' ' +
                        str(flat[6]) + ' ' + str(flat[7]) + ' ' +
                        classes[pred] + '\n')

        # resize for better display
        im = cv2.resize(src, (768, 768), interpolation=cv2.INTER_LINEAR)
        cv2.imwrite('output_img/' + im_name, im)

    train_img_dir = '/home/jd/projects/bifpn/sample_plane/'
    groundtruth_txt_dir = '/home/jd/projects/haha/chosename/val_plane_split/label_new/'
    detect_txt_dir = '/home/jd/projects/bifpn/output/'
    Recall, Precision, mAP = compute_acc(train_img_dir, groundtruth_txt_dir,
                                         detect_txt_dir)
    print('*******', Recall)
    return Recall, Precision, mAP