# Standard-library and third-party imports used below; project helpers and
# module-level globals (STELA, RetinaNet, im_detect, rbox_2_quad, draw_caption,
# is_image, hyp_parse, DATASETS, evaluate, ResultMerge, show_dota_results,
# EfficientNet, compute_acc, crop_image, classify, net_name, class_num, classes)
# are assumed to come from this repo's own modules.
import os
import shutil
import time

import cv2
import numpy as np
import torch
import torch.nn as nn


def demo(args):
    # model
    model = STELA(backbone=args.backbone, num_classes=2)
    model.load_state_dict(torch.load(args.weights))
    model.eval()
    ims_list = [x for x in os.listdir(args.ims_dir) if is_image(x)]
    for im_name in ims_list:
        im_path = os.path.join(args.ims_dir, im_name)
        src = cv2.imread(im_path, cv2.IMREAD_COLOR)
        im = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
        cls_dets = im_detect(model, im, target_sizes=args.target_size)
        for j in range(len(cls_dets)):
            cls, scores = cls_dets[j, 0], cls_dets[j, 1]
            bbox = cls_dets[j, 2:]
            if len(bbox) == 4:
                # horizontal box: (x1, y1, x2, y2)
                draw_caption(src, bbox, '{:1.3f}'.format(scores))
                cv2.rectangle(src, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),
                              color=(0, 0, 255), thickness=2)
            else:
                # rotated box: convert the 5-parameter rbox to quad corners
                pts = np.array([rbox_2_quad(bbox[:5]).reshape((4, 2))], dtype=np.int32)
                cv2.drawContours(src, pts, 0, color=(0, 255, 0), thickness=2)
                # display the original anchors
                # if len(bbox) > 5:
                #     pts = np.array([rbox_2_quad(bbox[5:]).reshape((4, 2))], dtype=np.int32)
                #     cv2.drawContours(src, pts, 0, color=(0, 0, 255), thickness=2)
        # resize for better display
        im = cv2.resize(src, (800, 800), interpolation=cv2.INTER_LINEAR)
        cv2.imshow('Detection Results', im)
        cv2.waitKey(0)
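# --- illustrative sketch (not part of the repo) -------------------------------
# `rbox_2_quad` above is a project helper whose implementation is not shown in
# this file. A minimal sketch of what it plausibly does, assuming the rotated
# box is encoded as (cx, cy, w, h, angle in degrees); the name and encoding here
# are assumptions, not the repo's actual code.
def rbox_2_quad_sketch(rbox):
    cx, cy, w, h, angle = [float(v) for v in rbox]
    # cv2.boxPoints takes ((cx, cy), (w, h), angle) and returns the 4 corners, shape (4, 2)
    corners = cv2.boxPoints(((cx, cy), (w, h), angle))
    return np.asarray(corners, dtype=np.float32).reshape(-1)  # x1 y1 x2 y2 x3 y3 x4 y4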
def demo(args):
    hyps = hyp_parse(args.hyp)
    ds = DATASETS[args.dataset](level=1)
    model = RetinaNet(backbone=args.backbone, hyps=hyps)
    if args.weight.endswith('.pth'):
        chkpt = torch.load(args.weight)
        # load model
        if 'model' in chkpt.keys():
            model.load_state_dict(chkpt['model'])
        else:
            model.load_state_dict(chkpt)
        print('load weight from: {}'.format(args.weight))
    model.eval()
    # cv2.imwrite below fails silently if the output directory is missing
    os.makedirs('outputs', exist_ok=True)
    t0 = time.time()
    if args.dataset != 'DOTA':
        ims_list = [x for x in os.listdir(args.ims_dir) if is_image(x)]
        for idx, im_name in enumerate(ims_list):
            s = ''
            t = time.time()
            im_path = os.path.join(args.ims_dir, im_name)
            s += 'image %g/%g %s: ' % (idx + 1, len(ims_list), im_path)
            src = cv2.imread(im_path, cv2.IMREAD_COLOR)
            im = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
            cls_dets = im_detect(model, im, target_sizes=args.target_size)
            for j in range(len(cls_dets)):
                cls, scores = cls_dets[j, 0], cls_dets[j, 1]
                bbox = cls_dets[j, 2:]
                if len(bbox) == 4:
                    # horizontal box: (x1, y1, x2, y2)
                    draw_caption(src, bbox, '{:1.3f}'.format(scores))
                    cv2.rectangle(src, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),
                                  color=(0, 0, 255), thickness=2)
                else:
                    # rotated box: convert the 5-parameter rbox to quad corners
                    pts = np.array([rbox_2_quad(bbox[:5]).reshape((4, 2))], dtype=np.int32)
                    cv2.drawContours(src, pts, 0, color=(0, 255, 0), thickness=2)
                    put_label = True
                    if put_label:
                        # draw a filled label box with the class name and score
                        label = ds.return_class(cls) + str(' %.2f' % scores)
                        fontScale = 0.7
                        font = cv2.FONT_HERSHEY_COMPLEX
                        thickness = 1
                        t_size = cv2.getTextSize(label, font, fontScale=fontScale, thickness=thickness)[0]
                        c1 = tuple(bbox[:2].astype('int'))
                        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 5
                        cv2.rectangle(src, c1, c2, [0, 255, 0], -1)  # filled
                        cv2.putText(src, label, (c1[0], c1[1] - 5), font, fontScale, [0, 0, 0],
                                    thickness=thickness, lineType=cv2.LINE_AA)
                    # display the original anchors
                    # if len(bbox) > 5:
                    #     pts = np.array([rbox_2_quad(bbox[5:]).reshape((4, 2))], dtype=np.int32)
                    #     cv2.drawContours(src, pts, 0, color=(0, 0, 255), thickness=2)
            # resize for better display
            # im = cv2.resize(src, (800, 800), interpolation=cv2.INTER_LINEAR)
            # cv2.imshow('Detection Results', im)
            # cv2.waitKey(0)
            print('%sDone. (%.3fs) %d objs' % (s, time.time() - t, len(cls_dets)))
            # save image
            out_path = os.path.join('outputs', os.path.split(im_path)[1])
            cv2.imwrite(out_path, src)
    # DOTA: detect on large images patch-wise, then merge the results
    else:
        evaluate(args.target_size, args.ims_dir, 'DOTA', args.backbone, args.weight,
                 hyps=hyps, conf=0.05)
        if os.path.exists('outputs/dota_out'):
            shutil.rmtree('outputs/dota_out')
        os.mkdir('outputs/dota_out')
        # the original passed this string to `exec`, which runs Python source,
        # not a shell command; os.system is what was intended
        os.system('cd outputs && rm -rf detections && rm -rf integrated && rm -rf merged')
        ResultMerge('outputs/detections', 'outputs/integrated', 'outputs/merged', 'outputs/dota_out')
        img_path = os.path.join(args.ims_dir, 'images')
        label_path = 'outputs/dota_out'
        save_imgs = False
        if save_imgs:
            show_dota_results(img_path, label_path)
    print('Done. (%.3fs)' % (time.time() - t0))
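# --- illustrative sketch (not part of the repo) -------------------------------
# A minimal sketch of how the argparse namespace consumed by demo(args) might be
# built. The flag names mirror the attributes read above (hyp, dataset, backbone,
# weight, ims_dir, target_size); the default values are assumptions.
def build_demo_args_sketch():
    import argparse
    parser = argparse.ArgumentParser(description='rotated-detection demo')
    parser.add_argument('--hyp', type=str, default='hyp.py', help='hyperparameter file for hyp_parse')
    parser.add_argument('--dataset', type=str, default='DOTA', help='key into DATASETS')
    parser.add_argument('--backbone', type=str, default='res50')
    parser.add_argument('--weight', type=str, default='weights/best.pth')
    parser.add_argument('--ims_dir', type=str, default='sample')
    parser.add_argument('--target_size', type=int, nargs='+', default=[800])
    return parser.parse_args()
# usage: demo(build_demo_args_sketch())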
def demo(backbone='eb2', weights='weights/deploy_eb_ship_15.pth', ims_dir='sample', target_size=768):
    # detection model
    model = STELA(backbone=backbone, num_classes=2)
    model.load_state_dict(torch.load(weights))
    model.eval()  # was commented out; required so dropout/batchnorm run in inference mode
    # print(model)

    # patch classifier: EfficientNet with the final fully-connected layer resized
    classifier = EfficientNet.from_name(net_name)
    num_ftrs = classifier._fc.in_features
    classifier._fc = nn.Linear(num_ftrs, class_num)
    classifier = classifier.cuda()
    best_model_wts = 'dataset/weismoke/model/efficientnet-b0.pth'
    classifier.load_state_dict(torch.load(best_model_wts))

    ims_list = [x for x in os.listdir(ims_dir) if is_image(x)]
    # start from empty output directories
    if os.path.exists('output/'):
        shutil.rmtree('output/')
    os.mkdir('output/')
    os.makedirs('output_img/', exist_ok=True)
    for im_name in ims_list:
        im_path = os.path.join(ims_dir, im_name)
        src = cv2.imread(im_path, cv2.IMREAD_COLOR)
        im = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
        start = time.time()  # time.clock() was removed in Python 3.8
        cls_dets = im_detect(model, im, target_sizes=target_size)
        end = time.time()
        # print('********time*********', end - start)
        # val = '/home/jd/projects/haha/chosename/val_plane_split/label_new/'
        """
        if len(cls_dets) == 0:
            print('*********no********', im_name)
            # image_path = os.path.join(img_path, name + ext)  # name of the sample image
            shutil.move(val + im_name[0:-4] + '.txt', 'hard')  # move this sample's label to 'hard'
            shutil.move(im_path, 'hard/')                      # move this sample image to 'hard/'
            continue
        """
        # 'w' mode (not 'a' plus truncate) so each run rewrites the per-image result file
        fw = open('output/' + im_name[:-4] + '.txt', 'w')
        for j in range(len(cls_dets)):
            cls, scores = cls_dets[j, 0], cls_dets[j, 1]
            bbox = cls_dets[j, 2:]
            if len(bbox) == 4:
                # horizontal box: (x1, y1, x2, y2)
                draw_caption(src, bbox, '{:1.3f}'.format(scores))
                cv2.rectangle(src, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),
                              color=(0, 0, 255), thickness=2)
            else:
                # rotated box: convert the 5-parameter rbox to quad corners
                pts = np.array([rbox_2_quad(bbox[:5]).reshape((4, 2))], dtype=np.int32)
                cv2.drawContours(src, pts, 0, color=(0, 255, 0), thickness=2)
                # display the original anchors
                # if len(bbox) > 5:
                #     pts = np.array([rbox_2_quad(bbox[5:]).reshape((4, 2))], dtype=np.int32)
                #     cv2.drawContours(src, pts, 0, color=(0, 0, 255), thickness=2)
                # classify the detected patch and write "x1 y1 ... x4 y4 class"
                patch = crop_image(im, pts)
                pred = classify(classifier, patch)
                coords = ' '.join(str(v) for v in pts.flatten()[:8])
                fw.write(coords + ' ' + classes[pred] + '\n')
        fw.close()
        # resize for better display
        im = cv2.resize(src, (768, 768), interpolation=cv2.INTER_LINEAR)
        cv2.imwrite('output_img/' + im_name, im)

    train_img_dir = '/home/jd/projects/bifpn/sample_plane/'
    groundtruth_txt_dir = '/home/jd/projects/haha/chosename/val_plane_split/label_new/'
    detect_txt_dir = '/home/jd/projects/bifpn/output/'
    Recall, Precision, mAP = compute_acc(train_img_dir, groundtruth_txt_dir, detect_txt_dir)
    print('Recall:', Recall)
    return Recall, Precision, mAP
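# --- illustrative sketches (not part of the repo) ------------------------------
# `crop_image` and `classify` used above are project helpers that are not defined
# in this file. Minimal sketches under the assumption that the patch is an RGB
# crop around the detected quad and the classifier is the EfficientNet head built
# above; the input size and normalization here are assumptions.
def crop_image_sketch(im, pts):
    # crop the axis-aligned bounding rectangle of the quad, clipped to the image
    quad = np.asarray(pts).reshape(-1, 2)
    x0, y0 = np.maximum(quad.min(axis=0), 0).astype(int)
    x1, y1 = quad.max(axis=0).astype(int)
    return im[y0:y1 + 1, x0:x1 + 1]

def classify_sketch(classifier, patch, input_size=224):
    # resize, scale to [0, 1], build an NCHW tensor, forward pass, argmax class id
    x = cv2.resize(patch, (input_size, input_size)).astype('float32') / 255.0
    x = torch.from_numpy(x).permute(2, 0, 1).unsqueeze(0).cuda()
    with torch.no_grad():
        logits = classifier(x)
    return int(logits.argmax(dim=1).item())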