Example #1
def evaluate(model, device, opt=None):

    if opt is None:

        # Fall back to a default set of evaluation options
        class opt:
            anno_json = '/workspace/GitHub/YOLO/coco_forTest/annotations/instances_val2017_64.json'
            pred_json = './YOLOv4_training_pred.json'
            img_path = '/workspace/GitHub/YOLO/coco_forTest/images/val2017_64/'
            img_size = 416
            batch_size = 4
            conf_thresh = 0.001
            nms_thresh = 0.6
            use_cuda = 1

    model.to(device)

    # Data Loader
    anno = COCO(opt.anno_json)
    val_set = COCOImage(opt.anno_json, opt.img_path, opt.img_size)
    val_loader = DataLoader(val_set,
                            opt.batch_size,
                            shuffle=True,
                            num_workers=0)

    # Accumulate results
    result_dict = {}
    print('START detection')
    for imgs, img_ids, sizes in tqdm(val_loader):
        # model
        boxes = do_detect(model,
                          imgs,
                          conf_thresh=opt.conf_thresh,
                          nms_thresh=opt.nms_thresh,
                          use_cuda=opt.use_cuda,
                          verbose=False)
        # process

        for img_id, box, H, W in zip(img_ids.numpy(), boxes, sizes[0].numpy(),
                                     sizes[1].numpy()):
            result_dict[img_id] = (img_id, box, H, W)

    # Transform results to COCO format
    print('Convert results to COCO format')
    total = []
    for img_id in tqdm(result_dict.keys()):
        one_result = coco_format(result_dict[img_id])
        total.extend(one_result)

    with open(opt.pred_json, 'w') as f:
        json.dump(total, f)

    # COCO Evaluation
    from pycocotools.cocoeval import COCOeval
    pred = anno.loadRes(opt.pred_json)  # init predictions api
    coco_eval = COCOeval(anno, pred, 'bbox')  # avoid shadowing the builtin eval
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()

    return coco_eval
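Example #1 references a coco_format helper that is not shown on this page. A minimal sketch of what it might look like is below, assuming do_detect returns normalized corner boxes of the form [x1, y1, x2, y2, obj_conf, cls_conf, cls_id] (the exact box layout varies between versions of the repository):

def coco_format(result):
    # Hypothetical helper: convert one image's detections into COCO detection dicts.
    # Assumes normalized corner boxes [x1, y1, x2, y2, obj_conf, cls_conf, cls_id].
    img_id, boxes, H, W = result
    out = []
    for box in boxes:
        x1, y1 = box[0] * W, box[1] * H
        x2, y2 = box[2] * W, box[3] * H
        out.append({
            "image_id": int(img_id),
            "category_id": int(box[6]),
            "bbox": [x1, y1, x2 - x1, y2 - y1],  # COCO bboxes are [x, y, w, h]
            "score": float(box[5]),
        })
    return out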
Example #2
def demo(model, config, anchors):
    if torch.cuda.is_available():
        model.cuda()
    class_names = load_class_names(config.namesfile)
    print("load val_label: {}".format(config.val_label))
    image_paths = get_first_10_imgs(config)
    print("Got images: ")
    print(image_paths)
    for i, image_path in enumerate(image_paths):
        img = cv2.imread(image_path)
        sized = cv2.resize(img, (config.width, config.height))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
        start = time.time()
        boxes = do_detect(model, sized, 0.4, 0.6, torch.cuda.is_available())
        finish = time.time()
        print('{}: Predicted in {:.04f} seconds.'.format(
            image_path, (finish - start)))

        plot_boxes_cv2(img,
                       boxes[0],
                       savename=os.path.join("demo_img",
                                             "predictions_{}.jpg".format(i)),
                       class_names=class_names)
Example #3
def test_model_time(opts, model, frame_size, annotations):
    output_file = open('profile_latency_{}.txt'.format(frame_size), 'w')
    print("{:<20s},{:<20s},{:<20s}".format(
        "ModelSize", "Batch", "InferenceTime"), file=output_file)
    images = annotations["images"]
    img_idx = 0
    total_images = len(images)
    for batch in range(1, opts.max_batch_size+1, 1):
        print("Processing batch size", batch)
        for _ in range(opts.total_iter):
            jpg_files = read_image_in_jpg(
                opts, frame_size, img_idx, batch, total_images, images)

            # input = np.random.rand(batch, frame_size, frame_size, 3)
            torch.cuda.synchronize(opts.gpu_id)
            start_time = timeit.default_timer()
            start_perf = time.perf_counter()
            inputs = read_jpg_in_numpy(jpg_files, frame_size)  # dtype is uint8
            assert inputs.shape[0] == batch and inputs.shape[1] == frame_size \
                and inputs.shape[2] == frame_size and inputs.shape[3] == 3
            with torch.no_grad():
                output = do_detect(model, inputs, 0.5, 0.4,
                                   use_cuda=(not opts.no_cuda), gpu_number=opts.gpu_id)

            torch.cuda.synchronize(opts.gpu_id)
            inference_time = (timeit.default_timer() - start_time) * 1000
            perf_time = (time.perf_counter() - start_perf) * 1000
            print("Processing batch size:", batch, inference_time, perf_time)
            print("{:<20d},{:<20d},{:<20.2f}".format(
                frame_size, batch, inference_time), file=output_file)
            # ms = torch.cuda.memory_summary(device=None, abbreviated=False)
            # stats = torch.cuda.memory_stats(device=None)
            # print(ms)
            # torch.cuda.empty_cache()
    output_file.close()
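Example #3 measures latency with host-side timers bracketed by torch.cuda.synchronize, which works but is easy to get wrong. An alternative sketch using CUDA events (standard torch.cuda API; the model and batch arguments here are placeholders):

import torch

def time_inference_ms(model, batch, iters=50, warmup=5):
    # Time GPU inference with CUDA events instead of host-side timers.
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    times = []
    with torch.no_grad():
        for i in range(warmup + iters):
            start.record()
            model(batch)
            end.record()
            torch.cuda.synchronize()  # wait for the kernels before reading the timer
            if i >= warmup:
                times.append(start.elapsed_time(end))  # milliseconds
    return sum(times) / len(times)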
Example #4
def main():
    import sys
    import cv2

    namesfile = None
    if len(sys.argv) == 6:
        n_classes = int(sys.argv[1])
        weightfile = sys.argv[2]
        imgfile = sys.argv[3]
        height = int(sys.argv[4])
        width = int(sys.argv[5])
    elif len(sys.argv) == 7:
        n_classes = int(sys.argv[1])
        weightfile = sys.argv[2]
        imgfile = sys.argv[3]
        height = int(sys.argv[4])
        width = int(sys.argv[5])
        namesfile = sys.argv[6]
    else:
        print('Usage: ')
        print('  python models.py num_classes weightfile imgfile height width [namesfile]')
        sys.exit(1)

    model = Yolov4(yolov4conv137weight=None, n_classes=n_classes, inference=True)

    pretrained_dict = torch.load(weightfile, map_location=torch.device('cuda'))
    model.load_state_dict(pretrained_dict)

    use_cuda = True
    if use_cuda:
        model.cuda()

    img = cv2.imread(imgfile)

    # The inference input size (e.g. 416*416) does not have to match the training size;
    # training could have used 608*608 or other sizes.
    # Valid inference sizes:
    #   Height in {320, 416, 512, 608, ... 320 + 96 * n}
    #   Width in {320, 416, 512, 608, ... 320 + 96 * m}
    sized = cv2.resize(img, (width, height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    from tool.utils import load_class_names, plot_boxes_cv2
    from tool.torch_utils import do_detect

    for i in range(2):  # run twice as a speed check;
                        # the first iteration is usually slower (warm-up)
        boxes = do_detect(model, sized, 0.4, 0.6, use_cuda)

    if namesfile is None:
        if n_classes == 20:
            namesfile = 'data/voc.names'
        elif n_classes == 80:
            namesfile = 'data/coco.names'
        elif n_classes == 1:
            namesfile = 'data/smoke.names'
        else:
            print("please provide a namesfile")

    class_names = load_class_names(namesfile)
    plot_boxes_cv2(img, boxes[0], 'predictions.jpg', class_names)
Example #5
    def __call__(self, img):
        width, height = 416, 416
        sized = cv2.resize(img, (width, height))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

        boxes = do_detect(self.model, sized, 0.4, 0.6, self.use_cuda)
        return boxes[0]
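Example #5 shows only a __call__ method; its enclosing class is not included on this page. A minimal wrapper that would make it usable might look like this (the class name and constructor are assumptions):

class Yolov4Wrapper:
    # Hypothetical enclosing class for the __call__ above.
    def __init__(self, model, use_cuda=True):
        self.model = model.cuda() if use_cuda else model
        self.use_cuda = use_cuda

With this, boxes = Yolov4Wrapper(yolo_model)(frame) returns the detections for one frame.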
Example #6
def get_predictions_yolov4(imgfile):
    namesfile = None
    n_classes = 80
    weightfile = 'yolov4.pth'
    height = 608
    width = 608

    model = Yolov4(yolov4conv137weight=None,
                   n_classes=n_classes,
                   inference=True)

    pretrained_dict = torch.load(weightfile, map_location=torch.device('cuda'))
    model.load_state_dict(pretrained_dict)

    use_cuda = True
    if use_cuda:
        model.cuda()

    img = cv2.imread(imgfile)
    W = img.shape[1]
    H = img.shape[0]
    # The inference input size (e.g. 416*416) does not have to match the training size;
    # training could have used 608*608 or other sizes.
    # Valid inference sizes:
    #   Height in {320, 416, 512, 608, ... 320 + 96 * n}
    #   Width in {320, 416, 512, 608, ... 320 + 96 * m}
    sized = cv2.resize(img, (width, height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    preds = do_detect(model, sized, 0.4, 0.6, use_cuda)
    print(preds)
    if namesfile is None:
        if n_classes == 20:
            namesfile = 'data/voc.names'
        elif n_classes == 80:
            namesfile = 'data/coco.names'
        else:
            print("please provide a namesfile")

    class_names = load_class_names(namesfile)
    boxes = []
    classes = []
    scores = []
    for r in preds[0]:

        box = r[0:4]
        x1 = int((box[0] - box[2] / 2.0) * W)
        y1 = int((box[1] - box[3] / 2.0) * H)
        x2 = int((box[0] + box[2] / 2.0) * W)
        y2 = int((box[1] + box[3] / 2.0) * H)
        score = r[5]
        c = class_names[r[6]]
        boxes.append((x1, y1, x2, y2))
        classes.append(c)
        scores.append(score)
    return boxes, classes, scores
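Note that Example #6 treats box[0:4] as a normalized center box (cx, cy, w, h), while other examples on this page (e.g. #25) treat the same slice as normalized corners (x1, y1, x2, y2); the convention depends on the do_detect version in use. For the center convention, the conversion Example #6 performs inline can be factored into a small helper (a sketch, not part of the original code):

def cxcywh_to_pixel_xyxy(box, W, H):
    # Convert one normalized [cx, cy, w, h, ...] box to integer pixel corners.
    cx, cy, w, h = box[0], box[1], box[2], box[3]
    x1 = int((cx - w / 2.0) * W)
    y1 = int((cy - h / 2.0) * H)
    x2 = int((cx + w / 2.0) * W)
    y2 = int((cy + h / 2.0) * H)
    return x1, y1, x2, y2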
Example #7
    def detect(self,
               cv_image,
               IS_BGR=True):  # cv_image <- cv2.imread / numpy image (BGR)
        resized_image = cv2.resize(cv_image, (416, 416))
        if IS_BGR:
            resized_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB)

        boxes = do_detect(self.model, resized_image, 0.4, 0.6, self.use_cuda)

        return boxes
Example #8
    def predict(self, image, session_id):
        ''' predicts objects on image and return an image with bounding box on it '''

        out_path = f'./out/{session_id}.jpg'
        t1 = time.time()
        image_resized_arr = self.preprocess(image)
        boxes = do_detect(self.model, image_resized_arr, 0.4, 0.6,
                          self.use_cuda)
        plot_boxes_cv2(image_resized_arr, boxes[0], out_path, self.class_names)
        t2 = time.time()

        print("----------")
        print(f"total latency: {str((t2-t1)*1000)} ms")
        print("----------")

        return out_path
Example #9
    def object_detection(self, frame, ret):
        if not ret:
            print(ret)
            return 'no_frame'
        else:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            sized = cv2.resize(frame, (512, 512))

            # Run detection with do_detect(); boxes[0] contains the box coordinates,
            # confidence level, and class number for each detection
            # start = time.time()
            boxes = do_detect(self.m, sized, 0.4, 0.6, True)
            # print(type(boxes), len(boxes[0]), ': number of detected cars', boxes[0])
            # finish = time.time()
            # print('yolo elapsed in: %f sec' % (finish - start))
            return boxes[0]
Example #10
def detect(imgfile):
    namesfile = 'data/coco.names'
    n_classes = 80
    weightfile = './yolov4.pth'
    height = 512
    width = 512

    model = Yolov4(yolov4conv137weight=None, n_classes=n_classes, inference=True)

    pretrained_dict = torch.load(weightfile, map_location=torch.device('cuda'))
    model.load_state_dict(pretrained_dict)

    use_cuda = True
    if use_cuda:
        model.cuda()

    img = cv2.imread(imgfile)

    # The inference input size (e.g. 416*416) does not have to match the training size;
    # training could have used 608*608 or other sizes.
    # Valid inference sizes:
    #   Height in {320, 416, 512, 608, ... 320 + 96 * n}
    #   Width in {320, 416, 512, 608, ... 320 + 96 * m}
    sized = cv2.resize(img, (width, height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    from tool.utils import load_class_names, plot_boxes_cv2
    from tool.torch_utils import do_detect

    for i in range(2):  # run twice as a speed check;
                        # the first iteration is usually slower (warm-up)
        boxes = do_detect(model, sized, 0.4, 0.6, use_cuda)  # returns box positions and class names

    class_names = load_class_names(namesfile)
    plot_boxes_cv2(img, boxes[0], 'predictions.jpg', class_names)
    return boxes[0]
Example #11
def detect_and_draw(model, image_path, conf_thresh=0.5, show=False, **kwargs):
    """
    """
    model.eval()
    image = cv2.imread(image_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    resized = cv2.resize(image, (Cfg.width, Cfg.height))

    detected_boxes = do_detect(model, resized, conf_thresh, 0.5, False)[0]
    detected_acne_num = len(detected_boxes)
    img_with_boxes, detected_boxes, detected_scores = plot_boxes_cv2(
        image, detected_boxes)
    detected_severity = to_severity(detected_acne_num)

    if show:
        title = kwargs.get("title", None)
        fig, (ax1, ax2) = plt.subplots(1,
                                       2,
                                       figsize=kwargs.get("figsize", (30, 18)))
        ax1.imshow(img_with_boxes)
        ax2.imshow(image)
        if title:
            ax1.set_title(title + f'\ndetected number {detected_acne_num}',
                          y=-0.1,
                          fontsize=20)
            ax1.axis("off")
            ax2.set_title('original image', y=-0.1, fontsize=20)
            ax2.axis("off")
        if kwargs.get("savefig", False):
            fmt = kwargs.get("fmt", "png")
            plt.savefig(f"./fig/{title or str(int(time.time()))}.{fmt}",
                        bbox_inches='tight',
                        transparent=True)
        plt.show()

    return detected_acne_num, detected_boxes, detected_scores, detected_severity
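A typical call to Example #11's detect_and_draw, exercising its keyword arguments (the image path and title below are placeholders):

num, boxes, scores, severity = detect_and_draw(
    model,
    "data/sample_face.jpg",   # hypothetical image path
    conf_thresh=0.5,
    show=True,
    title="patient 001",
    figsize=(30, 18),
    savefig=True,
    fmt="png",
)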
Example #12
    width, height = (darknet_model.width, darknet_model.height)
    darknet_model.load_weights(weightfile)
    if use_cuda:
        darknet_model.cuda()
    class_names = load_class_names(namesfile)

    t1 = time.time()
    total_time = round(t1 - t0, 2)
    print("1 - Initiated DepthModel. -- {} minutes {} seconds".format(
        total_time // 60, total_time % 60))

    print("====================================")
    print("====================================")
    print("====================================")
    # Inference
    img = cv2.imread(imgfile)
    sized = cv2.resize(img, (width, height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
    start = time.time()
    boxes = do_detect(darknet_model, sized, 0.4, 0.6, use_cuda)
    finish = time.time()
    print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))
    print(boxes)
    plot_boxes_cv2(img,
                   boxes[0],
                   savename='demo_yolo.jpg',
                   class_names=class_names)
    print("====================================")
    print("====================================")
    print("====================================")
Example #13
    # Training size could be 608*608 or other sizes
    # Valid inference sizes:
    #   Height in {320, 416, 512, 608, ... 320 + 96 * n}
    #   Width in {320, 416, 512, 608, ... 320 + 96 * m}
    sized = cv2.resize(img, (width, height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    from tool.utils import load_class_names, plot_boxes_cv2
    from tool.torch_utils import do_detect
    conf_thresh = 0.01
    nms_thresh = 0.01
    for i in range(2):  # run twice as a speed check;
        # the first iteration is usually slower (warm-up)
        boxes = do_detect(model,
                          sized,
                          conf_thresh,
                          nms_thresh,
                          use_cuda,
                          device=device)

    img_box = []
    scores = []
    labels = []
    width = img.shape[1]
    height = img.shape[0]
    for i in range(len(boxes[0])):
        # (x1, y1, x2, y2) = (left, top, right, bottom), normalized to [0, 1]
        box = boxes[0][i]
        x1 = box[0] * width
        y1 = box[1] * height
        x2 = box[2] * width
        y2 = box[3] * height
Example #14
    if use_cuda:
        model.cuda()

    img = cv2.imread(imgfile)

    # The inference input size (e.g. 416*416) does not have to match the training size;
    # training could have used 608*608 or other sizes.
    # Valid inference sizes:
    #   Height in {320, 416, 512, 608, ... 320 + 96 * n}
    #   Width in {320, 416, 512, 608, ... 320 + 96 * m}
    sized = cv2.resize(img, (width, height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    from tool.utils import load_class_names, plot_boxes_cv2
    from tool.torch_utils import do_detect

    for i in range(2):  # run twice as a speed check;
                        # the first iteration is usually slower (warm-up)
        boxes = do_detect(model, sized, 0.4, 0.6, use_cuda)

    if namesfile is None:
        if n_classes == 20:
            namesfile = 'data/voc.names'
        elif n_classes == 80:
            namesfile = 'data/coco.names'
        else:
            print("please provide a namesfile")

    class_names = load_class_names(namesfile)
    plot_boxes_cv2(img, boxes[0], 'predictions.jpg', class_names)
Example #15
        print('  python models.py num_classes weightfile imgfile namefile')

    model = Yolov4(n_classes=n_classes)

    pretrained_dict = torch.load(weightfile, map_location=torch.device('cpu'))
    model.load_state_dict(pretrained_dict)

    if namesfile is None:
        if n_classes == 20:
            namesfile = 'data/voc.names'
        elif n_classes == 80:
            namesfile = 'data/coco.names'
        else:
            print("please provide a namesfile")

    use_cuda = 0
    if use_cuda:
        model.cuda()

    img = cv2.imread(imgfile)
    sized = cv2.resize(img, (608, 608))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    from tool.utils import load_class_names, plot_boxes_cv2
    from tool.torch_utils import do_detect

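    # Note: this example appears to target an older do_detect variant whose signature
    # includes n_classes, i.e. do_detect(model, img, conf_thresh, n_classes, nms_thresh,
    # use_cuda), and which returns a flat box list rather than one list per image.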
    boxes = do_detect(model, sized, 0.5, n_classes, 0.4, use_cuda)

    class_names = load_class_names(namesfile)
    plot_boxes_cv2(img, boxes, 'predictions.jpg', class_names)
Example #16
def main():
    n_classes = 18
    weightfile = r'D:\DeepBlueProjects\chem-lab\pytorch-YOLOv4-master\checkpoints\Yolov4_epoch6.pth'
    imgfile = r'D:\Data\CMS01_single-end\val\JPEGImages\frontfront_0518.jpg'
    base_dir = r'D:\Data\chem-yolov4\eval-dataset\top'
    gt_path = os.path.join(base_dir, 'gt.json')
    name_id_path = os.path.join(base_dir, 'name_id.json')
    with open(gt_path, 'r') as f:
        gt_dict = json.load(f)
    with open(name_id_path, 'r') as f:
        name_id_dict = json.load(f)

    input_size = (960, 960)

    model = Yolov4(yolov4conv137weight=None,
                   n_classes=n_classes,
                   inference=True)
    pretrained_dict = torch.load(weightfile, map_location=torch.device('cuda'))

    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in pretrained_dict.items():
        name = k[7:]  # strip the leading 'module.' prefix (7 chars) left by DataParallel
        new_state_dict[name] = v  # keep the value under the stripped key

    model.load_state_dict(new_state_dict)

    use_cuda = True
    if use_cuda:
        model.cuda()

    data_txt = os.path.join(base_dir, 'data.txt')
    save_dir = os.path.join(base_dir, 'JPEGImages_pred')
    result_dir = os.path.join(base_dir, 'result_txt')
    with open(data_txt, 'r') as f:
        imgfiles = f.readlines()

    box_list = []
    for imgfile in imgfiles:

        img = cv2.imread(imgfile.strip('\n'))
        img_h, img_w, _ = img.shape

        img_name = imgfile.split('\\')[-1].strip('\n')
        img_id = name_id_dict[img_name]
        result_txt = os.path.join(result_dir, img_name[:-4] + '.txt')
        result_f = open(result_txt, 'w')
        # The inference input size (e.g. 416*416) does not have to match the training size;
        # training could have used 608*608 or other sizes.
        # Valid inference sizes:
        #   Height in {320, 416, 512, 608, ... 320 + 96 * n}
        #   Width in {320, 416, 512, 608, ... 320 + 96 * m}
        sized = cv2.resize(img, (input_size[1], input_size[0]))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

        # for i in range(2):  # This 'for' loop is for speed check
        # Because the first iteration is usually longer
        boxes = do_detect(model, sized, 0.01, 0.3, use_cuda)

        for box in boxes[0]:
            x1 = int((box[0] - box[2] / 2.0) * img_w)
            y1 = int((box[1] - box[3] / 2.0) * img_h)
            x2 = int((box[0] + box[2] / 2.0) * img_w)
            y2 = int((box[1] + box[3] / 2.0) * img_h)
            w = x2 - x1
            h = y2 - y1

            if len(box) >= 7:
                cls_conf = box[5]
                cls_id = box[6]
                box_list.append({
                    "image_id": img_id,
                    "category_id": int(cls_id),
                    "bbox": [x1, y1, w, h],
                    "score": float(cls_conf)
                })
                string = ','.join([
                    str(cls_id),
                    str(x1),
                    str(y1),
                    str(x2),
                    str(y2),
                    str(cls_conf)
                ]) + '\n'
                result_f.write(string)

                if cls_conf > 0.3:
                    cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)),
                                  (255, 0, 255), 1)
                    cv2.putText(img, str(cls_id), (int(x1 + 10), int(y1 + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 255),
                                1)
                    cv2.putText(img, str(round(cls_conf, 3)),
                                (int(x1 + 30), int(y1 + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (20, 240, 0), 1)
            else:
                print('unexpected box format:', box)

        result_f.close()
        namesfile = 'data/chem.names'
        class_names = load_class_names(namesfile)
        save_name = os.path.join(save_dir, img_name)
        plot_boxes_cv2(img, boxes[0], save_name, class_names)
        # cv2.imshow('result', img)
        # cv2.waitKey(0)

    # cv2.destroyAllWindows()

    info, map_iou0_5 = get_coco_mAP(gt_dict, box_list)
    # print("---base_eval---epoch%d"%real_epoch)
    print(info)
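get_coco_mAP is not shown on this page. Assuming gt_dict is a COCO-style annotation dict and box_list holds COCO detection dicts (as built above), a pycocotools-based sketch might be:

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

def get_coco_mAP(gt_dict, box_list):
    # Hypothetical implementation: evaluate detections against in-memory ground truth.
    coco_gt = COCO()
    coco_gt.dataset = gt_dict
    coco_gt.createIndex()
    coco_dt = coco_gt.loadRes(box_list)  # loadRes also accepts a list of dicts
    coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    # stats[0] is AP@[0.5:0.95], stats[1] is AP@0.5
    return coco_eval.stats, coco_eval.stats[1]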
Example #17
        img = cv2.imread(imgfile)

        # The inference input size (e.g. 416*416) does not have to match the training size;
        # training could have used 608*608 or other sizes.
        # Valid inference sizes:
        #   Height in {320, 416, 512, 608, ... 320 + 96 * n}
        #   Width in {320, 416, 512, 608, ... 320 + 96 * m}
        sized = cv2.resize(img, (width, height))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

        from tool.utils import load_class_names, plot_boxes_cv2
        from tool.torch_utils import do_detect

        for i in range(2):  # run twice as a speed check;
            # the first iteration is usually slower (warm-up)
            boxes, period = do_detect(model, sized, 0.4, 0.6, use_cuda)

        # print(len(boxes))
        # print(boxes)
        # print(len(box))
        # print(box)
        box = boxes[0]
        pred_fname = 'predictions/mAp512_30/' + imgfile[imgfile.rfind('\\') +
                                                        1:]
        output_fname = 'predictions/mAp512_30/' + imgfile[
            imgfile.rfind('\\') + 1:imgfile.rfind('.png')] + '.txt'

        with open(output_fname, 'w') as outfile:
            if len(box) >= 1:
                class_names = load_class_names(namesfile)
                plot_boxes_cv2(img, box, pred_fname, class_names)
Example #18
    width = 608

    model = Yolov4(yolov4conv137weight=None,
                   n_classes=n_classes,
                   inference=True)

    pretrained_dict = torch.load(weightfile, map_location=torch.device('cuda'))
    model.load_state_dict(pretrained_dict)

    use_cuda = True
    if use_cuda:
        model.cuda()

    # img = cv2.imread(imgfile)
    ##############################################################################################
    namesfile = 'data/coco.names'
    class_names = load_class_names(namesfile)
    trans = trans_thread()
    trans.start()
    while True:
        if trans.inQue.empty() or trans.img.empty():
            continue
        trans.lock.acquire()
        mid = trans.inQue.get()
        img = trans.img.get()
        trans.lock.release()
        boxes = do_detect(model, [(mid[0], mid[1]), mid[2], mid[3]], img, 0.4,
                          0.6, use_cuda)
        frame = plot_boxes_cv2(img, boxes[0], 'predictions.jpg', class_names)
        trans.fillData(frame)
Example #19
    args = get_args()

    model = Yolov4(yolov4conv137weight=None,
                   n_classes=args.num_classes,
                   inference=True)

    pretrained_dict = torch.load(args.weightfile,
                                 map_location=torch.device('cuda'))
    model.load_state_dict(pretrained_dict)

    use_cuda = True
    if use_cuda:
        model.cuda()

    ## Warming up the model
    do_detect(model, np.zeros((args.height, args.width, 3), np.uint8), .4, .6,
              use_cuda)

    if os.path.isdir(args.input):

        for parent, _, files_list in os.walk(args.input):
            parent_path = os.path.relpath(parent, args.input)
            if parent_path.startswith('.'):
                parent_path = parent_path[1:]

            for file in files_list:
                img_path = os.path.join(parent, file)
                rel_img_path = os.path.join(parent_path, file)

                img = cv2.imread(img_path)

                sized = cv2.resize(img, (args.width, args.height))
Example #20
    img = cv2.imread(imgfile)

    # The inference input size (e.g. 416*416) does not have to match the training size;
    # training could have used 608*608 or other sizes.
    # Valid inference sizes:
    #   Height in {320, 416, 512, 608, ... 320 + 96 * n}
    #   Width in {320, 416, 512, 608, ... 320 + 96 * m}
    sized = cv2.resize(img, (width, height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    from tool.utils import load_class_names, plot_boxes_cv2
    from tool.torch_utils import do_detect

    for i in range(2):  # run twice as a speed check;
                        # the first iteration is usually slower (warm-up)
        boxes = do_detect(model, sized, 0.7, 0.3, use_cuda)

    if namesfile is None:
        if n_classes == 20:
            namesfile = 'data/voc.names'
        elif n_classes == 80:
            namesfile = 'data/coco.names'
        elif n_classes == 18:
            namesfile = 'data/chem.names'
        else:
            print("please provide a namesfile")

    class_names = load_class_names(namesfile)
    plot_boxes_cv2(img, boxes[0], 'predictions.jpg', class_names)
Example #21
def test(model, annotations, dataset_dir, gt_annotations_path, visualize=True):
    if not annotations["images"]:
        print("Annotations do not have 'images' key")
        return
    images = annotations["images"]
    # images = images[:10]
    resFile = 'data/coco_val_outputs.json'

    if torch.cuda.is_available():
        use_cuda = 1
    else:
        use_cuda = 0

    # do one forward pass first to circumvent cold start
    throwaway_image = Image.open('data/dog.jpg').convert('RGB').resize(
        (model.width, model.height))
    do_detect(model, throwaway_image, 0.5, 0.4, use_cuda=use_cuda)
    boxes_json = []

    for i, image_annotation in enumerate(images):
        logging.info("currently on image: {}/{}".format(i + 1, len(images)))
        image_file_name = image_annotation["file_name"]

        image_id = image_annotation["id"]
        image_height = image_annotation["height"]
        image_width = image_annotation["width"]

        # open and resize each image first
        img = Image.open(os.path.join(dataset_dir, image_file_name))
        img = np.array(img, dtype=np.uint8)
        # sized = img
        sized = cv2.resize(img, (model.width, model.height))
        # sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

        if use_cuda:
            model.cuda()

        start = time.time()
        boxes = do_detect(model, sized, 0.5, 0.4, use_cuda=use_cuda)
        boxes = boxes[0]
        finish = time.time()
        if isinstance(boxes, list):
            for box in boxes[:100]:
                box_json = {}
                category_id = box[-1] + 1
                score = box[-2]
                bbox_normalized = box[:4]
                box_json["category_id"] = int(category_id)
                box_json["image_id"] = int(image_id)

                x1 = float(box[0]) * image_width
                y1 = float(box[1]) * image_height
                x2 = float(box[2]) * image_width
                y2 = float(box[3]) * image_height
                bbox = [x1, y1, x2 - x1, y2 - y1]

                box_json["bbox_normalized"] = list(
                    map(lambda x: round(float(x), 2), bbox_normalized))
                box_json["bbox"] = bbox
                box_json["score"] = round(float(score), 2)
                box_json["timing"] = float(finish - start)
                boxes_json.append(box_json)
                # print("see box_json: ", box_json)
                with open(resFile, 'w') as outfile:
                    json.dump(boxes_json,
                              outfile,
                              default=myconverter,
                              indent=4)
        else:
            print(
                "warning: output from model after postprocessing is not a list, ignoring"
            )
            return

    with open(resFile, 'w') as outfile:
        json.dump(boxes_json, outfile, default=myconverter)

    return evaluate_on_coco(dataset_dir,
                            gt_annotations_path,
                            resFile,
                            visualise_images=visualize)
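Examples #21 and #28 pass a myconverter default to json.dump; its definition is not included here. A typical converter coerces numpy types that json cannot serialize, so a sketch under that assumption:

import numpy as np

def myconverter(obj):
    # Hypothetical json.dump fallback for numpy scalars and arrays.
    if isinstance(obj, np.integer):
        return int(obj)
    if isinstance(obj, np.floating):
        return float(obj)
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    raise TypeError("Object of type {} is not JSON serializable".format(type(obj)))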
Example #22
def eval_mskim():
    import os
    import cv2
    import pickle
    import argparse
    import numpy as np

    from tool.utils import load_class_names, plot_boxes_cv2
    from tool.torch_utils import do_detect

    parser = argparse.ArgumentParser()
    parser.add_argument('--n_classes', type=int, default=1)
    parser.add_argument('--weightfile', default='checkpoints/Yolov4_epoch10.pth')
    parser.add_argument('--imgfile', default='data/mskim/test90_10percent.txt')
    parser.add_argument('--size', type=int, nargs='+', default=[608, 608])
    parser.add_argument('--namesfile', default='data/smoke.names')
    parser.add_argument('--nms_th', type=float, default=0.6)
    parser.add_argument('--conf_th', type=float, default=0.4)
    parser.add_argument('--save_path', default='predictions/')
    parser.add_argument('--save_image', default=False, action='store_true')
    args = parser.parse_args()

    target_h, target_w = args.size

    model = Yolov4(yolov4conv137weight=None, n_classes=args.n_classes, inference=True)

    pretrained_dict = torch.load(args.weightfile, map_location=torch.device('cuda'))
    model.load_state_dict(pretrained_dict)

    use_cuda = True
    if use_cuda:
        model.cuda()

    with open(args.imgfile, 'r') as f:
        img_file_lst = [line.strip() for line in f.readlines()]
    common_path = os.path.commonpath(img_file_lst)

    prediction_results = {}
    class_names = load_class_names(args.namesfile)
    for img_file in img_file_lst:
        # origin image
        img = cv2.imread(img_file)
        
        origin_h, origin_w = img.shape[:2]

        sized = cv2.resize(img, (target_w, target_h))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

        # argument of do_detect: (model, img, conf_thresh, nms_thresh, use_cuda=1)
        boxes = do_detect(model, sized, args.conf_th, args.nms_th, use_cuda)
        boxes_np = np.asarray(boxes)  # shape: (1, 0) or (1, N, 7) [7: xmin, ymin, xmax, ymax, obj_conf, cls_conf, cls_id]

        if boxes_np.shape[1] == 0:
            pred = np.zeros((1, 5))
        else:
            # box coordinates are normalized to 0 ~ 1
            pred = boxes_np[0, :, :-1]
            # resize to origin image scale
            pred[:, 0] = pred[:, 0] * origin_w
            pred[:, 1] = pred[:, 1] * origin_h
            pred[:, 2] = pred[:, 2] * origin_w
            pred[:, 3] = pred[:, 3] * origin_h
        prediction_results[os.path.relpath(img_file, common_path)] = pred

        # save prediction results
        if args.save_image:
            filename = os.path.basename(img_file)
            dirname = os.path.dirname(img_file)
            rel_dir = os.path.relpath(dirname, common_path)
            _save_path = os.path.join(args.save_path, rel_dir)
            os.makedirs(_save_path, exist_ok=True)
            plot_boxes_cv2(img, boxes[0], os.path.join(_save_path, filename), class_names)

    with open(os.path.join(args.save_path, 'prediction_results.pickle'), 'wb') as f:
        pickle.dump(prediction_results, f)
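The pickle written by Example #22 maps each image's relative path to an array of pixel-space boxes with confidence scores. Reading it back is straightforward (the save_path below is assumed from the defaults above):

import os
import pickle

with open(os.path.join('predictions/', 'prediction_results.pickle'), 'rb') as f:
    prediction_results = pickle.load(f)

for rel_path, pred in prediction_results.items():
    print(rel_path, pred.shape)  # one row per detection: x1, y1, x2, y2, score(s)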
Example #23
    use_cuda = opt.use_cuda
    if use_cuda:
        torch.cuda.set_device(torch.device('cuda:{}'.format(opt.device)))
        m.cuda()

    # Data Loader
    anno = COCO(opt.anno_json)
    val_set = COCOImage(opt.anno_json, opt.img_path, opt.img_size)
    val_loader = DataLoader(val_set, opt.batch_size, shuffle=True, num_workers=0)

    # Accumulate results
    result_dict = {}
    print('START detection')
    for imgs, img_ids, sizes in tqdm(val_loader):
        # model
        boxes = do_detect(m, imgs, conf_thresh=opt.conf_thresh, nms_thresh=opt.nms_thresh, use_cuda=use_cuda, verbose=False)
        # process
        
        for img_id, box, H, W in zip(img_ids.numpy(), boxes, sizes[0].numpy(), sizes[1].numpy()):
            result_dict[img_id] = (img_id, box, H, W)

    # Transform results to COCO format
    print('Convert results to COCO format')
    total = []
    for img_id in tqdm(result_dict.keys()):
        one_result = coco_format(result_dict[img_id])
        total.extend(one_result)

    with open(opt.pred_json, 'w') as f:
        json.dump(total, f)
Example #24
    if use_cuda:
        model.cuda()

    img = cv2.imread(imgfile)

    # The inference input size (e.g. 416*416) does not have to match the training size;
    # training could have used 608*608 or other sizes.
    # Valid inference sizes:
    #   Height in {320, 416, 512, 608, ... 320 + 96 * n}
    #   Width in {320, 416, 512, 608, ... 320 + 96 * m}
    sized = cv2.resize(img, (width, height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    from tool.utils import load_class_names, plot_boxes_cv2
    from tool.torch_utils import do_detect

    for i in range(2):  # run twice as a speed check;
                        # the first iteration is usually slower (warm-up)
        boxes = do_detect(model, sized, 0.4, 0.6, use_cuda, profile=True)

    if namesfile is None:
        if n_classes == 20:
            namesfile = 'data/voc.names'
        elif n_classes == 80:
            namesfile = 'data/coco.names'
        else:
            print("please provide a namesfile")

    class_names = load_class_names(namesfile)
    plot_boxes_cv2(img, boxes[0], 'predictions.jpg', class_names)
Example #25
def evaluate(model, val_label, val_dataset_dir, use_cuda, net_width, net_height):
    os.makedirs("./tmp", exist_ok=True)
    resFile = "./tmp/res.json"

    f = open(val_label, 'r', encoding='utf-8')
    truth = {}
    for line in f.readlines():
        data = line.split(" ")  # targets on a line are separated by spaces
        truth[data[0]] = []  # data[0] is the image file name (xxx.jpg)
        for i in data[1:]:
            # each item is a list [x1, y1, x2, y2, cls_id]; all elements are ints
            truth[data[0]].append([int(float(j)) for j in i.split(',')])
    f.close()
    
    imgs = list(truth.keys())  # list of image file names
    # net_width = model.width
    # net_height = model.height
    # if use_cuda:
    #     model.cuda()
    # if torch.cuda.device_count() > 1:
    #     model = torch.nn.DataParallel(model)
    boxes_json = []
    for i, image_file_name in enumerate(imgs):
        image_id = get_image_id(image_file_name)
        img = cv2.imread(os.path.join(val_dataset_dir, image_file_name))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        image_height, image_width = img.shape[:-1]
        sized = cv2.resize(img, (net_width, net_height), interpolation=cv2.INTER_LINEAR)
        start = time.time()
        boxes = do_detect(model, sized, 0.0, 0.6, use_cuda)
        finish = time.time()

        assert isinstance(boxes[0], list)
        for box in boxes[0]:
            box_json = {}
            # xmin,ymin,xmax,ymax -> xmin,ymin,w,h
            box[2] = box[2] - box[0]
            box[3] = box[3] - box[1]
            category_id = box[-1]
            score = box[-2]
            bbox_normalized = box[:4]
            box_json["category_id"] = int(category_id)
            box_json["image_id"] = int(image_id)
            bbox = []
            for i, bbox_coord in enumerate(bbox_normalized):
                modified_bbox_coord = float(bbox_coord)
                if i % 2:
                    modified_bbox_coord *= image_height
                else:
                    modified_bbox_coord *= image_width
                modified_bbox_coord = round(modified_bbox_coord, 2)
                bbox.append(modified_bbox_coord)
            box_json["bbox_normalized"] = list(map(lambda x: round(float(x), 2), bbox_normalized))
            box_json["bbox"] = bbox
            box_json["score"] = round(float(score), 2)
            box_json["timing"] = float(finish - start)
            boxes_json.append(box_json)
    if len(boxes_json) == 0:
        return None
    with open(resFile, 'w') as outfile:
        json.dump(boxes_json, outfile)

    return resFile   
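The resFile returned by Example #25 is a standard COCO results JSON, so it can be scored directly with pycocotools (the ground-truth annotation path below is a placeholder):

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

res_file = evaluate(model, val_label, val_dataset_dir, use_cuda, 608, 608)
if res_file is not None:
    coco_gt = COCO('annotations/instances_val.json')  # hypothetical GT annotations
    coco_dt = coco_gt.loadRes(res_file)
    coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()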
Example #26
            # The inference input size (e.g. 416*416) does not have to match the training size;
            # training could have used 608*608 or other sizes.
            # Valid inference sizes:
            #   Height in {320, 416, 512, 608, ... 320 + 96 * n}
            #   Width in {320, 416, 512, 608, ... 320 + 96 * m}
            sized = cv2.resize(img, (width, height))
            sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

            to_plot = plot_boxes_cv2(img, gt_boxes, class_names=class_names)

            if eval_model:
                with torch.no_grad():
                    # for i in range(2):  # This 'for' loop is for speed check
                    #                     # Because the first iteration is usually longer
                    #     boxes = do_detect(model, sized, 0.4, 0.6, use_cuda)
                    boxes = do_detect(model, sized, 0.4, 0.6, use_device)
                    pred = plot_boxes_cv2(img,
                                          boxes[0],
                                          class_names=class_names)
                    to_plot = np.concatenate([to_plot, pred], axis=1)

            # to_plot = (255 * np.clip(to_plot, 0, 1)).astype(np.uint8)

            ############################  detect: end   ##################################

            if output_video:
                # to_plots.append(to_plot)
                cv2.imwrite(os.path.join(video_dir, str(it) + ".png"), to_plot)
                for i, name in enumerate(class_names):
                    contains = False
                    for box in gt_boxes:
Example #27
Cfg.cfgfile = '/home/isv/Documents/tensorrt/yolov4/darknet-master/yolov4-tiny.cfg'
Cfg.weights_file = '/home/isv/Documents/tensorrt/yolov4/darknet-master/backup/yolov4-tiny_last.weights'
model = Darknet(Cfg.cfgfile)
model.print_network()
model.load_weights(Cfg.weights_file)
if torch.cuda.is_available():
    use_cuda = 1
else:
    use_cuda = 0

if use_cuda:
    model = model.cuda()
img = cv2.imread('/home/isv/qt_projects/build-final_thread-Desktop_Qt_5_13_2_GCC_64bit-Debug/plateSaved/187.jpg')
img = cv2.resize(img, (320, 320))
# for i in range(10):
boxes = do_detect(model, img, 0.1, 0.4, use_cuda)

numberOfDetection = len(boxes[0])
for i in range(numberOfDetection):
    each_box = boxes[0][i]
    # boxes are normalized corner coordinates; scale to the 320x320 image
    x1 = int(320 * each_box[0])
    y1 = int(320 * each_box[1])
    x2 = int(320 * each_box[2])
    y2 = int(320 * each_box[3])
    img = cv2.rectangle(img, (x1, y1), (x2, y2), (100, 220, 0), 2)
    cv2.putText(img, str(each_box[6]), (x1, y2), cv2.FONT_ITALIC, 1, (100, 200, 100), 2)
    print(each_box, ' ', x1)
import matplotlib.pyplot as plt

plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))  # convert BGR to RGB for correct display
plt.show()
Example #28
def test(model, annotations, cfg):
    if not annotations["images"]:
        print("Annotations do not have 'images' key")
        return
    images = annotations["images"]
    # images = images[:10]
    resFile = 'data/coco_val_outputs.json'

    if torch.cuda.is_available():
        use_cuda = 1
    else:
        use_cuda = 0

    # do one forward pass first to circumvent cold start
    throwaway_image = Image.open('data/dog.jpg').convert('RGB').resize(
        (model.width, model.height))
    do_detect(model, throwaway_image, 0.5, 80, 0.4, use_cuda)
    boxes_json = []

    for i, image_annotation in enumerate(images):
        logging.info("currently on image: {}/{}".format(i + 1, len(images)))
        image_file_name = image_annotation["file_name"]
        image_id = image_annotation["id"]
        image_height = image_annotation["height"]
        image_width = image_annotation["width"]

        # open and resize each image first
        img = Image.open(os.path.join(cfg.dataset_dir,
                                      image_file_name)).convert('RGB')
        sized = img.resize((model.width, model.height))

        if use_cuda:
            model.cuda()

        start = time.time()
        boxes = do_detect(model, sized, 0.0, 80, 0.4, use_cuda)
        finish = time.time()
        if isinstance(boxes, list):
            for box in boxes:
                box_json = {}
                category_id = box[-1]
                score = box[-2]
                bbox_normalized = box[:4]
                box_json["category_id"] = int(category_id)
                box_json["image_id"] = int(image_id)
                bbox = []
                for i, bbox_coord in enumerate(bbox_normalized):
                    modified_bbox_coord = float(bbox_coord)
                    if i % 2:
                        modified_bbox_coord *= image_height
                    else:
                        modified_bbox_coord *= image_width
                    modified_bbox_coord = round(modified_bbox_coord, 2)
                    bbox.append(modified_bbox_coord)
                box_json["bbox_normalized"] = list(
                    map(lambda x: round(float(x), 2), bbox_normalized))
                box_json["bbox"] = bbox
                box_json["score"] = round(float(score), 2)
                box_json["timing"] = float(finish - start)
                boxes_json.append(box_json)
                # print("see box_json: ", box_json)
                with open(resFile, 'w') as outfile:
                    json.dump(boxes_json, outfile, default=myconverter)
        else:
            print(
                "warning: output from model after postprocessing is not a list, ignoring"
            )
            return

        # namesfile = 'data/coco.names'
        # class_names = load_class_names(namesfile)
        # plot_boxes(img, boxes, 'data/outcome/predictions_{}.jpg'.format(image_id), class_names)

    with open(resFile, 'w') as outfile:
        json.dump(boxes_json, outfile, default=myconverter)

    evaluate_on_coco(cfg, resFile)
Example #29
	def infer(self, data):
		sized = cv2.resize(data, (self.width, self.height))
		sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
		boxes = do_detect(self.model, sized, 0.4, 0.6, self.use_cuda, self.is_half)
		plot_boxes_cv2(data, boxes[0], savename='./data/predictions.jpg', class_names=self.class_names)
Example #30
    if use_cuda:
        model.cuda()

    # img = cv2.imread(imgfile)
    ##############################################################################################

    # _thread.start_new_thread(key_check,(c,c1,s))
    trans = trans_thread()
    trans.start()

    while FLAG:
        if trans.inQue.empty():
            continue
        trans.lock.acquire()
        img = trans.inQue.get()
        trans.lock.release()
        # print(img.shape)
        # cv2.imshow("11", img)
        sized = cv2.resize(img, (width, height))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
        y1, x10, x18 = do_detect(model, sized, 0.4, 0.6, use_cuda)

        y11, y12, x10, x18 = y1[0].detach().cpu().numpy(), y1[1].detach().cpu(
        ).numpy(), x10.detach().cpu().numpy(), x18.detach().cpu().numpy()
        if not trans.outQue.full():
            trans.fillData((y11, y12, x10, x18))
    # print(y1,y2,x18)
    # c.close()
    # c1.close()
    # s.close()