Example #1
def demo():
    import os
    import cv2  # used below for image decoding and saving
    from vizer.draw import draw_boxes

    # YOLOv3 is assumed to be in scope from the surrounding detector package
    yolo = YOLOv3("cfg/yolo_v3.cfg", "weight/yolov3.weights", "cfg/coco.names")
    print("yolo.size =", yolo.size)
    root = "./demo"
    resdir = os.path.join(root, "results")
    os.makedirs(resdir, exist_ok=True)
    files = [
        os.path.join(root, file) for file in os.listdir(root)
        if file.endswith('.jpg')
    ]
    files.sort()
    for filename in files:
        img = cv2.imread(filename)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        bbox, cls_conf, cls_ids = yolo(img)

        if bbox is not None:
            img = draw_boxes(img,
                             bbox,
                             cls_ids,
                             cls_conf,
                             class_name_map=yolo.class_names)
        # save results (vizer draws in RGB; flip channels back to BGR for cv2.imwrite)
        cv2.imwrite(os.path.join(resdir, os.path.basename(filename)),
                    img[:, :, (2, 1, 0)])
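
Every example on this page ultimately funnels into the same vizer.draw.draw_boxes call. As a minimal, self-contained sketch of that API (signature inferred from the usage in these examples; the image, boxes, and class names below are made up):

import numpy as np
from vizer.draw import draw_boxes

# blank 200x300 RGB canvas (HWC, uint8)
image = np.zeros((200, 300, 3), dtype=np.uint8)
# boxes are (N, 4) pixel coordinates: x1, y1, x2, y2
boxes = np.array([[20, 30, 120, 150], [150, 40, 280, 180]])
labels = np.array([1, 2])              # indices into class_name_map
scores = np.array([0.92, 0.75])
class_names = ['__background__', 'cat', 'dog']

drawn = draw_boxes(image, boxes, labels, scores,
                   class_name_map=class_names).astype(np.uint8)
print(drawn.shape)  # same HWC shape as the input
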
Example #2
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir, dataset_type):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')
    device = torch.device(cfg.MODEL.DEVICE)

    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    # dummy_input = torch.randn(1, 3, 300, 300, device='cuda')
    # input_names = ["input"]
    # output_names = ["output"]
    # torch.onnx.export(model, dummy_input, "vgg_ssd300_voc.onnx", verbose=True, input_names=input_names, output_names=output_names)

    image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()
    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start

        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result[
            'scores']

        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]
        meters = ' | '.join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths),
                                              image_name, meters))

        drawn_image = draw_boxes(image, boxes, labels, scores,
                                 class_names).astype(np.uint8)
        Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name))
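
Example 2 calls model.eval() but still runs the forward pass with autograd enabled; Example 3 below wraps inference in torch.no_grad(), which is the cheaper pattern. A small self-contained sketch of the difference (nn.Linear stands in for the detector):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)   # stand-in for the detection model
images = torch.randn(1, 4)

model.eval()              # disables dropout and freezes batch-norm statistics
with torch.no_grad():     # additionally skips building the autograd graph
    result = model(images)
print(result.requires_grad)  # False: no gradient bookkeeping was done
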
Example #3
    def predict(self, img):
        height, width = img.shape[:2]
        images = self.transforms(img)[0].unsqueeze(0)
        with torch.no_grad():
            result = self.model(images.to(self.device))[0]
        result = result.resize((width, height)).to(self.cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result['scores']
        indices = np.zeros(labels.shape)
        for label in self.target_labels:
            new_indices = labels == label
            indices = indices + new_indices
        indices = indices > 0
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]
        indices = scores > self.score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]

        font = ImageFont.truetype('arial.ttf', 10)
        drawn_image = draw_boxes(img,
                                 boxes,
                                 labels,
                                 scores,
                                 self.class_names,
                                 font=font).astype(np.uint8)
        drawn_image = self._draw_info(drawn_image, boxes)
        return len(boxes), drawn_image
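
Example 3 builds its target-label mask with an explicit loop over self.target_labels; numpy's np.isin produces the same boolean mask in one call. A quick sketch with dummy data showing the equivalence:

import numpy as np

labels = np.array([1, 3, 2, 3, 5])
target_labels = [2, 3]

# loop-based mask, as in Example 3
mask_loop = np.zeros(labels.shape)
for label in target_labels:
    mask_loop = mask_loop + (labels == label)
mask_loop = mask_loop > 0

# equivalent one-liner
mask_isin = np.isin(labels, target_labels)
assert (mask_loop == mask_isin).all()
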
Example #4
def run_demo(cfg,
             ckpt,
             score_threshold,
             images_dir: pathlib.Path,
             output_dir: pathlib.Path,
             dataset_type,
             num_images=None):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    elif dataset_type == "mnist":
        class_names = MNISTDetection.class_names
    elif dataset_type == "tdt4265":
        class_names = TDT4265Dataset.class_names
    elif dataset_type == "waymo":
        class_names = WaymoDataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')

    model = SSDDetector(cfg)
    model = torch_utils.to_cuda(model)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    image_paths = list(images_dir.glob("*.png")) + list(
        images_dir.glob("*.jpg"))

    output_dir.mkdir(exist_ok=True, parents=True)

    transforms = build_transforms(cfg, is_train=False)
    model.eval()
    drawn_images = []
    for i, image_path in enumerate(
            tqdm.tqdm(image_paths[:num_images], desc="Predicting on images")):
        image_name = image_path.stem

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)

        result = model(torch_utils.to_cuda(images))[0]

        result = result.resize((width, height)).cpu().numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result['scores']
        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]
        drawn_image = draw_boxes(image, boxes, labels, scores,
                                 class_names).astype(np.uint8)
        drawn_images.append(drawn_image)
        im = Image.fromarray(drawn_image)
        output_path = output_dir.joinpath(f"{image_name}.png")
        im.save(output_path)
    return drawn_images
Example #5
def run_demo(cfg, ckpt, score_threshold, images_dir: pathlib.Path,
             output_dir: pathlib.Path, dataset_type):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    elif dataset_type == "mnist":
        class_names = MNISTDetection.class_names
    else:
        raise NotImplementedError('Not implemented now.')

    model = SSDDetector(cfg)
    model = torch_utils.to_cuda(model)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    image_paths = list(images_dir.glob("*.png")) + list(
        images_dir.glob("*.jpg"))

    output_dir.mkdir(exist_ok=True, parents=True)

    transforms = build_transforms(cfg, is_train=False)
    model.eval()
    drawn_images = []
    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = image_path.name

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(torch_utils.to_cuda(images))[0]
        inference_time = time.time() - start

        result = result.resize((width, height)).cpu().numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result['scores']
        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]
        meters = "|".join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        image_name = image_path.name

        drawn_image = draw_boxes(image, boxes, labels, scores,
                                 class_names).astype(np.uint8)
        drawn_images.append(drawn_image)
    return drawn_images
Example #6
    def predict(self, batch):
        probs = self.forward(batch)
        ret = []
        for i in range(len(probs)):
            boxes, labels, scores = probs[i]['boxes'], probs[i]['labels'], probs[i]['scores']
            indices = scores > self.threshold
            boxes = boxes[indices]
            labels = labels[indices]
            scores = scores[indices]
            drawn = draw_boxes(batch[i], boxes, labels, scores, self.class_names).astype(np.uint8)
            ret.append(drawn)
        return ret
Example #7
def visualize_training_set(cfg, image_id="Czech_000006"):
    data_loader = make_data_loader(cfg, is_train=True)
    if isinstance(data_loader, list):
        data_loader = data_loader[0]
    dataset = data_loader.dataset

    image = dataset._read_image(image_id)
    boxes, labels = dataset._get_annotation(image_id)
    image = draw_boxes(image,
                       boxes,
                       labels,
                       class_name_map=dataset.class_names)

    plt.imshow(image)
    plt.show()
Example #8
def detect_objects_and_plot(img: 'PIL Image', model, threshold=0.3):
    """Takes a PIL image and a model, runs the model on the image, and returns
    an annotated PIL image. `threshold` is the confidence above which a
    prediction counts as valid.
    """
    # some transforms
    resize = transforms.Resize((300, 300))
    to_tensor = transforms.ToTensor()
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    image = normalize(to_tensor(resize(img)))

    model.eval()
    with torch.no_grad():
        locs, confs = model(image.unsqueeze(0))

    for i in range(locs.size(0)):
        # Decode targets here if possible
        locs[i] = decode(locs[i], model.criterion.priors,
                         model.cfg.priors.variance)

    confs = F.softmax(confs, dim=2)
    scores, idxs = confs.max(dim=2)

    bboxes, scores, labels = filter_boxes_batched(locs,
                                                  scores,
                                                  idxs,
                                                  min_conf=threshold,
                                                  nms_thresh=0.1)

    # since there is only a single image
    bboxes = bboxes[0]
    scores = scores[0]
    labels = labels[0]

    # Transform to original image dimensions
    dims = torch.FloatTensor([img.width, img.height, img.width, img.height])
    bboxes = bboxes * dims

    annotated_image = draw_boxes(img,
                                 bboxes,
                                 labels,
                                 scores,
                                 class_name_map=coco_class_name)
    annotated_image = Image.fromarray(annotated_image)
    return annotated_image
Example #9
    def Detect_single_img(self, image, score_threshold=0.7, device='cuda'):
        """
        检测单张照片
        eg:
            image, boxes, labels, scores= net.Detect_single_img(img)
            plt.imshow(image)
            plt.show()

        :param image:           图片,PIL.Image.Image
        :param score_threshold: 阈值
        :param device:          检测时所用设备,默认'cuda'
        :return:                添加回归框的图片(np.array),回归框,标签,分数
        """
        self.eval()
        assert isinstance(image, Image.Image)
        w, h = image.width, image.height
        images_tensor = transfrom(self.cfg, is_train=False)(
            np.array(image))[0].unsqueeze(0)

        self.to(device)
        images_tensor = images_tensor.to(device)
        time1 = time.time()
        detections = self.forward_with_postprocess(images_tensor)[0]
        boxes, labels, scores = detections
        boxes, labels, scores = boxes.to('cpu').numpy(), labels.to(
            'cpu').numpy(), scores.to('cpu').numpy()
        boxes[:, 0::2] *= (w / self.cfg.MODEL.INPUT.IMAGE_SIZE)
        boxes[:, 1::2] *= (h / self.cfg.MODEL.INPUT.IMAGE_SIZE)

        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]
        print("Detect {} object, inference cost {:.2f} ms".format(
            len(scores), (time.time() - time1) * 1000))
        # draw boxes onto the image data
        drawn_image = draw_boxes(
            image=image,
            boxes=boxes,
            labels=labels,
            scores=scores,
            class_name_map=self.cfg.DATA.DATASET.CLASS_NAME).astype(np.uint8)
        return drawn_image, boxes, labels, scores
Example #10
def demo():
    import os
    import cv2  # used below for image decoding and saving
    from vizer.draw import draw_boxes

    yolo = ScaledYOLOv4('detector/scaled_yolov4/parent/models/yolov4-csp.cfg',
                        'detector/scaled_yolov4/weights/yolov4-csp.weights',
                        'detector/scaled_yolov4/parent/data/coco.names')
    print("yolo.size =", yolo.size)
    root = "tests/scaled_yolov4"
    resdir = "logs/scaled_yolov4_demo/results"
    os.makedirs(resdir, exist_ok=True)
    files = [os.path.join(root, file) for file in os.listdir(root) if file.endswith('.jpg')]
    files.sort()
    for filename in files:
        img = cv2.imread(filename)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        bbox, cls_conf, cls_ids = yolo(img)

        if bbox is not None:
            img = draw_boxes(img, bbox, cls_ids, cls_conf, class_name_map=yolo.class_names)
        # save results (flip RGB back to BGR for cv2.imwrite)
        cv2.imwrite(os.path.join(resdir, os.path.basename(filename)), img[:, :, (2, 1, 0)])
Example #11
priors = PriorBox(cfg)()
if isinstance(data_loader, list):
    data_loader = data_loader[0]
for img, batch, *_ in data_loader:
    boxes = batch["boxes"]
    # SSD Target transform transfers target boxes into prior locations
    # Have to revert the transformation
    boxes = box_utils.convert_locations_to_boxes(boxes, priors,
                                                 cfg.MODEL.CENTER_VARIANCE,
                                                 cfg.MODEL.SIZE_VARIANCE)
    boxes = box_utils.center_form_to_corner_form(boxes)

    # Remove all priors that are background
    boxes = boxes[0]
    labels = batch["labels"][0].squeeze().cpu().numpy()
    boxes = boxes[labels != 0]
    labels = labels[labels != 0]
    # Resize to image width and height
    boxes[:, [0, 2]] *= img.shape[3]
    boxes[:, [1, 3]] *= img.shape[2]
    img = img.numpy()
    # NCHW to HWC (only select first element of batch)
    img = np.moveaxis(img, 1, -1)[0]
    # Remove normalization (mean/std are the dataset pixel statistics defined earlier in the script)
    img = img * std + mean

    img = img.astype(np.uint8)
    img = draw_boxes(img, boxes, labels)
    plt.imshow(img)
    plt.show()
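
The NCHW-to-HWC flip and de-normalization in Example 11 depend on mean and std defined elsewhere in that script. In isolation the step looks like this (the statistics below are illustrative, not the repo's actual values):

import numpy as np

mean = np.array([123.0, 117.0, 104.0])  # illustrative per-channel pixel means
std = 1.0                                # illustrative scalar std

img = np.random.randn(1, 3, 300, 300).astype(np.float32)  # NCHW batch
img = np.moveaxis(img, 1, -1)[0]         # NCHW -> HWC, first element of batch
img = (img * std + mean).clip(0, 255).astype(np.uint8)
print(img.shape)  # (300, 300, 3)
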
Example #12
def show_result_viz(img,
                    result,
                    class_names,
                    score_thr=0.3,
                    wait_time=0,
                    show=True,
                    out_file=None):
    """Visualize the detection results on the image.

    Args:
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        class_names (list[str] or tuple[str]): A list of class names.
        score_thr (float): The threshold to visualize the bboxes and masks.
        wait_time (int): Value of waitKey param.
        show (bool, optional): Whether to show the image with opencv or not.
        out_file (str, optional): If specified, the visualization result will
            be written to the out file instead of shown in a window.

    Returns:
        np.ndarray or None: If neither `show` nor `out_file` is specified, the
            visualized image is returned, otherwise None is returned.
    """
    assert isinstance(class_names, (tuple, list))
    img = np.array(Image.open(img).convert("RGB"))  # note: Image.open needs a filename; the commented mmcv.imread also handled ndarrays
    #img = mmcv.imread(img)
    #img = img.copy()
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    # draw segmentation masks
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        for i in inds:
            color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            mask = maskUtils.decode(segms[i]).astype(bool)  # np.bool was removed in modern NumPy
            img[mask] = img[mask] * 0.5 + color_mask * 0.5
    # draw bounding boxes
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)

    # uses vizer instead
    boxes = bboxes[:, 0:4]
    scores = bboxes[:, 4]
    indices = scores > score_thr
    boxes = boxes[indices]
    labels = labels[indices]
    scores = scores[indices]
    print(labels)

    drawn_image = draw_boxes(img, boxes, labels, scores,
                             class_names).astype(np.uint8)
    if out_file is not None:
        Image.fromarray(drawn_image).save(out_file)

    # mmcv.imshow_det_bboxes(
    #     img,
    #     bboxes,
    #     labels,
    #     class_names=class_names,
    #     score_thr=score_thr,
    #     show=show,
    #     wait_time=wait_time,
    #     out_file=out_file)
    if not (show or out_file):
        return img
Example #13
def run_demo(cfg, ckpt, score_threshold, images_dir, dataset_type):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == "pick":
        class_names = PICKDataset.class_names
    elif dataset_type == "cotb":
        class_names = COTBDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')
    device = torch.device(cfg.MODEL.DEVICE)

    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    # derive the epoch tag from the checkpoint path (assumes names like .../model_<epoch>.pth)
    train_epoch = weight_file.split('/')[2]
    train_epoch = train_epoch.split('.')[0].split('_')[1]
    save_path = os.path.join('demo', dataset_type, cfg.MODEL.BACKBONE.NAME,
                             train_epoch)

    image_paths = glob.glob(os.path.join(images_dir, '*.jpg')) + glob.glob(
        os.path.join(images_dir, '*.jpeg'))
    mkdir(save_path)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()
    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start

        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result['scores']

        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]
        meters = ' | '.join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths),
                                              image_name, meters))

        drawn_image = draw_boxes(image, boxes, labels, scores,
                                 class_names).astype(np.uint8)
        Image.fromarray(drawn_image).save(os.path.join(save_path, image_name))
Example #14
def main():
    parser = argparse.ArgumentParser(description="DPNet Demo")
    parser.add_argument(
        "--config-file",
        default="configs/e2e_faster_rcnn_R_101_FPN_1x_rpc_syn_render_density_map.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "--images_dir",
        required=True,
        type=str,
        help="path to images file",
    )
    parser.add_argument(
        "--save_dir",
        default='rpc_results',
        type=str,
        help="path to save visualization results",
    )
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.7,
        help="Minimum score for the prediction to be shown",
    )
    parser.add_argument(
        "--min-image-size",
        type=int,
        default=800,
        help="Smallest size of the image to feed to the model. "
        "Model was trained with 800, which gives best results",
    )
    parser.add_argument(
        "--show-mask-heatmaps",
        dest="show_mask_heatmaps",
        help="Show a heatmap probability for the top masks-per-dim masks",
        action="store_true",
    )
    parser.add_argument(
        "--masks-per-dim",
        type=int,
        default=2,
        help="Number of heatmaps per dimension to show",
    )
    parser.add_argument(
        "opts",
        help="Modify model config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    # load config from file and command-line arguments
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # prepare object that handles inference plus adds predictions on top of image
    coco_demo = COCODemo(
        cfg,
        confidence_threshold=args.confidence_threshold,
        show_mask_heatmaps=args.show_mask_heatmaps,
        masks_per_dim=args.masks_per_dim,
        min_image_size=args.min_image_size,
    )
    if os.path.exists(args.save_dir):
        shutil.rmtree(args.save_dir)
    os.mkdir(args.save_dir)

    with open('/data7/lufficc/rpc/instances_test2019.json') as fid:
        data = json.load(fid)

    images = {}
    for x in data['images']:
        images[x['id']] = x

    annotations = defaultdict(list)
    for x in data['annotations']:
        annotations[images[x['image_id']]['file_name']].append(x)
    annotations = dict(annotations)

    counter = {
        'easy': 0,
        'medium': 0,
        'hard': 0,
    }

    data_images = data['images'].copy()
    random.shuffle(data_images)
    FONT = ImageFont.truetype('/data7/lufficc/projects/DPNet/demo/arial.ttf',
                              8)
    for image_ann in data_images:
        if counter[image_ann['level']] >= 20:
            continue
        image_path = os.path.join(args.images_dir, image_ann['file_name'])
        img = cv2.imread(image_path)
        annotation = annotations[image_ann['file_name']]
        prediction = coco_demo.run_on_opencv_image(img)

        new_size = (400, 400)

        img = cv2.resize(img, new_size)
        prediction = prediction.resize(new_size)

        boxes = prediction.bbox.numpy()
        labels = prediction.get_field('labels').numpy()
        scores = prediction.get_field('scores').numpy()

        img = draw_boxes(img,
                         boxes,
                         labels,
                         scores,
                         COCODemo.CATEGORIES,
                         width=2,
                         font=FONT,
                         alpha=0.4)
        gt_labels = sorted([ann['category_id'] for ann in annotation])
        if gt_labels == sorted(labels.tolist()):
            print('Get {}.'.format(image_ann['level']))
            cv2.imwrite(
                os.path.join(
                    args.save_dir,
                    image_ann['level'] + '_' + os.path.basename(image_path)),
                img)
            counter[image_ann['level']] += 1
Example #15
def pavement_distress(video_name, cap, fps_threshold, threshold, output_video,
                      output_labels, transforms, model, device, cpu_device,
                      class_name):
    height, width = cap.h, cap.w
    #out = cv2.VideoWriter(output_video, cv2.VideoWriter_fourcc(*'MJPG'), 20.0, (width, height)) #DIVX for .MP4
    #f_output = open(output_labels, 'a+')

    all_labels = []
    fps = 1  # frame counter (despite the name)
    counting_fps = fps_threshold  # interval, in frames, between saved detections

    latest_iteration = st.empty()
    bar = st.progress(0)
    count = 0  # progress-bar percentage
    num_frames = cap.reader.nframes
    progress = 100 / num_frames

    threshold_display = st.empty()
    threshold_display.text(f'Confidence Threshold: {threshold}')
    aligator = st.empty()
    memanjang = st.empty()
    melintang = st.empty()
    lubang = st.empty()
    total_labels = st.empty()

    aligator_count = 0
    memanjang_count = 0
    melintang_count = 0
    lubang_count = 0
    total_labels_count = 0

    for frame in cap.iter_frames():
        count = count + progress
        if count < 100:
            latest_iteration.text(f'Process: {math.ceil(count)}%')
            bar.progress(math.ceil(count))
        else:
            latest_iteration.text(f'Process: {math.floor(count)}%')
            bar.progress(math.floor(count))

        frames = transforms(frame)[0].unsqueeze(0)

        with torch.no_grad():
            result = model(frames.to(device))[0]
            result = result.resize((width, height)).to(cpu_device).numpy()
            boxes, labels, scores = result['boxes'], result['labels'], result['scores']

        indices = scores > threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]

        if fps == fps_threshold:
            drawn_frame = draw_boxes(frame, boxes, labels, scores,
                                     class_name).astype(np.uint8)
            #cv2.imwrite(f'./results/{os.path.splitext(os.path.basename(video_name))[0]}/{counting_fps}_frames/frame_{fps}.jpg', drawn_frame)
            cv2.imwrite(
                f'./results/{os.path.splitext(os.path.basename(video_name))[0]}/confidence_threshold/{threshold}_threshold/frame_{fps}.jpg',
                drawn_frame)
            print(
                f'./results/{os.path.splitext(os.path.basename(video_name))[0]}/{threshold}_threshold/frame_{fps}.jpg'
            )

            fps_threshold += counting_fps
            fps += 1

        else:
            fps += 1
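
Example 15 saves annotated frames to disk one by one; the commented-out cv2.VideoWriter line hints at re-assembling them into a video. A self-contained sketch of that step (codec, fps, and frame size are assumptions, with placeholder frames):

import cv2
import numpy as np

width, height = 640, 480  # assumed frame size
out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'MJPG'),
                      20.0, (width, height))
for _ in range(60):  # the drawn frames from the loop above would go here
    frame = np.zeros((height, width, 3), dtype=np.uint8)  # placeholder RGB frame
    out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))  # OpenCV expects BGR
out.release()
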
Example #16
coco_class_name = [
    '__bg', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
    'hair drier', 'toothbrush'
]
if __name__ == '__main__':
    name = '000000308476'
    with open('data/%s.pickle' % name, 'rb') as f:
        data = pickle.load(f)
    img = Image.open('data/%s.jpg' % name)
    img = draw_masks(img, data['masks'], data['labels'])
    img = draw_boxes(img,
                     boxes=data['boxes'],
                     labels=data['labels'],
                     scores=data['scores'],
                     class_name_map=coco_class_name,
                     score_format=':{:.4f}')
    plt.imshow(img)
    plt.show()
Example #17
import numpy as np
import matplotlib.pyplot as plt
from train import get_parser
from ssd.config.defaults import cfg
from ssd.data.build import make_data_loader
from vizer.draw import draw_boxes
args = get_parser().parse_args()

cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()

data_loader = make_data_loader(cfg, is_train=False)
if isinstance(data_loader, list):
    data_loader = data_loader[0]
dataset = data_loader.dataset
indices = list(range(len(dataset)))
np.random.shuffle(indices)
for idx in indices:
    image = dataset._read_image(idx)
    boxes, labels = dataset.get_annotation(idx)
    image = draw_boxes(image,
                       boxes,
                       labels,
                       class_name_map=dataset.class_names)
    plt.imshow(image)
    plt.show()
Example #18
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir, onnx_dir, dataset_type):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')
    device = torch.device(cfg.MODEL.DEVICE)
    device = "cpu" if not torch.cuda.is_available() else device

    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()

    # get model ready for onnx export
    mkdir(onnx_dir)
    model_onnx = build_detection_model(cfg)
    model_onnx = model_onnx.to(device)
    checkpointer_onnx = CheckPointer(model_onnx, save_dir=cfg.OUTPUT_DIR)
    checkpointer_onnx.load(ckpt, use_latest=ckpt is None)
    # replace the SSD box head postprocessor with the onnx version for exporting
    model_onnx.box_head.post_processor = PostProcessorOnnx(cfg)
    model_onnx.eval()

    # export with ONNX
    # the onnx model takes the name of the .pth ckpt file
    model_onnx_name = os.path.basename(ckpt).split('.')[0] + ".onnx"
    model_onnx_path = os.path.join(onnx_dir, model_onnx_name)
    if not os.path.exists(model_onnx_path):
        print(f'Exporting model as onnx to {model_onnx_path}')
        dummy_input = torch.zeros(
            [1, 3, cfg.INPUT.IMAGE_SIZE, cfg.INPUT.IMAGE_SIZE]).to(device)
        torch.onnx.export(model_onnx,
                          dummy_input,
                          model_onnx_path,
                          export_params=True,
                          do_constant_folding=True,
                          opset_version=11,
                          input_names=['input'],
                          output_names=['boxes', 'scores', 'labels'],
                          dynamic_axes={
                              'input': {0: 'batch_size', 2: "height", 3: "width"}},
                          verbose=False)

    # load exported onnx model for inference test
    print(
        f'Loading exported onnx model from {model_onnx_path} for inference comparison test')
    onnx_runtime_sess = onnxruntime.InferenceSession(model_onnx_path)

    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)

        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start
        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result['scores']

        indices = scores > score_threshold
        boxes, labels, scores = boxes[indices], labels[indices], scores[indices]
        meters = ' | '.join(
            [
                'objects {:02d}'.format(len(boxes)),
                'load {:03d}ms'.format(round(load_time * 1000)),
                'inference {:03d}ms'.format(round(inference_time * 1000)),
                'FPS {}'.format(round(1.0 / inference_time))
            ]
        )
        print('Pytorch: ({:04d}/{:04d}) {}: {}'.format(i + 1,
                                                       len(image_paths), image_name, meters))
        drawn_image = draw_boxes(image, boxes, labels,
                                 scores, class_names).astype(np.uint8)
        Image.fromarray(drawn_image).save(
            os.path.join(output_dir, "pytorch_" + image_name))

        """
        Compute ONNX Runtime output prediction
        """

        start = time.time()
        ort_inputs = {onnx_runtime_sess.get_inputs()[0].name: np.array(images)}
        boxes, scores, labels = onnx_runtime_sess.run(None, ort_inputs)
        inference_time = time.time() - start

        indices = scores > score_threshold
        boxes, labels, scores = boxes[indices], labels[indices], scores[indices]
        # resize bounding boxes to size of the original image
        boxes[:, 0::2] *= (width)
        boxes[:, 1::2] *= (height)
        meters = ' | '.join(
            [
                'objects {:02d}'.format(len(boxes)),
                'load {:03d}ms'.format(round(load_time * 1000)),
                'inference {:03d}ms'.format(round(inference_time * 1000)),
                'FPS {}'.format(round(1.0 / inference_time))
            ]
        )
        print('Onnx:    ({:04d}/{:04d}) {}: {}'.format(i + 1,
                                                       len(image_paths),
                                                       image_name, meters))
        drawn_image = draw_boxes(image, boxes, labels,
                                 scores, class_names).astype(np.uint8)
        Image.fromarray(drawn_image).save(
            os.path.join(output_dir, "onnx_" + image_name))
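
Example 18 compares the PyTorch and ONNX Runtime paths visually by saving both sets of drawn images; a numeric check is a common companion. A sketch, under the assumption that both backends return boxes in the same order and scale (the arrays here are made up):

import numpy as np

# stand-ins for the filtered, rescaled outputs of the two backends
torch_boxes = np.array([[10.0, 20.0, 110.0, 220.0]])
onnx_boxes = torch_boxes + 1e-6  # illustrative near-identical result

np.testing.assert_allclose(torch_boxes, onnx_boxes, rtol=1e-3, atol=1e-4)
print('PyTorch and ONNX Runtime boxes agree within tolerance')
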
Example #19
def run_demo(cfg,
             ckpt,
             score_threshold,
             images_dir,
             output_dir,
             dataset_type,
             model_path=None):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        class_names = TxtDataset(dataset_name=dataset_type).class_names
    # else:
    #     raise NotImplementedError('Not implemented now.')
    device = torch.device(cfg.MODEL.DEVICE)

    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    # bn_fuse (applied below when cfg.TEST.BN_FUSE is set) is needed because the FPGA side has no BN
    if model_path is None:
        checkpointer.load(ckpt, use_latest=ckpt is None)
        weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
        print('Loaded weights from {}'.format(weight_file))
    else:
        model.load_state_dict(torch.load(model_path))

    if cfg.TEST.BN_FUSE:
        print('BN_FUSE.')
        model.backbone.bn_fuse()
        model.to(device)
    image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))  # use '*.png' here for png inputs
    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()
    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start

        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result['scores']

        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]
        meters = ' | '.join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths),
                                              image_name, meters))

        drawn_image = draw_boxes(image, boxes, labels, scores,
                                 class_names).astype(np.uint8)
        Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name))