Example #1
    def decode(self, data):
        """Fill buffer with a raw binary data

        Each byte already must represent one pixel

        Parameters::

            data : image binary data
        """

        buffer = api.begin(self._obj, self._width, self._height)

        converters.raw(buffer, data)

        api.end(self._obj)

        for i in range(api.count(self._obj)):

            # Extract the i-th identified code
            api.extract(self._obj, i, self._code)
            try:
                api.decode(self._code, self._data)
            except api.exceptions.DecodeException:
                continue

            yield Code(
                tuple((corner.x, corner.y) for corner in self._code.corners),
                self._code.size,
                self._data.version,
                self._data.ecc_level,
                self._data.data_type,
                ctypes.string_at(self._data.payload, self._data.payload_len),
            )
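A minimal usage sketch for the generator above. The Decoder class name and its constructor are assumptions for illustration; only the decode method itself comes from the example:

# Hypothetical wrapper: Decoder(width, height) is assumed, not shown above
with open('frame.raw', 'rb') as fh:  # raw grayscale image, one byte per pixel
    raw = fh.read()

decoder = Decoder(width=640, height=480)  # assumed constructor signature
for code in decoder.decode(raw):
    print(code)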
Example #2
def decode(image):
    """Recognize image and return generator with all the available QR codes

    Currently supports only PIL Image object as an parameter
    """

    # TODO: `image` type check

    # Convert to grayscale mode
    if image.mode not in ('1', 'L'):
        image = image.convert('L')

    width, height = image.size

    obj = api.new()
    api.resize(obj, width, height)
    buffer = api.begin(obj, width, height)

    # Fill the buffer with image pixels: one cell per pixel.
    for idx, pixel in enumerate(converters.pil(image)):
        buffer[idx] = pixel

    # Finish codes identification
    api.end(obj)

    code = api.structures.Code()
    data = api.structures.Data()

    for i in range(api.count(obj)):

        # Extract the i-th identified code; skip codes that fail to decode,
        # matching the DecodeException handling shown in Example #1
        api.extract(obj, i, code)
        try:
            api.decode(code, data)
        except api.exceptions.DecodeException:
            continue

        yield Code(
            tuple((corner.x, corner.y) for corner in code.corners),
            code.size,
            data.version,
            data.ecc_level,
            data.data_type,
            ctypes.string_at(data.payload, data.payload_len),
        )

    api.destroy(obj)
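A short usage sketch for the module-level decode() above; the file name is illustrative:

from PIL import Image

# Decode every QR code found in an image file (path is illustrative)
image = Image.open('ticket.png')
for code in decode(image):
    print(code)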
Example #3
    cfg = load_yaml('config.yaml')
    root = cfg['paths']['virat_dir']
    D = CamNetDataset(root,
                      augment=get_val_transforms(cfg['train']['transforms']),
                      mode='train')
    print(len(D))
    for d in range(len(D)):
        img, hms, labels = D[d]

        # Convert the tensor image back to a displayable numpy array
        show = np.array(to_pil_image(img))
        # Rebuild a network-style output [heatmap, offset, width/height]
        # from the ground-truth target so it can be fed to decode()
        hm = torch.Tensor(hms[0][0]).unsqueeze(0).unsqueeze(0).float().cuda()
        of = torch.Tensor(hms[0][1:3]).unsqueeze(0).float().cuda()
        wh = torch.Tensor(hms[0][3:5]).unsqueeze(0).float().cuda()
        out = [[hm, of, wh]]
        boxes, scores = decode(out, [4], 0.15, K=100)
        # Draw predicted boxes in red and ground-truth boxes in green
        for l in boxes[0]:
            show = cv2.rectangle(show, (int(l[0]), int(l[1])),
                                 (int(l[2]), int(l[3])), (255, 0, 0), 2)
        for l in labels:
            show = cv2.rectangle(show, (int(l[0]), int(l[1])),
                                 (int(l[2]), int(l[3])), (0, 255, 0), 1)
        # Only display the frames where the detector found nothing
        if len(boxes[0]) == 0:
            print(d, labels)
            plt.imshow(show)
            plt.show()
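The rectangle-drawing loops recur across these examples; a small helper like the sketch below (the name and defaults are assumptions, not part of the original code) would keep prediction and label overlays consistent:

import cv2

def draw_boxes(img, boxes, color=(0, 255, 0), thickness=1):
    # Hypothetical helper: draws [x1, y1, x2, y2, ...] boxes onto img
    for b in boxes:
        img = cv2.rectangle(img, (int(b[0]), int(b[1])),
                            (int(b[2]), int(b[3])), color, thickness)
    return img

# e.g. show = draw_boxes(show, boxes[0], color=(255, 0, 0), thickness=2)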
Example #4
        h_frame = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        w_frame = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        print(h_frame, w_frame)
        i = 0
        f_res = fname[:-4] + '_res.avi'
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        # Frames are resized to 320x320 below, so the writer must be created
        # with that size, not the capture size, or the output is unreadable
        vid_out = cv2.VideoWriter(f_res, fourcc, fps, (320, 320))
        while True:
            if i % 100 == 0:
                print("processing {}/{}".format(i, n_frames))
            # Capture frame-by-frame
            ret, frame = cap.read()
            if not ret:
                break
            frame, _ = padded_resize(frame, size=(320, 320))
            #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            x = to_tensor(frame).unsqueeze(0).float()
            x = normalize(x, mean=[0] * 3, std=[1] * 3)
            x = x.cuda()
            with torch.no_grad():
                out = net(x)
            boxes, scores = decode(out, (4, ), 0.4, K=50, use_nms=True)
            # Draw the detections that survived the 0.4 score threshold
            for l in boxes[0]:
                frame = cv2.rectangle(frame, (int(l[0]), int(l[1])),
                                      (int(l[2]), int(l[3])), (0, 255, 0), 1)

            vid_out.write(frame)
            i += 1
        cap.release()
        vid_out.release()
        cv2.destroyAllWindows()
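fps and n_frames are defined outside this excerpt; a sketch of how they would typically be read from the capture using standard OpenCV properties:

import cv2

cap = cv2.VideoCapture(fname)                      # fname as in the excerpt
fps = cap.get(cv2.CAP_PROP_FPS)                    # source frame rate
n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # total frames; may be 0 for streams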
Example #5
def test(net, dataset, batch_size=32):
    net.eval()

    strides = dataset.strides

    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        pin_memory=True,
        num_workers=4,
        shuffle=False,
        collate_fn=test_collate_fn
    )

    threshold = 0.05
    iou_thresh = 0.4
    thresh_num = 1000
    count_obj = 0
    
    # Optional visual sanity check of predictions against ground truth
    debug_show = False
    if debug_show:
        for i, data in tqdm(enumerate(loader), desc="Test: ", ascii=True, total=len(loader)):
            img, labels = data
            img = img.cuda()
            with torch.no_grad():
                out = net(img)
            boxes, scores = decode(out, strides, 0.35, K=100)            
            img_show = np.array(to_pil_image(img.squeeze(0).cpu()))
            for ll in labels:
                for l in ll:
                    img_show = cv2.rectangle(img_show, (int(l[0]), int(l[1])), (int(l[2]), int(l[3])), (255, 0, 0), 1)
            for l in boxes[0]:
                img_show = cv2.rectangle(img_show, (int(l[0]), int(l[1])), (int(l[2]), int(l[3])), (0, 255, 0), 1)
        
            plt.imshow(img_show)
            plt.show()
        
    pr_curve = np.zeros((thresh_num, 2))  # float64 by default
    for i, data in tqdm(enumerate(loader), desc=f"Test-{dataset.name()}: ", ascii=True, total=len(loader)):
        img, labels = data
        img = img.cuda()
        with torch.no_grad():
            out = net(img)
        boxes, scores = decode(out, strides, threshold, K=100)                

        # Use a distinct index so the outer enumerate() counter is not shadowed
        for k in range(len(labels)):
            gt_boxes = labels[k].astype(np.double)
            result = []
            for b, s in zip(boxes[k], scores[k]):
                x1, y1, x2, y2 = b[0], b[1], b[2], b[3]
                # Convert xyxy to x, y, width, height (inclusive) plus score
                box = [x1, y1, x2 - x1 + 1, y2 - y1 + 1, s]
                box = np.array(box).astype(np.double)
                result.append(box)
            result = np.array(result)
            count_obj += len(gt_boxes)
            if len(gt_boxes) == 0 or len(result) == 0:
                continue
            ignore = np.ones(gt_boxes.shape[0])
            pred_recall, proposal_list = image_eval(result, gt_boxes, ignore, iou_thresh, box_format='xyxy')
            _img_pr_info = img_pr_info(thresh_num, result, proposal_list, pred_recall)
            pr_curve += _img_pr_info

    pr_curve = dataset_pr_info(thresh_num, pr_curve, count_obj)
    propose = pr_curve[:, 0]
    recall = pr_curve[:, 1]
    ap = voc_ap(recall, propose)
    return ap, pr_curve
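A minimal call sketch for test(); val_dataset is a hypothetical dataset instance assumed to expose .strides and .name() as used above:

# net is a trained model already moved to the GPU
ap, pr_curve = test(net, val_dataset, batch_size=16)
print('AP (IoU 0.4): {:.4f}'.format(ap))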