Example #1
from bisect import bisect

import cv2
import numpy as np

from vi3o import view

# imwrite below is assumed to be a helper from the surrounding project.


def show_tracks(scene, tracks, frame_dets=(), first_frame=None):
    if not tracks:
        return
    if first_frame is None:
        first_frame = min(tr[0].frame for tr in tracks)
    last_frame = max(tr[-1].frame for tr in tracks)
    for f in range(first_frame, last_frame + 1):
        img = scene.frame(f)

        if f in frame_dets:
            for d in frame_dets[f]:
                d.draw(img, color=(255, 0, 0))

        for tr_id, tr in enumerate(tracks):
            if tr[0].frame <= f <= tr[-1].frame:
                det = tr[bisect([det.frame for det in tr], f) - 1]
                assert det.frame == f  # Did you interpolate_missing_detections(tracks)?
                if det.id is not None:
                    label = '%s:%s' % (tr_id, det.id)
                else:
                    label = str(tr_id)
                det.draw(img, label=label)

        # Outline the ROI: a thick black stroke under a thin white one
        cv2.polylines(img,
                      np.array([scene.roi()]),
                      True, (0, 0, 0),
                      thickness=3)
        cv2.polylines(img,
                      np.array([scene.roi()]),
                      True, (255, 255, 255),
                      thickness=1)

        view(img)  #, pause=True)
        imwrite(img, "dbg/%.8d.jpg" % f)
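The assert above expects every track to carry one detection per frame of its span. The project's interpolate_missing_detections is not part of this excerpt; a minimal sketch of what it could look like, assuming detections expose frame, cx and cy and can be copied (all hypothetical here):

def interpolate_missing_detections(tracks):
    # Fill frame gaps in each track by linearly interpolating the box
    # centers between the surrounding real detections (sketch only).
    for tr in tracks:
        filled = [tr[0]]
        for det in tr[1:]:
            prv = filled[-1]
            gap = det.frame - prv.frame
            for df in range(1, gap):
                a = df / gap
                fake = prv.copy()           # assumes detections are copyable
                fake.frame = prv.frame + df
                fake.cx = (1 - a) * prv.cx + a * det.cx
                fake.cy = (1 - a) * prv.cy + a * det.cy
                fake.id = None              # show_tracks labels these without an id
                filled.append(fake)
            filled.append(det)
        tr[:] = filled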
Example #2
from collections import defaultdict

from vi3o import view

# connect() is a project helper; a plausible sketch follows the example.


def make_graph(video_detections, fps, show=False):

    max_temporal_distance = fps
    min_iou = 0.0

    history = []
    graph = []
    for frame_idx, frame, detections in video_detections:
        for det in detections:
            det.next_weight_data = defaultdict(list)
            det.prev = set()
            graph.append(det)
            if show:
                det.draw(frame, label=det.id)

        history = history[-max_temporal_distance:]
        for old_detections in history:
            for prv in old_detections:
                for nxt in detections:
                    if nxt.iou(prv) > min_iou:
                        connect(prv, nxt, None)
        history.append(detections)

        if show:
            view(frame, pause=True)

    return graph
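connect() is not part of this excerpt. Judging from how make_graph initialises next_weight_data as a defaultdict(list) and prev as a set on every detection, a plausible minimal version (an assumption, not the project's actual code) is:

def connect(prv, nxt, weight_data):
    # Add a graph edge from the earlier detection to the later one and
    # stash the evidence used to weight it later (assumed behaviour).
    prv.next_weight_data[nxt].append(weight_data)
    nxt.prev.add(prv)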
Example #3
def test_flipp_aspect_ratio_single_image_wide():
    """Test flipp with only one image wider than 16:9
    """
    import numpy as np
    from vi3o import view, flipp
    from vi3o.debugview import DebugViewer

    img_1 = np.zeros(shape=(5, 10), dtype=np.uint8)

    flipp()
    view(img_1)
    flipp()

    assert DebugViewer.named_viewers["Default"].image.width == 10
    assert DebugViewer.named_viewers["Default"].image.height == 5
Example #4
def test_flipp_aspect_ratio_default_viewer_1_1_vertical():
    """Test exactly 1:1 aspect ratio stacked horizontally
    """
    import numpy as np
    from vi3o import view, flipp
    from vi3o.debugview import DebugViewer

    img_1 = np.zeros(shape=(3, 9), dtype=np.uint8)
    img_2 = np.ones(shape=(3, 9), dtype=np.uint8) * 128
    img_3 = np.ones(shape=(3, 9), dtype=np.uint8) * 255

    flipp()
    view(img_1)
    view(img_2)
    view(img_3)
    flipp(aspect_ratio=1)

    assert DebugViewer.named_viewers["Default"].image.width == 9
    assert DebugViewer.named_viewers["Default"].image.height == 9
Example #5
def test_flipp_aspect_ratio_default_viewer_default_is_16_9_approx_narrow():
    """Test approimatelly 16:9 aspect ratio
    """
    import numpy as np
    from vi3o import view, flipp
    from vi3o.debugview import DebugViewer

    img_1 = np.zeros(shape=(9, 5), dtype=np.uint8)
    img_2 = np.ones(shape=(9, 5), dtype=np.uint8) * 128
    img_3 = np.ones(shape=(9, 5), dtype=np.uint8) * 255

    flipp()
    view(img_1)
    view(img_2)
    view(img_3)
    flipp()

    assert DebugViewer.named_viewers["Default"].image.width == 15
    assert DebugViewer.named_viewers["Default"].image.height == 9
Example #6
def test_flipp_aspect_ratio_default_viewer_infinite_vertical():
    """Test infinite horizontal stacking, this is the legacy default
    """
    import numpy as np
    from vi3o import view, flipp
    from vi3o.debugview import DebugViewer

    img_1 = np.zeros(shape=(10, 1), dtype=np.uint8)
    img_2 = np.ones(shape=(10, 1), dtype=np.uint8) * 128
    img_3 = np.ones(shape=(10, 1), dtype=np.uint8) * 255

    flipp()
    view(img_1)
    view(img_2)
    view(img_3)
    flipp(aspect_ratio=0)

    assert DebugViewer.named_viewers["Default"].image.width == 1
    assert DebugViewer.named_viewers["Default"].image.height == 30
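Taken together, the tests pin down the flipp() contract: the first call starts collecting images passed to view(), and the closing call composes them into a single mosaic laid out to approximate the requested aspect ratio (roughly 16:9 by default, pure vertical stacking for aspect_ratio=0). A usage sketch along those lines:

import numpy as np
from vi3o import view, flipp

frames = [np.full((120, 160), g, dtype=np.uint8) for g in (0, 128, 255)]

flipp()                   # start collecting
for img in frames:
    view(img)             # queued into the mosaic instead of shown one by one
flipp(aspect_ratio=1)     # flush as a roughly square mosaic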
Example #7
def imshowsc(img):
    """
    Rescales (and translates) the intensities of the image *img* to cover the 0..255 range.
    Then display the image *img* in the DebugViewer and pause the viewer with the image showing.
    """
    view(img, scale=True, pause=True)
Example #8
def imshow(img):
    """
    Display the image *img* in the DebugViewer and pause the viewer with the image showing.
    """
    view(img, pause=True)
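Both helpers wrap view(); the difference matters for data that is not already 8-bit. Going by the docstrings, imshowsc() stretches the intensity range to 0..255 before display, so for example a float image with values in 0..1 becomes visible instead of near-black:

import numpy as np

gradient = np.linspace(0.0, 1.0, 256 * 256).reshape(256, 256)
imshow(gradient)    # displayed as-is; 0..1 values look almost black on a 0..255 scale
imshowsc(gradient)  # intensities rescaled to 0..255 first, so the ramp is visible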
Example #9
    def __next__(self):
        return self.next()

    def next(self):
        headers = {}
        while True:
            l = self._fd.readline()
            if not l:
                raise StopIteration
            if not l.strip() and headers:
                break
            if b':' in l:
                i = l.index(b':')
                headers[l[:i]] = l[i + 1:].strip()

        data = self._fd.read(int(headers[b'Content-Length']))
        img = imread(BytesIO(data)).view(Frame)  # data is bytes; io.BytesIO, not StringIO
        img.index = self.fcnt
        self.fcnt += 1
        img.timestamp = img.systime = -1  # FIXME
        return img


if __name__ == '__main__':
    from vi3o import view
    for img in AxisCam("192.168.0.90",
                       username="******",
                       password="******",
                       no_proxy=True):
        view(img)
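The header-parsing loop above consumes a multipart/x-mixed-replace stream of the kind Axis cameras serve: each part carries headers, then a blank line, then a JPEG payload whose size is given by Content-Length. A self-contained run of the same loop against a fabricated stream (boundary and payload are made up):

from io import BytesIO

stream = BytesIO(b"--myboundary\r\n"
                 b"Content-Type: image/jpeg\r\n"
                 b"Content-Length: 4\r\n"
                 b"\r\n"
                 b"\xff\xd8\xff\xd9")   # placeholder JPEG bytes

headers = {}
while True:
    l = stream.readline()
    if not l:
        break
    if not l.strip() and headers:
        break
    if b':' in l:
        i = l.index(b':')
        headers[l[:i]] = l[i + 1:].strip()

data = stream.read(int(headers[b'Content-Length']))
assert data == b"\xff\xd8\xff\xd9"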
Example #10
File: netcam.py Project: hakanardo/vi3o
            request.add_header("Authorization", "Basic %s" % base64string)
        self._fd = urllib2.urlopen(request)
        self.fcnt = 0

    def __iter__(self):
        return self

    def next(self):
        headers = {}
        while True:
            l = self._fd.readline()
            if not l:
                raise StopIteration
            if not l.strip() and headers:
                break
            if ':' in l:
                i = l.index(':')
                headers[l[:i]] = l[i+1:].strip()

        data = self._fd.read(int(headers['Content-Length']))
        img = imread(StringIO(data)).view(Frame)
        img.index = self.fcnt
        self.fcnt += 1
        img.timestamp = img.systime = -1 # FIXME
        return img

if __name__ == '__main__':
    from vi3o import view
    for img in AxisCam('10.2.3.125', 640, 360):
        view(img)
Example #11
from collections import defaultdict

import cv2
import numpy as np

from vi3o import view

# connect() and estimate_intradet_iou() are helpers from the surrounding
# project; a sketch of the assumed KltTrack class follows the example.


def make_graph(video_detections, fps, show=False, max_connect=5):

    tracks = []
    lk_params = dict(winSize=(15, 15),
                     maxLevel=4,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    feature_params = dict(maxCorners=5000,
                          qualityLevel=0.01,
                          minDistance=10,
                          blockSize=7)

    col = (255, 0, 0)
    max_len = 3 * fps
    min_klt_per_obj = 10
    velocity_history = fps // 2
    prediction_df = fps

    graph = []
    detect = True
    for frame_idx, frame, detections in video_detections:
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        height, width, _ = frame.shape

        estimate_intradet_iou(detections)
        for det in detections:
            det.next_weight_data = defaultdict(list)
            det.pre_vs = []
            det.post_vs = []
            det.prev = set()
            if show:
                det.draw(frame, label=det.id)
            graph.append(det)

        # Track klt points to next frame
        if len(tracks) > 0:
            interesting = []
            img0, img1 = prev_gray, frame_gray
            p0 = np.float32([(tr.x, tr.y) for tr in tracks]).reshape(-1, 1, 2)

            # See how the points have moved between the two frames
            p1, st, err1 = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None,
                                                    **lk_params)
            p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None,
                                                    **lk_params)
            d = abs(p0 - p0r).reshape(-1, 2).max(-1)
            good = d < 1
            new_tracks = []
            for tr, (x, y), good_flag, e in zip(tracks, p1.reshape(-1, 2),
                                                good, err1.flat):
                if not good_flag:
                    continue
                if not (0 <= x < width and 0 <= y < height):
                    continue
                if e > 1e3:
                    continue
                tr.history.append((frame_idx, x, y, e))
                tr.history = tr.history[-max_len - 1:]
                new_tracks.append(tr)
                if show:
                    cv2.circle(frame, (int(x), int(y)), 2,
                               (255 - min(e * 10, 255), 0, 0), -1)
                for prev_dets in tr.dets_history:
                    for det in prev_dets:
                        if det.id == 2860144:  # debug leftover: singles out one hard-coded object id
                            interesting.append(tr)
            tracks = new_tracks
            interesting = tracks  # overrides the selection above; all tracks are drawn

            if show:
                cv2.polylines(frame, [
                    np.int32([(x, y) for _, x, y, _ in tr.history])
                    for tr in interesting
                ], False, col)

        # Find detections with too few klt points
        if detect:
            mask = np.zeros_like(frame_gray)
        detect = False
        min_area = float('Inf')
        for det in detections:
            cnt = 0
            for tr in tracks:
                if det.covers(tr.x, tr.y):
                    cnt += 1
            if cnt < min_klt_per_obj:
                det.update_mask(mask)
                detect = True
                min_area = min(min_area, det.area)

        # Detect new klt points
        if detect:
            feature_params['minDistance'] = int(
                np.sqrt(min_area / min_klt_per_obj))
            for tr in tracks:
                cv2.circle(mask, (int(tr.x), int(tr.y)),
                           feature_params['minDistance'] // 2, 0, -1)
            p = cv2.goodFeaturesToTrack(frame_gray,
                                        mask=mask,
                                        **feature_params)
            if p is not None:
                for x, y in np.float32(p).reshape(-1, 2):
                    nt = KltTrack(frame_idx, x, y)
                    tracks.append(nt)

        # Assign detections to klt points and build detection connectivity graph
        new_tracks = []
        for tr in tracks:
            vx = vy = None

            last_dets = []
            for nxt in detections:
                if nxt.covers(tr.x, tr.y):
                    last_dets.append(nxt)
                    tr.dets_history_for_post_vx[frame_idx].append(nxt)
                    for prev_dets in tr.dets_history:
                        for prv in prev_dets:
                            df = nxt.frame - prv.frame
                            klt = tr.history[-df - 1:]
                            connect(prv, nxt, ('klt', klt))
                    if vx is None and len(tr.history) > velocity_history:
                        # Predict where the detection will be in the future
                        hist = tr.history[-velocity_history:]
                        (vx, x0), rx, _, _, _ = np.polyfit(
                            range(len(hist)), [p[1] for p in hist], 1, full=True)
                        (vy, y0), ry, _, _, _ = np.polyfit(
                            range(len(hist)), [p[2] for p in hist], 1, full=True)
                        klt_res = [p[3] for p in hist]
                        r = (rx[0] + ry[0], sum(klt_res), max(klt_res))
                    if vx is not None:
                        v = (vx, vy) + r
                        nxt.pre_vs.append(v)
                        for df in range(1, prediction_df):
                            d = nxt.predict(df, vx, vy)
                            d.original = nxt
                            d.prediction_v = v
                            tr.predictions[d.frame].append(d)
                        for d in tr.dets_history_for_post_vx[frame_idx -
                                                             velocity_history]:
                            d.post_vs.append(v)
            if frame_idx - velocity_history in tr.dets_history_for_post_vx:
                del tr.dets_history_for_post_vx[frame_idx - velocity_history]
            if last_dets:
                tr.dets_history.append(last_dets)
            tr.dets_history = tr.dets_history[-max_connect:]
            f = frame_idx - max_len
            tr.dets_history = [
                last_dets for last_dets in tr.dets_history
                if last_dets[0].frame > f
            ]
            if tr.dets_history:
                new_tracks.append(tr)
        tracks = new_tracks

        # Form long term connection from predicted detections
        for tr in tracks:
            for prd in tr.predictions[frame_idx]:
                for det in detections:
                    if det not in tr.dets_history[-1] and prd.iou(det) > 0.5:
                        connect(prd.original, det, ('long', prd))
            del tr.predictions[frame_idx]

        if show:
            for det in detections:
                if det.pre_vs:
                    # vx = np.median([vx for vx, vy in det.pre_vs])
                    # vy = np.median([vy for vx, vy in det.pre_vs])
                    for vx, vy, r, _, _ in det.pre_vs:
                        df = 30
                        d = det.predict(df, vx, vy)
                        cv2.arrowedLine(frame, (int(det.cx), int(det.cy)),
                                        (int(d.cx), int(d.cy)),
                                        (0, 0, max(0, 255 - int(r))), 1)

        prev_gray = frame_gray
        if show:
            view(frame)

    return graph
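make_graph leans on a KltTrack helper that is not part of this excerpt. Reconstructed from how its attributes are used above, a minimal stand-in could look like this (the project's real class may well differ):

from collections import defaultdict


class KltTrack:
    def __init__(self, frame_idx, x, y):
        self.history = [(frame_idx, x, y, 0.0)]   # (frame, x, y, klt_error) samples
        self.dets_history = []                    # recent per-frame lists of covering detections
        self.dets_history_for_post_vx = defaultdict(list)
        self.predictions = defaultdict(list)      # frame -> predicted detections

    @property
    def x(self):
        # Current position is the latest tracked point
        return self.history[-1][1]

    @property
    def y(self):
        return self.history[-1][2]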