Example #1
0
def _test_traindata_provider():
    """Smoke-test the tracker on the first sequence: initialize on the first
    frame, then track through every remaining annotated frame, displaying the
    ground-truth and predicted rectangles.
    """
    seqs = load_seq_infos(1)
    seq = seqs[0]
    show_fid = TestCfg.SHOW_TRACK_RESULT_FID
    trk = tracker.ConvRegTracker()
    init = seq.gtRect[0]
    init_rect = Rect(*init)
    img_root = os.path.join(TestCfg.SEQUENCE_DIR, '../', seq.path)
    path = os.path.join(img_root,
                        seq.imgFormat.format(seq.startFrame))
    init_image = cv2.imread(path)
    display.show_track_res(seq.startFrame, init_image, init_rect, init_rect, show_fid)
    trk.init(init_image, init_rect)
    # BUG FIX: the original `while True` loop reset frame_id to seq.startFrame
    # and read gtRect[0] on every iteration, tracking the same first frame
    # forever. Advance through the sequence as _test_tracker does.
    for fid in range(1, len(seq.gtRect)):
        frame_id = fid + seq.startFrame
        path = os.path.join(img_root,
                            seq.imgFormat.format(frame_id))
        image = cv2.imread(path)
        gt_rect = Rect(*seq.gtRect[fid])
        pred_rect = trk.track(image)
        display.show_track_res(frame_id, image, gt_rect, pred_rect, show_fid)
Example #2
0
def show_res(sec_image, obj_image, conf, out_conf, preprocess):
    """Visualize one tracking step: the search patch with the predicted
    object box drawn on it, the target patch, and both confidence maps
    (label and network output).
    """
    conf_map = conf.copy()
    net_conf = out_conf.copy()
    search_vis = sec_image.copy()

    # Inputs are zero-centered in [-0.5, 0.5]; map back to uint8 for display.
    search_vis = np.asarray((search_vis + 0.5) * 255.0, dtype=np.uint8)
    vis_w, vis_h = search_vis.shape[1], search_vis.shape[0]

    target_vis = obj_image.copy()
    target_vis = np.asarray((target_vis + 0.5) * 255.0, dtype=np.uint8)

    # The peak of the network confidence map gives the predicted cell.
    peak_y, peak_x = np.unravel_index(np.argmax(net_conf), net_conf.shape)
    box_w = vis_w / 2.5
    box_h = vis_h / 2.5

    patch_rect = Rect(0, 0, sec_image.shape[1], sec_image.shape[0])
    pred_cx, pred_cy = preprocess.predict_location(patch_rect, peak_x, peak_y)
    left = int(pred_cx - (box_w - 1) / 2.0 + 0.5)
    top = int(pred_cy - (box_h - 1) / 2.0 + 0.5)
    box = Rect(left, top, box_w, box_h).get_int_rect()
    cv2.rectangle(search_vis, box.get_tl(), box.get_dr(), (255, 0, 0), 2)

    plt.figure(23245)
    plt.clf()
    plt.imshow(search_vis)

    plt.figure(12897)
    plt.clf()
    plt.subplot(131)
    plt.imshow(target_vis)
    plt.subplot(132)
    plt.imshow(conf_map)
    plt.colorbar()
    plt.subplot(133)
    plt.imshow(net_conf)
    plt.colorbar()

    plt.show()
    plt.pause(0.1)
Example #3
0
def _test_statistic_motion():
    """Collect per-frame relative motion (center displacement divided by
    object size) over all sequences, plot the scatter on a canvas, and fit
    a zero-mean normal distribution to the motion data.
    """
    seqs = load_seq_infos()
    data_list = []
    # 2001x2001 canvas: relative motion in [-1, 1] maps to pixel
    # coordinates via *1000 + 1000 (center of the canvas is motion 0).
    img = np.zeros((2001, 2001, 3), dtype=np.uint8)
    for seq in seqs:
        _tmp = []
        _rect = Rect(*seq.gtRect[0])
        for i in range(1, len(seq.gtRect)):
            _next_rect = Rect(*seq.gtRect[i])
            # Normalize displacement by the geometric-mean object size.
            _size = math.sqrt(_rect.w*_rect.h)
            rel_motion_x = (_next_rect.get_center()[0] - _rect.get_center()[0])/_size
            rel_motion_y = (_next_rect.get_center()[1] - _rect.get_center()[1])/_size
            _tmp.append((rel_motion_x, rel_motion_y))
            _rect = _next_rect
            idx = round(rel_motion_x * 1000) + 1000
            idy = round(rel_motion_y * 1000) + 1000

            if 0 <= idx < 2001 and 0 <= idy < 2001:
                img[idy, idx, 1] = 255
        data_list.extend(_tmp)
    print('Total length: {}'.format(len(data_list)))
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    data = np.array(data_list, dtype=float)

    import scipy.stats
    # Fit with the location fixed at 0; rv is the (loc, scale) tuple.
    rv = scipy.stats.norm.fit(data[:], floc=0.0)
    print(rv)
    cv2.circle(img, (1000,1000), radius=int(round(1000*rv[1])), color=(255,0,0), thickness=3)
    plt.figure()
    plt.imshow(img)
    plt.show()

    # NOTE(review): rv[1] is the fitted scale (standard deviation), although
    # the label below says 'Variance' — confirm which quantity is intended.
    print('Variance: {:.4f}'.format(rv[1]))
    plt.waitforbuttonpress()
Example #4
0
def _test_tracker():
    """Run the tracker over the alphabetically-first sequence, displaying
    the ground-truth and predicted rectangle for every frame."""
    seq_list = load_seq_infos()
    seq_list.sort(key=lambda o: o.name)
    fig_id = TestCfg.SHOW_TRACK_RESULT_FID
    conv_tracker = tracker.ConvRegTracker()
    sequence = seq_list[0]
    first_rect = Rect(*sequence.gtRect[0])
    root = os.path.join(TestCfg.SEQUENCE_DIR, '../', sequence.path)
    first_path = os.path.join(root,
                              sequence.imgFormat.format(sequence.startFrame))
    first_frame = cv2.imread(first_path)
    display.show_track_res(sequence.startFrame, first_frame, first_rect, first_rect, fig_id)
    conv_tracker.init(first_frame, first_rect)
    # Track every annotated frame after the first.
    for offset in range(1, len(sequence.gtRect)):
        frame_num = offset + sequence.startFrame
        frame_path = os.path.join(root,
                                  sequence.imgFormat.format(frame_num))
        frame = cv2.imread(frame_path)
        truth = Rect(*sequence.gtRect[offset])
        predicted = conv_tracker.track(frame)
        display.show_track_res(frame_num, frame, truth, predicted, fig_id)
Example #5
0
File: run_tracker.py  Project: xjtuwh/crt
def run_tracker(s_frames, init_rect):
    """Track an object through the frames listed in *s_frames*, starting
    from *init_rect* on the first frame.

    Returns one [x, y, w, h] list per frame; the first entry is the
    initial rectangle itself.
    """
    trker = ConvRegTracker()

    first_frame = cv2.imread(s_frames[0])
    trker.init(first_frame, Rect(*init_rect))

    results = [list(init_rect)]
    for frame_path in s_frames[1:]:
        frame = cv2.imread(frame_path)
        tracked = trker.track(frame)
        results.append([tracked.x, tracked.y, tracked.w, tracked.h])

    return results
Example #6
0
def clip_image(image, rect):
    """Extract the sub-image covered by *rect*, replicating the nearest
    border pixel for any part of the rectangle outside the image."""
    width = image.shape[1]
    height = image.shape[0]
    bounds = Rect(0, 0, width, height)
    if rect.is_in_rect(bounds):
        # Fully inside the image: a plain (copied) slice suffices.
        return image[rect.y:rect.y+rect.h, rect.x:rect.x+rect.w, :].copy()

    # Clamp out-of-range coordinates to the valid pixel range, which
    # replicates the border, then gather via fancy indexing (returns a copy).
    col_idx = np.clip(np.arange(rect.w) + rect.x, 0, width - 1)
    row_idx = np.clip(np.arange(rect.h) + rect.y, 0, height - 1)
    col_grid = np.tile(col_idx[None, :], (rect.h, 1))
    row_grid = np.tile(row_idx[:, None], (1, rect.w))

    return image[row_grid, col_grid]
Example #7
0
    def get_object_rect_by_index(self, search_rect, obj_index_y, obj_index_x):
        """Map a response-map peak (obj_index_y, obj_index_x) back to an
        object rectangle in image coordinates within *search_rect*."""
        # Image pixels covered by one feature-map cell.
        step_x = search_rect.w / float(self.feature_size_w)
        step_y = search_rect.h / float(self.feature_size_h)

        # Offset of the peak from the response-map center, in cells.
        off_y = obj_index_y - int((self.response_size_h - 1) / 2.0)
        off_x = obj_index_x - int((self.response_size_w - 1) / 2.0)

        # Predicted object center in image coordinates.
        center_x, center_y = search_rect.get_center()
        pred_cx = center_x + off_x * step_x
        pred_cy = center_y + off_y * step_y

        # Object size: the search area shrunk by the feature/convolution ratio.
        ratio_w = self.feature_size_w / float(self.convolution_w)
        ratio_h = self.feature_size_h / float(self.convolution_h)
        pred_w = round(search_rect.w / ratio_w)
        pred_h = round(search_rect.h / ratio_h)

        pred_tlx = round(pred_cx - (pred_w - 1) / 2.0)
        pred_tly = round(pred_cy - (pred_h - 1) / 2.0)

        return Rect(pred_tlx, pred_tly, pred_w, pred_h)
Example #8
0
    def get_scaled_search_feature(self, image, object_rect):
        """Build search patches at several scales centered on *object_rect*,
        extract CNN features for each, and return (search rects, BGR
        patches, features, scaled object rects)."""
        step_w = max(1, round(object_rect.w * self.scale_ratio))
        step_h = max(1, round(object_rect.h * self.scale_ratio))

        # Candidate object rectangles: the original size plus/minus up to
        # scale_test_num steps in each direction, sharing the same center.
        candidate_rects = []
        for k in range(2 * self.scale_test_num + 1):
            w = object_rect.w + step_w * (k - self.scale_test_num)
            h = object_rect.h + step_h * (k - self.scale_test_num)
            if w < 5 or h < 5:
                # Degenerate size at this scale step; skip it.
                print('Warning: w < 5 or h < 5')
                continue
            cx, cy = object_rect.get_center()
            left = round(cx - (w - 1)/2.0)
            top = round(cy - (h - 1)/2.0)
            candidate_rects.append(Rect(left, top, w, h))

        # Enlarge each candidate into its search area.
        ratio_w = self.feature_size_w / float(self.convolution_w)
        ratio_h = self.feature_size_h / float(self.convolution_h)

        search_rects = []
        search_patches = []
        net_inputs = []
        for cand in candidate_rects:
            s_rect = cand.get_copy().scale_from_center(ratio_w, ratio_h)
            patch = clip_image(image, s_rect)
            resized = cv2.resize(patch, (self.input_search_w, self.input_search_h))

            search_rects.append(s_rect)
            search_patches.append(patch)
            net_inputs.append(resized)
        if self._show_search_bgr_fid:
            display.show_image(search_patches[0], self._show_search_bgr_fid, 'Train & search patch')

        features = self.extractor.extract_multiple_features(net_inputs)
        return search_rects, search_patches, features, candidate_rects
Example #9
0
def _test_data_provider():
    patch_rect = Rect(0,0, 500, 500)
    gt_rect = Rect(152, 134, 42, 120)
    response_size = (32, 32)