Code example #1
File: demo.py  Project: MWL5/MDNet
                'samples': [samples[i]]
            }]
            # forward this sample's mini-batch through the test net and
            # record its classification score
            blob = get_next_mini_batch(db)
            blob = {'data': blob['data']}
            net.blobs['data'].reshape(*blob['data'].shape)
            out = net.forward(**blob)['cls_prob']
            scores[i] = out[1]


if __name__ == '__main__':
    caffe.set_mode_gpu()
    caffe.set_device(1)
    # get the deploy solver and net with pre-trained caffe model
    train = os.path.join('model', 'deploy_solver.prototxt')
    test = os.path.join('model', 'deploy_test.prototxt')
    weights = os.path.join('model', 'MDNet_iter_800000.caffemodel')

    solver, net = get_solver_net(train, test, weights)

    # get the Evaluator
    dtype = 'VOT'
    dbpath = os.path.join('data', 'vot2014')
    gtpath = dbpath

    vdbc = VDBC(dbtype=dtype, dbpath=dbpath, gtpath=gtpath, flush=True)

    evl = Evaluator(vdbc)
    evl.set_video(1)

    evaluate(evl, solver, net)
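
The helper get_solver_net used above is defined elsewhere in the project. A minimal sketch of what such a helper could look like with the standard pycaffe API is shown below; the SGD solver type and the way the test net is initialized are assumptions, not the project's confirmed implementation.

import caffe

def get_solver_net(train_proto, test_proto, weights):
    # Hypothetical reconstruction: a solver for online finetuning plus a
    # separate TEST-phase net, both initialized from the pre-trained model.
    solver = caffe.SGDSolver(train_proto)   # train_proto is a solver prototxt
    solver.net.copy_from(weights)           # load the pre-trained caffemodel
    net = caffe.Net(test_proto, weights, caffe.TEST)
    return solver, net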
Code example #2
File: test.py  Project: MWL5/MDNet
        # score below threshold: finetune on the short-term sample pool;
        # otherwise refresh with the long-term pool every 10 frames
        if score < threshold:
            finetune(solver, frame_samples, short_term)
        elif term % 10 == 0:
            finetune(solver, frame_samples, long_term)

        evl.report(box.reshape((4, )))
        gt = box.reshape((4, ))

        if VISUAL:
            ground_truth = evl.get_ground_truth()
            vis_detection(im_path, ground_truth, gt)

        im_path = evl.next_frame()


if __name__ == '__main__':
    solver, net = get_solver_net(train, test, weights)

    # get the Evaluator
    dtype = 'VOT'
    dbpath = os.path.join('data', 'vot2014')
    gtpath = dbpath

    vdbc = VDBC(dbtype=dtype, dbpath=dbpath, gtpath=gtpath, flush=True)
    evl = Evaluator(vdbc)

    video_num = evl.get_video_num()
    print 'Total video sequences: {}.'.format(video_num)
    for i in range(video_num):
        evaluate(evl, solver, net)
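
finetune, short_term and long_term are also defined outside this excerpt. The sketch below is one plausible reading, assuming short_term/long_term are rolling sample pools and the training net exposes data/labels input blobs filled via get_next_mini_batch; the 'labels' key and the iteration count are guesses.

def finetune(solver, frame_samples, sample_pool, n_iter=10):
    # Hypothetical helper: grow the pool with the latest frame's samples and
    # run a few SGD steps on mini-batches drawn from it.
    sample_pool.extend(frame_samples)
    for _ in range(n_iter):
        blob = get_next_mini_batch([{'samples': sample_pool}])
        solver.net.blobs['data'].reshape(*blob['data'].shape)
        solver.net.blobs['data'].data[...] = blob['data']
        solver.net.blobs['labels'].reshape(*blob['labels'].shape)
        solver.net.blobs['labels'].data[...] = blob['labels']
        solver.step(1)                      # one SGD iteration per mini-batch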
Code example #3
        plt.gca().add_patch(
            plt.Rectangle((box[0], box[1]),
                          box[2],
                          box[3],
                          fill=False,
                          edgecolor='blue',
                          linewidth=1.5))

    plt.show()


if __name__ == '__main__':
    IM_PER_FRAME = 256
    dtype = 'VOT'
    dbpath = os.path.join('data', 'VOT')
    gtpath = dbpath

    vdbc = VDBC(dbtype=dtype, dbpath=dbpath, gtpath=gtpath, flush=True)
    evl = Evaluator(vdbc)

    evl.set_video(3)
    im_path, gt = evl.init_frame()
    im = cv2.imread(im_path)
    frame_samples = mdnet_sample(im, gt, PARAMS, IM_PER_FRAME, 'TEST')
    #frame_samples = uniform_sample(im ,gt, PARAMS, IM_PER_FRAME, 'TEST')

    bboxes = [sample['box'] for sample in frame_samples]
    vis_detection(im_path, gt, bboxes)

    #print type(bboxes)
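
The fragment at the top of this example is only the tail of the drawing routine. A self-contained sketch of a full vis_detection along the same lines is given below; the [x, y, w, h] box format and the red colour for the ground truth are assumptions.

import cv2
import matplotlib.pyplot as plt

def vis_detection(im_path, gt, boxes):
    # Hypothetical reconstruction: show the frame with the ground-truth box
    # in red and every candidate box in blue; boxes are [x, y, w, h].
    im = cv2.imread(im_path)[:, :, ::-1]    # BGR (OpenCV) -> RGB (matplotlib)
    plt.imshow(im)

    plt.gca().add_patch(
        plt.Rectangle((gt[0], gt[1]), gt[2], gt[3],
                      fill=False, edgecolor='red', linewidth=2.0))

    for box in boxes:
        plt.gca().add_patch(
            plt.Rectangle((box[0], box[1]), box[2], box[3],
                          fill=False, edgecolor='blue', linewidth=1.5))

    plt.show()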
Code example #4
File: test.py  Project: webkader/MDNet
    print 'mAP: {}.'.format(evl.get_mAP())

    record.add_record(sample_num=sample_num,
                      frame_num=term,
                      mAP=evl.get_mAP(),
                      total_time=total_timer.diff,
                      finetune_iter=finetune_iter_)
    record._save_json()

if __name__ == '__main__':

    # sweep the number of samples per frame downward in steps of 5
    while IMS_PER_FRAME > 20:
        solver, net = get_solver_net(train, test, weights)

        # get the Evaluator
        dtype = 'VOT'
        dbpath = os.path.join('data', 'VOT')
        gtpath = dbpath

        vdbc = VDBC(dbtype=dtype, dbpath=dbpath, gtpath=gtpath, flush=True)
        evl = Evaluator(vdbc)

        video_num = evl.get_video_num()
        print 'Total video sequences: {}.'.format(video_num)
        #    for i in range(video_num):
        #        evaluate(evl, solver, net)

        evl.set_video(19)
        evaluate(evl, solver, net, IMS_PER_FRAME)
        IMS_PER_FRAME -= 5
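
The record object (add_record / _save_json) comes from another module. A minimal sketch of such a helper, assuming it simply accumulates keyword statistics and dumps them to a JSON file, could be:

import json

class Record(object):
    # Hypothetical helper: collect per-run tracking statistics and write
    # them out as JSON. The file name is an assumption.
    def __init__(self, path='record.json'):
        self._path = path
        self._entries = []

    def add_record(self, **kwargs):
        # one entry per run: sample_num, frame_num, mAP, total_time, ...
        self._entries.append(kwargs)

    def _save_json(self):
        with open(self._path, 'w') as f:
            json.dump(self._entries, f, indent=2)

record = Record()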