Code example #1
def play(filepath):
    global img
    global rects
    global video
    cv2.namedWindow(win_name)
    cv2.cv.SetMouseCallback(win_name, on_mouse, 0)

    clf = model.load_classifier()
    pos = 0  # frame position
    video = cv2.VideoCapture(filepath)
    frames = video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
    cv2.cv.CreateTrackbar('tracker', win_name, pos, int(frames), on_change)
    key_val = 0  # 0: step frame-by-frame on key press; 1: continuous playback with recognition
    while video.isOpened():
        ret, img = video.read()
        if not ret:
            break
        if key_val == 0:
            cv2.imshow(win_name, img)

        pos += 1
        c = cv2.waitKey(key_val)
        if c == ord('c'):
            # switch to continuous playback and replace the mouse callback with a no-op
            key_val = 1
            cv2.cv.SetMouseCallback(win_name, on_mouse_do_nothing, 0)
        elif c == ord('q'):
            break

        if key_val == 1:
            if op.has_score_board(img, target_imgs[0], rects[0]):
                show_recognize(clf, img, rects[1:])
            cv2.imshow(win_name, img)
    video.release()
    cv2.destroyAllWindows()
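
This snippet relies on module-level state that is not shown here (`win_name`, `rects`, `target_imgs`, the mouse and trackbar callbacks, and the `model`, `op`, and `show_recognize` helpers). A hypothetical invocation, with a placeholder path, might look like this:

# Hypothetical usage; 'data/game.mp4' is a placeholder path, and the globals the
# function reads (win_name, rects, target_imgs, callbacks, model/op helpers)
# are assumed to be set up by the surrounding module.
# Controls: any key steps one frame, 'c' starts continuous playback with
# score-board recognition, 'q' quits.
play('data/game.mp4')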
Code example #2
def get_classifier(modelfile, cache=True, use_classify_many=False):
    classifier, encoder = load_classifier(modelfile)
    cache_dict = {}
    classes = encoder.classes_  # maps numeric labels back to class names

    def classify(name):
        # classify a single name, memoizing the result when cache=True
        name = name.upper()
        if name in cache_dict:
            return cache_dict[name]
        else:
            feature = gen_features(name)
            label_numeric = classifier.predict(feature)[0]
            label = classes[label_numeric]

            if cache:
                cache_dict[name] = label
            return label

    classify.classifier = classifier

    def classify_many(names):
        # classify a batch of names with a single predict() call (no caching)
        features = [gen_features(name) for name in names]
        labels = classifier.predict(features)
        return [classes[l] for l in labels]

    classify_many.classifier = classifier

    if use_classify_many:
        return classify_many
    else:
        return classify
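
A hypothetical usage sketch for the factory above; the model file name and the input strings are placeholders, and `load_classifier` / `gen_features` are assumed to be provided by the same module.

# Hypothetical usage; 'names.pkl' and the input strings are placeholders.
classify = get_classifier('names.pkl')
label = classify('ABC')                     # single lookup, memoized per input
classify_many = get_classifier('names.pkl', use_classify_many=True)
labels = classify_many(['ABC', 'XYZ'])      # one predict() call for the batch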
Code example #3
def test_get_text(img):
    imgs, pos = get_text(img)
    print pos
    clf = model.load_classifier()
    for i, char_img in enumerate(imgs):  # show and classify each extracted character image
        cv2.imshow(str(i), char_img)
        print model.pred(clf, char_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
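
A hypothetical call of the test helper; the image path is a placeholder and is assumed to point to a frame containing the text to extract.

# Hypothetical usage; 'frame.png' is a placeholder path.
import cv2

frame = cv2.imread('frame.png')
test_get_text(frame)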
Code example #4
def find_num_pos(video, lines):
    fps = video.get(cv2.cv.CV_CAP_PROP_FPS)
    if len(lines) == 0:
        print 'no lines'
        return

    clf = model.load_classifier()
    # jump 15 minutes into the video before scanning
    video.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, fps * 60 * 15)
    while video.isOpened():
        ret, cur = video.read()
        if not ret:
            break

        key = cv2.waitKey(1)
        if key == ord('q'):
            break

        for line in lines:
            # crop the band of rows between this pair of y-coordinates
            imgs, pos = nc.get_text(cur[line[0] + 1:line[1] - 1])
            for i, img in enumerate(imgs):
                num, prob = model.pred_prob(clf, img)
                if prob > 0.2:
                    continue
                cv2.putText(cur,
                            str(int(num)), (pos[i][0], line[0]),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.6, (0, 0, 0xff),
                            thickness=2)

        cv2.imshow('cur', cur)
        # (earlier debugging code binarised the frame and ran Canny/HoughLines here)

    video.release()
    cv2.destroyAllWindows()
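
A hypothetical invocation of `find_num_pos`; the file path and the row coordinates are placeholders, chosen only to show the expected shape of `lines` (each entry is a pair of y-coordinates bounding one horizontal strip to scan).

# Hypothetical usage; the path and row coordinates below are placeholders.
import cv2

cap = cv2.VideoCapture('data/game.mp4')
strips = [(100, 140), (300, 340)]  # (top_y, bottom_y) pairs to scan for digits
find_num_pos(cap, strips)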
Code example #5
def get_score_info(video):
    global step
    score_frames = []
    twenty_four_frames = []
    camera_change_frames = []

    score_a, score_b = 0, 0  # scores of the two teams
    last_sec = 0  # last value read from the 24-second shot clock
    last_frame = None
    pos = 0  # frame position
    video.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, pos)
    clf = model.load_classifier()
    while video.isOpened():
        if pos % step == 0:
            ret, frame = video.read()
        else:
            ret = video.grab()  # skip decoding frames we do not analyse

        if not ret:
            break

        # only analyse every step-th frame
        if pos % step != 0:
            pos += 1
            continue

        # detect camera cuts by comparing with the previous analysed frame
        if pos != 0 and hist.camera_change(last_frame, frame):
            camera_change_frames.append(pos)
            print 'camera change', pos
        last_frame = frame

        if op.has_score_board(frame, target_imgs[0], rects[0]):
            # score of team A
            img = op.get_subimage(frame, rects[1][0], rects[1][1])
            num = model.recognize_number(clf, img)
            if score_two_or_three(num, score_a):
                score_frames.append(pos)
                print 'a from', score_a, 'to', num, pos
            if not noise(num, score_a):
                score_a = num

            # score of team B
            img = op.get_subimage(frame, rects[2][0], rects[2][1])
            num = model.recognize_number(clf, img)
            if score_two_or_three(num, score_b):
                score_frames.append(pos)
                print 'b from', score_b, 'to', num, pos
            if not noise(num, score_b):
                score_b = num

            # 24-second shot clock: record frames where it resets to 24
            img = op.get_subimage(frame, rects[3][0], rects[3][1])
            sec = model.recognize_number(clf, img)
            if last_sec != 24 and sec == 24:
                print 'twenty four', pos
                twenty_four_frames.append(pos)
            last_sec = sec

        pos += 1

    print score_frames, twenty_four_frames, camera_change_frames
    return score_frames, twenty_four_frames, camera_change_frames
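
A hypothetical driver for `get_score_info`; the path is a placeholder, and the module globals it depends on (`step`, `rects`, `target_imgs`) are assumed to be configured elsewhere.

# Hypothetical usage; 'data/game.mp4' is a placeholder and the module globals
# (step, rects, target_imgs) must already be configured.
import cv2

cap = cv2.VideoCapture('data/game.mp4')
scores, shot_clock_resets, camera_cuts = get_score_info(cap)
# each returned list holds the frame indices at which that event was detected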