session_id)
mask = cv2.imread(mask_path, 0)

vp_path = '../data/gt/2016-ITS-BrnoCompSpeed/results/{}/system_dubska_bmvc14.json'.format(
    session_id)
with open(vp_path, 'r') as f_buff:
    """
    vp has 2 keys
    - cars, list of cars detected, its frame and posX and posY
    - camera_calibration, the calibration parameter result (pp, vp1, and vp2)
    """
    vp = json.load(f_buff)
vp1 = vp['camera_calibration']['vp1']
vp2 = vp['camera_calibration']['vp2']
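
# Hedged sketch: the layout below is inferred only from the docstring above;
# the exact per-car field names are assumptions, not a confirmed file format.
#   vp['camera_calibration'] -> {'pp': [x, y], 'vp1': [x, y], 'vp2': [x, y]}
#   vp['cars']               -> list of detections carrying 'frame', 'posX', 'posY'
for car in vp.get('cars', [])[:3]:
    # peek at a few detections to confirm the assumed field names
    print(car.get('frame'), car.get('posX'), car.get('posY'))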

fi = FrameIterator('../data/sample/{}'.format(session_id))
bg = BackgroundModel(fi)

print("Learning")
bg.learn(tot_frame_init=1)
print("Done")

ctr_frame = 0

# What I want is a function that finds the left-most and right-most tangents of
# a given blob list as seen from a point; a hypothetical sketch follows.
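
# A minimal sketch of the tangent helper described in the comment above: a hull
# point is a tangent point when every other hull point lies on one side of the
# line through it and the reference point.  Which extreme counts as "left" or
# "right" depends on the y-down image coordinates, so treat the labels as
# assumptions; for a list of blobs, run it over the concatenated points.
import cv2
import numpy as np

def sketch_tangent_points(blob, ref):
    """Hypothetical helper: the two extreme tangent points of `blob` (an
    OpenCV contour, shape N x 1 x 2) as seen from `ref`, a point outside it."""
    pts = cv2.convexHull(blob).reshape(-1, 2).astype(np.float64)
    rx, ry = float(ref[0]), float(ref[1])
    left = right = None
    for px, py in pts:
        # cross products of (p - ref) with the vectors from ref to every hull point
        cross = (px - rx) * (pts[:, 1] - ry) - (py - ry) * (pts[:, 0] - rx)
        if np.all(cross >= -1e-9):   # every hull point on one side -> tangent
            left = (px, py)
        if np.all(cross <= 1e-9):    # every hull point on the other side -> tangent
            right = (px, py)
    return left, right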
while True:
    img = next(fi)
    frame = img
    fg = bg.apply(img)
    session_id)
mask = cv2.imread(mask_path, 0)

vp_path = '../data/gt/2016-ITS-BrnoCompSpeed/results/{}/system_dubska_bmvc14.json'.format(
    session_id)
with open(vp_path, 'r') as f_buff:
    """
    vp has 2 keys
    - cars, list of cars detected, its frame and posX and posY
    - camera_calibration, the calibration parameter result (pp, vp1, and vp2)
    """
    vp = json.load(f_buff)
vp1 = vp['camera_calibration']['vp1']
vp2 = vp['camera_calibration']['vp2']

fi = FrameIterator('../data/sync/{}'.format(session_id))
ctr_frame = 1
while True:
    img = next(fi)

    div_line = Line.from_two_points(vp2, (10, img.shape[0] / 2))

    frame = div_line.draw(img)

    loc = (20, img.shape[0] - 20)
    cv2.putText(frame, 'Frame - {}'.format(ctr_frame), loc,
                cv2.FONT_HERSHEY_PLAIN, 3, (0, 128, 128), 4)

    cv2.imshow('default', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    ctr_frame += 1
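
# `Line` is a project helper that is not shown in this snippet.  Below is a
# hypothetical, minimal stand-in exposing the same from_two_points()/draw()
# surface as the calls above; the real class may clip the line to the image
# borders or draw it differently.
class SketchLine(object):
    def __init__(self, p1, p2):
        self.p1, self.p2 = p1, p2

    @classmethod
    def from_two_points(cls, p1, p2):
        return cls(p1, p2)

    def draw(self, img, color=(0, 0, 255), thickness=2):
        # draw on a copy so the caller's frame stays untouched
        out = img.copy()
        cv2.line(out,
                 (int(round(self.p1[0])), int(round(self.p1[1]))),
                 (int(round(self.p2[0])), int(round(self.p2[1]))),
                 color, thickness)
        return out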
Example #3
    def get_session (ses_id) :
        # build one FrameIterator per camera view for the given session id
        fi = {}
        for view in ("left", "right", "center") :
            fi[view] = FrameIterator (os.path.join (path_this, 'data/sync_25fps/session{}_{}'.format (ses_id, view)))

        return fi
    with open (vp_path, 'r') as f_buff :
        """
        vp has 2 keys
        - cars, list of cars detected, its frame and posX and posY
        - camera_calibration, the calibration parameter result (pp, vp1, and vp2)
        """
        vp = json.load (f_buff)
        vps[_id][view] = {
                'vp1' : vp['camera_calibration']['vp1'],
                'vp2' : vp['camera_calibration']['vp2']
            }

# generate a frame iterator for each view
fi = {_id : {}}
for view in session[_id] : 
    fi[_id][view] = FrameIterator ('../data/sync_25fps/session{}_{}'.format (_id, view))

# define a background model for each view
bms = {_id : {}} 
for view in session[_id] : 
    bms[_id][view] = BackgroundModel (fi[_id][view])
    print ("Learning for session {}-{}".format (_id, view))
    bms[_id][view].learn (tot_frame_init=2)
    print ("Done")

# initializing previous foreground blobs
prev_fg = {_id : {}}
for view in session[_id] : 
    prev_fg[_id][view] = [None, None]
    for i in range (2) : 
        img = next (fi[_id][view])
        prev_fg[_id][view][i] = bms[_id][view].apply (img)

            """
            vp has 2 keys
            - cars, list of cars detected, its frame and posX and posY
            - camera_calibration, the calibration parameter result (pp, vp1, and vp2)
            """
            vp = json.load (f_buff)
            vps[_id][view] = {
                    'vp1' : vp['camera_calibration']['vp1'],
                    'vp2' : vp['camera_calibration']['vp2']
                }

bms = {}
for _id in session: 
    bms[_id] = {} 
    for view in session[_id] : 
        fi = FrameIterator ('../data/sample/session{}_{}'.format (_id, view))
        bms[_id][view] = BackgroundModel (fi)
        print ("Learning for session {}-{}".format (_id, view))
        bms[_id][view].learn (tot_frame_init=1)
        print ("Done")

_id = 0
ctr = 0
while True :
    view_frame = None

    # load the current frame from each view
    for view in session[_id] :
        fpath = '../data/sample/session0_{}/{:04d}.jpg'.format (
                view,
                session[_id][view] + ctr