Example #1
import cv2
import json

# load the region-of-interest mask as a single-channel (grayscale) image
mask = cv2.imread(mask_path, 0)

vp_path = '../data/gt/2016-ITS-BrnoCompSpeed/results/{}/system_dubska_bmvc14.json'.format(
    session_id)
with open(vp_path, 'r') as f_buff:
    """
    vp has 2 keys
    - cars, list of cars detected, its frame and posX and posY
    - camera_calibration, the calibration parameter result (pp, vp1, and vp2)
    """
    vp = json.load(f_buff)
vp1 = vp['camera_calibration']['vp1']
vp2 = vp['camera_calibration']['vp2']

# FrameIterator and BackgroundModel are project-local helpers (frame source and background model)
fi = FrameIterator('../data/sample/{}'.format(session_id))
bg = BackgroundModel(fi)

print("Learning")
bg.learn(tot_frame_init=1)
print("Done")

ctr_frame = 0

# what I want is a function that finds the left-most and right-most tangent rays
# from a given point to a given blob list (a sketch of such a helper follows this loop)
while True:
    img = next(fi)
    frame = img
    fg = bg.apply(img)

    # remove shadows, i.e. value 127
    fg = cv2.threshold(fg, 200, 255, cv2.THRESH_BINARY)[1]
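
A minimal sketch of the tangent helper the comment above asks for, assuming blobs are OpenCV-style contours (arrays of 2D points) and that all blob points lie on one side of the reference point so the angles do not wrap; the function name and the angle-based approach are illustrative, not the repository's implementation:

import numpy as np

def extreme_tangent_points(blobs, point):
    """Return the blob points whose rays from `point` have the smallest and
    largest angle, i.e. the left-most and right-most tangent directions."""
    px, py = point
    best_left = best_right = None
    min_ang, max_ang = np.inf, -np.inf
    for blob in blobs:
        pts = np.asarray(blob, dtype=np.float64).reshape(-1, 2)  # (N, 1, 2) contours -> (N, 2)
        ang = np.arctan2(pts[:, 1] - py, pts[:, 0] - px)         # ray angle of every blob point
        i_lo, i_hi = int(np.argmin(ang)), int(np.argmax(ang))
        if ang[i_lo] < min_ang:
            min_ang, best_left = ang[i_lo], tuple(pts[i_lo])
        if ang[i_hi] > max_ang:
            max_ang, best_right = ang[i_hi], tuple(pts[i_hi])
    return best_left, best_right
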
        """
        vp = json.load (f_buff)
        vps[_id][view] = {
                'vp1' : vp['camera_calibration']['vp1'],
                'vp2' : vp['camera_calibration']['vp2']
            }

# generate frame iterator
fi = {_id : {}}
for view in session[_id] : 
    fi[_id][view] = FrameIterator ('../data/sync_25fps/session{}_{}'.format (_id, view))

# define background model
bms = {_id : {}} 
for view in session[_id] : 
    bms[_id][view] = BackgroundModel (fi[_id][view])
    print ("Learning for session {}-{}".format (_id, view))
    bms[_id][view].learn (tot_frame_init=2)
    print ("Done")

# initializing prev blobs
prev_fg = {_id : {}}
for view in session[_id] : 
    prev_fg[_id][view] = [None, None]
    for i in range (2) : 
        img = next (fi[_id][view])

        # foreground mask via background subtraction
        fg = bms[_id][view].apply (img)
        # remove shadows, i.e. value 127
        fg = cv2.threshold (fg, 200, 255, cv2.THRESH_BINARY)[1]
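
For reference, a standalone sketch of the shadow-removal step above, assuming BackgroundModel wraps OpenCV's MOG2 subtractor (Example #3 passes detectShadows=False to it, which suggests it does): MOG2 marks shadow pixels as 127 and foreground as 255, so thresholding above 127 keeps only true foreground.

import cv2

mog2 = cv2.createBackgroundSubtractorMOG2 (detectShadows=True)
fg_raw = mog2.apply (img)                                        # 0 = background, 127 = shadow, 255 = foreground
fg_bin = cv2.threshold (fg_raw, 200, 255, cv2.THRESH_BINARY)[1]  # drop the shadow pixels
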
Example #3
masks = {}
HEIGHT = 400 # constant height

for view in VIEW : 
    img = cv2.imread (img_path.format (ses_id, view), 1)

    points = GT['session{}'.format (ses_id)][view]
    corner = get_corner_ground (vp[view]['vp1'], vp[view]['vp2'], points)

    # rectangular homography mapping: ground-plane quad -> 1000x300 bird's-eye
    # rectangle (see the usage sketch after this loop)
    corner_gt = np.float32 (corner)
    corner_wrap = np.float32 ([[0,300],[0,0], [1000,0], [1000, 300]])
    M[view] = cv2.getPerspectiveTransform (corner_gt, corner_wrap)

    # background subtraction
    bms[view] = BackgroundModel (fi[view], detectShadows=False)
    bms[view].learn (tot_frame_init=2)

    # for 3 frame difference
    prev_img[view] = [None, None]
    for i in range (2) : 
        img = next (fi[view])
        img_color = img.copy ()
        img = cv2.cvtColor (img, cv2.COLOR_BGR2GRAY)

        # save the grayscale frame as a previous frame for the 3-frame difference
        prev_img[view][i] = img

    mask_path = '../../data/gt/2016-ITS-BrnoCompSpeed/dataset/session{}_{}/video_mask.png'.format (ses_id, view)
    masks[view] = cv2.imread (mask_path, 0)
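
The homography M[view] computed above maps the annotated ground-plane quadrilateral to a 1000x300 bird's-eye rectangle. A minimal usage sketch reusing the names from this example (the same warp call appears in Example #4); applying the ROI mask first is an assumption, not shown in the original:

import cv2

for view in VIEW :
    img = next (fi[view])
    img = cv2.bitwise_and (img, img, mask=masks[view])          # keep only the annotated region
    birdseye = cv2.warpPerspective (img, M[view], (1000, 300))  # rectified ground-plane view
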
Example #4
    # warm up the frame/TSI/epipolar differencers with two frames per view (cf. Example #3)
    for i in range(2):
        img = next(fi[view])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        dst = cv2.warpPerspective(img, M[view], (1000, 300))

        # save the grayscale frame as a previous frame for differencing
        prev_img[i] = img

        # save the TSI output and a fixed strip (rows 70-80) of the warped view (the "epi" image)
        prev_tsi[i] = tsi_object[view].apply(img)
        prev_epi[i] = dst[70:80, :]

    # fdiff_view[view] = FrameDifference (*prev_img)
    fdiff_tsi[view] = FrameDifference(*prev_tsi)
    fdiff_epi[view] = FrameDifference(*prev_epi)
    bm_epi[view] = BackgroundModel(iter(prev_epi))
    bm_epi[view].learn(tot_frame_init=2)

    mask_path = '../../data/gt/2016-ITS-BrnoCompSpeed/dataset/session{}_{}/video_mask.png'.format(
        ses_id, view)
    masks[view] = cv2.imread(mask_path, 0)
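
FrameDifference is a project-local class constructed from the two warm-up frames above; its real interface is not shown in these snippets. A minimal sketch of a classic 3-frame difference in the same spirit, assuming an apply-style interface and grayscale uint8 input (the class name, method name, and threshold value are assumptions):

import cv2

class ThreeFrameDifference:
    """Motion mask from three consecutive frames: AND of |f2 - f1| and |f3 - f2|."""
    def __init__(self, f1, f2, thresh=25):
        self.prev = [f1, f2]      # the two most recent frames
        self.thresh = thresh

    def apply(self, frame):
        d1 = cv2.absdiff(self.prev[1], self.prev[0])
        d2 = cv2.absdiff(frame, self.prev[1])
        motion = cv2.bitwise_and(d1, d2)                                   # moving pixels present in both diffs
        motion = cv2.threshold(motion, self.thresh, 255, cv2.THRESH_BINARY)[1]
        self.prev = [self.prev[1], frame]                                  # slide the window
        return motion
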
"""
-------------------------------------------------------------------------------
Main Program
-------------------------------------------------------------------------------
"""
# what I want is a function to generate the 3D (bounding box) using the vanishing
# points (it already exists; a line-intersection sketch follows this loop)
ctr = 0
while True:
    ctr += 1
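
The 3D construction mentioned at the top of the loop reduces to intersecting image lines that pass through the vanishing points (e.g. the line from one blob corner toward vp1 with the line from another corner toward vp2). A minimal sketch of that primitive in homogeneous coordinates; the helper name is illustrative and this is not the project's existing function:

import numpy as np

def intersect_lines_through_vps(p, vp_a, q, vp_b):
    """Intersect line(p, vp_a) with line(q, vp_b); points are (x, y) pixels."""
    def homogeneous_line(a, b):
        return np.cross([a[0], a[1], 1.0], [b[0], b[1], 1.0])
    x = np.cross(homogeneous_line(p, vp_a), homogeneous_line(q, vp_b))
    return (x[0] / x[2], x[1] / x[2])   # assumes the two lines are not parallel
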