def test_KF_Color(self):
    """Kalman filter must track the color test sequence to within 15 px.

    Loads the frames and ground-truth centers, seeds the filter at the
    first center, and checks the tracking error via assert_scene.
    """
    frames_dir = "input_images/input_test/color"
    template = cv2.imread("input_images/input_test/template.jpg")
    images, centers = load_data(frames_dir)

    # Initialize the filter at the first ground-truth center (x, y).
    kf = ps5.KalmanFilter(*centers[0, :])

    assert_scene(kf, images, centers, NOISE_0, template,
                 lambda err: self.assertAlmostEqual(err, 0, delta=15))
def part_1b():
    """Part 1b: Kalman-filter tracking on the 'circle' sequence.

    Uses a template-matching sensor under NOISE_2 and saves four
    checkpoint frames to the output directory.
    """
    print("Part 1b")

    template_loc = {'y': 72, 'x': 140, 'w': 50, 'h': 50}

    # Define process and measurement arrays if you want to use other than the
    # default. Pass them to KalmanFilter.
    # NOTE(review): Q and R are built but never handed to KalmanFilter, so
    # the filter runs with its defaults — confirm that is intended.
    Q = 0.2 * np.eye(4)  # Process noise array
    R = 0.5 * np.eye(4)  # Measurement noise array

    kf = ps5.KalmanFilter(template_loc['x'], template_loc['y'])

    # Frames at which an output image is written: ps5-1-b-1.png ... -4.png.
    checkpoints = (10, 30, 59, 99)
    save_frames = {
        frame: os.path.join(output_dir, 'ps5-1-b-{}.png'.format(i))
        for i, frame in enumerate(checkpoints, start=1)
    }

    run_kalman_filter(kf, os.path.join(input_dir, "circle"), NOISE_2,
                      "matching", save_frames, template_loc)
def part_1c():
    """Part 1c: Kalman-filter tracking on the 'walking' sequence.

    Uses the HOG sensor under NOISE_1 and saves four checkpoint frames
    to the output directory.
    """
    print("Part 1c")

    init_pos = {'x': 311, 'y': 217}

    # Define process and measurement arrays if you want to use other than the
    # default. Pass them to KalmanFilter.
    # NOTE(review): Q and R are built but never handed to KalmanFilter, so
    # the filter runs with its defaults — confirm that is intended.
    Q = 0.02 * np.eye(4)  # Process noise array
    R = 0.05 * np.eye(4)  # Measurement noise array

    kf = ps5.KalmanFilter(init_pos['x'], init_pos['y'])

    # Frames at which an output image is written: ps5-1-c-1.png ... -4.png.
    checkpoints = (10, 33, 84, 159)
    save_frames = {
        frame: os.path.join(output_dir, 'ps5-1-c-{}.png'.format(i))
        for i, frame in enumerate(checkpoints, start=1)
    }

    run_kalman_filter(kf, os.path.join(input_dir, "walking"), NOISE_1,
                      "hog", save_frames)
def part_5():
    """Tracking multiple Targets.

    Use either a Kalman or particle filter to track multiple targets
    as they move through the given video.  Use the sequence of images
    in the TUD-Campus directory.

    Follow the instructions in the problem set instructions.

    Place all your work in this file and this section.
    """
    template_loc1 = {'y': 180, 'x': 75, 'w': 50, 'h': 160}
    template_loc2 = {'y': 200, 'x': 300, 'w': 35, 'h': 100}

    # Define process and measurement arrays if you want to use other than the
    # default. Pass them to KalmanFilter.
    Q = 0.5 * np.eye(4)  # Process noise array
    R = 0.5 * np.eye(2)  # Measurement noise array

    # Filters for the two targets visible from frame 0. A third filter
    # (kf3) is created when its target enters the scene at frame 29.
    kf1 = ps5.KalmanFilter(template_loc1['x'], template_loc1['y'])
    kf2 = ps5.KalmanFilter(template_loc2['x'], template_loc2['y'])

    # Params
    noise = NOISE_2
    sensor = "matching"
    imgs_dir = os.path.join(input_dir, "TUD-Campus")
    save_frames = {
        29: os.path.join(output_dir, 'ps5-5-a-1.png'),
        55: os.path.join(output_dir, 'ps5-5-a-2.png'),
        70: os.path.join(output_dir, 'ps5-5-a-3.png')
    }

    # RUN KF
    imgs_list = [f for f in os.listdir(imgs_dir)
                 if f[0] != '.' and f.endswith('.jpg')]
    imgs_list.sort()

    frame_num = 0

    if sensor == "matching":
        # Cut the two initial templates out of the first frame.
        frame = cv2.imread(os.path.join(imgs_dir, imgs_list[0]))
        template1 = frame[
            template_loc1['y']:template_loc1['y'] + template_loc1['h'],
            template_loc1['x']:template_loc1['x'] + template_loc1['w']]
        template2 = frame[
            template_loc2['y']:template_loc2['y'] + template_loc2['h'],
            template_loc2['x']:template_loc2['x'] + template_loc2['w']]
    else:
        # Fixed: this message was a string literal broken across two lines
        # in the original source.
        raise ValueError("Unknown sensor name. Choose between 'hog' or "
                         "'matching'")

    for img in imgs_list:
        frame = cv2.imread(os.path.join(imgs_dir, img))

        # A third target enters the scene at frame 29: grab its template
        # from the current frame and start a third filter with explicit
        # Q and R.
        if frame_num == 29:
            template_loc3 = {'y': 180, 'x': 0, 'w': 50, 'h': 150}
            template3 = frame[
                template_loc3['y']:template_loc3['y'] + template_loc3['h'],
                template_loc3['x']:template_loc3['x'] + template_loc3['w']]
            kf3 = ps5.KalmanFilter(template_loc3['x'], template_loc3['y'],
                                   Q, R)

        # Sensor: template matching gives the measurement (z_x, z_y) as the
        # template center at the best (minimum SQDIFF) match, plus Gaussian
        # noise. Each target is measured only while it is still in frame.
        if sensor == "matching":
            if frame_num < 60:
                corr_map1 = cv2.matchTemplate(frame, template1,
                                              cv2.TM_SQDIFF)
                z_y1, z_x1 = np.unravel_index(np.argmin(corr_map1),
                                              corr_map1.shape)
                z_w1 = template_loc1['w']
                z_h1 = template_loc1['h']
                z_x1 += z_w1 // 2 + np.random.normal(0, noise['x'])
                z_y1 += z_h1 // 2 + np.random.normal(0, noise['y'])

            if frame_num < 45:
                corr_map2 = cv2.matchTemplate(frame, template2,
                                              cv2.TM_SQDIFF)
                z_y2, z_x2 = np.unravel_index(np.argmin(corr_map2),
                                              corr_map2.shape)
                z_w2 = template_loc2['w']
                z_h2 = template_loc2['h']
                z_x2 += z_w2 // 2 + np.random.normal(0, noise['x'])
                z_y2 += z_h2 // 2 + np.random.normal(0, noise['y'])

            if frame_num > 28:
                corr_map3 = cv2.matchTemplate(frame, template3,
                                              cv2.TM_SQDIFF)
                z_y3, z_x3 = np.unravel_index(np.argmin(corr_map3),
                                              corr_map3.shape)
                z_w3 = template_loc3['w']
                z_h3 = template_loc3['h']
                z_x3 += z_w3 // 2 + np.random.normal(0, noise['x'])
                z_y3 += z_h3 // 2 + np.random.normal(0, noise['y'])

        # Fixed: update each filter only while its target is measured.
        # The original called kf1.process/kf2.process on every frame,
        # re-feeding the last stale measurement after the windows closed.
        if frame_num < 60:
            x1, y1 = kf1.process(z_x1, z_y1)
        if frame_num < 45:
            x2, y2 = kf2.process(z_x2, z_y2)
        if frame_num > 28:
            x3, y3 = kf3.process(z_x3, z_y3)

        # (Removed a dead `if False:` debug-display block that referenced
        # undefined names z_x/z_y/x/y/z_w/z_h and would raise NameError
        # if ever enabled.)

        # Render and save output, if indicated. Each target is drawn only
        # inside its own tracking window.
        if frame_num in save_frames:
            print('Working on frame {}'.format(frame_num))
            frame_out = frame.copy()

            if frame_num < 60:
                cv2.circle(frame_out, (int(x1), int(y1)), 10,
                           (255, 0, 0), 2)
                cv2.rectangle(frame_out,
                              (int(z_x1) - z_w1 // 2, int(z_y1) - z_h1 // 2),
                              (int(z_x1) + z_w1 // 2, int(z_y1) + z_h1 // 2),
                              (0, 0, 255), 2)

            if frame_num < 45:
                cv2.circle(frame_out, (int(x2), int(y2)), 10,
                           (255, 0, 0), 2)
                cv2.rectangle(frame_out,
                              (int(z_x2) - z_w2 // 2, int(z_y2) - z_h2 // 2),
                              (int(z_x2) + z_w2 // 2, int(z_y2) + z_h2 // 2),
                              (0, 0, 255), 2)

            # Fixed: was `>= 28`, but x3/z_x3 only exist for frame_num > 28
            # (latent NameError at frame 28).
            if frame_num > 28:
                cv2.circle(frame_out, (int(x3), int(y3)), 10,
                           (255, 0, 0), 2)
                cv2.rectangle(frame_out,
                              (int(z_x3) - z_w3 // 2, int(z_y3) - z_h3 // 2),
                              (int(z_x3) + z_w3 // 2, int(z_y3) + z_h3 // 2),
                              (0, 0, 255), 2)

            cv2.imwrite(save_frames[frame_num], frame_out)

        # Update frame number
        frame_num += 1