def test_frame_filter_for_movement_detection(with_godec=False, savegif=False):
    """Run Kalman filtering over every frame and plot detected movement.

    For each frame, shows the original image, the movement (difference
    between the Kalman-filtered frame and the raw frame), and the filtered
    frame itself. With ``with_godec=True`` a second row is added showing the
    same triple computed on the GoDec-cleaned frames.

    Keyword Arguments:
        with_godec {bool} -- also run the pipeline on GoDec-cleaned frames
            (default: {False})
        savegif {bool} -- assemble the saved per-frame PNGs into a gif
            (default: {False})

    Side effects: writes one PNG per frame under kalman_pics_path, and
    optionally a gif under kalman_gifs_path. Reads module-level `data` and
    `data_path`.
    """
    noise_remover = FrameKalmanFilter()
    save_path = kalman_pics_path + basename(data_path)
    create_folder_if_absent(save_path)
    data_i = get_frame_GREY(data[0])
    subplt_titles = ["Original Image", "Movement", "KF Original"]
    if with_godec:
        subplt_titles.extend(["With Godec", "Movement", "KF Godec"])
        ims = init_comparison_plot(data_i, subplt_titles, 2, 3)
        M, LS, L, S, width, height = bs_godec(data)
        # Separate filter state for the GoDec-cleaned stream.
        noise_remover2 = FrameKalmanFilter()
    else:
        # was `elif not with_godec` — logically identical, plain `else` is clearer
        ims = init_comparison_plot(data_i, subplt_titles, 1, 3)
    for i in tqdm(range(len(data))):
        data_i = get_frame_GREY(data[i])
        processed = noise_remover.process_frame(data_i)
        movement = normalize_frame(processed - data_i)
        imgs = [data_i, movement, processed]
        if with_godec:
            # Column i of L/S is frame i flattened; undo the transpose-flatten.
            L_frame = normalize_frame(L[:, i].reshape(width, height).T)
            S_frame = normalize_frame(S[:, i].reshape(width, height).T)
            data_i2 = cleaned_godec_img(L_frame, S_frame)
            processed2 = noise_remover2.process_frame(data_i2)
            movement2 = normalize_frame(processed2 - data_i2)
            imgs.extend([data_i2, movement2, processed2])
        update_comparison_plot(ims, imgs)
        plt.savefig(save_path + "/{}.png".format(i))  # make png
    if savegif:
        gif_pics = [save_path + "/{}.png".format(i) for i in range(len(data))]
        gif_path = kalman_gifs_path + basename(data_path) + "_movement.gif"
        write_gif(gif_pics, gif_path, 0, len(gif_pics), fps=20)
        optimize_size(gif_path)
def bs_pipeline(files, debug=False, save=False):
    """Background Subtraction Pipeline process
    1. perform godec background subtraction
    2. thresholding to make actual pixels representing the person to be more salient in the frame
    3. Contour detection to detect centroid of person
    4. naive detection / optical flow.

    Arguments:
        files {[str]} -- Array obtained from get_all_files(data_path)

    Keyword Arguments:
        debug {bool} -- plot and save a per-frame comparison figure
            (default: {False})
        save {bool} -- currently unused; kept for interface compatibility
            (default: {False})
    """
    M, LS, L, S, width, height = bs_godec(files)
    if debug:
        subplt_titles = ["Original", "After Godec", "Blurred", " Thresholded", "Annotated"]
        ims = init_comparison_plot(get_frame_GREY(files[0]), subplt_titles, 1, 5, title="Post Processing")
    for i in tqdm(range(len(files))):
        L_frame = normalize_frame(L[:, i].reshape(width, height).T)
        S_frame = normalize_frame(S[:, i].reshape(width, height).T)
        img = cleaned_godec_img(L_frame, S_frame, get_frame(files[i]))
        images, centroids = postprocess_img(img)
        if debug:
            # BUG FIX: plotting previously ran unconditionally, but `ims` only
            # exists when debug=True, causing a NameError for debug=False.
            images.insert(0, get_frame_GREY(files[i]))
            update_comparison_plot(ims, images)
            plt.savefig(bg_subtraction_pics_path + "{}.png".format(i))
def optical_flow_lk(files, track_length=10, detect_interval=5):
    """Track sparse corner features across frames with Lucas-Kanade optical flow.

    Detects Shi-Tomasi corners in the first frame, then tracks them frame to
    frame, re-detecting whenever tracking is lost. Updates a live heatmap plot
    and annotates the plot's x-label with the measured displacement.

    Arguments:
        files {[str]} -- frame file paths, in temporal order

    Keyword Arguments:
        track_length {int} -- currently unused; kept for interface
            compatibility (default: {10})
        detect_interval {int} -- currently unused; kept for interface
            compatibility (default: {5})
    """
    print("Performing Lucas-Kanade Optical Flow")
    plot = get_init_heatmap_plot()
    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=4, qualityLevel=0.2, minDistance=6, blockSize=4)
    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(3, 3), maxLevel=3,
                     criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
    # Take first frame and find corners in it
    first_frame_gray = get_frame_GREY(files[0])
    # TODO: instead of using good features to track, possibly just use contour points directly
    prevPts = cv.goodFeaturesToTrack(first_frame_gray, mask=None, **feature_params)
    color = np.random.randint(0, 255, (100, 3))
    counter = 1
    prevImg = first_frame_gray
    while counter < len(files):
        frame = get_frame_GREY(files[counter])
        nextImg = frame.copy()
        update_heatmap(get_frame(files[counter]), plot)
        nextPts, status, err = cv.calcOpticalFlowPyrLK(prevImg, nextImg, prevPts, None, **lk_params)
        if nextPts is None:
            # BUG FIX: the original computed `nextPts - prevPts` BEFORE this
            # None check, raising a TypeError whenever tracking was lost.
            # Re-detect features and retry the flow computation first.
            print("Target not moving")
            prevPts = cv.goodFeaturesToTrack(frame, mask=None, **feature_params)
            nextPts, status, err = cv.calcOpticalFlowPyrLK(
                prevImg, nextImg, prevPts, None, **lk_params)
        displacement = nextPts - prevPts
        if (abs(displacement) > 3).any():
            print(displacement)
            plt.xlabel("Displacement: {}".format(displacement))
        else:
            plt.xlabel("Displacement in x/y lower than 3 ")
        # Select good points
        # each element of the vector is set to 1 if the flow for the corresponding features has been found, otherwise, it is set to 0.
        good_new = nextPts[status == 1]
        good_old = prevPts[status == 1]
        # Now update the previous frame and previous points
        prevImg = nextImg.copy()
        prevPts = good_new.reshape(-1, 1, 2)
        counter += 1
def static_clutter_algo(data):
    """Remove static clutter from a frame sequence.

    Seeds a background estimate from a GoDec decomposition of the first two
    frames (S gives the background, L the uncluttered foreground), then
    iteratively refines the background estimate while subtracting it from
    every frame.

    Arguments:
        data -- sequence of frame files/arrays

    Returns:
        (result, backgrounds) -- two lists of numpy arrays, one entry per
        frame: the unclutttered frames and the background estimates used.
    """
    M, LS, L, S, width, height = bs_godec(data[0:2])
    first_background = S[:, 0].reshape(width, height).T
    first_uncluttered = L[:, 1].reshape(width, height).T
    background = first_background

    # xxXX__set initialisation parameters here__XXxx
    m = len(data)
    result, backgrounds = [], []

    for idx in range(m):
        if idx == 2:
            background = generate_background_est(m, background, first_uncluttered, 0.2)
        elif idx > 2:
            # Refine using the background from two frames ago and the
            # previous uncluttered result.
            background = generate_background_est(m, backgrounds[idx - 2], result[idx - 1], 0.2)
        backgrounds.append(background)
        result.append(get_frame_GREY(data[idx]) - background)

    return result, backgrounds
def test_foreground_probability(savegif=False):
    """Visualize probabilistic foreground detection over every frame.

    For each frame, picks whichever GoDec component (L or S) carries the
    higher peak foreground probability, then plots the chosen component next
    to its probability map and saves the figure as a PNG.

    Keyword Arguments:
        savegif {bool} -- assemble the saved PNGs into a gif (default: {False})

    Side effects: writes PNGs under "testpics/"; reads module-level
    `data`, `L`, `S`, `width`, `height`.
    """
    subplt_titles = ["Original", "Foreground %"]
    ims = init_comparison_plot(get_frame_GREY(data[0]), subplt_titles, 1, 2,
                               title="Probabilistic Foreground Detection")
    for idx in range(len(data)):
        frame = get_frame(data[idx])
        L_frame = L[:, idx].reshape(width, height).T
        S_frame = S[:, idx].reshape(width, height).T
        # NOTE: this pick-the-stronger-component condition is now abstracted
        # as cleaned_godec_img() in background_subtraction.py
        L_probability = foreground_probability(L_frame, frame)
        S_probability = foreground_probability(S_frame, frame)
        if np.amax(L_probability) > np.amax(S_probability):
            probability, img = L_probability, L_frame
        else:
            probability, img = S_probability, S_frame
        update_comparison_plot(ims, [normalize_frame(img), normalize_frame(probability)])
        create_folder_if_absent("testpics")
        plt.savefig("testpics/{}.png".format(idx))
    if savegif:
        files = get_all_files("testpics")
        write_gif_from_pics(files, "probabilistic_foreground_detection.gif", fps=20)
def test_godec_over_multiple_iterations(frames_per_iterations=30):
    """Run GoDec in fixed-size batches of frames and plot each decomposition.

    Splits `files` into batches of `frames_per_iterations`, runs bs_godec on
    each batch, and for every frame plots the original, L/S components, the
    cleaned frame, and both foreground-probability maps.

    Keyword Arguments:
        frames_per_iterations {int} -- batch size per GoDec run (default: {30})

    Side effects: writes one PNG per frame under "testpics/"; reads the
    module-level `files` list.
    """
    ims = init_comparison_plot(get_frame_GREY(files[0]), subplt_titles=[
        "Original", "L_frame", "S_Frame", "Cleaned_Frame", "L_fram_%", "S_frame_%"
    ], num_rows=2, num_columns=3)
    for j in range(0, len(files), frames_per_iterations):
        end_index = min(j + frames_per_iterations, len(files))
        M, LS, L, S, width, height = bs_godec(files[j:end_index], normalize=False)
        # BUG FIX: original loop was `for i in range(i, end_index)`, which
        # reads `i` before assignment (NameError) and then indexed L/S with a
        # global frame index even though they only hold this batch's columns.
        # Iterate global indices but index L/S with the batch-local offset.
        for offset, i in enumerate(range(j, end_index)):
            img = get_frame_GREY(files[i])
            L_frame = normalize_frame(L[:, offset].reshape(width, height).T)
            S_frame = normalize_frame(S[:, offset].reshape(width, height).T)
            L_probability = foreground_probability(
                L[:, offset].reshape(width, height).T, get_frame(files[i]))
            S_probability = foreground_probability(
                S[:, offset].reshape(width, height).T, get_frame(files[i]))
            print("L_probability")
            print(L_probability)
            print("S_probability")
            print(S_probability)
            cleaned_frame, prob = cleaned_godec_img(L_frame, S_frame, get_frame(files[i]))
            update_comparison_plot(ims, [
                img, L_frame, S_frame, cleaned_frame,
                normalize_frame(L_probability), normalize_frame(S_probability)
            ])
            create_folder_if_absent("testpics")
            plt.savefig("testpics/" + "{}.png".format(i))
def test_postprocess_img(f, plot=False):
    """Post-process one frame file and return its thresholded image and centroids.

    Arguments:
        f -- frame file path (read via get_frame_GREY)

    Keyword Arguments:
        plot {bool} -- show a 5-panel comparison figure and print the
            centroids (default: {False})

    Returns:
        (thresholded_img, centroids) -- the second-to-last post-processing
        stage and the detected centroid coordinates.
    """
    original = get_frame_GREY(f)
    images, centroids = postprocess_img(original)
    if plot:
        images.insert(0, original)
        titles = [
            "Original", "After Godec", "Blurred", " Thresholded", "Annotated"
        ]
        handles = init_comparison_plot(original, titles, 1, 5, title="Post Processing")
        update_comparison_plot(handles, images)
        print("Centroids found are located at: ", centroids)
    # Second-to-last stage is the thresholded image (whether or not the
    # original was prepended for plotting).
    return images[-2], centroids
def create_godec_input(files, normalize=True, rgb=False):
    """Stack frames as column vectors into the GoDec input matrix M.

    Arguments:
        files -- sequence of frame file paths (str) or already-loaded
            numpy frame arrays

    Keyword Arguments:
        normalize {bool} -- for path entries, load as greyscale
            (default: {True})
        rgb {bool} -- for path entries, load as RGB; takes precedence over
            `normalize` (default: {False})

    Returns:
        (M, frame) -- M has one column per frame (transposed then
        flattened), or None for empty input; frame is the last frame
        loaded (None for empty input).
    """
    frame = None
    columns = []
    for i, f in enumerate(files):
        if isinstance(f, str):
            if rgb:
                frame = get_frame_RGB(f)
            elif normalize:
                frame = get_frame_GREY(f)
            else:
                frame = get_frame(f)
        else:
            frame = files[i]
        # Stack frames as column vectors
        columns.append(frame.T.reshape(-1))
    # PERF FIX: original called column_stack((M, F)) inside the loop,
    # copying M every iteration (O(n^2)); stacking once is O(n).
    M = column_stack(columns) if columns else None
    return M, frame
def test_compare_gaussian_blur(file):
    """Load one frame as greyscale and show the Gaussian-blur comparison."""
    grey_frame = get_frame_GREY(file)
    compare_gaussian_blur(grey_frame)
def test_compare_median_blur(file):
    """Load one frame as greyscale and show the median-blur comparison."""
    grey_frame = get_frame_GREY(file)
    compare_median_blur(grey_frame)
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@__xxXX__TEST____XXxx@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Test driver: run static clutter removal on a recorded walk sequence and
# show frame 29's original, uncluttered result, and background estimate
# side by side. (A stale commented-out copy of static_clutter_algo's loop
# body was removed from here; the commented plt.title calls referenced an
# undefined `i` and have been replaced with valid literals.)
data = get_all_files("data/teck_walk_out_and_in")
result, backgrounds = static_clutter_algo(data)

FRAME = 29  # frame index to visualize

plt.subplot(131)
plt.imshow(get_frame_GREY(data[FRAME]))
plt.title("Original Image")
plt.subplot(132)
plt.imshow(result[FRAME])
plt.title("clutter removed")
plt.subplot(133)
plt.imshow(backgrounds[FRAME])
plt.title("backgrounds")
plt.show()