def optical_flow_dense(files):
    # Perform Godec first on all frames
    M, LS, L, S, width, height = bs_godec(files)
    first_frame = get_frame(files[0])

    # Frames to be compared are taken after GoDec and post-processing
    godec_frame, probability = get_godec_frame(M, L, S, width, height, 0)
    img, centroids = postprocess_img(godec_frame, all_images=False)
    prev_gray = img
    ims = init_comparison_plot(first_frame,
                               ["Original", "Thresholded", "FlowS"], 1, 3)
    test = cv.cvtColor(first_frame.astype("uint8"), cv.COLOR_GRAY2BGR)
    hsv_mask = np.zeros_like(test)
    hsv_mask[..., 1] = 255
    window_name = "Dense Optical Flow"

    counter = 1

    while counter < len(files):
        print(counter)
        godec_frame, probability = get_godec_frame(M, L, S, width, height,
                                                   counter)
        img, centroids = postprocess_img(godec_frame, all_images=False)
        next_gray = img
        flow = cv.calcOpticalFlowFarneback(prev_gray,
                                           next_gray,
                                           None,
                                           pyr_scale=0.5,
                                           levels=5,
                                           winsize=11,
                                           iterations=5,
                                           poly_n=5,
                                           poly_sigma=1.1,
                                           flags=0)
        magnitude, angle = cv.cartToPolar(flow[..., 0], flow[..., 1])
        # Set image hue according to the optical flow direction
        hsv_mask[..., 0] = angle * 180 / np.pi / 2
        # Set image value according to the optical flow magnitude (normalized)
        hsv_mask[..., 2] = cv.normalize(magnitude, None, 0, 255, cv.NORM_MINMAX)

        # plotting of grayscale flowmap and data heatmap
        update_comparison_plot(
            ims, [get_frame(files[counter]), next_gray, hsv_mask])
        plt.title("Max Magnitude :" + str(np.amax(magnitude)) +
                  "\nMax Angle:" + str(np.amax(angle)))
        create_folder_if_absent("optical_flow_pics")
        plt.savefig("optical_flow_pics/{}.png".format(counter))
        prev_gray = next_gray
        k = cv.waitKey(30) & 0xff
        counter += 1
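
# Usage sketch (illustrative, not from the original source): run the dense-flow routine over
# a folder of frames. `data_path` is a hypothetical argument; get_all_files() is the helper
# used throughout this file. The frames saved in optical_flow_pics/ contain the raw HSV mask;
# cv.cvtColor(hsv_mask, cv.COLOR_HSV2BGR) would be needed to view it as a colour flow map.
def example_run_optical_flow_dense(data_path):
    files = get_all_files(data_path)
    optical_flow_dense(files)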
def test_foreground_probability(savegif=False):

    subplt_titles = ["Original", "Foreground %"]
    ims = init_comparison_plot(get_frame_GREY(data[0]),
                               subplt_titles,
                               1,
                               2,
                               title="Probabilistic Foreground Detection")

    for i in range(len(data)):
        frame = get_frame(data[i])
        L_frame = L[:, i].reshape(width, height).T
        S_frame = S[:, i].reshape(width, height).T
        L_probability = foreground_probability(L_frame, frame)

        # This L/S selection condition is now abstracted as cleaned_godec_img() in background_subtraction.py
        S_probability = foreground_probability(S_frame, frame)
        if np.amax(L_probability) > np.amax(S_probability):
            probability = L_probability
            img = L_frame
        else:
            probability = S_probability
            img = S_frame

        images = [normalize_frame(img), normalize_frame(probability)]
        update_comparison_plot(ims, images)
        create_folder_if_absent("testpics")
        plt.savefig("testpics/{}.png".format(i))

    if savegif:
        files = get_all_files("testpics")
        write_gif_from_pics(files,
                            "probabilistic_foreground_detection.gif",
                            fps=20)
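
# Minimal sketch (an assumption, not the actual implementation) of the cleaned_godec_img()
# abstraction referenced in test_foreground_probability() above; the real function lives in
# background_subtraction.py and some call sites below use only a single return value.
# It keeps whichever of the L/S frames has the stronger foreground response.
def cleaned_godec_img_sketch(L_frame, S_frame, frame):
    L_probability = foreground_probability(L_frame, frame)
    S_probability = foreground_probability(S_frame, frame)
    if np.amax(L_probability) > np.amax(S_probability):
        return L_frame, L_probability
    return S_frame, S_probability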
def bs_pipeline(files, debug=False, save=False):
    """Background Subtraction Pipeline process
    1. perform godec background subtraction
    2. thresholding to make actual pixels representing the person to be more salient in the frame
    3. Contour detection to detect centroid of person 
    4. naive detection / optical flow. 

    Arguments:
        files {[str]} -- Array obtained from get_all_files(data_path)

    Keyword Arguments:
        debug {bool} -- [description] (default: {False})
    """

    M, LS, L, S, width, height = bs_godec(files)
    
    if debug:
        subplt_titles = ["Original", "After Godec", "Blurred", " Thresholded", "Annotated"]
        ims = init_comparison_plot(get_frame_GREY(files[0]), subplt_titles, 1, 5, title="Post Processing")
        
        for i in tqdm(range(len(files))):
            L_frame = normalize_frame(L[:, i].reshape(width, height).T)
            S_frame = normalize_frame(S[:, i].reshape(width, height).T)
            img = cleaned_godec_img(L_frame, S_frame, get_frame(files[i]))
            images, centroids = postprocess_img(img)
            images.insert(0, get_frame_GREY(files[i]))
            update_comparison_plot(ims, images)
            plt.savefig(bg_subtraction_pics_path+"{}.png".format(i))
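
# Usage sketch (illustrative): run the full background-subtraction pipeline in debug mode so
# that every post-processing stage is plotted and saved. `data_path` is a hypothetical argument.
def example_run_bs_pipeline(data_path):
    files = get_all_files(data_path)
    bs_pipeline(files, debug=True)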
def test_frame_filter(savegif=False):
    noise_remover = FrameKalmanFilter()
    save_path = kalman_pics_path + basename(data_path)
    create_folder_if_absent(save_path)
    data_i = get_frame(data[0])
    subplt_titles = ["Original Image", "noise removed"]
    ims, axs, fig = init_noise_reduction_plot(data_i, subplt_titles)
    for i in tqdm(range(len(data))):
        data_i = get_frame(data[i])
        processed = noise_remover.process_frame(data_i)
        update_noise_reduction_plot(ims, [data_i, processed])
        plt.savefig(save_path + "/{}.png".format(i)) #make png
    if savegif:
        gif_pics = [save_path+ "/{}.png".format(i) for i in range(len(data))]
        gif_path = kalman_gifs_path+basename(data_path)+".gif"
        write_gif(gif_pics, gif_path, 0, len(gif_pics), fps=30)
        optimize_size(gif_path)
def test_pixel_filter():
    noise_remover = PixelKalmanFilter()
    temp_over_time = [get_frame(data[i])[0][0] for i in range(len(data))]
    temp_over_time_2 = copy.copy(temp_over_time)
    filtered = [np.array(noise_remover.filter(temp_over_time[i]))[0][0] for i in range(len(data))]

    x = np.arange(len(data))  # x-axis: frame index
    plt.plot(x, temp_over_time_2, "r--", x, filtered, "bs")
    plt.show()
def test_frame_filter_by_specific_pixel():
    temp_over_time = []
    noise_remover = FrameKalmanFilter()
    for i in tqdm(range(len(data))):
        processed = noise_remover.process_frame(get_frame(data[i]))
        # copy.copy() is required here; appending the reference would make every element
        # of temp_over_time point to the same (last) frame
        temp_over_time.append(copy.copy(processed))

    pixel_over_time = []
    for i in tqdm(range(len(data))):
        frame = get_frame(data[i])  # measurement data
        pixel_over_time.append(frame[20][20])  # value of pixel (20, 20) over time

    filtered_over_time = []
    for i in tqdm(range(len(data))):
        frame = temp_over_time[i]  # filtered data
        filtered_over_time.append(frame[20][20])  # adds the values of a specific pixel over time to filtered_over_time

    x = np.arange(len(data))  # x-axis: frame index
    plt.plot(x, pixel_over_time, "r--", x, filtered_over_time, "bs")
    plt.show()
def optical_flow_lk(files, track_length=10, detect_interval=5):
    print("Performing Lucas-Kanade Optical Flow")
    plot = get_init_heatmap_plot()

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=4,
                          qualityLevel=0.2,
                          minDistance=6,
                          blockSize=4)
    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(3, 3),
                     maxLevel=3,
                     criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT,
                               10, 0.03))

    # Take first frame and find corners in it
    first_frame_gray = get_frame_GREY(files[0])
    # TODO: instead of using good features to track, possibly just use contour points directly
    prevPts = cv.goodFeaturesToTrack(first_frame_gray,
                                     mask=None,
                                     **feature_params)
    color = np.random.randint(0, 255, (100, 3))
    counter = 1
    prevImg = first_frame_gray
    while counter < len(files):
        frame = get_frame_GREY(files[counter])
        nextImg = frame.copy()
        update_heatmap(get_frame(files[counter]), plot)
        nextPts, status, err = cv.calcOpticalFlowPyrLK(prevImg, nextImg,
                                                       prevPts, None,
                                                       **lk_params)
        if nextPts is None:
            # Tracking lost: re-detect features and recompute the flow before using nextPts
            print("Target not moving")
            prevPts = cv.goodFeaturesToTrack(frame,
                                             mask=None,
                                             **feature_params)
            nextPts, status, err = cv.calcOpticalFlowPyrLK(
                prevImg, nextImg, prevPts, None, **lk_params)

        displacement = nextPts - prevPts
        if (abs(displacement) > 3).any():
            print(displacement)
            plt.xlabel("Displacement: {}".format(displacement))
        else:
            plt.xlabel("Displacement in x/y lower than 3")

        # Select good points
        # each element of the vector is set to 1 if the flow for the corresponding features has been found, otherwise, it is set to 0.
        good_new = nextPts[status == 1]
        good_old = prevPts[status == 1]

        # Now update the previous frame and previous points
        prevImg = nextImg.copy()
        prevPts = good_new.reshape(-1, 1, 2)
        counter += 1
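
# Sketch (illustrative, not part of the original function): the unused `color` array in
# optical_flow_lk() suggests the tracked points were meant to be drawn. A standard OpenCV
# way to visualise the matched points on a BGR frame would be:
def draw_lk_tracks(frame_bgr, good_new, good_old, color):
    for idx, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = (int(v) for v in new.ravel())
        c, d = (int(v) for v in old.ravel())
        cv.line(frame_bgr, (a, b), (c, d), color[idx % len(color)].tolist(), 1)
        cv.circle(frame_bgr, (a, b), 2, color[idx % len(color)].tolist(), -1)
    return frame_bgr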
def test_plot(files):
    array_shape = (24, 32)
    min_value = 30
    max_value = 40
    plot = init_heatmap("Heatmap",
                        array_shape,
                        min_value,
                        max_value,
                        debug=True)
    for f in files:
        frame = get_frame(f)
        update_heatmap(frame, plot)
def test_bs_godec_trained_noise(noise_path, gif_name, preview):
    """Test bs_godec Implementation with trained noise

    Keyword Arguments:
        noise_path {string}
        gif_name {string}
        preview {boolean}
    ---
    This test shows you how to run the bs_godec_trained method.
    """
    N = get_frame(noise_path)
    M, R = bs_godec_trained(files, N)
    plot_bs_results(M, N, R, bs_pics_path, preview=preview)
def test_plot_without_labels(files):
    array_shape = (24, 32)
    min_value = 25
    max_value = 40
    plot = init_heatmap("Heatmap",
                        array_shape,
                        min_value,
                        max_value,
                        debug=False)
    for i in range(len(files)):
        frame = get_frame(files[i])
        update_heatmap(frame, plot)
        create_folder_if_absent("testpics")
        plt.savefig("testpics/{}.png".format(i))
def test_godec_over_multiple_iterations(frames_per_iterations=30):
    ims = init_comparison_plot(get_frame_GREY(files[0]),
                               subplt_titles=[
                                   "Original", "L_frame", "S_Frame",
                                   "Cleaned_Frame", "L_fram_%", "S_frame_%"
                               ],
                               num_rows=2,
                               num_columns=3)
    for j in range(0, len(files), frames_per_iterations):
        if j + frames_per_iterations < len(files):
            end_index = j + frames_per_iterations
        else:
            end_index = len(files)
        M, LS, L, S, width, height = bs_godec(files[j:end_index],
                                              normalize=False)

        for i in range(j, end_index):
            img = get_frame_GREY(files[i])
            # L and S only hold columns for files[j:end_index], so offset the column index by j
            L_frame = normalize_frame(L[:, i - j].reshape(width, height).T)
            S_frame = normalize_frame(S[:, i - j].reshape(width, height).T)
            L_probability = foreground_probability(
                L[:, i - j].reshape(width, height).T, get_frame(files[i]))
            S_probability = foreground_probability(
                S[:, i - j].reshape(width, height).T, get_frame(files[i]))
            print("L_probability")
            print(L_probability)
            print("S_probability")
            print(S_probability)
            cleaned_frame, prob = cleaned_godec_img(L_frame, S_frame,
                                                    get_frame(files[i]))
            update_comparison_plot(ims, [
                img, L_frame, S_frame, cleaned_frame,
                normalize_frame(L_probability),
                normalize_frame(S_probability)
            ])
            create_folder_if_absent("testpics")
            plt.savefig("testpics/" + "{}.png".format(i))
def write_gif_from_npy(files, name, start=0, end=0, fps=1):
    print("Plotting from {} numpy files and writing gif of {}...".format(
        len(files), fps))
    plot = init_heatmap(name, show=False)
    end = end or len(files)
    with imageio.get_writer(name, mode='I', fps=fps) as writer:
        for i in tqdm(range(start, end)):
            f = get_frame(files[i])
            update_heatmap(f, plot)
            pic_name = "pics/" + files[i] + ".png"
            plt.savefig(pic_name)
            writer.append_data(imageio.imread(pic_name))
    print("Finished writing gif at {}.".format(name))
    optimize_size(name)
def analyze_by_period(files, num_frames=60 * 30):
    time_person_spent_in_areas = defaultdict(int)
    counter = 0
    # Cap at the number of available frames to avoid indexing past the end of files
    while counter < min(num_frames, len(files)):
        frame = get_frame(files[counter])
        areas_person_is_in = naive_binary_likelihood_by_frame(frame)
        for area in areas_person_is_in:
            if areas_person_is_in[area]:
                time_person_spent_in_areas[area] += 1
        counter += 1
    total_time_spent_in_room = sum(time_person_spent_in_areas.values())
    time_person_spent_in_areas = {
        i: time_person_spent_in_areas[i] / total_time_spent_in_room
        for i in range(8)
    }
    return time_person_spent_in_areas
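
# Usage sketch (illustrative): proportion of time spent in each of the 8 areas for one batch
# of frames. The default of 60 * 30 frames presumably covers one 30-minute recalibration
# window (cf. the get_centroid_area_history docstring). `data_path` is a hypothetical argument.
def example_analyze_by_period(data_path):
    files = get_all_files(data_path)
    proportions = analyze_by_period(files, num_frames=60 * 30)
    for area, fraction in proportions.items():
        print("Area {}: {:.1%} of time".format(area, fraction))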
def naive_detection_from_files(data_path, startIndex=None, endIndex=None):
    heatmap_plot = get_init_heatmap_plot()
    likelihood_plot = get_init_likelihood_plot()
    files = get_all_files(data_path)
    if startIndex is None:
        startIndex = 0
    if endIndex is None:
        endIndex = len(files)
    print(startIndex, endIndex)
    for i in range(startIndex, endIndex):
        frame = get_frame(files[i])
        areas_person_is_in = naive_detection_by_frame(frame)
        likelihood_array = [[
            areas_person_is_in[i]["likelihood"] for i in range(4)
        ], [areas_person_is_in[i]["likelihood"] for i in range(4, 8)]]

        # if debugging with plot view
        update_heatmap(frame, heatmap_plot)
        update_heatmap(likelihood_array, likelihood_plot)
def create_godec_input(files, normalize=True, rgb=False):
    i = 0
    M = None
    frame = None
    for f in files:
        if isinstance(f, str):
            if rgb:
                frame = get_frame_RGB(f)
            elif normalize:
                frame = get_frame_GREY(f)
            else:
                frame = get_frame(f)
        else:
            frame = files[i]
        # Stack frames as column vectors
        F = frame.T.reshape(-1)
        
        if i == 0:
            M = array([F]).T
        else:
            M = column_stack((M, F))
        i += 1

    return M, frame
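
# Sanity-check sketch (illustrative): each frame is flattened into a column, so the observation
# matrix M has shape (width * height, num_frames), which is the layout the GoDec functions rely
# on when they reshape columns back with L[:, i].reshape(width, height).T.
def example_godec_input_shape():
    dummy_frames = [np.zeros((24, 32)) for _ in range(5)]  # hypothetical 24x32 sensor frames
    M, last_frame = create_godec_input(dummy_frames)
    assert M.shape == (24 * 32, 5)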
def get_centroid_area_history(files, debug=True, key_format="simple"):
    """
    Primary function to be called for obtaining history in the following format:
    {
        0: {
            0: ...,
            1: ...,
        },
        1: { ... }
    }
    Arguments:
        files {[str]} -- up to 30 mins of files, since we decided that recalibration of godec should be done every 30 mins
    """
    annotated_images = []
    centroid_history = []
    M, LS, L, S, width, height = bs_godec(files)

    for i in range(len(files)):
        img = get_frame_GREY(files[i])
        L_frame = normalize_frame(L[:, i].reshape(width, height).T)
        S_frame = normalize_frame(S[:, i].reshape(width, height).T)
        img = cleaned_godec_img(L_frame, S_frame, get_frame(files[i]))
        images, centroids = postprocess_img(img)

        annotated_img = images[-1]
        annotated_images.append(annotated_img)

        append_centroid_history(centroids, i, centroid_history)

    interpolated_centroid_history = Interpolator(centroid_history).history
    centroid_area_array = [
        get_centroid_area_number(cnt) for cnt in interpolated_centroid_history
    ]
    area_counter = Counter(centroid_area_array)

    for i in range(8):
        if i not in area_counter:
            area_counter[i] = 0

    if key_format == "simple":
        area_movement_counter = Counter()

        for i in range(len(centroid_area_array) - 1):
            from_area = str(centroid_area_array[i])
            to_area = str(centroid_area_array[i + 1])
            label = from_area + "→" + to_area
            area_movement_counter[label] += 1

    elif key_format == "from_to":
        area_movement_counter = {"None": Counter()}
        for i in range(8):
            area_movement_counter[str(i)] = Counter()

        for i in range(len(centroid_area_array) - 1):
            from_area = str(centroid_area_array[i])
            to_area = str(centroid_area_array[i + 1])
            area_movement_counter[from_area][to_area] += 1

    if debug:
        return area_counter, area_movement_counter, centroid_area_array, annotated_images
    return area_movement_counter
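
# Usage sketch (illustrative): obtain per-area visit counts and area-to-area movement counts
# for a batch of up to 30 minutes of frames. `data_path` is a hypothetical argument.
def example_centroid_area_history(data_path):
    files = get_all_files(data_path)
    area_counter, area_movement_counter, centroid_area_array, annotated_images = \
        get_centroid_area_history(files, debug=True, key_format="simple")
    print("Frames spent per area:", dict(area_counter))
    print("Area-to-area transitions:", dict(area_movement_counter))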