Example #1
def single_video_calculation(file, file_path, pulse_label_data):
    start_time = time.time()
    w_div = 16
    h_div = 8

    bpm_values = np.zeros((h_div, w_div), dtype='float64')

    video_frames, fps = load_video(file_path)
    video_frames = video_frames[22:310]
    frame_count, width, height = get_video_dimensions(video_frames)
    roi_width = int(width / w_div)
    roi_height = int(height / h_div)


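    # Trim any odd remainder so the ROI grid tiles the frame exactly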
    width = roi_width * w_div
    height = roi_height * h_div
    for x in range(0, width, roi_width):
        for y in range(0, height, roi_height):
            roi_ind_x = int(x / roi_width)
            roi_ind_y = int(y / roi_height)

            roi_time_series = video_frames[:, y:y + roi_height, x:x + roi_width]
            # Spatial averaging over the extracted ROI volume
            time_series = np.mean(roi_time_series, axis=(1, 2))

            # Pulse-Signal Extraction
            bpm, pruned_fft, fft, heart_rates, raw, H, h, norm_channels, time_series = pos_based_method_improved(time_series, fps)
            # bpm, pruned_fft = extract_pos_based_method_improved(time_series, fps)

            plot_results(bpm, pruned_fft, fft, heart_rates, raw=raw, overlap_signal=H, pulse_signal=h, norm_channels=norm_channels, time_series=time_series)

            bpm_values[roi_ind_y, roi_ind_x] = bpm

        print("Fortschritt: %.2f %%" % ((x+1.0) / width*100.0))
Example #2
def extr_single_video_calculation(in_file, in_file_path, out_dir):

    start_time = time.time()

    # load video
    video_frames, fps = load_video(in_file_path)
    video_frames = video_frames[22:358]
    frame_count, width, height = get_video_dimensions(video_frames)

    # Large ndarray holding one pulse signal per pixel of the video
    pulse_signal_data = np.zeros([height, width, 44], dtype='float64')
    # BPM map, used only for visualisation
    bpm_map = np.zeros([height, width], dtype='float16')

    # For plotting the skin and pulse matrices
    last_frame = video_frames[frame_count - 1]
    last_frame_clone = last_frame.copy()
    # Load the pulse values belonging to this video from the label data
    pulse_lower, pulse_upper = get_pulse_vals_from_label_data(
        load_reference_data(), in_file)

    for x in range(0, width):
        for y in range(0, height):

            # get pixel sequence
            px_time_series = video_frames[:, y, x]

            # call POS-Function
            bpm, pruned_fft = extract_pos_based_method_improved(
                px_time_series, fps)

            # write extracted pulse frequencies to ndarray
            pulse_signal_data[y, x] = pruned_fft

            # fill up bpm-map
            bpm_map[y, x] = bpm

        print("Completed: %.2f %%" % ((x + 1.0) / width * 100.0))

    # compare bpms with reference bpms lower and upper
    weak_skin_map = compare_pulse_vals(bpm_map, pulse_lower, pulse_upper)
    # check neighbouring BPMs
    strong_skin_map = eliminate_weak_skin(weak_skin_map, skin_neighbors=5)

    # For plotting the skin and pulse matrices
    out_file_path = os.path.join(out_dir, 'no_nan_' + in_file[:-4])
    plot_title = in_file + ' BPM: ' + str(pulse_lower) + '-' + str(pulse_upper)
    plot_and_save_results(plot_title, last_frame_clone, bpm_map, weak_skin_map,
                          strong_skin_map, out_file_path)

    np.save(out_file_path, pulse_signal_data)
    print("--- File Completed after %s seconds ---" %
          (time.time() - start_time))
    print('Saved to ' + out_file_path)
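
compare_pulse_vals and eliminate_weak_skin are project helpers as well. Judging from their use above (a BPM map in, binary skin maps out, skin_neighbors setting how many neighbouring skin positions are required), a minimal sketch could look as follows; the thresholding details are assumptions:

import numpy as np
from scipy.ndimage import convolve


def compare_pulse_vals(bpm_map, pulse_lower, pulse_upper):
    # Mark every position whose BPM lies inside the labelled reference range
    return ((bpm_map >= pulse_lower) & (bpm_map <= pulse_upper)).astype('float64')


def eliminate_weak_skin(weak_skin_map, skin_neighbors=5):
    # Keep a skin position only if enough of its 8-neighbourhood
    # was classified as skin, too
    kernel = np.array([[1, 1, 1],
                       [1, 0, 1],
                       [1, 1, 1]], dtype='float64')
    neighbor_counts = convolve(weak_skin_map, kernel, mode='constant')
    return (weak_skin_map * (neighbor_counts >= skin_neighbors)).astype('float64')
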
Example #3
    return roi_bpm, heart_rates, fft1, hann_window_signal, S


if __name__ == '__main__':
    start_time = time.time()

    dir_path = os.path.join('..', 'assets', 'Vid_Original',
                            'Kuenstliches_Licht')
    file = '00130.MTS'
    file_path = os.path.join(dir_path, file)
    w_div = 16
    h_div = 8
    bpm_values = np.zeros((h_div, w_div), dtype='float64')

    print(file_path)
    vid_data, fps = load_video(file_path)
    vid_data = vid_data[50:300]

    frame_count, width, height = get_video_dimensions(vid_data)
    print('Trimmed length: ' + str(frame_count))
    # w_steps = width/w_div
    # h_steps = height/h_div
    roi_mean_frames = np.zeros((frame_count, w_div, h_div, 3), dtype='float64')

    for j, frame in enumerate(vid_data):

        # Spatial averaging: reduce the frame to a grid of ROI means
        roi_means_2DArray, frame_devided = devide_frame_into_roi_means(
            frame, w_div, h_div)
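
devide_frame_into_roi_means is not shown on this page either. A minimal sketch consistent with its use above, assuming it returns the per-channel mean of every ROI together with the frame; the exact return shapes are assumptions:

import numpy as np


def devide_frame_into_roi_means(frame, w_div, h_div):
    # Split the frame into an h_div x w_div grid and average each ROI
    # over its pixels, keeping the three colour channels separate
    height, width = frame.shape[:2]
    roi_w = width // w_div
    roi_h = height // h_div
    roi_means = np.zeros((h_div, w_div, 3), dtype='float64')
    for ix in range(w_div):
        for iy in range(h_div):
            roi = frame[iy * roi_h:(iy + 1) * roi_h,
                        ix * roi_w:(ix + 1) * roi_w]
            roi_means[iy, ix] = np.mean(roi, axis=(0, 1))
    return roi_means, frame
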
Example #4
if __name__ == '__main__':

    face_cascade_path = os.path.join('C:/', 'Anaconda3', 'pkgs',
                                     'opencv3-3.1.0-py35_0', 'Library', 'etc',
                                     'haarcascades',
                                     'haarcascade_frontalface_default.xml')
    dir_path = os.path.join('assets', 'Vid_Original')
    file = '00112.MTS'
    file_path = os.path.join(dir_path, file)

    window_numbers = 6
    window_size = 40
    frame_count = window_numbers * window_size + 1

    video_frames, fps = load_video(file_path)
    video_frames = video_frames[1:frame_count]
    print('Reduced Frame Count: ' + str(len(video_frames)))

    face_cascade = cv2.CascadeClassifier(face_cascade_path)

    # Create time series array of the roi means
    viola_roi_sequence = []

    for j, frame in enumerate(video_frames):

        roi_face = viola(frame)
        viola_roi_sequence.append(roi_face)
        # blurred_roi = cv2.blur(roi_face, (5, 5))

        # cv2.imshow('blurred_roi', roi_face)
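
The viola helper called in the loop is not part of the snippet. A minimal Viola-Jones sketch using the face_cascade loaded above; returning the unmodified frame when no face is found is an assumption:

def viola(frame):
    # Detect faces with the Haar cascade and return the first one as an ROI
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3,
                                          minNeighbors=5)
    if len(faces) == 0:
        return frame
    x, y, w, h = faces[0]
    return frame[y:y + h, x:x + w]
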
Example #5
def extr_roi_single_video_calculation(in_file, in_file_path, out_dir):

    start_time = time.time()

    # determine how to divide each frame into ROIs
    w_div = 16
    h_div = 8

    # load video
    video_frames, fps = load_video(in_file_path)
    video_frames = video_frames[22:358]
    frame_count, width, height = get_video_dimensions(video_frames)
    w_steps = int(width / w_div)
    h_steps = int(height / h_div)

    # Large ndarray holding one pulse signal per ROI of the video
    pulse_signal_data = np.zeros([h_div, w_div, 44], dtype='float64')
    # BPM map, used only for visualisation
    bpm_map = np.zeros((h_div, w_div), dtype='float64')

    # Load the pulse values belonging to this video from the label data
    pulse_lower, pulse_upper = get_pulse_vals_from_label_data(
        load_reference_data(), in_file)

    # For plotting the skin and pulse matrices
    plot_title = in_file + ' BPM: ' + str(pulse_lower) + '-' + str(pulse_upper)
    fig = plt.figure(figsize=(20, 15))
    fig.suptitle(plot_title, fontsize=20, fontweight='bold')
    tick_fontsize = 11
    txt_coord_x = 0.05
    txt_coord_y = 0.9
    txt_fontsize = 21

    sub1 = fig.add_subplot(221)
    sub2 = fig.add_subplot(222)
    sub3 = fig.add_subplot(223)
    sub4 = fig.add_subplot(224)

    last_frame = video_frames[frame_count - 1]
    last_frame_clone = last_frame.copy()

    # Any odd remainder of the frame is trimmed here
    width = w_steps * w_div
    height = h_steps * h_div
    for x in range(0, width, w_steps):
        for y in range(0, height, h_steps):
            roi_ind_x = int(x / w_steps)
            roi_ind_y = int(y / h_steps)

            # get roi volumes
            roi_time_series = video_frames[:, y:y + h_steps, x:x + w_steps]
            # Spatial Averaging
            roi_time_series_avg = np.mean(roi_time_series, axis=(1, 2))

            # call POS-Function
            bpm, pruned_fft = extract_pos_based_method_improved(
                roi_time_series_avg, fps)

            # fill up bpm-map
            bpm_map[roi_ind_y, roi_ind_x] = bpm

            # write extracted pulse frequencies to ndarray
            pulse_signal_data[roi_ind_y, roi_ind_x] = pruned_fft

            # For plotting the skin and pulse matrices
            sub1.text(x + w_steps / 2,
                      y + h_steps / 2,
                      round(bpm, 1),
                      color=(0.0, 0.0, 0.0),
                      fontsize=7,
                      va='center',
                      ha='center')
            cv2.rectangle(last_frame_clone, (x, y), (x + w_steps, y + h_steps),
                          (0, 0, 0), 2)

        print("Fortschritt: %.2f %%" % ((x + 1.0) / width * 100.0))

    # compare bpms with reference bpms lower and upper
    weak_skin_map = compare_pulse_vals(bpm_map, pulse_lower, pulse_upper)
    # check neighbouring BPMs
    strong_skin_map = eliminate_weak_skin(weak_skin_map, skin_neighbors=3)

    # For plotting the skin and pulse matrices
    out_file_path = os.path.join(out_dir, 'me_' + in_file[:-4])
    bgr_last_frame = cv2.cvtColor(last_frame_clone, cv2.COLOR_RGB2BGR)

    sub1.text(txt_coord_x,
              txt_coord_y,
              '(a)',
              color='white',
              fontsize=txt_fontsize,
              horizontalalignment='center',
              transform=sub1.transAxes)
    sub1.tick_params(axis='both', which='major', labelsize=tick_fontsize)
    sub1.imshow(bgr_last_frame)
    sub2.text(txt_coord_x,
              txt_coord_y,
              '(b)',
              color='white',
              fontsize=txt_fontsize,
              horizontalalignment='center',
              transform=sub2.transAxes)
    sub2.tick_params(axis='both', which='major', labelsize=tick_fontsize)
    sub2.matshow(bpm_map, cmap=plt.cm.gray)
    sub3.text(txt_coord_x,
              txt_coord_y,
              '(c)',
              color='white',
              fontsize=txt_fontsize,
              horizontalalignment='center',
              transform=sub3.transAxes)
    sub3.tick_params(axis='both', which='major', labelsize=tick_fontsize)
    sub3.matshow(weak_skin_map, cmap=plt.cm.gray)
    sub4.text(txt_coord_x,
              txt_coord_y,
              '(d)',
              color='white',
              fontsize=txt_fontsize,
              horizontalalignment='center',
              transform=sub4.transAxes)
    sub4.tick_params(axis='both', which='major', labelsize=tick_fontsize)
    sub4.matshow(strong_skin_map, cmap=plt.cm.gray)

    plt.tight_layout()
    # plt.show()
    fig.savefig(out_file_path + '.png')
    plt.close()

    # tp, fp, fn and tn of POS-Algorithm of one video
    # is summed up with a global variable for all videos
    vid_true_positives, vid_false_positives, vid_false_negatives, vid_true_negatives = compare_with_skin_mask(
        in_file, weak_skin_map, h_div, w_div)
    global true_positives
    global false_positives
    global false_negatives
    global true_negatives
    true_positives += vid_true_positives
    false_positives += vid_false_positives
    false_negatives += vid_false_negatives
    true_negatives += vid_true_negatives

    print("--- File Completed after %s seconds ---" %
          (time.time() - start_time))
    np.save(out_file_path, pulse_signal_data)
    print('Saved to ' + out_file_path)
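
compare_with_skin_mask evaluates the estimated skin map against a manually labelled ground-truth mask and returns the confusion-matrix counts. A minimal sketch; load_skin_mask is a hypothetical loader for the labelled mask, not part of the original code:

import numpy as np


def compare_with_skin_mask(video_file, skin_map, h_div, w_div):
    # load_skin_mask is hypothetical: it should return a boolean
    # (h_div, w_div) ground-truth mask for the given video
    reference = load_skin_mask(video_file, h_div, w_div)
    estimated = skin_map.astype(bool)
    tp = np.sum(estimated & reference)
    fp = np.sum(estimated & ~reference)
    fn = np.sum(~estimated & reference)
    tn = np.sum(~estimated & ~reference)
    return tp, fp, fn, tn
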
Example #6
def skin_detection_algorithm_single_video(_file,
                                          _dir_path,
                                          _dest_folder,
                                          show_figure=False):

    file_path = os.path.join(_dir_path, _file)

    # define the upper and lower boundaries of the HSV pixel
    # intensities to be considered 'skin'

    # light skin tone
    lower = np.array([0, 50, 50], dtype="uint8")
    upper = np.array([25, 255, 255], dtype="uint8")

    # # colored skin-tone 00128
    # lower = np.array([0, 80, 30], dtype="uint8")
    # upper = np.array([15, 255, 180], dtype="uint8")

    video_frames, fps = load_video(file_path)
    frame_count, width, height = get_video_dimensions(video_frames)

    video_frames = video_frames[22:50]

    skin_arr = np.ones([height, width])

    max_vals = []
    min_vals = []

    # keep looping over the frames in the video
    for i, frame in enumerate(video_frames):

        # To check which HSV values are needed for a certain person
        # cv2.rectangle(frame, (850, 530), (860, 550), (0, 255, 0), 2)
        # cv2.rectangle(frame, (600, 1000), (860, 1080), (0, 255, 0), 2)
        # frame[1000:1080, 600:860]
        rect_hsv = cv2.cvtColor(frame[230:250, 750:760], cv2.COLOR_BGR2HSV)
        # print(rect_hsv)
        max_in_rect = np.amax(rect_hsv, axis=(0, 1))
        min_in_rect = np.amin(rect_hsv, axis=(0, 1))
        # print(np.mean(rect_hsv, axis=(0, 1)))

        max_vals.append(max_in_rect)
        min_vals.append(min_in_rect)

        # convert the frame to the HSV color space and determine
        # which pixel intensities fall into the specified upper
        # and lower boundaries
        converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        skin_mask = cv2.inRange(converted, lower, upper)

        # apply a series of erosions and dilations to the mask
        # using an elliptical kernel
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
        skin_mask = cv2.erode(skin_mask, kernel, iterations=2)
        skin_mask = cv2.dilate(skin_mask, kernel, iterations=2)

        # blur the mask to help remove noise, then apply the
        # mask to the frame
        skin_mask = cv2.GaussianBlur(skin_mask, (3, 3), 0)
        skin = cv2.bitwise_and(frame, frame, mask=skin_mask)

        # show the skin in the image along with the mask
        cv2.imshow("images", np.hstack([frame, skin]))
        # cv2.imshow("images", skin)

        # if the 'q' key is pressed, stop the loop
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

        # Reduce by 1 if pixel is not a skin pixel
        mean_skin = np.mean(skin, axis=2)
        low_values_indices = mean_skin < 1
        skin_arr[low_values_indices] -= 1

    # Where values are lower than threshold
    final_mask = np.ones([height, width])
    low_values_indices = skin_arr < -2
    final_mask[low_values_indices] = 0

    fig = plt.figure(figsize=(17, 9))
    sub1 = fig.add_subplot(111)
    sub1.set_title('Final skin mask')
    sub1.imshow(final_mask, cmap=plt.cm.gray)

    file_path_out = os.path.join(_dest_folder, 'Skin_' + _file[:-4])

    fig.savefig(file_path_out + '.jpg')
    if show_figure:
        plt.show()

    # Save it as .npy file
    # np.save(file_path_out, final_mask)
    print('Saved to ' + file_path_out)

    # cleanup the camera and close any open windows
    cv2.destroyAllWindows()
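
A typical call of this function might look as follows; the directory layout is borrowed from the other examples on this page and the output folder is an assumption:

if __name__ == '__main__':
    dir_path = os.path.join('..', 'assets', 'Vid_Original', 'Kuenstliches_Licht')
    skin_detection_algorithm_single_video('00130.MTS', dir_path, 'out',
                                          show_figure=True)
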
Example #7
def normalize_frames(stddev_of_frames, mean, frame):

    # NOTE: stddev_of_frames is currently unused; the frame is only
    # normalised by the temporal mean
    frame = frame / mean

    return frame


# def calculate_chrom():


if __name__ == '__main__':

    filename = 'assets\\output_1.1.mp4'

    vid_frames, fps, width, height = load_video(filename)

    blue_vals = []
    green_vals = []
    red_vals = []

    mean_of_frames = calculate_mean_over_interval(vid_frames)
    print("Mean: " + str(mean_of_frames[50, 145]))
    stddev_of_frames = calculate_std_deviation_over_interval(vid_frames)
    print(stddev_of_frames)

    for frame in vid_frames:

        # red, green, blue = split_into_rgb_channels(frame)
        # green_image = np.zeros((green.shape[0], green.shape[1], 3), dtype=green.dtype)
        # green_image[:, :, 1] = green
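
calculate_mean_over_interval and calculate_std_deviation_over_interval come from the project as well. Since mean_of_frames is indexed with two coordinates above, they plausibly reduce over time and colour channels; a minimal sketch under that assumption:

import numpy as np


def calculate_mean_over_interval(frames):
    # Per-pixel mean over all frames and colour channels
    return np.mean(frames, axis=(0, 3))


def calculate_std_deviation_over_interval(frames):
    # Per-pixel standard deviation over all frames and colour channels
    return np.std(frames, axis=(0, 3))
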
Example #8
import time

import numpy as np
import cv2

from matplotlib import pyplot as plt
from Video_Tools import load_video
from Video_Tools import get_frames_dimension

start_time = time.time()

# 'dir' shadows a Python built-in, so use dir_path instead
dir_path = 'assets\\Videos Original'
file = '00101.mts'

path = dir_path + '\\' + file
print(path)

vid_data, fps = load_video(path)

L, width, height = get_frames_dimension(vid_data)
print('Frames: ' + str(L))

intensity_frames = np.zeros((L, height, width, 1), dtype='float64')
intensity_array = []

# Compare each frame with its successor
for i, frame in enumerate(vid_data[:-1]):

    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray_frame_nxt = cv2.cvtColor(vid_data[i + 1], cv2.COLOR_BGR2GRAY)

    # cv2.absdiff avoids uint8 wrap-around on subtraction
    diff = cv2.absdiff(gray_frame, gray_frame_nxt)
    intensity_between_frames = np.sum(diff) / (width * height)
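    # The snippet is cut off here; presumably the per-frame difference
    # is collected for later use (an assumption)
    intensity_array.append(intensity_between_frames)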
Example #9
    bound_low = (np.abs(frequencies - freq_min)).argmin()
    bound_high = (np.abs(frequencies - freq_max)).argmin()
    fft[:bound_low] = 0
    fft[bound_high:-bound_high] = 0
    fft[-bound_low:] = 0

    # Back to the time domain; keep only the real part
    result = np.real(scipy.fftpack.ifft(fft, axis=0))
    result *= amplification_factor
    return result
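
The fragment above starts mid-function; its header is missing from the page. A plausible reconstruction of the whole filter, assuming time runs along axis 0 and the cutoffs are given in Hz; the default parameter values are assumptions:

import numpy as np
import scipy.fftpack


def temporal_bandpass_filter(data, fps, freq_min=0.833, freq_max=1.0,
                             amplification_factor=1.0):
    # Transform into the temporal frequency domain
    fft = scipy.fftpack.fft(data, axis=0)
    frequencies = scipy.fftpack.fftfreq(data.shape[0], d=1.0 / fps)
    # Zero every bin outside [freq_min, freq_max] and its mirror image
    bound_low = (np.abs(frequencies - freq_min)).argmin()
    bound_high = (np.abs(frequencies - freq_max)).argmin()
    fft[:bound_low] = 0
    fft[bound_high:-bound_high] = 0
    fft[-bound_low:] = 0

    result = np.real(scipy.fftpack.ifft(fft, axis=0))
    result *= amplification_factor
    return result
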


if __name__ == '__main__':

    filename = 'assets\\output_1.mp4'
    vid_data, fps = load_video(filename)
    """Graph the average value of the video as well as the frequency strength"""
    averages = []

    height, width = vid_data.shape[1:3]
    for frame in vid_data:

        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        blurred_frame = cv2.blur(gray_frame, (5, 5))
        center = (int(width / 2), int(height / 2))
        print(center)
        cv2.circle(blurred_frame, (0, 0), 1, (0, 0, 255), -1)
        cv2.imshow("blurred_frame", blurred_frame)
        bandpassed = temporal_bandpass_filter(blurred_frame, fps)
        averages.append(bandpassed[(0, 0)])
Example #10
    cv2.rectangle(frame, (x, y), (x + roi_width, y + roi_height), (0, 255, 0),
                  1)

    return roi, frame
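
This fragment is the tail of a helper that crops an ROI and marks it on the frame. A plausible reconstruction; the function name and signature are assumptions:

def get_roi(frame, x, y, roi_width, roi_height):
    # Crop the ROI and draw its bounding box on the frame
    roi = frame[y:y + roi_height, x:x + roi_width]
    cv2.rectangle(frame, (x, y), (x + roi_width, y + roi_height), (0, 255, 0),
                  1)
    return roi, frame
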


if __name__ == '__main__':

    filename = '00073.MTS'
    out_filename = 'new_00073.avi'

    input_path = os.path.join('assets', filename)
    output_path = os.path.join('assets', out_filename)
    # path = "assets\\ROIs\\Original\\" + filename

    vid_frames, fps = load_video(input_path)
    fourcc = cv2.VideoWriter_fourcc('L', 'A', 'G', 'S')
    # VideoWriter expects (width, height); the .MTS source is 1920x1080
    out = cv2.VideoWriter(output_path, fourcc, fps, (1920, 1080))

    # To shorten the video
    # [x:] = remove from the beginning
    trimmed_frames = vid_frames[400:808]

    for frame in trimmed_frames:

        # # Show results
        # cv2.imshow('Frames', frame)

        # Write the trimmed frame to the output video
        out.write(frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break