Example no. 1
def dct_blur_segmentation(in_dir, block_size, blur_kernel=None):
    """
    todo:   + make num_planes_to_keep a parameter, optimize over it
            + make multi-scale: incorporate info from multiple block sizes; see if it leads to performance improvement
            + make diff_blurmaps threshold a parameter; optimize over it to see if it leads to performance improvement
    """
    if isinstance(block_size, int):
        block_size_tuple = (block_size, block_size)
    else:
        block_size_tuple = block_size

    if isinstance(blur_kernel, int):
        blur_kernel = (blur_kernel, blur_kernel)

    files = list(sorted(os.listdir(in_dir)))
    filenames = list(sorted([os.path.join(in_dir, f) for f in files]))

    img0 = lm(cv2.imread(filenames[0]))
    if blur_kernel:
        img0 = cv2.blur(img0, blur_kernel)
    mask_zeros = np.zeros((int(img0.shape[0] / 3) + 12, img0.shape[1]))
    mask_ones = np.ones(
        (img0.shape[0] - 12 - int(img0.shape[0] / 3), img0.shape[1]))
    mask = np.vstack((mask_zeros, mask_ones)).astype(int)
    # img0 = slice_bit_planes(img=img0, num_planes_to_keep=5)
    blurmap0 = stretch_histogram(
        dct_motion_blur_detection(img=img0, block_size=block_size))
    for j, f in tqdm(enumerate(filenames[1:150])):
        img1 = lm(cv2.imread(f))
        if blur_kernel:
            img1 = cv2.blur(img1, blur_kernel)
        img_stacked = np.dstack((img1, img1, img1))
        # img1 = slice_bit_planes(img=img1, num_planes_to_keep=5)
        blurmap1 = stretch_histogram(
            dct_motion_blur_detection(img=img1, block_size=block_size))
        diff_blurmaps = blurmap1 - blurmap0
        diff_blurmaps = np.where(diff_blurmaps > 20, 255, 0)
        diff_blurmaps = closing(diff_blurmaps, selem=np.ones((21, 21)))
        # diff_blurmaps = closing(diff_blurmaps, selem=np.ones((15,15)))
        # diff_blurmaps = dilation(diff_blurmaps, selem=np.ones((11,11)))
        # diff_blurmaps = closing(diff_blurmaps, selem=np.ones((11,11)))
        # diff_blurmaps = closing(diff_blurmaps, selem=np.ones((17,17)))
        diff_blurmaps = opening(diff_blurmaps, selem=np.ones((19, 19)))
        # diff_blurmaps = dilation(diff_blurmaps, selem=np.ones((11,11)))
        diff_blurmaps = closing(diff_blurmaps, selem=np.ones((27, 27)))

        diff_blurmaps *= mask

        diff_blurmaps = np.dstack(
            (diff_blurmaps, np.zeros(diff_blurmaps.shape),
             np.zeros(diff_blurmaps.shape)))
        # diff_blurmaps = np.where(diff_blurmaps > 0, 255, img_stacked)
        # mask_diff is needed by the second imwrite below, so compute it here
        mask_diff = np.where(diff_blurmaps > 0, 255, 0)

        cv2.imwrite('../output_data/final_systems/dct_blur/' + files[j + 1],
                    diff_blurmaps)
        cv2.imwrite(
            '../output_data/final_systems/masks/dct_blur/' + files[j + 1],
            mask_diff)
        img0, blurmap0 = img1, blurmap1
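
Example 1 relies on the project-local helpers lm, stretch_histogram, and dct_motion_blur_detection, which are not reproduced in this listing. As a rough illustration of the block-DCT blur measure that the last helper presumably computes, the sketch below scores each block by the fraction of its DCT energy in the low-frequency corner; the block handling and scoring are assumptions, not the project's implementation.

import cv2
import numpy as np


def dct_blur_map_sketch(img, block_size=16):
    """Hypothetical stand-in for dct_motion_blur_detection: per-block blur score.

    Blurred blocks concentrate DCT energy in low frequencies, so a higher
    low-frequency energy fraction loosely indicates more blur.
    """
    h, w = img.shape[:2]
    out = np.zeros((h, w), dtype=np.float32)
    for y in range(0, h - block_size + 1, block_size):
        for x in range(0, w - block_size + 1, block_size):
            block = np.float32(img[y:y + block_size, x:x + block_size])
            coeffs = cv2.dct(block)
            total = np.sum(np.abs(coeffs)) + 1e-8
            low = np.sum(np.abs(coeffs[:block_size // 4, :block_size // 4]))
            out[y:y + block_size, x:x + block_size] = low / total
    return out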
Example no. 2
def main(img_file='../input_data/tunnels/tunnel_1.png', out_dir='../output_data/frequency_domain/original_tunnel/'):
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    img = cv2.imread(img_file)
    
    # this method will automatically convert `img` to grayscale. set param `make_gray` = `False` if you don't want this
    dft_img = to_frequency_domain(img)
    dft_shift = np.fft.fftshift(dft_img)
    magnitude = 20 * np.log(np.abs(dft_shift) + 1)
    phase = np.angle(dft_shift)

    #dims = int(img.shape[0] / 8), int(img.shape[1] / 8)
    dims = 5
    if isinstance(dims, int):
        dims = (dims, dims)

    start_x = int((dft_img.shape[0] - dims[0]) / 2)
    start_y = int((dft_img.shape[1] - dims[1]) / 2)

    # highpass filtering

    highpass_kernel = construct_highpass_filter(dims, dft_img.shape, start_x, start_y)
    highpass_fft_filtered = dft_shift * highpass_kernel
    highpass_fft_filtered_magnitude = np.abs(highpass_fft_filtered)
    highpass_fft_filtered_phase = np.angle(highpass_fft_filtered)

    # undo the fftshift before inverting, then keep the real part
    highpass_filtered = np.fft.ifft2(np.fft.ifftshift(highpass_fft_filtered))
    highpass_filtered = np.abs(highpass_filtered.real)
    print(highpass_filtered)

    # lowpass filtering

    lowpass_kernel = construct_lowpass_filter(dims, dft_img.shape, start_x, start_y)
    lowpass_fft_filtered = dft_shift * lowpass_kernel
    lowpass_fft_filtered_magnitude = np.abs(lowpass_fft_filtered)
    lowpass_fft_filtered_phase = np.angle(lowpass_fft_filtered)

    # undo the fftshift before inverting, then keep the real part
    lowpass_filtered = np.fft.ifft2(np.fft.ifftshift(lowpass_fft_filtered))
    lowpass_filtered = np.abs(lowpass_filtered.real)
    print(lowpass_filtered)
    
    cv2.imwrite(os.path.join(out_dir, 'highpass_fft_filtered_magnitude.png'), np.abs(highpass_fft_filtered_magnitude))
    cv2.imwrite(os.path.join(out_dir, 'lowpass_fft_filtered_magnitude.png'), np.abs(lowpass_fft_filtered_magnitude))
    cv2.imwrite(os.path.join(out_dir, 'highpass_fft_filtered_phase.png'), np.abs(highpass_fft_filtered_phase))
    cv2.imwrite(os.path.join(out_dir, 'lowpass_fft_filtered_phase.png'), np.abs(lowpass_fft_filtered_phase))
    cv2.imwrite(os.path.join(out_dir, 'lowpass_filter.png'), stretch_histogram(lowpass_kernel))
    cv2.imwrite(os.path.join(out_dir, 'highpass_filter.png'), stretch_histogram(highpass_kernel))
    cv2.imwrite(os.path.join(out_dir, 'magnitude.png'), magnitude)
    cv2.imwrite(os.path.join(out_dir, 'phase.png'), phase)
    cv2.imwrite(os.path.join(out_dir, 'highpass_filtered.png'), stretch_histogram(highpass_filtered))
    cv2.imwrite(os.path.join(out_dir, 'lowpass_filtered.png'), stretch_histogram(lowpass_filtered))
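
construct_highpass_filter and construct_lowpass_filter are project helpers not shown above. Assuming they build rectangular binary masks of size dims anchored at (start_x, start_y) in the shifted spectrum, a minimal sketch could look like this; the exact shape and dtype are guesses.

import numpy as np


def construct_lowpass_filter_sketch(dims, shape, start_x, start_y):
    """Ones inside a central dims[0] x dims[1] window, zeros elsewhere."""
    kernel = np.zeros(shape[:2], dtype=np.float64)
    kernel[start_x:start_x + dims[0], start_y:start_y + dims[1]] = 1.0
    return kernel


def construct_highpass_filter_sketch(dims, shape, start_x, start_y):
    """Complement of the low-pass mask: zeros in the center, ones elsewhere."""
    return 1.0 - construct_lowpass_filter_sketch(dims, shape, start_x, start_y)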
Example no. 3
def _prepare_frame_segment(seed_img,
                           threshold=20,
                           filename=None,
                           img=None,
                           blur=None,
                           as_numeric=True,
                           stretched=True):
    if filename:
        img = lm(cv2.imread(filename))
    elif img is not None:
        if len(img.shape) == 3:
            img = lm(img)
    else:
        raise Exception(
            'Must specify either a path to the image file or a numpy-array image; got neither'
        )

    if blur:
        if isinstance(blur, int):
            blur = (blur, blur)
        img = cv2.blur(img, blur)
        #seed_img = cv2.blur(seed_img, blur)

    img = slice_bit_planes(img, 4)
    seed_img = slice_bit_planes(seed_img, 4)
    # cast to signed ints so the absolute difference doesn't wrap for uint8 inputs
    diff_img = np.abs(img.astype(int) - seed_img.astype(int))
    segmented = diff_img > threshold
    if as_numeric:
        segmented = segmented.astype(int)
        if stretched:
            segmented = stretch_histogram(segmented)
    return segmented
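
slice_bit_planes is used here and in Example 1 but never defined in these listings. A minimal sketch of bit-plane slicing that keeps only the top num_planes_to_keep planes of an 8-bit image (assumed behavior, not the project's code):

import numpy as np


def slice_bit_planes_sketch(img, num_planes_to_keep):
    """Zero out the lowest (8 - num_planes_to_keep) bit planes of an 8-bit image."""
    mask = (0xFF << (8 - num_planes_to_keep)) & 0xFF
    return img.astype(np.uint8) & np.uint8(mask)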
Example no. 4
def stretch_histograms(in_dir='../output_data/denoised/'):
    for f in os.listdir(in_dir):
        file_name = os.path.join(in_dir, f)
        print(file_name)
        img = cv2.imread(file_name)
        img = color_to_gray_operations.luminosity_method(img)

        stretched = histogram_processing.stretch_histogram(img)
        cv2.imwrite('../output_data/denoised_stretched/' + f, stretched)
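
stretch_histogram (histogram_processing.stretch_histogram) appears in nearly every example. Assuming it is a plain min-max contrast stretch to the full 8-bit range, a sketch would be:

import numpy as np


def stretch_histogram_sketch(img):
    """Linearly rescale pixel values so they span [0, 255]."""
    img = img.astype(np.float64)
    lo, hi = img.min(), img.max()
    if hi == lo:
        return np.zeros_like(img, dtype=np.uint8)
    return np.round(255 * (img - lo) / (hi - lo)).astype(np.uint8)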
Example no. 5
def test_abstract_painting():
    out_dir = '../output_data/abstract_painting_nopadding/'
    img = lm(cv2.imread(config['image_paths']['original']))
    start_num = 1
    for t in tqdm(range(11, 151, 2)):
        #padded_counter = pad_int(t, 3)
        thresholded = threshold_niblack(img, t)
        segmented = (img < thresholded).astype(int)
        segmented = stretch_histogram(segmented)
        cv2.imwrite(os.path.join(out_dir, str(start_num) + '.jpg'), segmented)
        start_num += 1
Example no. 6
def get_binary_segmentation(img, threshold, numeric=True, stretch=True):
    segmented = img >= threshold

    if numeric:
        segmented = segmented.astype(int)

    if stretch:
        segmented = stretch_histogram(segmented)

    print(segmented)
    return segmented
Example no. 7
def bi_gamma_correction(img_file, thresh=180, lower_g=1.0, higher_g=1.0):
    # note: lower_g and higher_g are accepted but not applied yet; pixels at or
    # above `thresh` currently get a log-based compression instead of a gamma curve
    img = luminosity_method(cv2.imread(img_file))
    #img = gaussian(img, sigma=2)
    new_img = np.where(img < thresh, img, 2 * (img / np.log(img + 1)))
    hist, bins = histogram_processing.compute_image_histogram(img)

    new_img = histogram_processing.stretch_histogram(new_img)

    histogram_processing.plot_histogram(
        img, out_file='../output_data/test_output/tunnel_hist_orig.png')
    histogram_processing.plot_histogram(
        new_img,
        out_file='../output_data/test_output/tunnel_hist_equalized.png')
    return new_img
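
lm / luminosity_method, used throughout these examples, converts a BGR image to grayscale with a weighted channel sum. A sketch using the common luminosity weights (the project's exact coefficients are an assumption):

import numpy as np


def luminosity_method_sketch(img_bgr):
    """Weighted gray conversion; cv2.imread returns BGR, so the weights are reversed."""
    b, g, r = img_bgr[..., 0], img_bgr[..., 1], img_bgr[..., 2]
    return 0.07 * b + 0.72 * g + 0.21 * r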
Example no. 8
f2 = (luminosity_method(f2)).astype(int)
#f2 = exposure.adjust_gamma(f1, 1)
cv2.imwrite(
    '/Users/adamcatto/SRC/dippy/input_data/motion/gray_second_frame.png', f2)

#cv2.imwrite('/Users/adamcatto/SRC/dippy/output_data/decomposition/motion/subtract.png', f2-f1)
#print(f1)
detector = MovingObjectDetector([f1, f2])

bg = detector.model_background(2, max_frames=2)
zipped_bit_planes = list(zip(*bg))
compared = [cv2.bitwise_xor(x[0], x[1]) for x in zipped_bit_planes]
compared = compared[1]
cv2.imwrite(
    '/Users/adamcatto/SRC/dippy/output_data/decomposition/motion/bitwise_xor.png',
    stretch_histogram(compared))


def compute_nms(ns, compared):
    """
    ns: neighborhood_size
    """
    nms = np.zeros(compared.shape)
    #print(nms.shape)

    for _ in range(5):
        for i in range(ns, compared.shape[0] - ns):
            for j in range(ns, compared.shape[1] - ns):
                if compared[i, j] > 0:
                    neighborhood = [
                        1 if compared[i + x, j + y] > 0 else 0
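
Example 8 is cut off mid-comprehension, so the remainder of compute_nms is unknown. Purely as an illustration of the neighborhood counting the loop appears to begin (not the original continuation), the number of nonzero neighbors in a (2*ns + 1)-square window can be computed without explicit loops:

import cv2
import numpy as np


def count_nonzero_neighbors(compared, ns):
    """Count nonzero pixels in the (2*ns + 1) x (2*ns + 1) window around each pixel."""
    binary = (compared > 0).astype(np.float32)
    window = 2 * ns + 1
    # unnormalized box filter sums the binary values over the window
    counts = cv2.boxFilter(binary, -1, (window, window), normalize=False)
    return np.round(counts).astype(int)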
Example no. 9
def combined_edge_dct_blur_segmentation(in_dir,
                                        block_size,
                                        num_frames,
                                        num_prev_frames,
                                        blur_kernel=None):
    if isinstance(block_size, int):
        block_size_tuple = (block_size, block_size)
    else:
        block_size_tuple = block_size

    if isinstance(blur_kernel, int):
        blur_kernel = (blur_kernel, blur_kernel)

    files = list(sorted(os.listdir(in_dir)))
    filenames = list(sorted([os.path.join(in_dir, f) for f in files]))

    img0 = lm(cv2.imread(filenames[0]))
    if blur_kernel:
        img0 = cv2.blur(img0, blur_kernel)

    mask_zeros = np.zeros((int(img0.shape[0] / 3) + 12, img0.shape[1]))
    mask_ones = np.ones(
        (img0.shape[0] - 12 - int(img0.shape[0] / 3), img0.shape[1]))
    mask = np.vstack((mask_zeros, mask_ones)).astype(int)

    blurmap0 = stretch_histogram(
        dct_motion_blur_detection(img=img0, block_size=block_size))
    sobel_edges0 = sobel(img0)
    sobel_edges0 = np.where(sobel_edges0 > 10, 255, 0)

    for j, f in tqdm(enumerate(filenames[num_prev_frames:num_frames])):
        img1 = lm(cv2.imread(f))
        if blur_kernel:
            img1 = cv2.blur(img1, blur_kernel)
        img_stacked = np.dstack((img1, img1, img1))
        blurmap1 = stretch_histogram(
            dct_motion_blur_detection(img=img1, block_size=block_size))
        diff_blurmaps = blurmap1 - blurmap0
        diff_blurmaps = np.where(diff_blurmaps > 30, 255, 0)

        # edge map component
        sobel_edges1 = sobel(img1)
        sobel_edges1 = np.where(sobel_edges1 > 10, 255, 0)
        sobel_diff = sobel_edges1 - sobel_edges0

        # combine
        diff = np.clip(diff_blurmaps + sobel_diff, 0, 255)
        diff = closing(diff, selem=np.ones((13, 13)))
        diff = opening(diff, selem=np.ones((19, 19)))
        diff = dilation(diff, selem=np.ones((21, 21)))
        diff *= mask

        diff = np.dstack((diff, np.zeros(diff.shape), np.zeros(diff.shape)))
        # build the binary mask before overlaying the original frame, otherwise
        # nearly every pixel of the overlay would be flagged
        mask_diff = np.where(diff > 0, 255, 0)
        diff = np.where(diff > 0, 255, img_stacked)

        cv2.imwrite(
            '../output_data/final_systems/combined/' + files[j + num_prev_frames],
            diff)
        cv2.imwrite(
            '../output_data/final_systems/masks/combined/' +
            files[j + num_prev_frames], mask_diff)
        img0, blurmap0, sobel_edges0 = img1, blurmap1, sobel_edges1
Example no. 10
def write_thresholds(img, out_dir):
    img = lm(img)
    for t in tqdm(range(256)):
        segmented = img <= t
        segmented = stretch_histogram(segmented.astype(int))
        cv2.imwrite(os.path.join(out_dir, str(t) + '.jpg'), segmented)