return objects if __name__ == '__main__': try: edge_filter_dict = { 'sobel': apply_sobel, 'prewitt': apply_prewitt, 'canny': apply_canny, } filter_func = edge_filter_dict.get(sys.argv[1]) if len(sys.argv) < 3 and filter_func: start_cv_video(img_filter=filter_func) elif len(sys.argv) < 4 and filter_func: img_path = sys.argv[2] img = cv2.imread(img_path, cv2.IMREAD_COLOR) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) new_img = filter_func(img) plt_show_img(new_img) else: raise Exception('Method not implemented') except Exception as error: print(error) print( 'Usage: python3 edge_detection[canny | sobel | prewitt] [file_path]' )
for (x, y, r) in circles: # draw the circle in the output image, then draw a rectangle # corresponding to the center of the circle cv2.circle(output, (x, y), r, RGB_GREEN, 4) cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), RGB_GREEN, -1) return output if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Emoji segmentation') parser.add_argument('-i', '--image', type=str, action='store', dest='src_img', help='The image to apply the filter') parser.add_argument('-ks', '--kernel_size', type=int, default=5, action='store', dest='kernel_size', help='The size of the kernel when applying dilatation or erotion') parser.add_argument('-d', '--dilate', type=int, default=0, action='store', dest='dilate_iter', help='Number of times to apply the Dilate operation') parser.add_argument('-e', '--erode', type=int, default=0, action='store', dest='erode_iter', help='Number of times to apply the Erode operation') args = parser.parse_args() if args.src_img: try: img = cv2.imread(args.src_img, cv2.IMREAD_COLOR) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) rgb_img = filter(img) plt_show_img(rgb_img) except Exception as error: print(error) else: start_cv_video(0, filter)
nimg[:rows, :cols] = img return nimg if __name__ == '__main__': img = cv2.imread('images/ciel_bw.jpg', cv2.IMREAD_GRAYSCALE) #img = optimize_img_for_DFT(img) #spectrum = get_spectrum(img) hpf = apply_hpf(img) #lpf = apply_lpf(img) #bpf = apply_bpf(img) #img_comparison = np.concatenate((img, spectrum), axis=1) helpers.plt_show_img(hpf) def grab_frame(cap): _, frame = cap.read() frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) frame = optimize_img_for_DFT(frame) spectrum = get_spectrum(frame) img_comparison = np.concatenate((frame, spectrum), axis=1) return img_comparison DEFAULT_CAM = 0 cap = cv2.VideoCapture(DEFAULT_CAM) frame_view = plt.imshow(grab_frame(cap), cmap='gray') def update(i):
def slow_binarization(img, threshold=127):
    """Binarize an RGB image with a fixed threshold using explicit loops.

    Pixels whose grayscale intensity exceeds *threshold* become 255; all
    others stay 0.  Deliberately naive (pixel-by-pixel), kept as a readable
    reference implementation alongside the Otsu version used by the CLI.

    Args:
        img: RGB image array of shape (height, width, 3).
        threshold: grayscale cut-off intensity in [0, 255].

    Returns:
        uint8 binary image of shape (height, width, 1).
    """
    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    height, width = img.shape
    binary = np.zeros([height, width, 1], 'uint8')
    for row in range(height):
        for col in range(width):
            if img[row, col] > threshold:
                binary[row, col] = 255
    return binary


if __name__ == '__main__':
    if len(sys.argv) < 2:
        # No image path supplied: binarize the live webcam stream instead.
        start_cv_video(img_filter=otsu_binarization)
    else:
        try:
            img_path = sys.argv[1]
            img = cv2.imread(img_path, cv2.IMREAD_COLOR)
            # OpenCV loads BGR; convert to RGB before processing/display.
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            binarized_img = otsu_binarization(img)
            plt_show_img(binarized_img)
            # Length-guard before argv[2]: previously a missing flag raised
            # IndexError and printed a spurious "list index out of range".
            if len(sys.argv) > 2 and sys.argv[2] == '-h':
                plt_hist(img.ravel(), 'Color Histogram')
        except Exception as error:
            print(error)