def __init__(self, imdb):
    """Build the named filter pipelines for *imdb* and pre-crop them.

    :param imdb: image database object providing ``pipeline()`` factories
    """
    self.imdb = imdb
    # all filters
    self.filters = odict([
        ('Background Subtraction (mean)',
         imdb.pipeline().use_window().single_bgsub3(method='mean')),
        ('Background Subtraction (median)',
         imdb.pipeline().use_window().single_bgsub3(method='median')),
        ('Background Subtraction (min)',
         imdb.pipeline().use_window().single_bgsub3(method='min')),
        # BUG FIX: this entry previously used method='mean' — a copy-paste
        # slip that made "max" identical to the "mean" filter.
        ('Background Subtraction (max)',
         imdb.pipeline().use_window().single_bgsub3(method='max')),
        ('Original', imdb.pipeline()),
        ('Greyscale', imdb.pipeline().grey()),
        ('Edges', imdb.pipeline().grey().pipe(
            lambda im: cv2.Laplacian(im, cv2.CV_64F)).invert()),
        # https://www.learnopencv.com/non-photorealistic-rendering-using-opencv-python-c/
        ('Stylization', imdb.pipeline().pipe(
            lambda im: cv2.stylization(im, sigma_s=10, sigma_r=0.4))),
        ('Pencil Sketch', imdb.pipeline().pipe(lambda im: cv2.pencilSketch(
            im, sigma_s=10, sigma_r=0.1, shade_factor=0.02)[1])),
        ('Detail Enhance', imdb.pipeline().pipe(
            lambda im: cv2.detailEnhance(im, sigma_s=20, sigma_r=0.15))),
        ('Edge Preserving', imdb.pipeline().pipe(lambda im: cv2.edgePreservingFilter(
            im, flags=1, sigma_s=30, sigma_r=0.4))),
    ])
    for name in self.filters:
        self.filters[name].fake_crop()
def pencil_scatch(self):
    """Return a colour pencil-sketch rendering of ``self.image``.

    The greyscale sketch that cv2.pencilSketch also produces is discarded.
    """
    sketch_gray, sketch_color = cv2.pencilSketch(
        self.image, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
    return sketch_color
def pencil_sketch():
    """Render the loaded image as a greyscale pencil sketch and preview it.

    Reads the global source image ``imgs``, stores the full-size sketch in
    the global ``edited_image``, and draws a resized preview on
    ``img_canvas``.
    """
    global imgs
    global edited_image
    # NOTE: the original converted imgs BGR->RGB->BGR first, which is a
    # no-op round trip and has been dropped.
    dst_gray, dst_color = cv2.pencilSketch(
        imgs, sigma_s=(5 * 15), sigma_r=(5 / 50), shade_factor=0.04)
    # BUG FIX: dst_gray is single-channel, so the original
    # cv2.COLOR_BGR2RGB conversion would raise; use GRAY2RGB instead.
    dst_gray = cv2.cvtColor(dst_gray, cv2.COLOR_GRAY2RGB)
    im_pil = Image.fromarray(dst_gray)
    edited_image = im_pil  # keep the unresized sketch for saving
    width, height = im_pil.size
    # Fit the preview to the canvas by orientation.
    # Image.LANCZOS replaces Image.ANTIALIAS (removed in Pillow 10).
    if width > height:
        im_pil = im_pil.resize((550, 450), Image.LANCZOS)
    elif width < height:
        im_pil = im_pil.resize((450, 550), Image.LANCZOS)
    else:
        im_pil = im_pil.resize((500, 500), Image.LANCZOS)
    photo = ImageTk.PhotoImage(im_pil)
    img_canvas.image = photo  # keep a reference so Tk doesn't GC the image
    img_canvas.itemconfig(blank_image_init, image="")
    if width > height:
        img_canvas.create_image(310, 120, image=photo, anchor=N)
    else:
        img_canvas.create_image(310, 70, image=photo, anchor=N)
def pencil(img, sigma_s, sigma_r, shade_factor, color=True):
    """Pencil-sketch *img*; colour version when *color* is truthy, else grey."""
    sketch_gray, sketch_color = cv2.pencilSketch(
        img, sigma_s=sigma_s, sigma_r=sigma_r, shade_factor=shade_factor)
    return sketch_color if color else sketch_gray
def sek(self):
    """Load ``self.filename`` and show its grey and colour pencil sketches."""
    src = cv2.imread(self.filename)
    gray_sketch, colour_sketch = cv2.pencilSketch(
        src, sigma_s=40, sigma_r=0.4, shade_factor=0.02)
    cv2.imshow("Black&White Sketch", gray_sketch)
    cv2.imshow("Colored Sketch", colour_sketch)
def black_and_white_sketch(self):
    """Grab one video frame, pencil-sketch it (grey), downscale, and
    return the result as JPEG bytes."""
    frame_status, frame = self.video.read()
    # modify frame here
    frame, color_sketch = cv2.pencilSketch(
        frame, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
    frame = cv2.resize(
        frame, None,
        fx=self.ds_factor, fy=self.ds_factor,
        interpolation=cv2.INTER_AREA)
    _, jpeg = cv2.imencode(".jpg", frame)
    return jpeg.tobytes()
def on_ps_change(_):
    """Trackbar callback: re-render the pencil sketch with the current sliders."""
    s = cv.getTrackbarPos('sigma_s', 'pencil_sketch')
    r = cv.getTrackbarPos('sigma_r', 'pencil_sketch') * 0.01  # slider is in 1/100ths
    print('pencil_sketch: sigma_s, sigma_r =', s, r)
    sketch_gray, sketch_color = cv.pencilSketch(img, sigma_s=s, sigma_r=r)
    cv.imshow('pencil_sketch', sketch_color)
    cv.imshow('pencil_sketch_gray', sketch_gray)
def pencil_color(self):
    """Apply a colour pencil sketch whose strength follows slider 12.

    Returns 0 when there is no image or the slider is at zero (in which
    case the raw image is restored and redisplayed).
    """
    if self.raw_image is None:
        return 0
    slider = self.ui.horizontalSlider_12.value()
    if slider == 0:
        # Slider at zero: show the untouched image again.
        self.current_img = self.raw_image
        self.show_image()
        return 0
    sigma_r = slider * 0.05  # map slider position onto sigma_r's 0-1 range
    _, sketch_color = cv2.pencilSketch(
        self.current_img, sigma_s=50, sigma_r=sigma_r, shade_factor=0.04)
    self.current_img = sketch_color
def sketchColor(frame, param):
    """Read the image at path *frame* and return its colour pencil sketch,
    resized to 960x540. *param* is the shade factor."""
    print(frame)
    src = cv2.imread(frame)
    _, colored = cv2.pencilSketch(src, sigma_s=30, sigma_r=0.03, shade_factor=param)
    return cv2.resize(colored, (960, 540))
def applyAdvance():
    """Flask POST handler: apply the selected "advance" filter to the
    session image and render the output page.

    Reads the filter name from the form, looks the image up in the session
    by token, applies the effect with OpenCV, and stores the result back in
    the session. Renders index.html when the token or image is missing;
    implicitly returns None for unknown filters (as before).
    """
    global imageData, appliedFilterdata
    if request.method == "POST":
        # renamed from 'filter', which shadowed the builtin
        filter_name = request.form['advance']
        try:
            token = request.form['token']
            imageData = session[token]["imageData"]
        except KeyError:
            # narrowed from a bare except: covers a missing form field
            # (BadRequestKeyError subclasses KeyError) and an unknown token
            return render_template("index.html")
        session[token]["Time"] = time.time()
        result = None
        if filter_name == 'Cartoonification':
            result = cv2.stylization(imageData, sigma_s=150, sigma_r=0.25)
        elif filter_name == 'Pencil Sketch':
            result, _ = cv2.pencilSketch(imageData, sigma_s=60, sigma_r=0.07,
                                         shade_factor=0.05)
        elif filter_name == 'Coloured Pencil Sketch':
            _, result = cv2.pencilSketch(imageData, sigma_s=60, sigma_r=0.07,
                                         shade_factor=0.05)
        elif filter_name == 'Oil Paint':
            result = cv2.xphoto.oilPainting(imageData, 7, 1)
        elif filter_name == 'Water Colour':
            result = cv2.stylization(imageData, sigma_s=60, sigma_r=0.6)
        if result is not None:
            appliedFilterdata = result
            session[token]["appliedFilterdata"] = appliedFilterdata
            return render_template("output.html", token=token)
def pencil_sketch(img):
    """Grey pencil sketch of *img*, returned as 3-channel BGR, with a
    fixed polygonal region blacked out."""
    sketch_gray, _ = cv2.pencilSketch(img, sigma_s=65, sigma_r=0.1, shade_factor=0.03)
    sketch_bgr = cv2.cvtColor(sketch_gray, cv2.COLOR_GRAY2BGR)
    mask_poly = np.array(
        [[(0, 0), (224, 168), (276, 0), (640, 0), (640, 480), (0, 480)]],
        dtype=np.int32)
    cv2.fillPoly(sketch_bgr, mask_poly, (0, 0, 0))
    return sketch_bgr
def bw_pencil(img_name, load_folder, save_folder):
    """Save a black-and-white pencil-sketch copy of an image.

    :param img_name: file name of the image
    :param load_folder: directory to read the image from
    :param save_folder: root directory; output goes in its 'bw_pencil' subfolder
    :return: path of the written sketch
    """
    load_path = os.path.join(load_folder, img_name)
    print(load_path)
    save_path = os.path.join(save_folder, 'bw_pencil', img_name)
    img = cv2.imread(load_path)
    dst_gray, dst_color = cv2.pencilSketch(img, sigma_s=60, sigma_r=0.07,
                                           shade_factor=0.05)
    # BUG FIX: this function previously wrote dst_color, but its name and
    # save folder ('bw_pencil') call for the greyscale sketch.
    cv2.imwrite(save_path, dst_gray)
    return save_path
def objDetection(img):
    """Detect objects in *img* and replace each detected region with its
    colour pencil-sketch rendering; returns the (mutated) image."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Run the cascade detector on the greyscale image.
    detections = objDetector.detectMultiScale(gray)
    for (x, y, w, h) in detections:
        # Sketch just the detected rectangle and paste it back in place.
        roi = img[y:y + h, x:x + w]
        _, sketch_color = cv2.pencilSketch(
            roi, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
        img[y:y + h, x:x + w] = sketch_color
    return img
def sketching(path, mode, sr, sf):
    """Replace the image at *path* with its pencil-sketch rendering.

    :param path: image file to overwrite in place
    :param mode: 0 for the greyscale sketch, 1 for the colour sketch
    :param sr: sigma_r passed to cv2.pencilSketch
    :param sf: shade_factor passed to cv2.pencilSketch
    :raises ValueError: for any other *mode*
    """
    img = cv2.imread(path)
    dst_gray, dst_color = cv2.pencilSketch(img, sigma_s=60, sigma_r=sr,
                                           shade_factor=sf)
    if mode == 0:
        res = dst_gray
    elif mode == 1:
        res = dst_color
    else:
        # BUG FIX: an invalid mode previously left `res` unbound, raising
        # NameError only after the original file had been deleted.
        raise ValueError(f"mode must be 0 (gray) or 1 (color), got {mode!r}")
    os.remove(path)
    cv2.imwrite(path, res)
def transform(self, renderer_type_id):
    """Apply the filter identified by *renderer_type_id* to the current
    image and refresh the view.

    Errors are caught and their traceback printed, so a failed transform
    never propagates to the caller (same policy as before).
    """
    print("transform {}".format(renderer_type_id))
    try:
        src_image = self.get_opencv_image()
        self.transformed_image = None
        # BUG FIX: was `src_image.any() != None`, which is always truthy
        # (.any() returns a bool) and raises AttributeError when no image
        # is loaded; test for None explicitly instead.
        if src_image is not None:
            if renderer_type_id == MainView.EdgePreserveSmoothingByNormalizedConvolutionFilter:
                self.transformed_image = cv2.edgePreservingFilter(
                    src_image, flags=1)
            elif renderer_type_id == MainView.EdgePreserveSmoothingByRecursiveFilter:
                self.transformed_image = cv2.edgePreservingFilter(
                    src_image, flags=2)
            elif renderer_type_id == MainView.DetailEnhancement:
                self.transformed_image = cv2.detailEnhance(src_image)
            elif renderer_type_id == MainView.MonochromePencilSketch:
                self.transformed_image, _ = cv2.pencilSketch(
                    src_image, sigma_s=10, sigma_r=0.1, shade_factor=0.03)
            elif renderer_type_id == MainView.ColorPencilSketch:
                _, self.transformed_image = cv2.pencilSketch(
                    src_image, sigma_s=10, sigma_r=0.1, shade_factor=0.03)
            elif renderer_type_id == MainView.Stylization:
                self.transformed_image = cv2.stylization(src_image)
        # BUG FIX: was `self.transformed_image.all() != None`, which raised
        # AttributeError for unknown renderer ids; only update the view
        # when a transform actually happened.
        if self.transformed_image is not None:
            self.set_opencv_image(self.transformed_image)
            self.update()
    except Exception:  # narrowed from a bare except; still logs and swallows
        traceback.print_exc()
def sketch(frame, param):
    """Read the image at path *frame* and return its greyscale pencil
    sketch, resized to 960x540. *param* is the shade factor."""
    print(frame)
    src = cv2.imread(frame)
    gray_sketch, _ = cv2.pencilSketch(src, sigma_s=30, sigma_r=0.06,
                                      shade_factor=param)
    return cv2.resize(gray_sketch, (960, 540))
def main(args):
    """Pencil-sketch ``args.fname_input``, optionally show a side-by-side
    preview, and write the result to ``args.fname_output``."""
    src = cv2.imread(args.fname_input)
    smoothed = cv2.edgePreservingFilter(src)
    sketch, _ = cv2.pencilSketch(smoothed, sigma_r=0.09, shade_factor=0.05)
    if args.visualize:
        # Convert to 3 channels so the sketch can sit next to the original.
        sketch = cv2.cvtColor(sketch, cv2.COLOR_GRAY2RGB)
        side_by_side = np.hstack((src, sketch))
        cv2.imshow('Pencil sketch', side_by_side)
        cv2.waitKey(0)
    cv2.imwrite(args.fname_output, sketch)
def pencile(self, sigma_s=50, sigma_r=0.15, shade_factor=0.04):
    '''Render self.img as pencil drawings and display both variants.

    sigma_s: sliding-window size (0 ~ 200)
    sigma_r: colour dissimilarity (0 ~ 1.0; larger values give larger
             regions of similar colour after filtering)
    shade_factor: lighting factor (0 ~ 0.1) controlling image brightness;
                  larger is brighter
    '''
    sketch_gray, sketch_color = cv2.pencilSketch(
        self.img, sigma_s=sigma_s, sigma_r=sigma_r, shade_factor=shade_factor)
    self.showImage("pencileGray.png", sketch_gray)
    self.showImage("pencileColor.png", sketch_color)
def cartoon(image):
    """Show (blocking) and return a 960x540 greyscale pencil sketch of the
    image at path *image*."""
    print(image)
    src = cv2.imread(image)
    sketch_gray, _ = cv2.pencilSketch(src, sigma_s=30, sigma_r=0.05,
                                      shade_factor=0.02)
    sketch_gray = cv2.resize(sketch_gray, (960, 540))
    cv2.imshow("Frame", sketch_gray)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return sketch_gray
def colored_sketch_filter(self, path):
    """Save a colour pencil-sketch copy of the image at *path* into the
    'uploads' directory under a generated name.

    :param path: source image path
    :return: the generated file name of the written sketch
    """
    image = cv.imread(path)
    _, res = cv.pencilSketch(image, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
    # BUG FIX: split('.')[1] returns the wrong segment when the path
    # contains extra dots (e.g. 'a.b/c.jpg'); take the last one instead.
    file_extension = path.rsplit('.', 1)[-1]
    edited_image_name = generate_imagename(file_extension)
    cv.imwrite(os.path.join('uploads', edited_image_name), res)
    return edited_image_name
def pencilSketchModeFunction(wished_image, in_color):
    """Pencil-sketch *wished_image*; colour output when ``in_color == True``,
    greyscale otherwise."""
    gray_result, dst_result = cv2.pencilSketch(
        wished_image, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
    # Explicit == True kept to preserve the original's exact semantics.
    return dst_result if in_color == True else gray_result
def im_filter(im_bgr, filter_name, value):
    """Apply the named filter to an image.

    :param im_bgr: BGR image
    :param filter_name: one of "pencil_gray", "pencil_color", "stylize",
        "detail_enhance", "edge_preserve"
    :param value: strength setting (0 ~ 10); 0 returns the input unchanged
    :return: filtered BGR image (0 when im_bgr is None, as before)
    """
    if im_bgr is None:
        return 0
    if value == 0:
        return im_bgr
    value = value * 0.05  # map the 0~10 UI value onto sigma_r's 0~1 range
    if filter_name == "pencil_gray":
        # pencil greyscale filter (converted back to 3-channel BGR)
        im_gray, im_color = cv2.pencilSketch(im_bgr, sigma_s=50, sigma_r=value,
                                             shade_factor=0.04)
        im_new = cv2.cvtColor(im_gray, cv2.COLOR_GRAY2BGR)
    elif filter_name == "pencil_color":
        # pencil colour filter
        im_gray, im_new = cv2.pencilSketch(im_bgr, sigma_s=50, sigma_r=value,
                                           shade_factor=0.04)
    elif filter_name == "stylize":
        # stylization filter
        im_new = cv2.stylization(im_bgr, sigma_s=50, sigma_r=value)
    elif filter_name == "detail_enhance":
        # detail-enhancement filter
        im_new = cv2.detailEnhance(im_bgr, sigma_s=50, sigma_r=value)
    elif filter_name == "edge_preserve":
        # edge-preserving smoothing
        im_new = cv2.edgePreservingFilter(im_bgr, flags=1, sigma_s=50, sigma_r=value)
    else:
        # BUG FIX: an unknown filter name previously raised
        # UnboundLocalError at the return; return the input unchanged.
        im_new = im_bgr
    return im_new
def photo_to_color_pencil(img, sigma_s=60, sigma_r=0.07, shade_factor=0.05, color=True):
    """Save a pencil-sketch rendering of *img* (colour or black & white).

    sigma_s (1 - 200): size of the neighbourhood.
    sigma_r (0 - 1): how dissimilar colours within the neighbourhood are
        averaged; larger values give larger regions of constant colour.
    shade_factor (0 - 0.1): scaling of the output intensity; higher is brighter.
    """
    # BUG FIX: cv2.pencilSketch's positional parameters after src are the
    # optional OUTPUT arrays (dst1, dst2), so passing the sigmas positionally
    # mis-binds them; they must be passed by keyword.
    dst_gray, dst_color = cv2.pencilSketch(img, sigma_s=sigma_s, sigma_r=sigma_r,
                                           shade_factor=shade_factor)
    if color:
        filename = f"IMG_3477_1_color_pencil_color_{sigma_s}_{sigma_r}_{shade_factor}.png"
        save_file_out(filename, dst_color)
    else:
        filename = f"IMG_3477_1_color_pencil_bnw_{sigma_s}_{sigma_r}_{shade_factor}.png"
        save_file_out(filename, dst_gray)
def pencil_sketch_color(img):
    """Return the colour pencil-sketch version of the passed-in image.

    sigma_s (1-200) sets the neighbourhood size; sigma_r (0-1) sets how
    dissimilar colours get averaged (larger -> flatter colour regions);
    shade_factor (0-0.1) scales the output brightness.
    """
    _, colored = cv2.pencilSketch(img, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
    return colored
def pencil_sketch(image, color=True):
    """Give an image a pencil-sketched look.

    :param image: original image in RGB format
    :param color: True for a colour pencil sketch, False for greyscale
    :return: image with a pencil-sketch visual effect
    """
    # sigma_s and sigma_r behave as in stylization; shade_factor (0 - 0.1)
    # scales the output image intensity (higher -> brighter).
    sketch_gray, sketch_color = cv2.pencilSketch(
        image, sigma_s=60, sigma_r=0.04, shade_factor=0.1)
    return sketch_color if color else sketch_gray
def cv2_filter(img, flag):
    """Apply the OpenCV effect named by *flag* to *img*.

    'None' returns the image unchanged; an unrecognised flag yields None
    (falls through), matching the original behaviour.
    """
    if flag == 'None':
        return img
    if flag == 'Pencil':
        _, colored = cv2.pencilSketch(img, sigma_s=50, sigma_r=0.15,
                                      shade_factor=0.04)
        return colored
    if flag == 'Style':
        return cv2.stylization(img, sigma_s=50, sigma_r=0.15)
    if flag == 'Detail':
        return cv2.detailEnhance(img, sigma_s=50, sigma_r=0.15)
    if flag == 'Edge':
        return cv2.edgePreservingFilter(img, flags=1, sigma_s=50, sigma_r=0.15)
    if flag == 'udinverse':
        return img[::-1]  # upside-down (row-reversed)
    if flag == 'lrinverse':
        return cv2.flip(img, flipCode=1)  # left-right mirror
def main():
    """Interactive demo: trackbars drive sigma_s / sigma_r / shade_factor
    of a live colour pencil-sketch preview. ESC quits."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--src')
    args = parser.parse_args()

    src = cv2.imread(args.src)
    h, w = src.shape[:2]
    # Work on a half-size copy for responsiveness.
    src = cv2.resize(src, (int(0.5 * w), int(0.5 * h)),
                     interpolation=cv2.INTER_AREA)

    win = "effect"
    cv2.namedWindow(win, cv2.WINDOW_NORMAL)
    for bar, maximum, start in (('sigmaS', 200, 25),
                                ('sigmaR', 10, 2),
                                ('shade', 10, 1)):
        cv2.createTrackbar(bar, win, 0, maximum, nothing)
        cv2.setTrackbarPos(bar, win, start)

    while True:
        if cv2.waitKey(1) & 0xFF == 27:  # ESC
            break
        s = cv2.getTrackbarPos('sigmaS', win)
        r = cv2.getTrackbarPos('sigmaR', win)
        shade = cv2.getTrackbarPos('shade', win)
        # Sliders are integers; rescale sigmaR to 0-1 and shade to 0-0.1.
        _, sketch_color = cv2.pencilSketch(
            src, sigma_s=s, sigma_r=r * 0.1, shade_factor=shade * 0.01)
        cv2.imshow(win, sketch_color)
def get_image_converted(self, path_import, image_name, path):
    """Load an image, build cartoon (stylization) and pencil-sketch
    renderings, and write both as JPEGs into *path*.

    :param path_import: source image path
    :param image_name: base name used for the output files
    :param path: output directory
    """
    # Import image
    print(path_import)
    color_image = cv2.imread(path_import)
    # Cartoonify
    cartoon_image = cv2.stylization(color_image, sigma_s=150, sigma_r=0.01)
    # Pencilfy (pencilSketch returns the grey sketch first, then the colour one)
    sketch_gray, sketch_color = cv2.pencilSketch(
        color_image, sigma_s=60, sigma_r=0.5, shade_factor=0.01)
    # Export result images
    img_cartoon_url = ''.join([image_name, '_cartoon', '.jpg'])
    img_pencil_url = ''.join([image_name, '_pencil', '.jpg'])
    # BUG FIX: the stylization result was computed but never written — the
    # grey pencil sketch was being saved under the "_cartoon" name instead.
    cv2.imwrite(os.path.join(path, img_cartoon_url), cartoon_image)
    cv2.imwrite(os.path.join(path, img_pencil_url), sketch_color)
for i in range(hen): for j in range(hen): pixel = im[i, j] r += pixel[0] g += pixel[1] b += pixel[2] r = r/(hen**2) g = g/(hen**2) b = b/(hen**2) if sum(map(abs, [r - g, g - b, b - r ]) ) < 12. : print('モノクロの可能性があります、スキップします') continue gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) blur = cv2.GaussianBlur(im, ksize=(21, 21), sigmaX=0, sigmaY=0) inv = 255 - blur dst_gray, dst_color = cv2.pencilSketch(im, sigma_s=250, sigma_r=0.07, shade_factor=0.05) th3 = cv2.adaptiveThreshold(dst_gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\ cv2.THRESH_BINARY,3,2) """ kernel = np.ones((1,1),np.uint8) rosion = cv2.morphologyEx(th3, cv2.MORPH_CLOSE, kernel) th2 = cv2.adaptiveThreshold(im, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 3) edges = cv2.blur(gray, (3, 3)) edges = gray edges = cv2.Canny(dst_gray, 100, 100, apertureSize=3) kernel = numpy.ones((3,3), dtype=numpy.float) / 12.0 edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR) shifted = cv2.pyrMeanShiftFiltering(im, 5, 100) shifted = cv2.cvtColor(shifted,cv2.COLOR_BGR2GRAY) result = cv2.subtract(shifted, edges) invedges = 255 - edges
raise RuntimeError( f"{real_path} is not a valid image type. It could be damaged, corrupted or file is empty." ) base_name = os.path.basename(real_path) directory = os.path.dirname(real_path) sketch_filename = "{}_{}".format(PREFIX, base_name) sketch_path = os.path.join(directory, sketch_filename) cv2.namedWindow(sketch_filename, cv2.WINDOW_NORMAL) cv2.setWindowProperty(sketch_filename, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN) cv2.setWindowProperty(sketch_filename, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL) sk_gray, sk_color = cv2.pencilSketch(cv2.imread(source), sigma_s=SIGMA_S, sigma_r=SIGMA_R, shade_factor=SHADE_FACTOR) cv2.imwrite(sketch_path, sk_gray) print("sketch version of your image written to: {}".format(sketch_path)) print( "DO NOT CLOSE THE WINDOW....using your keyboard press any key to exit gracefully" ) cv2.imshow(sketch_filename, cv2.imread(sketch_path)) cv2.waitKey(0) cv2.destroyAllWindows() print("good bye...") exit(0)
def sketch_img(img):
    """Smooth and detail-enhance *img*, then return its greyscale pencil sketch."""
    smoothed = cv2.edgePreservingFilter(img, flags=1, sigma_s=60, sigma_r=0.4)
    enhanced = cv2.detailEnhance(smoothed, sigma_s=10, sigma_r=0.15)
    sketch_gray, _ = cv2.pencilSketch(enhanced, sigma_s=60, sigma_r=0.07,
                                      shade_factor=0.05)
    return sketch_gray
M = cv2.getRotationMatrix2D(center=(w // 2, h // 2), angle=theta, scale=1.0) M[0, 2] += x M[1, 2] += y img = cv2.warpAffine(img, M=M, dsize=(w, h)) img = cv2.GaussianBlur(img, ksize=(9, 9), sigmaX=0) ''' sigma_s: Range between 0 to 200. Default 60. sigma_r: Range between 0 to 1. Default 0.07. shade_factor: Range between 0 to 0.1. Default 0.02. ''' gray, color = cv2.pencilSketch(img, sigma_s=60, sigma_r=0.05, shade_factor=0.015) cv2.imshow('gray', gray) # cv2.imshow('color', color) out.write(cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)) if cv2.waitKey(1) == ord('q'): break out.release() cap.release()
OpenCV Non-Photorealistic Rendering Python Example Copyright 2015 by Satya Mallick <*****@*****.**> ''' import cv2 # Read image im = cv2.imread("cow.jpg"); # Edge preserving filter with two different flags. imout = cv2.edgePreservingFilter(im, flags=cv2.RECURS_FILTER); cv2.imwrite("edge-preserving-recursive-filter.jpg", imout); imout = cv2.edgePreservingFilter(im, flags=cv2.NORMCONV_FILTER); cv2.imwrite("edge-preserving-normalized-convolution-filter.jpg", imout); # Detail enhance filter imout = cv2.detailEnhance(im); cv2.imwrite("detail-enhance.jpg", imout); # Pencil sketch filter imout_gray, imout = cv2.pencilSketch(im, sigma_s=60, sigma_r=0.07, shade_factor=0.05); cv2.imwrite("pencil-sketch.jpg", imout_gray); cv2.imwrite("pencil-sketch-color.jpg", imout); # Stylization filter cv2.stylization(im,imout); cv2.imwrite("stylization.jpg", imout);