def Cartoonify():
    image_name = input("Please enter the name of the image file that you want to process: ")  # User input for the name of the image file.
    image_directory = input("Please enter the directory that may contain the image: ")  # User input for the path of the image file.

    # This function searches for the desired file. You can narrow the search by
    # giving a parent directory; if you have no idea where the file is, just type
    # "/" and it will still be found, only slower.
    def find_the_image(file_name, directory_name):
        files_found = []
        for path, subdirs, files in os.walk(directory_name):
            for name in files:
                if file_name == name:
                    file_path = os.path.join(path, name)
                    files_found.append(file_path)
        print(files_found[0])
        return files_found[0]  # Return the path.

    image_path = Path(find_the_image(image_name, image_directory))  # Initialize the path of the image file.
    new_working_directory = image_path.parent  # Initialize the parent directory of the image path.
    os.chdir(new_working_directory)  # Change the working directory of the script to the parent directory of the image path.

    color_image = cv2.imread(str(image_path))  # Reuse the path already found instead of searching again.
    # cv2.imshow("my_image", color_image)
    # cv2.waitKey()
    # cv2.destroyAllWindows()

    cartoon_style_selection = input("This script currently has 2 styles. Please type 1 or 2. ")
    if cartoon_style_selection == "1":
        cartoon_image_style_1 = cv2.stylization(color_image, sigma_s=150, sigma_r=0.25)
        cv2.imshow('cartoon_1', cartoon_image_style_1)
        cv2.waitKey()
        cv2.destroyAllWindows()
    elif cartoon_style_selection == "2":
        cartoon_image_style_2 = cv2.stylization(color_image, sigma_s=60, sigma_r=0.5)
        cv2.imshow('cartoon_2', cartoon_image_style_2)
        cv2.waitKey()
        cv2.destroyAllWindows()
    else:
        print("Invalid style selection.")
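# A minimal alternative sketch for the file search above, assuming pathlib is
# acceptable: Path.rglob walks the directory tree much like os.walk, so the
# helper below is a hypothetical drop-in for find_the_image, not part of the
# original script.
from pathlib import Path

def find_the_image_rglob(file_name, directory_name):
    # Return the first match found anywhere under directory_name.
    for candidate in Path(directory_name).rglob(file_name):
        return str(candidate)
    raise FileNotFoundError(f"{file_name} not found under {directory_name}")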
def __init__(self):
    img = io.imread("original_image.jpg")

    # 1) Edges
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 5)
    edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                  cv2.THRESH_BINARY, 9, 7)

    # 2) Color
    color = cv2.bilateralFilter(img, 10, 300, 300)
    RGB_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # 3) Cartoon
    cartoon = cv2.bitwise_and(color, color, mask=edges)
    cartoon_img = cv2.cvtColor(cartoon, cv2.COLOR_BGR2RGB)
    cartoon_image = cv2.stylization(RGB_img, sigma_s=150, sigma_r=0.25)

    # Resizing
    resize1 = cv2.resize(RGB_img, (600, 450))
    resize2 = cv2.resize(cartoon_img, (600, 450))
    resize3 = cv2.resize(cartoon_image, (600, 450))
    self.resize1 = resize1
    self.resize2 = resize2
    self.resize3 = resize3
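# A usage sketch for the class above, assuming it has been instantiated as
# `proc` and that matplotlib is available: show the resized original, the
# edge-masked cartoon and the stylized cartoon side by side. The names here
# are illustrative, not from the original code.
import matplotlib.pyplot as plt

def show_results(proc):
    titles = ["Original (RGB)", "Edge-masked cartoon", "cv2.stylization"]
    for i, img in enumerate((proc.resize1, proc.resize2, proc.resize3)):
        plt.subplot(1, 3, i + 1)
        plt.imshow(img)
        plt.title(titles[i])
        plt.axis("off")
    plt.show()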
async def toonize(client, message):
    tgi = await edit_or_reply(
        message, "`Using My Toonize Algo To Make Him A Cartoon!`")
    img = await convert_to_image(message, client)
    if not img:
        await tgi.edit("`Reply to a valid media first.`")
        return
    if not os.path.exists(img):
        await tgi.edit("`Invalid Media!`")
        return
    imagez = cv2.imread(img)
    cartoon_image_style_2 = cv2.stylization(imagez, sigma_s=60, sigma_r=0.5)
    file_name = "Tooned.png"
    cv2.imwrite(file_name, cartoon_image_style_2)
    if message.reply_to_message:
        await client.send_photo(
            message.chat.id,
            photo=file_name,
            reply_to_message_id=message.reply_to_message.message_id,
        )
    else:
        await client.send_photo(message.chat.id, photo=file_name)
    await tgi.delete()
    for files in (file_name, img):
        if files and os.path.exists(files):
            os.remove(files)
def stylization(video_filter, frame):
    width, height = 400, 400
    frame = cv2.resize(frame, (width, height))
    dst = cv2.stylization(frame, sigma_s=60, sigma_r=0.07)
    return dst
def enmascaradoPiola(img):
    # Step one: smooth the background and the interior of the leaves while preserving edges
    imgBlur = cv.edgePreservingFilter(img, flags=1, sigma_s=50, sigma_r=0.4)
    # Distort the colors while keeping only the edges, giving a watercolor effect that enhances the border
    imgAc = cv.stylization(imgBlur, sigma_s=200, sigma_r=0.7)
    # Convert to grayscale
    imgGray = cv.cvtColor(imgAc, cv.COLOR_BGR2GRAY)
    # Filter to smooth the background further
    imgG = cv.GaussianBlur(imgGray, (5, 5), 0.5)
    # Expand edges by dilation
    ee = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    imgDil = cv.morphologyEx(imgG, cv.MORPH_DILATE, ee, iterations=1)
    # Averaging filter to remove internal edges of the leaf caused by its texture
    img1Fil = cv.boxFilter(imgDil, -1, (5, 5))
    # Binarize with an adaptive threshold
    img1Bin = cv.adaptiveThreshold(img1Fil, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv.THRESH_BINARY, 11, 1)
    # Find and draw contours
    imgContornos = img.copy()
    contours, _ = cv.findContours(img1Bin, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)  # External contours
    cv.drawContours(imgContornos, contours, -1, (0, 0, 255), 2)
    return contours
def art4(img):
    background = BGRA('art/back/back2.png')
    grayimg = graymask(img)
    grayimg[:, :, :3] = cv2.stylization(
        grayimg[:, :, :3], sigma_s=60, sigma_r=0.6)
    return bm.normal(background, grayimg)
def helper():
    # Take the image path from the user
    file_path = filedialog.askopenfilename()
    # Read the image
    img = cv2.imread(file_path)
    # Resize the image
    img = imutils.resize(img, width=300)
    # Reduce noise with a bilateral filter
    color = cv2.bilateralFilter(img, 6, 100, 100)
    # Make it non-realistic by stylizing
    img1 = cv2.stylization(color, sigma_s=600, sigma_r=0.4)
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Remove noise
    gray = cv2.medianBlur(gray, 5)
    # Get edges as an 8-bit mask
    edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                  cv2.THRESH_BINARY, 5, 5)
    # Add edges to the stylized image
    cartoon = cv2.bitwise_and(img1, img1, mask=edges)
    # Show images
    # cv2.imshow("original", img)
    # cv2.imshow("color", color)
    # cv2.imshow("edges", edges)
    # cv2.imshow("stylized", img1)
    cv2.imshow("cartoon", cartoon)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
async def toonize(client, message):
    engine = message.Engine
    tgi = await edit_or_reply(message, engine.get_string("PROCESSING"))
    img = await convert_to_image(message, client)
    if not img:
        await tgi.edit(engine.get_string("NEEDS_REPLY").format("A Valid Media"))
        return
    if not os.path.exists(img):
        await tgi.edit(engine.get_string("INVALID_MEDIA"))
        return
    imagez = cv2.imread(img)
    cartoon_image_style_2 = cv2.stylization(imagez, sigma_s=60, sigma_r=0.5)
    file_name = "Tooned.png"
    cv2.imwrite(file_name, cartoon_image_style_2)
    if message.reply_to_message:
        await client.send_photo(
            message.chat.id,
            photo=file_name,
            reply_to_message_id=message.reply_to_message.message_id,
        )
    else:
        await client.send_photo(message.chat.id, photo=file_name)
    await tgi.delete()
    for files in (file_name, img):
        if files and os.path.exists(files):
            os.remove(files)
def water_color(path, wc_value):
    img = cv2.imread(path)
    res = cv2.stylization(img, sigma_s=60, sigma_r=wc_value)
    os.remove(path)
    cv2.imwrite(path, res)
async def toony(event):
    if not event.reply_to_msg_id:
        await event.reply("Reply to any Image.")
        return
    hmmu = await event.edit("`Converting Toonized Image..`")
    sed = await event.get_reply_message()
    if isinstance(sed.media, MessageMediaPhoto):
        img = await borg.download_media(sed.media, sedpath)
    elif "image" in sed.media.document.mime_type.split("/"):
        img = await borg.download_media(sed.media, sedpath)
    else:
        await event.edit("Reply To Image")
        return
    imagez = cv2.imread(img)
    cartoon_image_style_2 = cv2.stylization(imagez, sigma_s=60, sigma_r=0.5)  # Cartoonify process.
    # Save it
    file_name = "Tooned.png"
    ok = sedpath + "/" + file_name
    cv2.imwrite(ok, cartoon_image_style_2)
    # Upload it
    await borg.send_file(event.chat_id, ok)
    await hmmu.delete()
    # Remove all files
    for files in (ok, img):
        if files and os.path.exists(files):
            os.remove(files)
def stylization(img, sigma_s, sigma_r):
    output = img.copy()
    # Stylize only the first three (color) channels, leaving any extra channel
    # (e.g. alpha) untouched.
    output[:, :, :3] = cv2.stylization(output[:, :, :3], sigma_s=sigma_s, sigma_r=sigma_r)
    return output
def __init__(self, imdb):
    self.imdb = imdb
    # all filters
    self.filters = odict([
        ('Background Subtraction (mean)',
         imdb.pipeline().use_window().single_bgsub3(method='mean')),
        ('Background Subtraction (median)',
         imdb.pipeline().use_window().single_bgsub3(method='median')),
        ('Background Subtraction (min)',
         imdb.pipeline().use_window().single_bgsub3(method='min')),
        ('Background Subtraction (max)',
         imdb.pipeline().use_window().single_bgsub3(method='max')),
        ('Original', imdb.pipeline()),
        ('Greyscale', imdb.pipeline().grey()),
        ('Edges', imdb.pipeline().grey().pipe(
            lambda im: cv2.Laplacian(im, cv2.CV_64F)).invert()),
        # https://www.learnopencv.com/non-photorealistic-rendering-using-opencv-python-c/
        ('Stylization', imdb.pipeline().pipe(
            lambda im: cv2.stylization(im, sigma_s=10, sigma_r=0.4))),
        ('Pencil Sketch', imdb.pipeline().pipe(lambda im: cv2.pencilSketch(
            im, sigma_s=10, sigma_r=0.1, shade_factor=0.02)[1])),
        ('Detail Enhance', imdb.pipeline().pipe(
            lambda im: cv2.detailEnhance(im, sigma_s=20, sigma_r=0.15))),
        ('Edge Preserving', imdb.pipeline().pipe(lambda im: cv2.edgePreservingFilter(
            im, flags=1, sigma_s=30, sigma_r=0.4))),
    ])
    for name in self.filters:
        self.filters[name].fake_crop()
def stylize(self, sigma_s=50, sigma_r=0.15):
    '''
    Produce a stylized (non-photorealistic) version of the image.
    The parameters are similar to those of the pencil-sketch filter.
    '''
    out = cv2.stylization(self.img, sigma_s=sigma_s, sigma_r=sigma_r)
    self.showImage("stylization.png", out)
def load_images_from_folder(folder, size, bandera):
    """
    Load the images, compress and standardize them, and apply effects.
    """
    images = []
    count = 0
    for filename in os.listdir(folder):
        img = cv2.imread(os.path.join(folder, filename))
        if img is not None:
            img = cv2.resize(img, size)
            img = img / 255  # this centers and standardizes the database; sometimes the mean is subtracted as well
            # print(filename)
            images.append(img)
            if bandera:
                # add effects to obtain a larger number of input samples
                img2 = cv2.stylization(img, sigma_s=150, sigma_r=0.25)
                images.append(img2)
                img2 = cv2.flip(img, 1)
                images.append(img2)
        if count == 810:
            break
        count = count + 1
    return images
def on_press_paint_button(self, instance):
    self.imagePath = os.path.join('Paint', 'Selfie.png')
    self.cam.export_to_png(self.imagePath)
    img = cv2.imread(self.imagePath)
    res = cv2.stylization(img, sigma_s=30, sigma_r=0.3)
    cv2.imwrite(os.path.join('Paint', 'painting.png'), res)
    return 0
def water_color(img_name, load_folder, save_folder):
    load_path = os.path.join(load_folder, img_name)
    print(load_path)
    save_path = os.path.join(save_folder, 'water_color', img_name)
    img = cv2.imread(load_path)
    res = cv2.stylization(img, sigma_s=60, sigma_r=0.6)
    cv2.imwrite(save_path, res)
    return save_path
def segmentation(detection_graph):
    vs = WebcamVideoStream(0, 1280, 720).start()
    resize_ratio = 1.0 * 513 / max(vs.real_width, vs.real_height)
    target_size = (int(resize_ratio * vs.real_width),
                   int(resize_ratio * vs.real_height))
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    fps = FPS2(5).start()
    # background_image = cv2.imread('b.jpg')
    # resized_background_image = cv2.resize(background_image, target_size)  # (384, 513)
    print("Starting...")
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=config) as sess:
            while vs.isActive():
                image = cv2.resize(vs.read(), target_size)
                batch_seg_map = sess.run(
                    'SemanticPredictions:0',
                    feed_dict={'ImageTensor:0': [cv2.cvtColor(image, cv2.COLOR_BGR2RGB)]})
                # visualization
                seg_map = batch_seg_map[0]
                seg_map[seg_map != 15] = 0  # keep only class 15 (person)
                # bg_copy = resized_background_image.copy()
                mask = (seg_map == 15)
                # car = render(image)
                car = cv2.stylization(image, sigma_s=60, sigma_r=0.07)
                # gray0 = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                # gray0 = cv2.cvtColor(gray0, cv2.COLOR_GRAY2RGB)
                print(car.shape)
                car[mask] = image[mask]
                # create_colormap(seg_map).astype(np.uint8)
                seg_image = np.stack(
                    (seg_map, seg_map, seg_map), axis=-1).astype(np.uint8)
                gray = cv2.cvtColor(seg_image, cv2.COLOR_BGR2GRAY)
                thresh = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)[1]
                cnts, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)
                try:
                    cv2.drawContours(
                        car, cnts, -1,
                        (randint(0, 255), randint(0, 255), randint(0, 255)), 2)
                except Exception:
                    pass
                # ir = cv2.resize(car, (vs.real_width, vs.real_height))
                ir = car
                cv2.imshow('segmentation', ir)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
                fps.update()
    fps.stop()
    vs.stop()
    cv2.destroyAllWindows()
def stylize(self):
    if self.raw_image is None:
        return 0
    if self.ui.horizontalSlider_2.value() == 0:
        self.current_img = self.raw_image
        self.show_image()
        return 0
    value = self.ui.horizontalSlider_2.value() * 0.05
    self.current_img = cv2.stylization(self.current_img, sigma_s=50, sigma_r=value)
def applyAdvance():
    global imageData, appliedFilterdata
    if request.method == "POST":
        filter = request.form['advance']
        try:
            token = request.form['token']
            imageData = session[token]["imageData"]
        except Exception:
            return render_template("index.html")
        session[token]["Time"] = time.time()
        if filter == 'Cartoonification':
            cartoon_image = cv2.stylization(imageData, sigma_s=150, sigma_r=0.25)
            appliedFilterdata = cartoon_image
            session[token]["appliedFilterdata"] = appliedFilterdata
            return render_template("output.html", token=token)
        if filter == 'Pencil Sketch':
            dst_gray, dst_color = cv2.pencilSketch(imageData, sigma_s=60, sigma_r=0.07,
                                                   shade_factor=0.05)
            appliedFilterdata = dst_gray
            session[token]["appliedFilterdata"] = appliedFilterdata
            return render_template("output.html", token=token)
        if filter == 'Coloured Pencil Sketch':
            dst_gray, dst_color = cv2.pencilSketch(imageData, sigma_s=60, sigma_r=0.07,
                                                   shade_factor=0.05)
            appliedFilterdata = dst_color
            session[token]["appliedFilterdata"] = appliedFilterdata
            return render_template("output.html", token=token)
        if filter == 'Oil Paint':
            dst = cv2.xphoto.oilPainting(imageData, 7, 1)
            appliedFilterdata = dst
            session[token]["appliedFilterdata"] = appliedFilterdata
            return render_template("output.html", token=token)
        if filter == 'Water Colour':
            res = cv2.stylization(imageData, sigma_s=60, sigma_r=0.6)
            appliedFilterdata = res
            session[token]["appliedFilterdata"] = appliedFilterdata
            return render_template("output.html", token=token)
def dream(self):
    image = ImageQt.fromqimage(self.will_change_img)
    img = self.img_to_cv(image)
    element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (6, 6))
    b, g, r = cv2.split(img)
    rbr_img = cv2.merge((r, b, g))  # shuffle the channels for a surreal color cast
    morphology = cv2.morphologyEx(rbr_img, cv2.MORPH_OPEN, element)
    canvas = cv2.normalize(morphology, None, 20, 255, cv2.NORM_MINMAX)
    new_image = cv2.stylization(canvas, sigma_s=60, sigma_r=0.6)
    self.update_img(new_image)
def sketch_v3(image: "Image.Image | str", s_sigma=10, r_sigma=0.1) -> Image.Image:
    if isinstance(image, str):
        image = Image.open(image)
    image = np.array(image)
    # sketch_gray, sketch_color = cv2.pencilSketch(image, sigma_s=10, sigma_r=0.07, shade_factor=0.05)
    stylize = cv2.stylization(image, sigma_s=s_sigma, sigma_r=r_sigma)
    return Image.fromarray(stylize)
def waterColour(frame, param):
    print(frame)
    img = cv2.imread(frame)
    # img = cv2.GaussianBlur(img, (3, 3), cv2.BORDER_DEFAULT)
    # img = cv2.resize(img, (160, 200))
    res = cv2.stylization(img, sigma_s=40, sigma_r=param)
    res = cv2.resize(res, (160, 200))
    # sharpen_kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
    # res = cv2.filter2D(res, -1, sharpen_kernel)
    return res
def watercolor(img):
    """
    Convert the passed-in image to a watercolor look.
    Returns the watercolor version of the image.
    """
    # Make the image look like a watercolor painting.
    # sigma_s controls the size of the neighborhood: range 1 - 200.
    # sigma_r controls how dissimilar colors within the neighborhood will be averaged;
    # a larger sigma_r results in large regions of constant color: range 0 - 1.
    res = cv2.stylization(img, sigma_s=60, sigma_r=0.6)
    return res
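# A small sketch of the sigma_r behaviour described in the comments above,
# under the assumption that "leaf.jpg" is any readable test image (the file
# name is only a placeholder): smaller sigma_r keeps more color detail, larger
# values merge neighbouring colors into flat washes.
import cv2

img = cv2.imread("leaf.jpg")
for sigma_r in (0.2, 0.45, 0.8):
    out = cv2.stylization(img, sigma_s=60, sigma_r=sigma_r)
    cv2.imwrite(f"watercolor_sigma_r_{sigma_r}.jpg", out)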
def Cartoon(image_color):
    """
    sigma_s controls how much the image is smoothed - the larger its value,
    the smoother the image, but the slower the computation.
    sigma_r matters when we want to preserve edges while smoothing the image.
    It causes only very similar colors to be averaged (in other words smoothed);
    very different colors remain unchanged.
    """
    output_image = cv2.stylization(image_color, sigma_s=100, sigma_r=0.3)
    return output_image
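# A quick sketch illustrating the note above about sigma_s (the file name
# "portrait.jpg" is only a placeholder): a larger sigma_s smooths over a wider
# neighbourhood, so the output gets flatter and the call gets slower.
import time
import cv2

image_color = cv2.imread("portrait.jpg")
for sigma_s in (20, 100, 200):
    start = time.perf_counter()
    out = cv2.stylization(image_color, sigma_s=sigma_s, sigma_r=0.3)
    print(f"sigma_s={sigma_s}: {time.perf_counter() - start:.2f}s")
    cv2.imwrite(f"cartoon_sigma_s_{sigma_s}.jpg", out)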
def cartoon_filter(self, path):
    image = cv.imread(path)
    res = cv.stylization(image, sigma_s=150, sigma_r=0.25)
    file_extension = os.path.splitext(path)[1].lstrip('.')
    edited_image_name = generate_imagename(file_extension)
    cv.imwrite(os.path.join('uploads', edited_image_name), res)
    return edited_image_name
def convert(inputfile, outputfile):
    img = cv.imread(inputfile)
    img = cv.GaussianBlur(img, (3, 3), 0)
    # hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
    # h, s, v = cv.split(hsv)
    # lim = 255 - 50
    # v[v > lim] = 255
    # v[v <= lim] += 50
    # final_hsv = cv.merge((h, s, v))
    # img = cv.cvtColor(final_hsv, cv.COLOR_HSV2BGR)
    cartoon_image = cv.stylization(img, sigma_s=60, sigma_r=0.25)
    dst = cv.detailEnhance(cartoon_image, sigma_s=30, sigma_r=0.1)
    cv.imwrite(outputfile, dst)
def water_color(image):
    """
    Convert an image to look as though it has been painted with watercolors.

    :param image: Original image in RGB format
    :type image: numpy.ndarray
    :return: Image with a watercolor visual effect
    :rtype: numpy.ndarray
    """
    # sigma_s controls the size of the neighborhood. Range 1 - 200.
    # sigma_r controls how dissimilar colors within the neighborhood will be
    # averaged. A larger sigma_r results in large regions of constant color.
    # Range 0 - 1.
    res = cv2.stylization(image, sigma_s=60, sigma_r=0.8)
    return res
def photo_cartoonify(request, id):
    photo = get_object_or_404(Photo, id=id)
    image = os.path.join(settings.BASE_DIR + '/' + photo.file.url)
    img = cv2.imread(image, 1)
    if request.method == 'GET':
        sigma_s = int(request.GET['slider1'])
        sigma_r = int(request.GET['slider2']) / 100
        print("value is")
        print(sigma_s)
        cartoon_image = cv2.stylization(img, sigma_s=sigma_s, sigma_r=sigma_r)
        # cartoon_image = cv2.pencilSketch(img, sigma_s=60, sigma_r=0.5, shade_factor=0.02)
        fileName = settings.MEDIA_ROOT + '/output/' + 'cartoonImage' + str(uuid.uuid4()) + '.jpg'
        file = cv2.imwrite(fileName, cartoon_image)
        output = Cartoonify.objects.create(file=fileName)
        output.save()
    return redirect('home:photoView')
def defParametrosStylization(img):
    cv.namedWindow("control")
    cv.createTrackbar("sigma_s", "control", 0, 200, track_change)
    cv.createTrackbar("sigma_r", "control", 0, 100, track_change)
    while True:
        sigma_s = param_trackBar("sigma_s", "control")
        sigma_r = param_trackBar("sigma_r", "control", delta=0.01)
        output = cv.stylization(img, sigma_s=sigma_s, sigma_r=sigma_r)
        cv.imshow("resultado", output)
        cv.imshow("original", img)
        key = cv.waitKey(1) & 0xFF
        if key == ord('q'):
            break
    cv.destroyAllWindows()
    return output
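# defParametrosStylization relies on two helpers that are not shown here; the
# versions below are only an assumed sketch of what they might look like:
# track_change as a no-op trackbar callback (the loop polls the values anyway)
# and param_trackBar reading a trackbar position, optionally scaled by `delta`.
import cv2 as cv

def track_change(value):
    # OpenCV requires a callback when creating a trackbar; polling makes it a no-op here.
    pass

def param_trackBar(name, window, delta=1):
    # getTrackbarPos returns an int; delta converts it to the needed scale
    # (e.g. delta=0.01 maps the 0..100 slider to 0.0..1.0 for sigma_r).
    return cv.getTrackbarPos(name, window) * delta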
def live():
    video = cv.VideoCapture(0, cv.CAP_DSHOW)
    video.set(cv.CAP_PROP_FRAME_WIDTH, 1280)
    video.set(cv.CAP_PROP_FRAME_HEIGHT, 720)
    while True:
        ret, frame = video.read()
        frame = cv.GaussianBlur(frame, (5, 5), 0)
        # hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
        # h, s, v = cv.split(hsv)
        # lim = 255 - 50
        # v[v > lim] = 255
        # v[v <= lim] += 50
        # final_hsv = cv.merge((h, s, v))
        # img = cv.cvtColor(final_hsv, cv.COLOR_HSV2BGR)
        cartoon_image = cv.stylization(frame, sigma_s=60, sigma_r=0.25)
        dst = cv.detailEnhance(cartoon_image, sigma_s=30, sigma_r=0.1)
        cv.imshow('cartoon', dst)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    video.release()
    cv.destroyAllWindows()
def efecto12(self, frame):
    # im_color = cv2.pencilSketch(frame, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
    im_color = cv2.stylization(frame, sigma_s=60, sigma_r=0.07)
    return im_color
'''
OpenCV Non-Photorealistic Rendering Python Example
Copyright 2015 by Satya Mallick <*****@*****.**>
'''

import cv2

# Read image
im = cv2.imread("cow.jpg")

# Edge preserving filter with two different flags.
imout = cv2.edgePreservingFilter(im, flags=cv2.RECURS_FILTER)
cv2.imwrite("edge-preserving-recursive-filter.jpg", imout)

imout = cv2.edgePreservingFilter(im, flags=cv2.NORMCONV_FILTER)
cv2.imwrite("edge-preserving-normalized-convolution-filter.jpg", imout)

# Detail enhance filter
imout = cv2.detailEnhance(im)
cv2.imwrite("detail-enhance.jpg", imout)

# Pencil sketch filter
imout_gray, imout = cv2.pencilSketch(im, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
cv2.imwrite("pencil-sketch.jpg", imout_gray)
cv2.imwrite("pencil-sketch-color.jpg", imout)

# Stylization filter
imout = cv2.stylization(im)
cv2.imwrite("stylization.jpg", imout)