# NOTE: these snippets assume `import numpy as np` and OpenCV names imported
# directly, e.g. `from cv2 import Canny, GaussianBlur, cvtColor, ...`.


def toCanny(bw, gaussian):
    if gaussian == 0:
        # apertureSize (the Sobel kernel size) must be 3, 5 or 7 and has to be
        # passed by keyword: the fourth positional argument of cv2.Canny is
        # the output array, not the aperture.
        return Canny(bw, 200, 230, apertureSize=3)
    else:
        img1 = GaussianBlur(bw, (gaussian, gaussian), 0)
        # The original passed 453 positionally here, which is not a valid
        # aperture size; 3 is used instead.
        return Canny(img1, 10, 30, apertureSize=3)
def cv2_canny(np_array, low, high):
    from cv2 import Canny
    # Scale [0, 1] floats to 8-bit. Multiplying by 255 (not 256, as the
    # original did) keeps a pixel value of exactly 1.0 from wrapping to 0
    # in uint8.
    low = int(low * 255)
    high = int(high * 255)
    temp = (np_array * 255).astype(np.uint8)
    temp = Canny(temp, low, high)
    return temp.astype(np.float32) / 255
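# A minimal usage sketch for cv2_canny, assuming a float image in [0, 1];
# the random input below is illustrative only.
def _demo_cv2_canny():
    img = np.random.rand(64, 64).astype(np.float32)  # hypothetical input
    edges = cv2_canny(img, low=0.1, high=0.3)        # thresholds in [0, 1]
    assert edges.dtype == np.float32 and edges.max() <= 1.0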
def find_template(im_source, im_search, threshold=0.5, rgb=False, bgremove=False):
    '''
    Locate an image position with cv2.matchTemplate.
    Uses pixel matching to find one image inside another.

    Args:
        im_source(ndarray): source image to search in
        im_search(ndarray): template image to search for
        threshold: matches whose similarity falls below this value are ignored

    Returns:
        A dict with keys `result` (center point), `rectangle` (the four
        corners) and `confidence` (match score), or None if no match.

    Raises:
        IOError: on file read error
    '''
    # Adapted, with some modifications, from https://github.com/NetEaseGame/aircv
    method = TM_CCOEFF_NORMED
    if rgb:
        s_bgr = cvsplit(im_search)  # Blue, Green, Red
        i_bgr = cvsplit(im_source)
        weight = (0.3, 0.3, 0.4)
        resbgr = [0, 0, 0]
        for i in range(3):  # b, g, r
            resbgr[i] = matchTemplate(i_bgr[i], s_bgr[i], method)
        res = resbgr[0] * weight[0] + resbgr[1] * weight[1] + resbgr[2] * weight[2]
    else:
        s_gray = cvtColor(im_search, COLOR_BGR2GRAY)
        i_gray = cvtColor(im_source, COLOR_BGR2GRAY)
        # Edge extraction (implements the background-removal option)
        if bgremove:
            s_gray = Canny(s_gray, 100, 200)
            i_gray = Canny(i_gray, 100, 200)
        res = matchTemplate(i_gray, s_gray, method)
    w, h = im_search.shape[1], im_search.shape[0]

    min_val, max_val, min_loc, max_loc = minMaxLoc(res)
    top_left = max_loc
    if max_val < threshold:
        return None
    # calculate the middle point
    middle_point = (top_left[0] + w / 2, top_left[1] + h / 2)
    result = dict(
        result=middle_point,
        rectangle=(top_left,
                   (top_left[0], top_left[1] + h),
                   (top_left[0] + w, top_left[1]),
                   (top_left[0] + w, top_left[1] + h)),
        confidence=max_val)
    return result
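# A minimal usage sketch for find_template. The file names are hypothetical;
# any pair of images where the second appears inside the first will do.
def _demo_find_template():
    from cv2 import imread
    screen = imread('screen.png')   # hypothetical source image
    needle = imread('needle.png')   # hypothetical template
    hit = find_template(screen, needle, threshold=0.7, rgb=True)
    if hit:
        print('found at', hit['result'], 'confidence', hit['confidence'])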
def _find_edges(im):
    gray = cvtColor(im, COLOR_BGR2GRAY)
    # Apply histogram equalization - the parameters below work well
    clahe = createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    gray = clahe.apply(gray)
    gray = GaussianBlur(gray, (5, 5), 0)
    return Canny(gray, 75, 200)
def EdgeFilter(image, sigma=0.33):
    '''Edge Image Filter'''
    gray = convertQImageToMat(image)
    v = np.median(gray)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    canny = Canny(gray, lower, upper)
    return convertMatToQImage(canny)
def is_interesting(self, image):
    gray = cvtColor(image, COLOR_BGR2GRAY)
    gray = bilateralFilter(gray, 11, 17, 17)
    edged = Canny(gray, 30, 200)
    # The original summed edged**2, which wraps in uint8 so every edge pixel
    # (255) contributes exactly 1; counting non-zero pixels is equivalent
    # and says what it means.
    return np.count_nonzero(edged) >= 2500
def detect(image):
    """Detect marker from the camera image"""
    markers = []
    # Stage 1: Detect edges in image
    gray = cvtColor(image, COLOR_BGR2GRAY)
    clahe = createCLAHE(clipLimit=1, tileGridSize=(6, 6))
    cl1 = clahe.apply(gray)
    _, thresh = threshold(cl1, 60, 255, THRESH_OTSU)
    blurred = GaussianBlur(thresh, (5, 5), 0)
    edges = Canny(blurred, 75, 100)
    # Stage 2: Find contours (findContours returns (contours, hierarchy) in
    # OpenCV 4; the original assigned the whole tuple to `contours`)
    contours, _ = findContours(edges, RETR_TREE, CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=contourArea, reverse=True)
    for contour in contours:
        # Stage 3: Shape check
        perimeter = arcLength(contour, True)
        approx = approxPolyDP(contour, 0.01 * perimeter, True)
        if len(approx) == QUADRILATERAL_POINTS:
            area = contourArea(approx)
            # (x, y, w, h) = boundingRect(approx)
            # ar = float(h) / float(w)
            # if area > 100 and ar >= 0.8 and ar <= 1.2:
            if area > 700:
                # putText(image, str(area), (10, 30), FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
                drawContours(image, [contour], -1, (0, 255, 0), 1)
                # Stage 4: Perspective warping
                topdown_quad = get_topdown_quad(thresh, approx.reshape(4, 2))
                # Stage 5: Border check
                if topdown_quad[int((topdown_quad.shape[0] / 100.0) * 5),
                                int((topdown_quad.shape[1] / 100.0) * 5)] > BLACK_THRESHOLD:
                    continue
                # Stage 6: Get marker pattern
                marker_pattern = None
                try:
                    marker_pattern = get_marker_pattern(topdown_quad, THRESHOLD_PERCENT)
                except Exception:
                    continue
                if not marker_pattern:
                    continue
                # Stage 7: Match marker pattern
                marker_found, marker_rotation, marker_name = match_marker_pattern(marker_pattern)
                if marker_found:
                    markers.append([marker_name, marker_rotation])
    return markers, image
def preprocess(imgOriginal, PreprocessCvcSel, PreprocessMode, PreprocessGaussKernel,
               PreprocessThreshBlockSize, PreprocessThreshweight, PreprocessMorphKernel,
               PreprocessMedianBlurKernel, PreprocessCannyThr):
    """CSC, contrast stretch (morphological), blurring and adaptive threshold."""
    # Color-Space-Conversion (CSC): switch from BGR to HSV and take the requested component:
    imgHSV = cvtColor(imgOriginal, COLOR_BGR2HSV)
    imgHSV_H, imgHSV_S, imgHSV_V = split(imgHSV)
    if PreprocessCvcSel == "H":
        imgGrayscale = imgHSV_H
    elif PreprocessCvcSel == "S":
        imgGrayscale = imgHSV_S
    elif PreprocessCvcSel == "V":
        imgGrayscale = imgHSV_V
    else:
        error("Unsupported PreprocessCvcSel mode: %s" % PreprocessCvcSel)

    if PreprocessMode == "Legacy":
        # Increase contrast (morphological):
        imgMaxContrastGrayscale = maximizeContrast(imgGrayscale, PreprocessMorphKernel)
        # Blurring:
        imgBlurred = GaussianBlur(imgMaxContrastGrayscale, PreprocessGaussKernel, 0)
        # Adaptive threshold:
        imgThresh = adaptiveThreshold(imgBlurred, 255.0, ADAPTIVE_THRESH_GAUSSIAN_C,
                                      THRESH_BINARY_INV, PreprocessThreshBlockSize,
                                      PreprocessThreshweight)
    elif PreprocessMode == "BlurAndCanny":
        # Blurring:
        imgBlurred = medianBlur(imgGrayscale, PreprocessMedianBlurKernel)
        # Canny edge detection (low threshold at half the high threshold):
        imgThresh = Canny(imgBlurred, PreprocessCannyThr / 2, PreprocessCannyThr)
    else:
        error("Unsupported PreprocessMode mode: %s" % PreprocessMode)

    imgGrayscale = imgBlurred
    return imgGrayscale, imgThresh
def get_bounding_box(im, add_border=False, sortby="contourArea"):
    if add_border:
        edges = Canny(FilterBased._add_border(im), 75, 200)
    else:
        edges = Canny(im, 75, 200)
    # Check for None before len(); the original tested in the other order,
    # which would raise on None instead of reporting it.
    if edges is None or len(edges) == 0:
        raise NoImprovementFound("No edges found")
    # findContours returns (contours, hierarchy) in OpenCV 4; the original
    # indexed cnts[1], which is the OpenCV 3 layout.
    contours, _ = findContours(edges, RETR_LIST, CHAIN_APPROX_SIMPLE)
    # Materialize as lists: Python 3's map() is lazy and cannot be indexed.
    boxes = [FilterBased._get_box(c) for c in contours]
    if sortby == "contourArea":
        areas = [contourArea(c) for c in contours]
    elif sortby == "box_area":
        areas = [contourArea(b) for b in boxes]
    else:
        raise ValueError("Unknown value {} for sortby".format(sortby))
    return boxes[areas.index(max(areas))]
def canny_edges(self, low_threshold: int, high_threshold: int, image=None) -> ndarray:
    '''
    Applies the Canny transform
    :param image: numpy.ndarray
    :param low_threshold: lower bound
    :param high_threshold: upper bound
    :return: <numpy.ndarray>
    '''
    if image is None:
        self.image_tf = Canny(self.image_tf, low_threshold, high_threshold)
        return self.image_tf
    else:
        # isinstance() is the right check here; the original's
        # issubclass(ndarray, type(image)) tested the relationship backwards.
        assert isinstance(image, ndarray), 'image must be <numpy.ndarray>, for canny edges'
        return Canny(image, low_threshold, high_threshold)
def auto_canny(self, image, sigma=0.33):
    # compute the median of the single-channel pixel intensities
    v = median(image)
    # apply automatic Canny edge detection using the computed median
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edged = Canny(image, lower, upper)
    # return the edged image
    return edged
def get_edged(self, G):
    gray = self.gray_image(self.image)
    blur = self.get_blurred(gray, G)
    v = median(blur)
    # th = adaptiveThreshold(blur, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY_INV, 11, 2)
    lower = int(max(0, (1.0 - 0.33) * v))
    # min(), not max(): the original's max(255, ...) pinned the upper
    # threshold at 255 or above, defeating the auto-Canny heuristic.
    upper = int(min(255, (1.0 + 0.33) * v))
    # Run Canny on the blurred image the median was taken from; the original
    # computed the blur but then passed the unblurred gray image.
    canny = Canny(blur, lower, upper)
    return dilate(canny, ones((5, 5), uint8), iterations=1)
def generate_canny_tensor(frames, num_frames_per_tensor):
    if len(frames) < num_frames_per_tensor:
        raise Exception('Not enough frames to generate tensors. '
                        'Please decrease |NUM_FRAMES_PER_TENSOR|.')
    tensor_frames = []
    # Sample frame indices uniformly across the clip.
    uniform_dispersion = np.linspace(0, len(frames) - 1, num=num_frames_per_tensor)
    for i in uniform_dispersion:
        frame = cvtColor(frames[int(i)], COLOR_RGB2GRAY)
        frame = Canny(frame, 100, 200)[:, :, np.newaxis]
        tensor_frames.append(frame)
    tensor_frames = np.stack(tensor_frames)
    return tensor_frames
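# A minimal usage sketch for generate_canny_tensor; the synthetic frames
# below stand in for decoded RGB video frames.
def _demo_generate_canny_tensor():
    frames = [np.random.randint(0, 256, (48, 64, 3), dtype=np.uint8)
              for _ in range(30)]  # hypothetical 30-frame clip
    tensor = generate_canny_tensor(frames, num_frames_per_tensor=8)
    assert tensor.shape == (8, 48, 64, 1)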
def callback(self, data):
    cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    gray_img = cvtColor(cv_image, COLOR_BGR2GRAY)
    print(mean(gray_img))
    img2 = blur(gray_img, (3, 3))
    # imshow("blur", img2)
    img3 = Canny(gray_img, 10, 200)
    # imshow("canny", img3)
    imshow("Image window", cv_image)
def get_and_show_edges_from_computed_frame(self):
    '''
    Judge whether to show the frame here, because otherwise we lose the
    frame by setting computed_frame to the edges.
    '''
    edges = Canny(self.computed_frame, 100, 200)
    # The original also passed `edges` as the dst argument, but a
    # single-channel array cannot hold the 3-channel result; the return
    # value is what gets used.
    edges_rgb = cvtColor(edges, COLOR_GRAY2RGB)
    if self.showing_frame:
        self.computed_frame = add(self.computed_frame, edges_rgb)
    else:
        self.computed_frame = edges_rgb
def reproduce_edge_map():
    im = read_image('../img/tek-kimia-03-hand-hi-contrast.png', grey=True, uint8=True)
    imsml = resize(im, (310, 300), anti_aliasing=True)
    imsml = np.uint8(imsml * (255 / np.max(imsml)))
    # Sweep the high Canny threshold and save an inverted edge map per step.
    for n in np.arange(120, 420, 20):
        p = np.invert(Canny(imsml, 100, n))
        save_image(p, (8, 8), f'../img/canny/hand-edge-100-{n}.png')
    call(['convert', '-delay', '20', '../img/canny/hand-edge-*.png',
          '../img/canny/hand-edge-100-anim.gif'])
    return
def loop():
    for foo in camera.capture_continuous(stream, format='bgr', resize=(640, 480),
                                         use_video_port=True):
        # TODO: check mph and if off by a bit accelerate/decelerate
        # TODO: write clean acceleration and braking methods to import from another
        # file. Maybe have varying degrees of acceleration or braking.
        # To read in mph: need try/except because the Arduino sometimes gives null values
        # try:
        #     if ser.readline().split()[1] < 2 & braking == False:
        #         thr.start()
        # except:
        #     pass
        if input(brake_pin):
            global braked
            if not braked:
                print("braking")
                dobrake(speed=1000, time_on=1.5)
                braked = True
            kill()
            stream.truncate()
            stream.seek(0)
            # TODO: disengage accelerator motor
            continue
        ret3, frame = threshold(
            bilateralFilter(cvtColor(stream.array, COLOR_BGR2GRAY), 12, 70, 70),
            0, 255, THRESH_BINARY + THRESH_OTSU)
        # apertureSize must be passed by keyword; the fourth positional
        # argument of cv2.Canny is the output array.
        edges = Canny(frame, 1000, 1000, apertureSize=5)
        lines = HoughLines(edges, 1, pi / 180, 100)
        if lines is None:
            kill()
            stream.truncate()
            stream.seek(0)
            continue
        # HoughLines returns an (N, 1, 2) array of (rho, theta) pairs.
        thetas = array([theta for rho, theta in lines[:, 0]])
        theta_filtered = thetas[where((thetas >= 0) & (thetas <= pi))]
        # use the filtered thetas to figure out degrees to turn (the original
        # mixed thetas and theta_filtered; the filtered values are used here)
        if average(theta_filtered) < half:
            radians_to_turn = half - average(theta_filtered)
            print("turning %d degrees right" % degrees(radians_to_turn))
            turn(degrees(radians_to_turn), dir=True)
        else:
            radians_to_turn = average(theta_filtered) - half
            print("turning %d degrees left" % degrees(radians_to_turn))
            turn(degrees(radians_to_turn), dir=False)
        stream.truncate()
        stream.seek(0)
def ToEdgeByAccuratey(img, rati=3, canny=(50, 100), deg=5):
    grayImg = cvtColor(img, COLOR_BGR2GRAY)
    gauImg = GaussianBlur(grayImg, (rati, rati), 3)
    cannyImg = Canny(gauImg, canny[0], canny[1])
    kernel = np.ones((deg, deg), np.uint8)
    # openingImg = morphologyEx(cannyImg, MORPH_OPEN, kernel)
    dilateImg = dilate(cannyImg, kernel, iterations=1)
    ret, thr = threshold(dilateImg, 127, 255, 0)
    return thr
def EdgeFilter(image, sigma=0.33):
    """Edge Image Filter

    @type image: QImage
    @param image: input image
    @return: QImage
    """
    gray = convertQImageToMat(image)
    v = np.median(gray)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    canny = Canny(gray, lower, upper)
    return convertMatToQImage(canny)
def match(img_path1, img_path2):
    img1_raw = imread(img_path1, 1)
    img2_raw = imread(img_path2, 1)
    img1 = Canny(img1_raw, 100, 300)
    img2 = Canny(img2_raw, 100, 300)
    # Match the smaller edge map (the template) against the larger one.
    if img2.size > img1.size:
        res = matchTemplate(img2, img1, TM_CCORR_NORMED)
        img_name = os.path.split(img_path2)[-1]
        tmpl_name = os.path.split(img_path1)[-1]
    else:
        res = matchTemplate(img1, img2, TM_CCORR_NORMED)
        img_name = os.path.split(img_path1)[-1]
        tmpl_name = os.path.split(img_path2)[-1]
    if numpy.median(res) > MATCH_THRESHOLD:
        min_val, max_val, min_loc, max_loc = minMaxLoc(res)
        return u"'%s' appears to be cropped from '%s' at %s\n" % (tmpl_name, img_name, max_loc)
    else:
        return u"'%s' does not appear to be cropped from '%s'\n" % (tmpl_name, img_name)
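# A minimal usage sketch for match; the file names are hypothetical, with
# crop.jpg assumed to be a region cut out of full.jpg, and MATCH_THRESHOLD
# assumed to be defined at module level.
def _demo_match():
    print(match('full.jpg', 'crop.jpg'))
    # -> "'crop.jpg' appears to be cropped from 'full.jpg' at (x, y)"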
def sketch_f2(input_image):
    # 1. Convert the RGB image from the webcam to grayscale.
    gray_image = cvtColor(input_image, COLOR_RGB2GRAY)
    # 2. Apply Gaussian blur to the grayscaled image.
    blurred_image = GaussianBlur(gray_image, ksize=(5, 5), sigmaX=0)
    # 3. Apply the Canny edge detection algorithm to the blurred image.
    canny_edges = Canny(blurred_image, threshold1=10, threshold2=70)
    # 4. Apply binary thresholding.
    _, mask = threshold(canny_edges, 70, 255, THRESH_BINARY_INV)
    return mask
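# A minimal usage sketch for sketch_f2 on a synthetic RGB frame standing in
# for a webcam capture.
def _demo_sketch_f2():
    frame = np.random.randint(0, 256, (120, 160, 3), dtype=np.uint8)
    mask = sketch_f2(frame)
    assert mask.shape == (120, 160)  # white background, black edges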
def cut_query_img_with_finger(img):
    imageYCrCb = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2YCR_CB)
    # skin detection
    min_YCrCb = np.array([0, 133, 77], np.uint8)
    max_YCrCb = np.array([255, 173, 127], np.uint8)
    skinRegionYCrCb = cv2.inRange(imageYCrCb, min_YCrCb, max_YCrCb)
    # preprocessing
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    skinRegionYCrCb = cv2.erode(skinRegionYCrCb, kernel, iterations=2)
    skinRegionYCrCb = cv2.dilate(skinRegionYCrCb, kernel, iterations=2)
    skinRegionYCrCb = cv2.GaussianBlur(skinRegionYCrCb, (3, 3), 0)
    image_canny_filter = Canny(image=img.astype('uint8'), threshold1=20, threshold2=40)
    # Keep non-edge pixels (1 where there is no edge) ...
    image_canny_filter = (image_canny_filter < 100).astype(np.int32)
    # cv2.imwrite(osp.join(OUTPUT_PATH, '{}_{}_canny_as_filter.jpg'.format(item.split('.')[0], 'skin')), image_canny_filter * 255)
    # ... and shift the mask one pixel in each direction so that pixels
    # adjacent to an edge are excluded as well.
    image_canny_filter_left = np.roll(image_canny_filter, 1, axis=0)
    image_canny_filter_right = np.roll(image_canny_filter, -1, axis=0)
    image_canny_filter_up = np.roll(image_canny_filter, 1, axis=1)
    image_canny_filter_down = np.roll(image_canny_filter, -1, axis=1)
    # image_sobel = image_sobel * image_canny_filter * image_canny_filter_left * image_canny_filter_right * image_canny_filter_up * image_canny_filter_down
    image_sobel = (skinRegionYCrCb * image_canny_filter * image_canny_filter_left *
                   image_canny_filter_right * image_canny_filter_up * image_canny_filter_down)
    # largest island: keep only the biggest connected skin component
    # (`label` is assumed imported from skimage.measure or scipy.ndimage)
    image_sobel = label(image_sobel)
    image_sobel = image_sobel == (np.argmax(np.bincount(image_sobel.flat)[1:]) + 1)
    # postprocessing
    image_sobel = image_sobel.astype(np.uint8)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
    image_sobel = cv2.dilate(image_sobel, kernel, iterations=2)
    image_sobel = cv2.GaussianBlur(image_sobel, (3, 3), 0)
    image_sobel = image_sobel.astype(np.int32)
    if np.count_nonzero(image_sobel) <= NO_FIGURE_PIXEL_NUM_THRESHOLD:
        return img
    points_x, points_y = np.where(image_sobel == 1)
    top_line_index = np.min(points_x)
    bot_line_index = np.max(points_x)
    if bot_line_index <= 0.95 * img.shape[0]:
        return img
    # Crop a window above the detected finger region.
    mid_y = np.mean(points_y)
    left_y = max(0, int(mid_y - 0.3 * img.shape[1]))
    right_y = min(img.shape[1], int(mid_y + 0.3 * img.shape[1]))
    upper_line = max(0, top_line_index - int(0.65 * img.shape[0]))
    return img[upper_line:top_line_index, left_y:right_y, :]
def step(self, sess, action, p_info, evaluation=False):
    # Mirror left/right actions when player 1 is on the right-hand side.
    if p_info and p_info['p1_X'] - p_info['p2_X'] > 0:
        action = {3: 4, 4: 3, 8: 9, 9: 8, 10: 11, 11: 10}.get(action, action)
    observation, reward, done, info = self.env.step(action)
    if info['p1_X'] - info['p2_X'] > 0:
        observation = flip(observation, 1)
    p1_round_wins = info['p1_round_wins']
    p2_round_wins = info['p2_round_wins']
    if info['p1_hp'] != 0 and info['p2_hp'] != 0:
        reward = info['p1_hp'] - info['p2_hp']
    if p1_round_wins > self.p1_current_wins:
        self.p1_current_wins = p1_round_wins
        reward = 10000
    if p2_round_wins > self.p2_current_wins:
        self.p2_current_wins = p2_round_wins
    if not evaluation:
        if p1_round_wins == 1 or p2_round_wins == 1:
            done = True
        if format(info['timer'], '02x') == '00' and info['p1_hp'] == info['p2_hp']:
            done = True
    processed_observation = self.frame_processor.process(sess, observation)
    processed_observation = Canny(processed_observation, 100, 200)[:, :, np.newaxis]
    new_state = np.append(self.state[:, :, 1:], processed_observation, axis=2)
    self.state = new_state
    return processed_observation[:, :, 0], observation, info, reward, done
def image_callback(self, data):
    namedWindow("Image window")
    namedWindow("masked")
    namedWindow("canny")
    cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    mask = inRange(cv_image, (0, 100, 0), (255, 255, 255))
    # Show the mask in the window created above (the original passed "mask",
    # which did not match the "masked" window name).
    imshow("masked", mask)
    gray_img = cvtColor(cv_image, COLOR_BGR2GRAY)
    img3 = Canny(gray_img, 10, 200)
    imshow("canny", img3)
    imshow("Image window", cv_image)
    waitKey(1)
def image_callback(self, data):
    namedWindow("Image window")
    namedWindow("masked")
    namedWindow("canny")
    cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    cv_image = resize(cv_image, None, fx=0.2, fy=0.2, interpolation=INTER_CUBIC)
    mask = inRange(cv_image, (0, 150, 150), (255, 255, 255))
    imshow("masked", mask)
    gray_img = cvtColor(cv_image, COLOR_BGR2GRAY)
    img3 = Canny(gray_img, 10, 200)
    imshow("canny", img3)
    imshow("Image window", cv_image)
    waitKey(1)
def edges_detection(img, minVal, maxVal):
    """Preprocessing (gray, thresh, filter, border) + Canny edge detection."""
    img = cvtColor(Page.resize(img), COLOR_BGR2GRAY)
    img = bilateralFilter(img, 9, 75, 75)
    img = adaptiveThreshold(img, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 115, 4)
    img = medianBlur(img, 11)
    img = copyMakeBorder(img, 5, 5, 5, 5, BORDER_CONSTANT, value=[0, 0, 0])
    return Canny(img, minVal, maxVal)
def auto_canny(image, sigma=0.4):
    """
    Zero-parameter automatic Canny edge detection courtesy of
    https://www.pyimagesearch.com - use a specified sigma value (taken as 0.4
    from Dekel et al. at Google Research, CVPR 2017) together with the median
    of the image to compute upper and lower bounds for the Canny algorithm,
    returning the edges. See the post at the following URL:
    https://www.pyimagesearch.com/2015/04/06/zero-parameter-automatic-canny-edge-detection-with-python-and-opencv/
    """
    v = np.median(image)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edged = Canny(image, lower, upper)
    return edged
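# A minimal usage sketch of the auto-Canny heuristic on a synthetic image;
# the gradient input is illustrative only.
def _demo_auto_canny():
    img = np.tile(np.arange(256, dtype=np.uint8), (64, 1))  # horizontal ramp
    edges = auto_canny(img, sigma=0.4)
    # the median is ~127, so the thresholds land at roughly (76, 178)
    assert edges.shape == img.shape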
def find_possible_contours(self, frame, consecutive_skips):
    self.diff = absdiff(frame, self.background)
    _, self.diff = threshold(self.diff, self.diff_thresh, 1, THRESH_BINARY)
    diff_raw = self.diff.copy()
    self.diff = self.diff * self.border_mask
    edges = Canny(self.diff.astype(np.uint8), self.cth1, self.cth2)
    contours, hier = findContours(edges, RETR_EXTERNAL, CHAIN_APPROX_TC89_L1)
    # contours = [c for c in contours if not any([pa.contains_point(contour_center(c)) for pa in self.paths_ignore])]
    if consecutive_skips > self.consecutive_skip_threshold:
        consecutive_skips = 0
        possible = contours
    else:
        possible = [c for c in contours
                    if dist(contour_center(c), self.last_center) < self.translation_max]
    return possible, diff_raw
def main():
    img = imread("../data/dave.jpg")
    gray = cvtColor(img, COLOR_BGR2GRAY)
    edges = Canny(gray, 50, 150, apertureSize=3)
    # Note: ndarray.shape is (rows, cols); the names below follow the
    # original code and are passed through to the Hough implementations.
    width, height = edges.shape

    print("Running Python Hough Transform..\n")
    t = timer()
    accumulator, thetas, rhos, acc_votes = houghTransform(edges, 1, 1, 200)
    py = timer() - t
    print("Execution completed in %f seconds.\n" % py)

    print("Running Cython Hough Transform..\n")
    t = timer()
    accumulator, thetas, rhos, acc_votes = fastHough(edges, width, height, 1, 1, 200)
    cy = timer() - t
    print("Execution completed in %f seconds.\n" % cy)
    print("Cython v1 code is %.3f times faster" % (py / cy))

    print("Running Cython Hough Transform Version 2..\n")
    t = timer()
    xi, yi = np.nonzero(edges)
    accumulator, thetas, rhos, acc_votes = fastHoughv2(edges, width, height, 1, 1, 200)
    cy = timer() - t
    print("Execution completed in %f seconds.\n" % cy)
    print("Cython v2 code is %.3f times faster" % (py / cy))
def runIncrease():
    images_to_increase = [
        "data/0001.jpg", "data/0002.jpg", "data/0003.jpg",
        "data/0004.jpg", "data/0005.jpg", "data/0006.jpg",
        "data/0007.jpg", "data/0008.tif", "data/0009.jpg",
    ]
    scale_increase = 2
    for image in images_to_increase:
        OG = plt.imread(image)
        # Original image and its edge map.
        plt.subplot(2, 3, 1)
        plt.xticks([])
        plt.yticks([])
        plt.imshow(OG)
        plt.subplot(2, 3, 4)
        plt.xticks([])
        plt.yticks([])
        edges = Canny(OG, 100, 200)
        plt.imshow(edges, cmap='gray')

        og_height = OG.shape[0]
        og_width = OG.shape[1]
        new_height_increase = math.floor(OG.shape[0] * scale_increase)
        new_width_increase = math.floor(OG.shape[1] * scale_increase)
        if len(OG.shape) < 3:
            nn_resize = np.zeros((new_height_increase, new_width_increase))
            id_resize = np.zeros((new_height_increase, new_width_increase))
        else:
            nn_resize = np.zeros((new_height_increase, new_width_increase, OG.shape[2]),
                                 dtype=np.uint8)
            id_resize = np.zeros((new_height_increase, new_width_increase, OG.shape[2]),
                                 dtype=np.uint8)

        # Nearest-neighbour upscaling and its edge map.
        nn_resize = nearest_neighbour(OG, nn_resize, new_width_increase,
                                      new_height_increase, scale_increase)
        plt.subplot(2, 3, 2)
        plt.xticks([])
        plt.yticks([])
        plt.imshow(nn_resize)
        plt.subplot(2, 3, 5)
        plt.xticks([])
        plt.yticks([])
        edges = Canny(nn_resize, 100, 200)
        plt.imshow(edges, cmap='gray')

        # Bilinear upscaling and its edge map.
        id_resize = interpolate_bilinear(OG, og_width, og_height, id_resize,
                                         new_width_increase, new_height_increase)
        plt.subplot(2, 3, 3)
        plt.imshow(id_resize)
        plt.xticks([])
        plt.yticks([])
        plt.subplot(2, 3, 6)
        plt.xticks([])
        plt.yticks([])
        edges = Canny(id_resize, 100, 200)
        plt.imshow(edges, cmap='gray')
        plt.show()

        im = Image.fromarray(nn_resize)
        im.save("nn_" + image)
        im = Image.fromarray(id_resize)
        im.save("id_" + image)