def process(self, image):
    """
    Apply the fast NL-Means denoising algorithm from the OpenCV package
    to the current image.

    Args:
        | *image* : image instance
    """
    channels = list(cv2.split(image))
    # dst=None is passed explicitly so that the filter strength lands in
    # OpenCV's h slot rather than the output-array slot
    if self.channel1.value:
        channels[0] = cv2.fastNlMeansDenoising(channels[0], None,
                                               self.filter_strength.value,
                                               self.template_window_size.value*2+1,
                                               self.search_window_size.value*2+1)
    if self.channel2.value:
        channels[1] = cv2.fastNlMeansDenoising(channels[1], None,
                                               self.filter_strength.value,
                                               self.template_window_size.value*2+1,
                                               self.search_window_size.value*2+1)
    if self.channel3.value:
        channels[2] = cv2.fastNlMeansDenoising(channels[2], None,
                                               self.filter_strength.value,
                                               self.template_window_size.value*2+1,
                                               self.search_window_size.value*2+1)
    self.result = cv2.merge(channels)
def test_process_whole_channels(self):
    obj1 = AlgBody()
    obj2 = AlgBody()
    obj2.integer_sliders[0].set_value(4)
    obj2.integer_sliders[1].set_value(9)
    obj2.float_sliders[0].set_value(10.0)
    test_image = cv2.imread("NEFI1_Images/p_polycephalum.jpg")
    test_input = [test_image]
    # dst=None so that h, templateWindowSize and searchWindowSize land in
    # the right slots of the OpenCV binding
    ref_image1 = cv2.fastNlMeansDenoising(test_image, None, 1.0, 7, 21)
    ref_image2 = cv2.fastNlMeansDenoising(test_image, None, 10.0, 9, 19)
    obj1.process(test_input)
    obj2.process(test_input)
    h, w, d = obj1.result["img"].shape
    for i in range(h):
        for j in range(w):
            for k in range(d):
                test_val1 = obj1.result["img"].item(i, j, k)
                test_val2 = obj2.result["img"].item(i, j, k)
                ref_val1 = ref_image1.item(i, j, k)
                ref_val2 = ref_image2.item(i, j, k)
                diff1 = abs(test_val1 - ref_val1)
                diff2 = abs(test_val2 - ref_val2)
                # Processing whole channels must match the reference
                # call exactly, so the difference has to be zero
                self.assertEqual(diff1, 0)
                self.assertEqual(diff2, 0)
def nl_means_for_each_color(img, param_h, patch_size, search_size):
    """Run NL-means denoising on each color channel separately."""
    dst = np.zeros(img.shape, np.uint8)
    # dst=None keeps param_h in OpenCV's filter-strength slot
    for c in range(3):
        dst[:, :, c] = cv2.fastNlMeansDenoising(img[:, :, c], None,
                                                param_h, patch_size,
                                                search_size)
    return dst
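# A minimal usage sketch for the helper above; the synthetic input and the
# parameter values are illustrative assumptions, not from the original.
import cv2
import numpy as np

img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)  # synthetic BGR image
denoised = nl_means_for_each_color(img, param_h=10, patch_size=7, search_size=21)
assert denoised.shape == img.shape and denoised.dtype == np.uint8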
def process(self, args):
    """
    Apply the fast NL-Means denoising algorithm from the OpenCV package
    to the current image.

    Args:
        | *args* : a list of arguments, e.g. image ndarray
    """
    def fastNLMeans(chnls):
        """
        Fast NL-Means denoising cv2 filter function

        Args:
            *chnls* (ndarray) -- image array

        Returns:
            result of cv2.fastNlMeansDenoising
        """
        return cv2.fastNlMeansDenoising(chnls,
                                        h=self.f_strength.value,
                                        templateWindowSize=self.template_size.value*2+1,
                                        searchWindowSize=self.search_size.value*2+1)

    if len(args[0].shape) == 2:
        # grayscale image: denoise it directly
        self.result['img'] = fastNLMeans(args[0])
    else:
        channels = list(cv2.split(args[0]))
        if all([self.channel1.value, self.channel2.value, self.channel3.value]):
            # all channels selected: let OpenCV process the 3-channel image
            self.result['img'] = fastNLMeans(args[0])
        else:
            if self.channel1.value:
                channels[0] = fastNLMeans(channels[0])
            if self.channel2.value:
                channels[1] = fastNLMeans(channels[1])
            if self.channel3.value:
                channels[2] = fastNLMeans(channels[2])
            self.result['img'] = cv2.merge(channels)
def medianFilter(path, i=0): import cv2 spath = path+'Batch260615-001-#17-100FPS-50mlPmin-2-%04d.png' %i # spath = path+'Check\\imgSubbed-%d.jpg' %i if os.path.exists(spath): print("File found!") else: print("File not found!") print(os.listdir(path)) img = cv2.imread(spath,0) im1 =img.copy() im2 = img.copy() for ii in range(40): im1 = cv2.medianBlur(im1, ksize=5) if ii%2 == 0: im2 = cv2.medianBlur(im2, ksize=1+ii) cv2.imshow('im1', im1) cv2.imshow('im2', im2) # cv2.waitKey(5) # img = cv2.imread('die.png') print('Denoising') dst = cv2.fastNlMeansDenoising(img,None,10,7,21) cv2.imshow('img', img) cv2.imshow('dst', dst) cv2.imwrite(path + 'img.jpg', img) cv2.imwrite(path + 'dst.jpg', dst) cv2.waitKey(5) print('finished')
def image_preprocessing(imageName):
    # Open the screenshot png image as a grayscale image
    img = cv2.imread(imageName, 0)
    # Record the dimensions of the image
    height, width = img.shape
    # Denoise the image (dst=None so that h=10 is the filter strength)
    img = cv2.fastNlMeansDenoising(img, None, 10, 7, 21)
    # Resize the image to a fixed height of 100 pixels,
    # maintaining the aspect ratio.
    if float(height) != 100:
        baseheight = 100
        # Scale factor relative to the original height
        hpercent = baseheight / float(height)
        # Multiply the original width by the scale factor
        wsize = int(float(width) * hpercent)
        # Resize the image based on the new width and height
        img = cv2.resize(img, (int(wsize), int(baseheight)))
    # Perform binarization of the image (convert to black and white)
    ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    # Save and replace the old image with the new image
    cv2.imwrite(imageName, img)
    # Re-save the image with a higher DPI tag using PIL
    im = Image.open(imageName)
    im.save(imageName, dpi=(600, 600))
    # Wait for the image to save
    time.sleep(10)
def adaptive_threshold(image_gray, blur=True, verbose=False):
    if verbose:
        print("Thresholding")
    # start from the input so img is defined even when blur is False
    img = image_gray
    if blur:
        img = cv2.medianBlur(img, 3)
    img = cv2.fastNlMeansDenoising(img, None, 10, 7, 21)
    return cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 11, 2)
def calculate_cell_count(frame):
    # print "calculating"
    # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
    blur = cv2.medianBlur(frame, 7)
    # dst=None so that h=10 is the filter strength
    blurdst = cv2.fastNlMeansDenoising(blur, None, 10, 7, 21)
    # half_show('blur', blur)
    # half_show('blurdst', blurdst)
    # cv2.waitKey(0)
    grey = cv2.cvtColor(blurdst, cv2.COLOR_RGB2GRAY)
    circles = cv2.HoughCircles(grey, cv2.cv.CV_HOUGH_GRADIENT, 1, 20,
                               param1=15, param2=5, minRadius=2, maxRadius=10)
    if circles is None:
        return (0, None)
    filteredcircles = []
    for i in circles[0, :]:
        if i[0] > 150:
            filteredcircles.append(i)
    for i in filteredcircles:
        cv2.circle(frame, (i[0], i[1]), i[2], (0, 255, 0), 3)    # draw the outer circle
        cv2.circle(blurdst, (i[0], i[1]), i[2], (0, 255, 0), 1)  # draw the outer circle
        cv2.circle(blurdst, (i[0], i[1]), 2, (0, 0, 255), 3)     # draw the center of the circle
    numCells = len(filteredcircles)
    # half_show("preview", frame)
    # half_show("preview-blue", blurdst)
    # cv2.waitKey(100)
    # print "circles: ", filteredcircles
    return (numCells, filteredcircles)
def revert_image(image_path):
    """
    Sharpen, binarize, invert (black/white) and border-crop a grayscale image.
    """
    # img_array = cv2.imread(image_path)
    # img_array = cv2.fastNlMeansDenoising(img, None, 3, 7, 21)
    img = Image.open(image_path)
    filter_img = img.filter(ImageFilter.EDGE_ENHANCE)
    img_array = np.array(filter_img)
    img_array = cv2.fastNlMeansDenoising(img_array, None, 10, 7, 21)
    source_height = img_array.shape[0]
    source_width = img_array.shape[1]
    source_mean = img_array.mean()
    expand = get_expand(source_width)
    img_array = cv2.resize(img_array,
                           (int(source_width*expand), int(source_height*expand)),
                           interpolation=cv2.INTER_CUBIC)
    # dilate_img = dilate(img_array)
    threshold_img = binary(img_array, source_mean=source_mean)
    revert_array = cv2.bitwise_not(threshold_img)
    surround_img = surround(revert_array)
    cut_image = cut_image_from_array(surround_img)
    return cut_image
def _preprocessing(self): """Preprocessing done on the picture in order to facilitate the computation (denoising and threshold) """ self.img = cv2.fastNlMeansDenoising(self.img, None, 10, 7, 21) self.img = cv2.adaptiveThreshold(self.img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
def filter_image(img, itr=10, temp=7, search=21):
    """
    Converts the image to CIELAB and filters the L channel with NL-means
    denoising. *itr* is passed to OpenCV as the filter strength h (it is
    not an iteration count). Returns the filtered channel.
    """
    img_cielab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    img_l = img_cielab[:, :, 0]
    img_filtered = cv2.fastNlMeansDenoising(img_l, None, itr, temp, search)
    return img_filtered
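# The function above returns only the filtered L channel. A hedged sketch of
# writing the channel back and converting to RGB (an extension that is not
# part of the original):
import cv2

def filter_image_color(img, h=10, temp=7, search=21):
    """Denoise only the L channel of a CIELAB conversion, then rebuild RGB."""
    img_cielab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    img_cielab[:, :, 0] = cv2.fastNlMeansDenoising(img_cielab[:, :, 0], None,
                                                   h, temp, search)
    return cv2.cvtColor(img_cielab, cv2.COLOR_LAB2RGB)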
def cleanDenoise(self, processed): self.newStep() options = self.options clean = cv2.fastNlMeansDenoising(numpy.array(processed), None, 17, 4, 7) # Moldcell processed = Image.fromarray(numpy.uint8(clean)) if options.verbose: processed.save(options.output_dir + '/{}-denoise.png'.format(self.getStep())) return processed
def random_crop_fft(img, CROP_SIZE=256): nr, nc = img.shape[:2] r1, c1 = np.random.randint(nr-CROP_SIZE), np.random.randint(nc-CROP_SIZE) img1 = img[r1:r1+CROP_SIZE, c1:c1+CROP_SIZE, :] #img1 -= cv2.GaussianBlur(img1, (3,3), 0) #img1 -= cv2.medianBlur(img1, 3) #img1 -= cv2.blur(img1, (3,3)) for chan in range(3): img1[:,:,chan] -= cv2.fastNlMeansDenoising(img1[:,:,chan], None, 15, 5, 11) #img1 -= cv2.fastNlMeansDenoisingColored(img1, None, 10, 10, 7, 21) sf = np.stack([np.abs( np.fft.fftshift(np.fft.fft2(img1[:,:,c])) ) for c in range(3)], axis=-1) return np.abs(sf)
def fastNlMeansDenoising(image, h=float(3), template_window_size=7,
                         search_window_size=21):
    """The function performs denoising using the Non-local Means Denoising
    algorithm provided by the OpenCV2 library.

    :Parameters:
        image : input 8-bit image (ndarray)
        h : filter strength; larger values remove more noise and more detail
        template_window_size : odd size of the patch used to compute weights
        search_window_size : odd size of the window searched for similar patches

    :Returns:
        the denoised image as a numpy integer array

    :Notes:
        dst=None is passed explicitly so that *h* fills OpenCV's filter
        strength slot rather than the output-array slot.
    """
    return numpy.int_(cv2.fastNlMeansDenoising(
        image, None, h, template_window_size, search_window_size))
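# A short usage sketch for the wrapper above. The file names are placeholders;
# note that numpy.int_ widens the result, so cast back to uint8 before feeding
# it into further OpenCV calls.
import cv2
import numpy

noisy = cv2.imread("noisy.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input
den = fastNlMeansDenoising(noisy, h=10.0)
cv2.imwrite("denoised.png", den.astype(numpy.uint8))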
def edgeDetection(cropImg,name,path): # Apply a Canny to the crop Img edges = cv2.Canny(cropImg,0,100) #cpy img canny = edges.copy() # Kernel to apply dilation to canny kernel = np.ones((3,3),np.uint8) """ (thresh, canny) = cv2.threshold(edges, 150, 255, cv2.THRESH_BINARY) """ # Dilating image to see borders clearer cv2.fastNlMeansDenoising(canny, canny, 3, 7, 21) dilation = cv2.dilate(canny,kernel,iterations = 1) # Storing canny edges images cv2.imwrite(path+'\CannyEdges\CannyEdges'+name,dilation) # Parameters to use Sobel filter scale = 5 delta = 0 ddepth = cv2.CV_16S # X_Sobel grad_x = cv2.Sobel(cropImg,ddepth,1,0,ksize = 3, scale = scale, delta = delta,borderType = cv2.BORDER_DEFAULT) edges = cv2.convertScaleAbs(grad_x) (thresh, sobel) = cv2.threshold(edges, 150, 255, cv2.THRESH_BINARY) kernel = np.ones((3,3),np.uint8) cv2.fastNlMeansDenoising(edges, edges, 3, 7, 21) # Dilating sobel dilation = cv2.dilate(sobel,kernel,iterations = 1) # Storing images cv2.imwrite(path+'\SobelEdges\SobelEdges'+name,dilation)
def light_normalization(img):
    """ Normalizes light conditions in the given B&W image """
    # Histogram equalization
    img = cv.equalizeHist(img)
    # Gamma correction with factor 0.8 (smaller factors -> more bright)
    img = img / 255.0
    img = cv.pow(img, 0.8)
    img = np.uint8(img * 255)
    # dst=None so that h=10 is the filter strength
    img = cv.fastNlMeansDenoising(img, None, 10, 7, 21)
    # Gaussian filter to smooth
    img = cv.GaussianBlur(img, (3, 3), 0)
    return img
def cv_edge_detect(image):
    """Use OpenCV2 to perform Canny edge detection on an input image.
    Returns a new pygame surface."""
    cv_surface = cv2.imread(image, cv2.IMREAD_COLOR)  # CV_LOAD_IMAGE_COLOR was removed in OpenCV 3
    cv_surface = cv2.cvtColor(cv_surface, cv2.COLOR_BGR2GRAY)
    cv_surface = cv2.fastNlMeansDenoising(cv_surface)
    edges = cv2.Canny(cv_surface, 100, 100, apertureSize=5)
    edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    new_surface = pygame.image.frombuffer(
        edges.tobytes(), edges.shape[1::-1], 'RGB'
    )
    new_surface.set_colorkey(0x000000)
    return new_surface
def cleanGeneralDenoise(self, processed): self.newStep() options = self.options processed = processed.convert("LA") processed = processed.convert("P") clean = cv2.fastNlMeansDenoising(numpy.array(processed), None, 17, 5, 7) processed = Image.fromarray(numpy.uint8(clean)) if options.verbose: processed.save(options.output_dir + '/{}-general-denoise.png'.format(self.getStep())) return processed
def fastNLMeans(chnls):
    """
    Fast NL-Means denoising cv2 filter function

    Args:
        *chnls* (ndarray) -- image array

    Returns:
        result of cv2.fastNlMeansDenoising
    """
    return cv2.fastNlMeansDenoising(chnls,
                                    h=self.f_strength.value,
                                    templateWindowSize=self.template_size.value*2+1,
                                    searchWindowSize=self.search_size.value*2+1)
def process(self, args):
    if len(args[0].shape) == 2:
        # grayscale image: plain NL-means (dst=None keeps h in the right slot)
        self.result['img'] = cv2.fastNlMeansDenoising(args[0], None,
                                                      self.f_strength.value,
                                                      self.template_size.value*2+1,
                                                      self.search_size.value*2+1)
    else:
        ts = self.template_size.value*2+1
        ss = self.search_size.value*2+1
        result = cv2.fastNlMeansDenoisingColored(src=args[0],
                                                 h=self.f_strength.value,
                                                 hColor=self.f_col.value,
                                                 templateWindowSize=ts,
                                                 searchWindowSize=ss)
        self.result['img'] = result
def extract_text(self): # Convert to gray binarized = cv2.cvtColor(self.text_section, cv2.COLOR_BGR2GRAY) # Filter the noise binarized = cv2.fastNlMeansDenoising(binarized, h=8, searchWindowSize=50) # binarized = cv2.medianBlur(binarized, 11) # binarized = cv2.adaptiveThreshold(binarized, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 51, 4) # Detect the black lines (actually detect well text too) binarized = detect_riddes(binarized) # Clean up to only keep the vertical and horizontal lines mask = get_line_mask(binarized) + get_line_mask(binarized, num_iterations=3, vertical=True) # Close the boundaries header_mask = close_lines(mask) # Get the area contours contours = get_enclosing_contours(255-header_mask) if len(contours) != 8: self.document_info.logger.warning('Number of area contours unusual : {}'.format(len(contours))) self._area_contours = contours # Get the text contours self._extracted_data = [] for i in range(len(contours)): tmp_cnt_mask = cv2.drawContours(np.zeros_like(binarized), contours, i, 1, cv2.FILLED) txt_mask = binarized*tmp_cnt_mask txt_mask = cv2.morphologyEx(txt_mask, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))) txt_mask = cv2.morphologyEx(txt_mask, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (80, 80))) inside_contours = get_enclosing_contours(txt_mask) for cnt in inside_contours: if cv2.contourArea(cnt) > 600: bb = cv2.boundingRect(cnt) _, _, w, h = bb if h > 25 and w > 25: self._extracted_data.append(TextFragment(i, bb)) self.document_info.logger.debug('Number of text boxes : {}'.format(len(self._extracted_data))) # Recognize text margin = 40 for d in self._extracted_data: x, y, w, h = d.box txt_img = self.text_section[ max(y-margin, 0):min(y+h+margin, self.text_section.shape[0]), max(x-margin, 0):min(x+w+margin, self.text_section.shape[1])] d.text = pytesseract.image_to_string(Image.fromarray(txt_img))
def erodeAndDilate(self, processed): self.newStep() options = self.options contr = ImageEnhance.Contrast(processed) processed = contr.enhance(1.2) sharp = ImageEnhance.Sharpness(processed) processed = sharp.enhance(1.1) clean = cv2.fastNlMeansDenoising(numpy.array(processed), None, 33, 3, 7) processed = Image.fromarray(numpy.uint8(clean)) if options.verbose: processed.save(options.output_dir + '/{}-erode-and-dilate.png'.format(self.getStep())) return processed
import cv2
import numpy as np
from matplotlib import pyplot as plt


def denoise(img):
    # dst = cv2.fastNlMeansDenoisingColored(src=img, dst=None, h=10, hColor=10,
    #                                       templateWindowSize=7, searchWindowSize=21)
    plt.subplot(121), plt.imshow(img)
    prev = None
    dst = None
    # sweep the NL-means parameters and report when the output stops changing
    for p in range(1, 10):
        for t in range(1, 7):
            for s in range(1, 21):
                prev = dst
                dst = cv2.fastNlMeansDenoising(src=img, dst=None, h=p,
                                               templateWindowSize=t,
                                               searchWindowSize=s)
                print("p:%d, t:%d, s:%d" % (p, t, s))
                # ndarrays cannot be compared with == / !=; use np.array_equal
                if prev is not None:
                    print(np.array_equal(dst, prev))
                cv2.imshow("pepe", dst)
                cv2.waitKey(2000)
def bg_color_removal(im): import cv2 import numpy as np from PIL import Image #im.save("Sample.png") image = np.array(im) #print (image) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) alpha = 2.5 beta = -0.0 denoised = alpha * gray + beta denoised = np.clip(denoised, 0, 255).astype(np.uint8) denoised = cv2.fastNlMeansDenoising(denoised, None, 31, 7, 21) return Image.fromarray(denoised)
def get_bbox(ann_filepath): # ann = np.genfromtxt(ann_filepath, delimiter=',') contained_classes = [int(i) for i in np.unique(ann).tolist()] # print contained_classes # contour_img = np.zeros((ann.shape[0],ann.shape[1],3), np.uint8) bbox_list = [] for each_class in contained_classes: if each_class==0: #background continue # make BW image img = make_bw_img_for_one_class(each_class, ann) # cv2.imwrite(out_dir+'/noisy_bw_img_'+str(each_class)+'.png',img) # denoise, see http://docs.opencv.org/modules/photo/doc/denoising.html # TODO this denoising makes boundingRect become larger, may need to resize the (final) boundingRect mul = 1 clean_img = cv2.fastNlMeansDenoising(img, None, h=1000, templateWindowSize=7*mul, searchWindowSize=21*mul) # cv2.imwrite(out_dir+'/bw_img_'+str(each_class)+'.png',img) # determine number of objects in this class, using contour noisy_contours, hierarchy = cv2.findContours(clean_img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) # remove unreasonably small contours # TODO should we use cv2.contourArea(c) min_n_pixel = 75 # a magic number contours = [i for i in noisy_contours if i.shape[0]>=min_n_pixel] # get bbox for each contours local_bbox_list = [] for c in contours: bbox = {} bbox['rectangle'] = cv2.boundingRect(c) bbox['contour'] = c local_bbox_list.append(bbox) bbox_list = bbox_list + local_bbox_list return bbox_list
def find_lowest_horizontal_line(gray_image, mask=None, canny_threshold=50):
    gray_filtered = cv2.fastNlMeansDenoising(gray_image)
    edge_map = cv2.Canny(gray_filtered, canny_threshold, 3 * canny_threshold,
                         apertureSize=3)
    if mask is not None:
        edge_map = edge_map * mask
        # plt.imshow(mask)
    lines = cv2.HoughLines(edge_map, 1, (np.pi / 180) / 2,
                           threshold=int(gray_image.shape[1] / 4))
    if lines is None:
        # HoughLines returns None when no line clears the threshold
        raise ValueError("no lines detected in the image")
    horizontal_lines = []
    for l in lines[:, 0, :]:
        if np.pi / 2 - np.pi / 12 < l[1] < np.pi / 2 + np.pi / 12:
            horizontal_lines.append(l)

    def get_y(line):
        return abs(line[0])

    # print(sorted([get_y(h) for h in horizontal_lines]))
    lowest_y = get_y(sorted(horizontal_lines, key=get_y, reverse=True)[0])
    return lowest_y
def main():
    # c_main()
    # cdef c_main():
    if len(sys.argv) != 3:
        print("./binarization_for_ocr.py <in_file> <out_file>")
        quit()
    # cdef unicode filename = sys.argv[1]
    # cdef unicode outfile = sys.argv[2]
    filename = sys.argv[1]
    outfile = sys.argv[2]
    # cv2.ocl.setUseOpenCL(True)
    # cdef np.ndarray[DTYPE_t, ndim=2] image_color
    image_color = cv2.imread(filename, cv2.IMREAD_COLOR)
    if image_color is None:
        print("input file is not found")
        quit()
    channels = cv2.split(image_color)
    image = channels[1]  # drop blue channel for yellowish books
    mode_0 = mode(channels[0].flat)[0]
    mode_1 = mode(channels[1].flat)[0]
    mode_2 = mode(channels[2].flat)[0]
    channels[0] = cv2.absdiff(channels[0], mode_0)
    channels[1] = cv2.absdiff(channels[1], mode_1)
    channels[2] = cv2.absdiff(channels[2], mode_2)
    img_diff = cv2.max(channels[0], channels[1])
    img_diff = cv2.max(img_diff, channels[2])
    # dst=None so that h=100 is the filter strength
    img_diff = cv2.fastNlMeansDenoising(img_diff, None, 100, 7, 21)
    ###
    process(image, img_diff, outfile, True, 3)
def update(val): # Get parameter vmin = slider_cutoff.val borderType = borderTypes[int(np.around(slider_bordertype.val)) ] # Border type d = int(np.around(slider_diameter.val)) # Diameter sigma = slider_sigma.val # Sigma sigma_color = slider_sigma_color.val # Sigma color (Bilateral filter) sigma_space = slider_sigma_space.val # Sigma space (Bilateral filter) nlm_h = slider_nlm.val # Apply filters box = cv2.blur(img, (d,d), borderType=borderType) # Box filter gaussian = cv2.GaussianBlur(img, (d,d), sigma, borderType=borderType) # Gaussian median = cv2.medianBlur(img, d) # Median bilateral = cv2.bilateralFilter(img, d, sigma_color, sigma_space, borderType=borderType) # Bilateral kernel_opening = cv2.getStructuringElement(cv2.MORPH_CROSS,(d,d)) opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel_opening) masked = ma.masked_greater(opening, 0) opening2 = np.copy(img) opening2[~masked.mask] = 0 non_local_means = cv2.fastNlMeansDenoising(img, None, h=nlm_h, templateWindowSize=d, searchWindowSize=d) # Show results original.set_clim(vmin, 255.0) box_filter.set_data(box) box_filter.set_clim(vmin, 255.0) gaussian_filter.set_data(gaussian) gaussian_filter.set_clim(vmin, 255.0) median_filter.set_data(median) median_filter.set_clim(vmin, 255.0) bilateral_filter.set_data(bilateral) bilateral_filter.set_clim(vmin, 255.0) opening_filter.set_data(opening) opening_filter.set_clim(vmin, 255.0) opening_filter2.set_data(opening2) opening_filter2.set_clim(vmin, 255.0) nlm_filter.set_data(non_local_means) nlm_filter.set_clim(vmin, 255.0)
def write_clean_image(in_filepath, out_filepath):
    '''
    Applies denoising to the image so that specks, etc. do not show up
    in the thresholded image.
    '''
    img = cv2.imread(in_filepath, 0)  # Read in as grayscale
    # Third argument (h) controls the filter strength... higher is more blurry
    img = cv2.fastNlMeansDenoising(img, None, 23, 7, 21)
    # Last argument is what you want to adjust
    img = cv2.adaptiveThreshold(
        img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 5
    )
    cv2.imwrite(out_filepath, img)
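# A hedged driver for write_clean_image; the folder layout is an assumption.
import glob
import os

for src in glob.glob("scans/*.png"):            # hypothetical input folder
    dst = os.path.join("cleaned", os.path.basename(src))
    write_clean_image(src, dst)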
currentFrame = startFrame + frameNum # current frame position if currentFrame > endFrame: break frame = Image_data[currentFrame] bilat = cv2.bilateralFilter(frame, 7, 75, 75) sharpenBil = cv2.filter2D(bilat, ddepth=-1, kernel=kernelSharp) dilated = cv2.dilate(sharpenBil, smallMorph) eroded = cv2.erode(dilated, largeMorph) dilatedFinal = cv2.dilate(eroded, smallMorph) fastDen = cv2.fastNlMeansDenoising(dilatedFinal, None, 3, 3, 9) sharpNoise = cv2.filter2D(fastDen, ddepth=-1, kernel=kernelSharp) sharpDil = cv2.dilate(sharpNoise, smallMorph) sharpEro = cv2.erode(sharpDil, largeMorph) sharpDilFinal = cv2.dilate(sharpEro, smallMorph) gBlurred = cv2.GaussianBlur(sharpDilFinal, (3, 3), 0) denoisedGray = cv2.fastNlMeansDenoising(gBlurred, None, 8, 7, 21) output = cv2.cvtColor(denoisedGray, cv2.COLOR_GRAY2BGR) if makingVideo is True: newVideo.write(output) elif makingVideo is False: imagename = directory + imagePrefix + str(currentFrame +
# cv2.filterSpeckles() # h, status = cv2.findHomography(initFrame, frame) # ok, bbox = tracker.update(frameDelta) # # if ok: # # Tracking success # p1 = (int(bbox[0]), int(bbox[1])) # p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])) # cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1) # else: # # Tracking failure # cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2) rgbGray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY) rgbGray = cv2.fastNlMeansDenoising(rgbGray, None, 7, 21) thresh = cv2.threshold(rgbGray, 25, 255, cv2.THRESH_BINARY)[1] # thresh = cv2.threshold(fgmask, 0, 255, cv2.THRESH_BINARY)[1] # kernel = np.ones((50, 50), np.uint8) # erosion = cv2.dilate(thresh, kernel, iterations=1) # closing = cv2.morphologyEx(thresh, cv2.MORPH_ERODE, kernel) # img_, cnts, hie_ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) img_, contours, hie_ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) contours = [cv2.approxPolyDP(cnt, 2, True) for cnt in contours] contours = [c for c in contours if cv2.contourArea(c) > 200] # thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
def denoising_NlMeans(img): print("[DEBUG] Executing Non Local Means Denoising Algorithm") return cv2.fastNlMeansDenoising(img, None, 10, 7, 21)
all_about_palette.append(all_in_one_palette) all_connected_point = find_connected_point(checkpoint_center, all_about_palette, 200) for cp in all_connected_point: c_field = cv2.circle(c_field, (cp[0][0], cp[0][1]), radius=0, color=(255, 0, 0), thickness=10) c_field = cv2.circle(c_field, (cp[1][0], cp[1][1]), radius=0, color=(255, 0, 0), thickness=10) cv2.imshow('check connected point', c_field) cv2.waitKey(0) cv2.destroyAllWindows() full_path_image = draw_full_path(all_connected_point, all_about_palette, field_image) cv2.imshow('full skeleton path', full_path_image) cv2.waitKey(0) cv2.destroyAllWindows() denoised_gray_field = cv2.fastNlMeansDenoising(field_gray, None, 10, 7, 21) cv2.imshow('test', denoised_gray_field) cv2.waitKey(0) cv2.destroyAllWindows() find_min_max(full_path_image, all_about_palette, denoised_gray_field) full_path_with_height = find_path_height(full_path_image, all_about_palette, denoised_gray_field) from mpl_toolkits import mplot3d fig = plt.figure() ax = plt.axes(projection="3d") x = [] y = [] z = [] for co in full_path_with_height: # for co in co_list: x.append(int(co[1]))
#------------------ sigma_color = 10000 # Sigma color (Bilateral filter) sigma_space = 10 # Sigma space (Bilateral filter) bilateral = cv2.bilateralFilter(tsframe, d, sigma_color, sigma_space, borderType=cv2.BORDER_REPLICATE) #------------------ # Non local means #------------------ nlm_h = 50 non_local_means_8U = cv2.fastNlMeansDenoising(tsframe_8U, None, h=nlm_h, templateWindowSize=d, searchWindowSize=d) non_local_means = non_local_means_8U.astype(np.float32) * (tsframe_max / 255.0) ##################### # Temporal Filtering ##################### #------------------------- # Per taxel Kalman filter #------------------------- from pykalman import KalmanFilter tsframes3D_kalman = np.empty( (height, width, numTSFrames)) # height, width, depth for x in range(width):
while numberOfImage <= 44:
    infile = r'C:\Users\Mario\Desktop\Testowe\4\DSC_00%s.jpg' % (numberOfImage)
    outfile = r"C:\Users\Mario\Desktop\Testowe\4\Edited\DSC_%d.jpg" % numberOfImage
    img1 = cv2.imread(infile, 1)
    # img1 = cv2.imread(r"C:\Users\Mario\Desktop\test3.jpg", 1)
    # height, width = img1.shape
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    # img1[:, :, 1] = 0  # green to zero
    plt.imshow(img1)
    # plt.show()
    img1 = cv2.fastNlMeansDenoising(img1)
    plt.imshow(img1)
    # plt.show()
    # note: img1 is RGB at this point, so COLOR_BGR2HSV swaps red and blue
    hsv = cv.cvtColor(img1, cv.COLOR_BGR2HSV)
    green = np.uint8([[[255, 0, 255]]])  # note: (255, 0, 255) is magenta, not green
    hsv_green = cv.cvtColor(green, cv.COLOR_BGR2HSV)
    print(hsv_green)
    lower_blue = np.array([0, 0, 200])
    upper_blue = np.array([10, 255, 255])
    mask = cv.inRange(hsv, lower_blue, upper_blue)
    res = cv.bitwise_and(img1, img1, mask=mask)
Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1GNkW4K6gyYibV4U39vAc5k6JglY50CAP
"""

# import required packages
import cv2
import numpy as np
from matplotlib import pyplot as plt
import imutils

# input image
img = cv2.imread('8.png', 0)

# Noise filtering
img = cv2.fastNlMeansDenoising(img, None, 10, 7, 21)

# For clearing the shades in the photo
dilated_img = cv2.dilate(img, np.ones((7, 7), np.uint8))
bg_img = cv2.medianBlur(dilated_img, 21)
diff_img = 255 - cv2.absdiff(img, bg_img)
norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255,
                         norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)

# output the new shadeless image
cv2.imwrite('shadows_out.png', diff_img)

# the normalized image gives higher contrast than the plain difference
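# The script computes norm_img but only writes diff_img; a one-line follow-up
# to also save the higher-contrast normalized variant (the file name is an
# assumption):
cv2.imwrite('shadows_out_norm.png', norm_img)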
# Copyright (c) Peter Majko.
"""
"""
from PIL import Image, ImageOps
import pytesseract
import cv2 as cv
import numpy as np

image_path = r'test_3.png'
img_rgb = cv.imread(image_path)
# imread returns BGR, so use the BGR flag for the grayscale conversion
img_gray = cv.cvtColor(img_rgb, cv.COLOR_BGR2GRAY)
img_gray = cv.fastNlMeansDenoising(img_gray, h=25)
inv_img = ImageOps.invert(Image.fromarray(img_gray).convert('L'))
inv_img.format = "BMP"
# img_gray = np.asarray(inv_img, dtype=np.uint8)
# img_gray
s = pytesseract.image_to_string(inv_img, lang='eng+fra')
print(s)
# print(s.replace('\n', ' ').replace(',', '').split(' '))
print("=" * 40)
def ShowHistogram(): img = cv.imread('/home/pi/Documents/pic.jpg', 0) dst = cv.fastNlMeansDenoising(img) plt.hist(dst.ravel(), 256, [0, 256]) plt.show() return
plt.imshow(image, cmap='Greys_r') plt.title('Noisy Image') plt.show() params = {'theta': 0.7, 'gamma': 0.3} # for i in list(np.arange(0, 1, 0.1)): # for j in list(np.arange(0, 1, 0.1)): # params = {'theta':0.5, 'gamma':0.2} L = Loopy(image, 20, params) denoised, _ = L.messages_sync() LBP_time = datetime.now() print('LBP done in {}'.format(LBP_time - start)) dst = cv2.fastNlMeansDenoising(image.astype('uint8'), None, 1, 3, 5) NLM_time = datetime.now() print('NLM done in {}'.format(NLM_time - LBP_time)) plt.figure(figsize=(10, 30)) plt.subplot(1, 3, 1) plt.axis('off') # plt.title('Noisy Image') plt.imshow(noiseless_image, cmap='Greys_r') plt.subplot(1, 3, 2) plt.axis('off') # plt.title('Noisy Image') plt.imshow(denoised, cmap='Greys_r') plt.subplot(1, 3, 3) plt.axis('off') # plt.title('Denoised Image \n Accuracy = NA, Theta = {}, Gamma = {}'.format(params['theta'], params['gamma'])) plt.imshow(dst, cmap='Greys_r')
def nonloc_denoising(img, denoise_strength): return cv.fastNlMeansDenoising(img, h=denoise_strength)
def DeNoise_gray(thresh, control): test = PIL.Image.fromarray(thresh.astype('uint8'), 'L') de = cv2.fastNlMeansDenoising(np.array(test), None, control, 7, 21) de[de > 0] = 255 return de
def denoise(input_img, denoise_lvl): gray = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY) # convert image color to gray cv2.fastNlMeansDenoising(gray, gray, denoise_lvl) # denoise image return gray
import math
from time import time

import cv2
import numpy as np


def noise(noise_typ, image):
    if noise_typ == "gauss":
        # Add Gaussian noise to the grayscale original
        row, col = image.shape
        mean = 0
        var = 0.002
        sigma = var ** 0.5
        gauss = np.random.normal(mean, sigma, (row, col)).reshape(row, col)
        noisy = image + gauss
        label = "gauss"
    elif noise_typ == "s&p":
        # Add salt & pepper noise to the grayscale original
        s_vs_p = 0.5
        amount = 0.01
        noisy = np.copy(image)
        # Salt mode
        num_salt = np.ceil(amount * image.size * s_vs_p)
        coords = [np.random.randint(0, i - 1, int(num_salt)) for i in image.shape]
        noisy[tuple(coords)] = 1
        # Pepper mode
        num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
        coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape]
        noisy[tuple(coords)] = 0
        label = "s&p"
    else:
        return

    cv2.imshow('lena_%s_noisy' % label, noisy)  # show the noisy image
    cv2.waitKey(0)

    noisy_u8 = (255 * noisy).astype(np.uint8)
    N = 7  # kernel size for the Gaussian filter

    # The four filters to compare. For NL-means, dst=None is passed so that
    # h=5, templateWindowSize=15 and searchWindowSize=25 land in the right
    # slots of the OpenCV binding.
    filters = [
        ("gauss", lambda im: cv2.GaussianBlur(im, (N, N), 1.5, 1.5)),
        ("median", lambda im: cv2.medianBlur(im, 7)),
        ("bilateral", lambda im: cv2.bilateralFilter(im, 15, 25, 25)),
        ("nlm", lambda im: cv2.fastNlMeansDenoising(im, None, 5, 15, 25)),
    ]

    times = {}
    for name, apply_filter in filters:
        start = time()                 # time before running the filter
        filtered = apply_filter(noisy_u8)
        times[name] = time() - start   # execution time of the filter
        print(times[name])
        # Show the noisy image after filtering
        cv2.imshow('lena_%s_noisy_filtro_%s' % (label, name), filtered)
        cv2.waitKey(0)
        # Estimated error between the noisy and the filtered image
        # (absdiff avoids uint8 wrap-around on subtraction)
        error_estimate = cv2.absdiff(noisy_u8, filtered)
        cv2.imshow('errorestimado_%s_%s' % (label, name),
                   error_estimate.astype(float) / 255)
        cv2.waitKey(0)
        # Root of the mean squared error between the noisy and the filtered
        # image, both on the 0-255 scale
        rmse = math.sqrt(np.square(np.subtract(
            noisy_u8.astype(np.float64), filtered.astype(np.float64))).mean())
        print("error cuadrático %s_%s" % (label, name), rmse)

    # Report the fastest filter in milliseconds and the other filters as a
    # percentage of its time; min() finds the true minimum, which chained
    # `a < (b and c and d)` comparisons do not
    fastest = min(times, key=times.get)
    print("ruido %s, filtro %s" % (label, fastest), times[fastest] * 1000, "ms")
    for name, elapsed in times.items():
        if name != fastest:
            print("ruido %s, filtro %s" % (label, name),
                  elapsed * 100 / times[fastest], "%")
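# A hedged driver for the comparison above; the file name and the 0..1
# grayscale normalization are assumptions.
import cv2

img = cv2.imread('lena.png', cv2.IMREAD_GRAYSCALE) / 255.0
noise("gauss", img)
noise("s&p", img)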
def reduce_noise(img, h=10): return cv2.fastNlMeansDenoising(img, None, h, templateWindowSize=7, searchWindowSize=21)
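# A quick check of the wrapper above on synthetic data; the keyword defaults
# match the window sizes recommended in OpenCV's documentation (7 and 21).
import cv2
import numpy as np

gray = (np.random.rand(128, 128) * 255).astype(np.uint8)  # synthetic input
out_default = reduce_noise(gray)        # h=10
out_strong = reduce_noise(gray, h=30)   # larger h removes more noise and detail
print(out_default.dtype, out_strong.shape)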
def deskew(im, max_skew=10):
    height, width, a = im.shape
    # Create a grayscale image and denoise it
    im_gs = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    im_gs = cv2.fastNlMeansDenoising(im_gs, h=3)
    # Create an inverted B&W copy using Otsu (automatic) thresholding;
    # threshold() returns (retval, dst), so take element [1]
    im_bw = cv2.threshold(im_gs, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    # Detect lines in this image. Parameters here mostly arrived at by trial and error.
    lines = cv2.HoughLinesP(im_bw, 1, np.pi / 180, 200,
                            minLineLength=width / 12, maxLineGap=width / 150)
    # Collect the angles of these lines (in radians)
    angles = []
    for line in lines:
        x1, y1, x2, y2 = line[0]
        angles.append(np.arctan2(y2 - y1, x2 - x1))
    # If the majority of our lines are vertical, this is probably a landscape image
    landscape = np.sum([abs(angle) > np.pi / 4 for angle in angles]) > len(angles) / 2
    # Filter the angles to remove outliers based on max_skew
    if landscape:
        angles = [
            angle for angle in angles
            if np.deg2rad(90 - max_skew) < abs(angle) < np.deg2rad(90 + max_skew)
        ]
    else:
        angles = [angle for angle in angles if abs(angle) < np.deg2rad(max_skew)]
    if len(angles) < 5:
        # Insufficient data to deskew
        return im
    # Average the angles to a degree offset
    angle_deg = np.rad2deg(np.median(angles))
    # If this is a landscape image, rotate the entire canvas appropriately
    if landscape:
        if angle_deg < 0:
            im = cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
            angle_deg += 90
        elif angle_deg > 0:
            im = cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
            angle_deg -= 90
    # Rotate the image by the residual offset
    M = cv2.getRotationMatrix2D((width / 2, height / 2), angle_deg, 1)
    im = cv2.warpAffine(im, M, (width, height), borderMode=cv2.BORDER_REPLICATE)
    return im
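# A hedged driver for deskew; the file names are placeholders.
import cv2

im = cv2.imread("page.jpg")  # hypothetical scanned page
straight = deskew(im, max_skew=10)
cv2.imwrite("page_deskewed.jpg", straight)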
path = '/home/adventum/Cairo_Data_Original_bw/*.*'
files = glob.glob(path)

new = ""
# traverse in the string
for x in files:
    new = new + x + "~"
new = list(new.split("~"))
l = len(new) - 1
new.pop(l)

new2 = ""
# traverse in the string
for y in files:
    new2 += y + '~'
new2 = new2.replace('/home/adventum/Cairo_Data_Original_bw', '')
new2 = list(new2.split("~"))
new2.pop()

z = 0
for file in new:
    print(file)
    print("\n")
    a = cv2.imread(file)
    # keep the return value; fastNlMeansDenoising does not modify `a`
    # in place when dst is None
    a = cv2.fastNlMeansDenoising(a, None, 4, 7, 21)
    cv2.imwrite('/home/adventum/Desktop/new_denoised' + str(new2[z]), a)
    z = z + 1
def binarization(gray, key): ret, gray = cv2.threshold(gray, key, 255, cv2.THRESH_BINARY) gray = cv2.fastNlMeansDenoising(gray) return gray
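# NL-means on an already binarized image reintroduces intermediate gray
# levels at patch boundaries. A sketch that restores a strictly binary
# result afterwards (the second threshold is an assumption, not part of
# the original function):
import cv2

def binarization_strict(gray, key):
    ret, gray = cv2.threshold(gray, key, 255, cv2.THRESH_BINARY)
    gray = cv2.fastNlMeansDenoising(gray)
    ret, gray = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)  # assumed re-threshold
    return gray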
def touchdowns(image, n):
    """
    Function to obtain the locations of the touchdown passes from the image
    of the pass chart using k-means, and DBSCAN to account for difficulties
    in extracting touchdown passes, since they are the same color as both
    the line of scrimmage and the attached touchdown trajectory lines.

    Input:
        image: image from the folder 'Cleaned_Pass_Charts'
        n: number of touchdowns, from the corresponding data of the image
    Return:
        call to map_pass_locations:
            centers: list of pass locations in pixels
            col: width of image from which the pass locations were extracted
            pass_type: "TOUCHDOWN"
    """
    im = Image.open(image)
    pix = im.load()
    col, row = im.size
    img = Image.new('RGB', (col, row), 'black')
    p = img.load()
    for i in range(col):
        for j in range(row):
            r = pix[i, j][0]
            g = pix[i, j][1]
            b = pix[i, j][2]
            if (col < 1370) and (j < row - 105) and (j > row - 111):
                if (b > 2 * g) and (b > 60):
                    p[i, j] = (0, 0, 0)
            elif (col > 1370) and (j < row - 81) and (j > row - 86):
                if (b > 2 * g) and (b > 60):
                    p[i, j] = (0, 0, 0)
            else:
                p[i, j] = pix[i, j]
            r = p[i, j][0]
            g = p[i, j][1]
            b = p[i, j][2]
            f = ((r - 20)**2 + (g - 80)**2 + (b - 200)**2)**0.5
            if f < 32 and b > 100:
                p[i, j] = (255, 255, 0)
    scipy.misc.imsave('temp.jpg', img)
    imag = cv2.imread('temp.jpg')
    os.remove('temp.jpg')
    hsv = cv2.cvtColor(imag, cv2.COLOR_BGR2HSV)
    lower = np.array([20, 100, 100])
    upper = np.array([30, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)
    res = cv2.bitwise_and(imag, imag, mask=mask)
    res = cv2.cvtColor(res, cv2.COLOR_HSV2RGB)
    res = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    res = cv2.fastNlMeansDenoising(res, h=10)
    x = np.where(res != 0)[0]
    y = np.where(res != 0)[1]
    # materialize the pairs so they can be reused and measured (Python 3)
    pairs = list(zip(x, y))
    X = [list(pair) for pair in pairs]
    if len(pairs) != 0:
        db = DBSCAN(eps=10, min_samples=n).fit(X)
        labels = db.labels_
        coords = pd.DataFrame([x, y, labels]).T
        coords.columns = ['x', 'y', 'label']
        clusters = Counter(labels).most_common(n)
        td_labels = np.array([clust[0] for clust in clusters])
        km_coords = coords.loc[coords['label'].isin(td_labels)]
        km = list(map(list, zip(km_coords.iloc[:, 0], km_coords.iloc[:, 1])))
        kmeans = KMeans(n_clusters=n, random_state=0).fit(km)
        centers = kmeans.cluster_centers_
        return map_pass_locations(centers, col, "TOUCHDOWN")
    else:
        return map_pass_locations([], col, "TOUCHDOWN", n)
averages = [] for tif in imgs: #read in image as grayscale img = cv2.imread(directory+"/"+tif, cv2.IMREAD_GRAYSCALE) #apply gaussian blur to exaggerate edges blurred = cv2.GaussianBlur(img, (5,5), 0) #apply adaptive gaussian threshold and binarize picture thresh = cv2.adaptiveThreshold(blurred,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,33,2) #denoise to remove small artifacts denoised = cv2.fastNlMeansDenoising(thresh, None, 75, 45, 20) #apply canny edge detection from processed image edges = auto_canny(denoised, 0.33) #convert edges to contours img2, cnts, hier = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) all_curv = [] high_curv = 0 #record curvature for all contours for each in cnts: all_curv.extend(getCurvature(each[:,0,:],1)) #count number of high curvature points
def denoise(image):
    # searchWindowSize should be odd, so use 21 rather than 20
    res_im = cv2.fastNlMeansDenoising(image, None, 6, 7, 21)
    return res_im
def deskew(im, save_directory, direct, max_skew=10):
    # Relies on module-level names from the surrounding script:
    # cropped_jpeg_list, pg_count and args.
    if direct == "Y":
        height, width = im.shape[:2]
        print(height)
        print(width)
        # Create a grayscale image and denoise it
        # (the original tested an undefined `channels`; checking the
        # number of image axes is the likely intent)
        if len(im.shape) == 3:
            im_gs = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            im_gs = cv2.fastNlMeansDenoising(im_gs, h=3)
        else:
            im_gs = cv2.fastNlMeansDenoising(im, h=3)
        # print("De-noise ok.")
        # Create an inverted B&W copy using Otsu (automatic) thresholding
        im_bw = cv2.threshold(im_gs, 0, 255,
                              cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        # print("Otsu ok.")
        # Detect lines in this image. Parameters here mostly arrived at by
        # trial and error. If the initial threshold is too high, settle for
        # a lower threshold value.
        try:
            lines = cv2.HoughLinesP(im_bw, 1, np.pi / 180, 200,
                                    minLineLength=width / 12,
                                    maxLineGap=width / 150)
            # Collect the angles of these lines (in radians)
            angles = []
            for line in lines:
                x1, y1, x2, y2 = line[0]
                geom = np.arctan2(y2 - y1, x2 - x1)
                print(np.rad2deg(geom))
                angles.append(geom)
        except TypeError:
            # HoughLinesP returns None when nothing is found; retry with a
            # lower threshold. (The original used a bare except here.)
            lines = cv2.HoughLinesP(im_bw, 1, np.pi / 180, 150,
                                    minLineLength=width / 12,
                                    maxLineGap=width / 150)
            angles = []
            for line in lines:
                x1, y1, x2, y2 = line[0]
                geom = np.arctan2(y2 - y1, x2 - x1)
                print(np.rad2deg(geom))
                angles.append(geom)
        angles = [angle for angle in angles
                  if abs(angle) < np.deg2rad(max_skew)]
        if len(angles) < 5:
            # Insufficient data to deskew
            print("Insufficient data to deskew. "
                  "Cropped image might already be straight.")
            cv2.imwrite(img=im,
                        filename=save_directory + cropped_jpeg_list[pg_count])
            # im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            # im_pil = Image.fromarray(im)
            # im_pil.save(save_directory + cropped_jpeg_list[pg_count])
            print("Cropped image saved.")
            return im
        else:
            # Average the angles to a degree offset
            angle_deg = np.rad2deg(np.median(angles))
            # Rotate the image by the residual offset
            M = cv2.getRotationMatrix2D((width / 2, height / 2), angle_deg, 1)
            im = cv2.warpAffine(im, M, (width, height),
                                borderMode=cv2.BORDER_REPLICATE)
            # Plot if a full run; always save the deskewed image
            if args.type == "full":
                plt.subplot(111), plt.imshow(im)
                plt.title('Deskewed Image'), plt.xticks([]), plt.yticks([])
                plt.show()
            cropped_jpeg = cropped_jpeg_list[pg_count]
            cv2.imwrite(img=im,
                        filename=save_directory + cropped_jpeg[:-5] + "_rotated.jpeg")
            print("Only de-skewed cropped image saved.")
            return im
    else:
        height, width = im.shape[:2]
        print(height)
        print(width)
        # Create a grayscale image and denoise it
        im_gs = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        im_gs = cv2.fastNlMeansDenoising(im_gs, h=3)
        # Create an inverted B&W copy using Otsu (automatic) thresholding
        im_bw = cv2.threshold(im_gs, 0, 255,
                              cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        # Detect lines in this image. Parameters here mostly arrived at by
        # trial and error. If the initial threshold is too high, settle for
        # a lower threshold value.
        try:
            lines = cv2.HoughLinesP(im_bw, 1, np.pi / 180, 200,
                                    minLineLength=width / 12,
                                    maxLineGap=width / 150)
            # Collect the angles of these lines (in radians)
            angles = []
            for line in lines:
                x1, y1, x2, y2 = line[0]
                geom = np.arctan2(y2 - y1, x2 - x1)
                print(np.rad2deg(geom))
                angles.append(geom)
        except TypeError:
            lines = cv2.HoughLinesP(im_bw, 1, np.pi / 180, 150,
                                    minLineLength=width / 12,
                                    maxLineGap=width / 150)
            angles = []
            for line in lines:
                x1, y1, x2, y2 = line[0]
                geom = np.arctan2(y2 - y1, x2 - x1)
                print(np.rad2deg(geom))
                angles.append(geom)
        except Exception:
            # The original used a bare except with a message broken across
            # two source lines; joined into a single string here.
            print("TypeError encountered with HoughLines. "
                  "Check cropped image output. Only cropped image saved.")
            return
        angles = [angle for angle in angles
                  if abs(angle) < np.deg2rad(max_skew)]
        if len(angles) < 5:
            # Insufficient data to deskew
            print("Insufficient data to deskew. "
                  "Cropped image might already be straight.")
            return im
        else:
            # Average the angles to a degree offset
            angle_deg = np.rad2deg(np.median(angles))
            # Rotate the image by the residual offset
            M = cv2.getRotationMatrix2D((width / 2, height / 2), angle_deg, 1)
            im = cv2.warpAffine(im, M, (width, height),
                                borderMode=cv2.BORDER_REPLICATE)
            # Plot if a full run; always save the deskewed image
            if args.type == "full":
                plt.subplot(111), plt.imshow(im)
                plt.title('Deskewed Image'), plt.xticks([]), plt.yticks([])
                plt.show()
            cropped_jpeg = cropped_jpeg_list[pg_count]
            cv2.imwrite(img=im,
                        filename=save_directory + cropped_jpeg[:-5] + "_rotated.jpeg")
            print("Rotated cropped image saved")
            return im
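deskew() above reaches outside its own scope for cropped_jpeg_list, pg_count and args. A minimal, hypothetical driver sketching the module-level setup it assumes (the argument name --type and the file names are illustrative, not from the original script):

import argparse
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Hypothetical stand-ins for the globals deskew() expects.
parser = argparse.ArgumentParser()
parser.add_argument("--type", default="full")
args = parser.parse_args()

cropped_jpeg_list = ["page1.jpeg"]  # assumed list of cropped page files
pg_count = 0

im = cv2.imread("page1.jpeg")
deskewed = deskew(im, "output/", direct="Y")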
import cv2
import numpy as np
from skimage import io
from skimage.morphology import skeletonize
from skimage.util import invert
from skimage import img_as_ubyte
# (skeletonize, invert, img_as_ubyte and io are imported but never used
# below; see the sketch after this snippet.)

img = cv2.imread('MH2.jpeg', 0)
# Note: OpenCV documents odd template window sizes; 10 is even here.
dst = cv2.fastNlMeansDenoising(img, None, 10, 10, 21)

# histogram equalization
equ = cv2.equalizeHist(dst)
cv2.imwrite('eq.png', equ)

# image gradients
laplacian = cv2.Laplacian(dst, cv2.CV_64F)
sobelx = cv2.Sobel(dst, cv2.CV_64F, 1, 0, ksize=5)
sobely = cv2.Sobel(dst, cv2.CV_64F, 0, 1, ksize=5)
cv2.imwrite('laplacian.png', laplacian)
cv2.imwrite('sobelx.png', sobelx)
cv2.imwrite('sobely.png', sobely)

# erosion
kernel = np.ones((5, 5), np.uint8)
erosion = cv2.erode(dst, kernel, iterations=1)
cv2.imwrite('erosion.png', erosion)

# dilation
dilation = cv2.dilate(dst, kernel, iterations=1)
cv2.imwrite('dilation.png', dilation)
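A minimal sketch of how the unused skimage imports would plausibly extend the script, assuming the dilated image is binarized first (this continuation is an assumption, not part of the original):

# Hypothetical continuation: binarize, then skeletonize the shapes.
ret, bw = cv2.threshold(dilation, 127, 255, cv2.THRESH_BINARY)
# invert(bw) would flip foreground/background if the features are dark on light.
skeleton = skeletonize(bw > 0)  # skeletonize expects a boolean image
io.imsave('skeleton.png', img_as_ubyte(skeleton))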
def preprocess(image):
    # Positional arguments are (src, dst, h, templateWindowSize,
    # searchWindowSize): h=10, template window 10, search window 5.
    # OpenCV documents odd window sizes, and the search window is normally
    # the larger of the two (defaults 7 and 21), so these values look suspect.
    denoised = cv2.fastNlMeansDenoising(image, None, 10, 10, 5)
    preprocessed = denoised
    return preprocessed
# The opening of NLmeansfilter (padding setup and the loops over the
# search-window offsets i, j) is missing from this snippet; the slice
# below is reconstructed from the visible tail of the assignment.
            I2_ = I2[padLength + i - f:padLength + i + f + height,
                     padLength + j - f:padLength + j + f + width]
            # Superseded plain-Euclidean weight from the source, kept for
            # reference (note it also lacks the minus sign):
            # w = np.exp((I2_ - I_)**2 / h)[f:f + height, f:f + width]
            # A border of f rows/columns on each side is left unprocessed,
            # so e.g. a 516x516 padded patch maps back to 512x512.
            # Gaussian-weighted Euclidean distance:
            w = np.exp(-cv2.filter2D((I2_ - I_)**2, -1, kernel) /
                       h)[f:f + height, f:f + width]
            # normalization factor
            nx += w
            # w(x,y) * v(y)
            average += (w * I2_[f:f + height, f:f + width])
    return average / nx


if __name__ == '__main__':
    I = cv2.imread(r'E:\2020-2021-2\CV\NL_Means\lena.png')
    # cv2.imshow(r'E:\2020-2021-2\CV\NL_Means\lena.png', I)
    # cv2.imwrite(r'E:\2020-2021-2\CV\NL_Means\gray.png', I)
    # Add sigma times a random matrix of the image's shape to every pixel
    # to produce the noisy image; *I.shape unpacks the shape tuple.
    sigma = 20.0
    I1 = double2uint8(I + np.random.randn(*I.shape) * sigma)
    print('Noisy image PSNR: {}'.format(psnr(I, I1)))
    cv2.imwrite(r'E:\2020-2021-2\CV\NL_Means\Noise.png', I1)
    R1 = cv2.medianBlur(I1, 5)
    print('Median filter PSNR', psnr(I, R1))
    R2 = cv2.fastNlMeansDenoising(I1, None, sigma, 5, 11)
    print("OpenCV NLM PSNR", psnr(I, R2))
    cv2.imwrite(r'E:\2020-2021-2\CV\NL_Means\NLM1.png', R2)
    # np.float has been removed from NumPy; use the builtin float dtype.
    R3 = double2uint8(NLmeansfilter(I1.astype(float), sigma, 5, 11))
    print('NLM PSNR', psnr(I, R3))
    cv2.imwrite(r'E:\2020-2021-2\CV\NL_Means\NLM2.png', R3)
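The fragment above calls double2uint8, psnr and a patch kernel that are defined in the missing part of the file. A plausible reconstruction of those helpers, matching how they are used (the names are kept from the source; the bodies are assumptions):

import cv2
import numpy as np

def double2uint8(I, ratio=1.0):
    # Assumed helper: round and clip a float image back to 8-bit range.
    return np.clip(np.round(I * ratio), 0, 255).astype(np.uint8)

def psnr(A, B):
    # Assumed helper: peak signal-to-noise ratio between two 8-bit images.
    mse = np.mean((A.astype(np.float64) - B.astype(np.float64)) ** 2)
    return 10 * np.log10(255.0 ** 2 / mse)

def make_kernel(f):
    # Assumed helper: the distance-weighted patch kernel that the fragment
    # feeds to cv2.filter2D; weights decay away from the patch center.
    kernel = np.zeros((2 * f + 1, 2 * f + 1))
    for d in range(1, f + 1):
        kernel[f - d:f + d + 1, f - d:f + d + 1] += 1.0 / ((2 * d + 1) ** 2)
    return kernel / kernel.sum()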
partimg = image_list[:99]
ti = time()
# working with images to present them in the right form for the classifier
for img in partimg:
    im = cv2.imread(img)
    im = cv2.medianBlur(im, 5)
    im_bin = 5  # unused in this snippet
    im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    tih = time()
    im_gray = cv2.fastNlMeansDenoising(im_gray, None, 7)
    im_gray = cv2.GaussianBlur(im_gray, (5, 5), 0)
    # The simple threshold below is immediately overwritten by the
    # adaptive one.
    ret, im_th = cv2.threshold(im_gray, 127, 255, cv2.THRESH_BINARY)
    im_th = cv2.adaptiveThreshold(im_gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                  cv2.THRESH_BINARY, 11, 2)
    # th1/th2 duplicate the two results above; th3 uses a Gaussian-weighted
    # neighbourhood instead of a plain mean.
    ret, th1 = cv2.threshold(im_gray, 127, 255, cv2.THRESH_BINARY)
    th2 = cv2.adaptiveThreshold(im_gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                cv2.THRESH_BINARY, 11, 2)
    th3 = cv2.adaptiveThreshold(im_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 11, 2)
import cv2
import Preprocessing_img as pre
import Similarity_img as sim

# img_org = cv2.imread('test/eeeeeeeeeeeeeeeeee.jpg')
img_org = cv2.imread('D:/test_darknet/FLASK-API-Y4/forV4/test/s32.jpg')
print(img_org.shape)
img_org = pre.resize_with_max(img_org, )
print(img_org.shape)

img_gamma = pre.gamma_correction(img_org, 0.7)
img_gray = cv2.cvtColor(img_gamma, cv2.COLOR_BGR2GRAY)
print(img_gray.shape)

img_denoise = cv2.fastNlMeansDenoising(img_gray, None, 8, 7, 21)
# img_blur = cv2.medianBlur(img_gray, 5)
# img_blur = cv2.GaussianBlur(img_gray, (3, 3), cv2.BORDER_DEFAULT)
th3 = cv2.adaptiveThreshold(img_denoise, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                            cv2.THRESH_BINARY, 11, 2)

cv2.imshow('img_org', pre.resize_perten(img_org, 7))
cv2.imshow('img_grey', pre.resize_perten(img_gray, 7))
cv2.imshow('filtered', pre.resize_perten(th3, 7))
print(sim.compare_img(img_org, th3))
cv2.waitKey(0)
cv2.destroyAllWindows()
(h, w) = image.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(image, M, (w, h),
                         flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
cv2.putText(rotated, "Angle: {:.2f} degrees".format(angle), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.imshow("Rotated", rotated)

# Remove salt-and-pepper noise. (NL-means targets Gaussian-like noise;
# a median filter usually handles true salt-and-pepper better.)
saltpep = cv2.fastNlMeansDenoising(gray, None, 9, 13)
# original_resized = cv2.resize(saltpep, (0,0), fx=.2, fy=.2)
cv2.imshow('Grayscale', saltpep)
cv2.waitKey(0)

# blur
blured = cv2.blur(gray, (3, 3))
# original_resized = cv2.resize(blured, (0,0), fx=.2, fy=.2)
cv2.imshow('blured', blured)
cv2.waitKey(0)

# binary
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
# original_resized = cv2.resize(thresh, (0,0), fx=.2, fy=.2)
cv2.imshow('Threshold', thresh)
cv2.waitKey(0)
def image_denoising(self, img):
    # The original called cv2.fastNlMeansDenoising(img, 10, 30, 2, 100),
    # which binds 10 to the dst argument and uses even window sizes, so it
    # would fail at runtime. Keyword arguments with h=10 and the
    # documented odd default windows are assumed here.
    denoised_image = cv2.fastNlMeansDenoising(img, None, h=10,
                                              templateWindowSize=7,
                                              searchWindowSize=21)
    kernel = np.ones((2, 2), np.uint8)
    denoised_image = cv2.morphologyEx(denoised_image, cv2.MORPH_OPEN, kernel)
    return denoised_image
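A self-contained sketch of the same denoise-then-open pipeline on a synthetic noisy image, to show what the MORPH_OPEN step removes (the synthetic input and file name are illustrative):

import cv2
import numpy as np

# Synthetic test image: a white blob plus 2% impulse noise.
img = np.zeros((100, 100), np.uint8)
cv2.rectangle(img, (30, 40), (70, 60), 255, -1)
img[np.random.rand(100, 100) < 0.02] = 255

den = cv2.fastNlMeansDenoising(img, None, h=10,
                               templateWindowSize=7, searchWindowSize=21)
# Opening (erode then dilate) with a 2x2 kernel removes remaining specks
# smaller than the kernel while preserving the blob.
kernel = np.ones((2, 2), np.uint8)
opened = cv2.morphologyEx(den, cv2.MORPH_OPEN, kernel)
cv2.imwrite('opened.png', opened)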
# img_adapteq3 = img - np.min(img)
img_adapteq3 = exposure.equalize_adapthist(log_img, clip_limit=0.5,
                                           kernel_size=(4, 4))
img_adapteq4 = exposure.equalize_adapthist(log_img, clip_limit=0.01,
                                           kernel_size=(2, 2), nbins=12)

# denois_img = img - np.min(img)
denois_img = img_adapteq - np.min(img_adapteq)
denois_img /= np.max(denois_img)
denois_img = np.uint8(denois_img * 255)

# richardson_lucy also requires a point-spread function as its second
# argument; the psf is missing in the source.
tv_coins = restoration.richardson_lucy(log_img,)

better_contrast = exposure.rescale_intensity(denois_img)
dst = cv2.fastNlMeansDenoising(better_contrast)
dst2 = cv2.fastNlMeansDenoising(denois_img)
im_med = median_filter(img, 5)

# sift = cv2.SIFT_create()
print(hog(img))
# print(des.shape)

plt.subplot(131), plt.imshow(real)
plt.subplot(132), plt.imshow(tv_coins)
plt.subplot(133), plt.imshow(img_adapteq4)
plt.show()
import cv2 as cv
from matplotlib import pyplot as plt

img = cv.imread('scan1.png')
dst = cv.fastNlMeansDenoising(img, None, 10, 7, 21)

# Note: imread returns BGR, so plt.imshow displays swapped colours unless
# the images are converted with cv.cvtColor(..., cv.COLOR_BGR2RGB) first.
plt.subplot(121), plt.imshow(img)
plt.subplot(122), plt.imshow(dst)
plt.show()

cv.imshow('Output Image', dst)
cv.imshow('Original Image', img)
cv.waitKey(0)
cv.destroyAllWindows()
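For a colour scan like the one above, OpenCV also provides fastNlMeansDenoisingColored, which filters luminance and colour components separately; a minimal sketch using the same assumed 'scan1.png' input:

import cv2 as cv
from matplotlib import pyplot as plt

img = cv.imread('scan1.png')
# h controls luminance filtering strength, hColor the colour components.
dst = cv.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)

# Convert BGR to RGB so matplotlib shows the true colours.
plt.subplot(121), plt.imshow(cv.cvtColor(img, cv.COLOR_BGR2RGB))
plt.subplot(122), plt.imshow(cv.cvtColor(dst, cv.COLOR_BGR2RGB))
plt.show()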