def sift(img1, img2, k):
    img1 = preprocess(img1)
    img2 = preprocess(img2)

    img1 = cv2.fastNlMeansDenoisingColored(img1, h=10)
    img2 = cv2.fastNlMeansDenoisingColored(img2, h=10)

    img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    sift = cv2.SIFT_create()  # cv2.SIFT() is the legacy OpenCV 2.x constructor

    cornor1 = detectCornor(img1)
    cornor2 = detectCornor(img2)
    kp1, des1 = sift.compute(img1_gray, cornor1)
    kp2, des2 = sift.compute(img2_gray, cornor2)  # was img1_gray; descriptors for the second image must come from img2
    #kp1, des1 = sift.detectAndCompute(img1_gray, None)
    #kp2, des2 = sift.detectAndCompute(img2_gray, None)

    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    good = []
    for m,n in matches:
        if m.distance < k*n.distance:
            good.append([m])

    ratio = float(len(good))/float(len(matches))
    ratios.append(ratio)  # 'ratios' is assumed to be a module-level list

    return ratio
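A minimal usage sketch for the sift() helper above, assuming preprocess and detectCornor are defined alongside it and that ratios is a module-level list; the image paths are placeholders and 0.75 is the customary Lowe ratio threshold:

import cv2

ratios = []  # sift() appends each good-match ratio here

img_a = cv2.imread("scene_a.jpg")  # hypothetical inputs
img_b = cv2.imread("scene_b.jpg")
similarity = sift(img_a, img_b, k=0.75)
print("good-match ratio:", similarity)
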
def imageDenoise(): #Find all the files in the working directory -- in this case looking for .tif extension only
    for dirname, dirnames, filenames in os.walk(directory):
        for file in filenames:
            if '.tif' in file:
                
                filePath =  os.path.abspath(os.path.join(dirname, file))
            
                outputDirectory = output + file
                
                workingMessage = 'Successfully read image file, denoising ...'
                
                completeMessage = outputDirectory + ' written'
            
                img = cv2.imread(filePath)
                
                if img is not None:
                    #Print a message once the image matrix is loaded
                    print(workingMessage)
                    #track how long it takes to denoise
                    start_time = time.time()
                    #Input matrix, output matrix, strength for luminance component, strength for chrominance component, template patch in pixels, window size for weighted average of pixels
                    dst = cv2.fastNlMeansDenoisingColored(img,None,3,10,7,21)
                    #write the denoised file and report on it
                    cv2.imwrite(outputDirectory, dst)

                    print(completeMessage)

                    print("%s seconds" % (time.time() - start_time))

    return outputDirectory
    
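A short driver sketch for imageDenoise() above; 'directory' and 'output' are module-level settings the function reads, and the paths here are placeholders:

import os
import time
import cv2

directory = '/data/scans'    # hypothetical tree searched for .tif files
output = '/data/denoised/'   # hypothetical prefix; output + filename must form a valid path

last_written = imageDenoise()
print(last_written + ' was written last')
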
Example #3
def auto_fix(im, noise_removal=False):
    im = auto_crop_hsv(im)
    im = auto_crop_hsv(im, crop_white=True)
    min_face_size = (int(im.shape[0]*0.05), int(im.shape[1]*0.05))
    faces = face_detect.detect_faces(im, min_neighbors=5, min_size=min_face_size, max_size=None)
    # keep faces above the fold
    faces = [face for face in faces if face[1] < im.shape[0]*0.4]
    # find dresses
    dresses = [dress_box2(face, im.shape[:2]) for face in faces]
    # if len(dresses) > 0:
    #     print('grabcut!')
    #     im = grabCut(im, bounding_box(dresses+faces))
    if len(faces) > 0:
        im = crop_to_human(im, faces, dresses)
    # limit max size (after cropping)
    im = fit_in(im, 1800, 1200)
    if noise_removal:
        im = cv2.fastNlMeansDenoisingColored(im)
    im = face_detect.skin_detect2(im, marks=True)
    # im = simplest_color_balance(im)
    # print('retinex starting...')
    # im = colorcorrect.algorithm.retinex_with_adjust(im)
    # print('retinex complete.')
    #face_detect.draw_boxes(im, faces)
    #face_detect.draw_boxes(im, dresses, (255, 0, 0))
    # face_detect.draw_boxes(im, people, (255, 0, 0))
    return im, faces
Example #4
def format_img(img, points, size):
    img = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)

    new_height, new_width = size

    # Resize
    rect = cv2.boundingRect(np.array([points], np.int32))
    scale = scale_amount(rect, size)
    cur_height, cur_width = img.shape[:2]
    new_scaled_height = int(scale * cur_height)
    new_scaled_width = int(scale * cur_width)
    img = cv2.resize(img, (new_scaled_width, new_scaled_height))

    # Align rect to center
    cur_height, cur_width = img.shape[:2]
    roi_x, roi_y, border_x, border_y = rect_coords(rect, size, scale)
    roi_h = np.min([new_height - border_y, cur_height - roi_y])
    roi_w = np.min([new_width - border_x, cur_width - roi_x])

    # Crop
    crop = np.zeros((new_height, new_width, 3), img.dtype)
    crop[border_y:border_y + roi_h, border_x:border_x + roi_w] = (
        img[roi_y:roi_y + roi_h, roi_x:roi_x + roi_w]
    )

    # Scale and align points
    points[:, 0] = (points[:, 0] * scale) + (border_x - roi_x)
    points[:, 1] = (points[:, 1] * scale) + (border_y - roi_y)

    return crop, points
def procesamiento_imagen():    
    ## Convert to grayscale ('rostro' is an image path defined elsewhere)
    img = Image.open(rostro).convert('LA')
    img.save('greyscale.png')

    ## Resize
    foo = Image.open("greyscale.png")
    foo = foo.resize((256, 256), Image.ANTIALIAS)
    foo.save("greyscale.png", optimize=True, quality=95)

    ## Remove noise
    img = cv2.imread('greyscale.png')
    dst = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)

    ## Canny detector, applied to the denoised image
    gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 256, 256)

    plt.subplot(121), plt.imshow(gray, cmap='gray')
    plt.title('Original Image'), plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(edges, cmap='gray')
    plt.title('Edge Image'), plt.xticks([]), plt.yticks([])

    plt.show()
Example #6
def denoise(image, FM=True):
    if FM:
        return cv2.fastNlMeansDenoisingColored(image, templateWindowSize=7,
                                                      searchWindowSize=21,
                                                      h=3,
                                                      hColor=10)

    return cv2.bilateralFilter(image, 5, 50, 50)  # 'self.orig_hsv' was out of scope here; filter the input image
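A quick comparison sketch for the denoise() helper above; the file name is a placeholder:

import cv2

img = cv2.imread("sample.jpg")       # hypothetical input
nlm = denoise(img, FM=True)          # Non-local Means branch
bilateral = denoise(img, FM=False)   # bilateral-filter branch
cv2.imwrite("nlm.jpg", nlm)
cv2.imwrite("bilateral.jpg", bilateral)
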
Example #7
    def findLabelText(self):
        for i in range(0,2):
            flag=0
            if i==0:
                try:                  
                    img = Image("images/temp_x2.png",0)
                except  IOError:
                    continue    
            else:
                try:
                    img = Image("images/temp_y1.png",0)
                except IOError:
                    continue
            #rotate image for y axis label
            if img.width<img.height:
                img = img.rotate(-90,fixed = False)
                flag=1
            if img.height<50 :
                #print "ok"
                img = img.resize(img.width*2,img.height*4)
                img_inv = img.invert()
                img_inv.scale(100,100)
                img_bin = img_inv.binarize()
                img_bin = img_bin.dilate(2)
                img_bin = img_bin.erode(1)

            else:    
                img_inv = img.invert()
                img_inv.scale(100,100)
                img_bin = img_inv.binarize()


            
            
            img_bin.save("images/temp.png")

            img = cv2.imread("images/temp.png") 
            dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21) 
            

            try:
                label = image_to_string(IMAGE.fromarray(dst),lang='eng')    #OCR on image
                
                if i==0:
                    self.xlabel = label
                    self.isxlabel=True
                    print(label)
                else:
                    self.ylabel = label
                    self.isylabel=True    
                    print(label)

            except:
                if i==0:
                    self.xlabel = None
                    
                else:
                    self.ylabel = None    
 def imageSmooth(self):
   if self.choice == "single":
     for i in self.imageList:
       self.image = cv2.imread(i)
       self.newx,self.newy = self.image.shape[1]//2,self.image.shape[0]  # integer division: cv2.resize needs int dimensions
       self.newimage = cv2.resize(self.image,(self.newx,self.newy))
       self.dst = cv2.fastNlMeansDenoisingColored(self.newimage,None,5,5,7,21) 
       self.vis = np.concatenate((self.newimage, self.dst), axis=1)
       self.imageShow()
Example #9
    def process_image_roi(self, img, roi_hist, track_window, color_lower_hue, color_lower_sat, color_lower_val,
                          color_upper_hue,
                          color_upper_sat,
                          color_upper_val, dilation):

        center_points = []  # Center Points

        # Noise Reduction
        if self.noiseReduction_on:
            img = cv2.fastNlMeansDenoisingColored(img, h=self.deNoise_val)

        # Generate Color Mask
        color_binary = self.map_binary_color(img, color_lower_hue, color_lower_sat, color_lower_val,
                                             color_upper_hue,
                                             color_upper_sat, color_upper_val, dilation)

        ## pre-processing image
        # blur image
        self.t = self.t+1



        # ########
        masked_img = cv2.bitwise_and(img, img, mask=color_binary)
        # #cv2.imshow('masked_img',masked_img)
        #
        # dst = cv2.addWeighted(masked_img,0.2,img_fg,0.8,0)
        # #cv2.imshow('dst',dst)


        # Back Projection
        BP = cv2.calcBackProject([masked_img], [0], roi_hist, [0, 180], 1)
        # apply meanshift to get the new location
        track_box, track_window = cv2.CamShift(BP, track_window, self.term_crit)  # avoid shadowing the builtin 'object'

        x = int(track_box[0][0])
        y = int(track_box[0][1])
        center_points.append((x, y))


        # add judge condition to avoid sudden change

        # if self.t>1:
        #     # movement in certain area
        #     if abs(x-self.old_center[0][0])<20 or abs(y-self.old_center[0][1])<20:
        #         center_points.append((x, y))
        #         self.old_center  = center_points
        #     else:
        #         center_points = self.old_center
        #     #print "center_points: ", center_points
        #     #print "center_points xy:", x,y
        # else:
        #     #print "center_points11 xy:", x,y
        #     center_points.append((x, y))
        #     self.old_center  = center_points

        return center_points, track_window
    def getData(self, vals):
        
        vals = cv2.fastNlMeansDenoisingColored(vals,None,10,10,7,21)
        bw = cv2.cvtColor(vals, cv2.COLOR_RGB2GRAY)  # computed but unused in this variant
        vals = cv2.medianBlur(vals, 13)

        c = self.getColorDist(vals, show=False)

        x = c

        return x
Example #11
    def findLabelText(self):
        for i in range(0,2):
            flag=0
            if i==0:                  
                img = Image("images/temp_x2.jpg",0)
            else:
                img = Image("images/temp_y1.jpg",0)
                        
            #print str(img.width) +"  " + str(img.height)
            if img.width<img.height:
                img = img.rotate(-90,fixed = False)
                flag=1
            #if img.width/img.height<3 : #crop image 

            
            #print str(img.width) +"  " + str(img.height)
            #if img.width<400 :
                #img = img.resize(img.width*5,img.height*5) 

            if img.height<50 :
                #print "ok"
                img = img.resize(img.width*2,img.height*4)
                img_inv = img.invert()
                img_inv.scale(100,100)
                img_bin = img_inv.binarize()
                img_bin = img_bin.dilate(2)
                img_bin = img_bin.erode(1)



            else:    
                img_inv = img.invert()
                img_inv.scale(100,100)
                img_bin = img_inv.binarize()


            
            #elif flag!=1:
            img_bin.save("images/temp.jpg")

            img = cv2.imread("images/temp.jpg") 
            dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21) 
            

            label = image_to_string(IMAGE.fromarray(dst),lang='eng')
            
            if flag==1:
                self.ylabel = label
                print(label)
            else:
                self.xlabel = label    
                print(label)
Example #12
def get_contours(img=None):
    # de-noise, greyscale, blur, threshold the image, and finally get the contours of the shapes
    if img is None:
        img = cv2.imread("image.png", 1)  # load lazily; a cv2.imread default argument runs at definition time
    img = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_gray = cv2.medianBlur(img_gray, 5)
    ret, thresh = cv2.threshold(img_gray, 99, 255, cv2.THRESH_BINARY_INV)
    contour, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV >= 4 returns (contours, hierarchy)
    # filter out any child contours
    result = []
    for i in range(len(contour)):
        if hierarchy[0][i][3] == -1:
            result.append(contour[i])
    return result
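A sketch of how the top-level contours returned above might be drawn back onto the source image; image.png matches the default argument:

import cv2

img = cv2.imread("image.png", 1)
contours = get_contours(img)
cv2.drawContours(img, contours, -1, (0, 255, 0), 2)  # all top-level contours in green
cv2.imwrite("contours.png", img)
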
Example #13
def fastNlMeansDenoisingColored(image,
                                h=3.0,
                                h_color=3.0,
                                template_window_size=7,
                                search_window_size=21):
    """The function performs denoising using the Non-local Means Denoising
     algorithm provided by the OpenCV2 library.

    :Parameters: image -- BGR input image; h -- filter strength for the
        luminance component; h_color -- filter strength for the color
        components; template_window_size, search_window_size -- odd patch
        and search-window sizes in pixels.
    :Returns: the denoised image as an integer numpy array.
    :Notes: a larger h removes more noise but also more detail.
    """
    return numpy.int_(cv2.fastNlMeansDenoisingColored(
        image, None, h, h_color, template_window_size, search_window_size))
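A usage sketch for the wrapper above; since it returns an integer numpy array rather than uint8, a cast is shown before writing:

import cv2
import numpy

img = cv2.imread("photo.jpg")  # hypothetical input
clean = fastNlMeansDenoisingColored(img, h=10.0, h_color=10.0)
cv2.imwrite("photo_denoised.jpg", clean.astype(numpy.uint8))
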
    def getData(self, vals):

        vals = cv2.fastNlMeansDenoisingColored(vals, None, 10, 10, 7, 21)
        bw = cv2.cvtColor(vals, cv2.COLOR_RGB2GRAY)
        vals = cv2.medianBlur(vals, 13)
        bw = cv2.adaptiveThreshold(bw, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 101, 0)

        s = self.getSideRatio(bw)
        r = self.getRatio(bw)
        u = self.subDivideAndCalc(bw)
        x = r
        x += s
        x += u
        return x
    def process(self, args):

        if (len(args[0].shape) == 2):
            self.result['img'] = cv2.fastNlMeansDenoising(args[0],
                                            self.f_strength.value,
                                            self.template_size.value*2+1,
                                            self.search_size.value*2+1)
        else:
            ts = self.template_size.value*2+1
            ss = self.search_size.value*2+1
            result = cv2.fastNlMeansDenoisingColored(src=args[0],
                                                     h=self.f_strength.value,
                                                     hColor=self.f_col.value,
                                                     templateWindowSize=ts,
                                                     searchWindowSize=ss)
            self.result['img'] = result
def upload():
    file = request.files['file']
    if file and allowed_file(file.filename):
        filename = file.filename
        file.save(os.path.join(senti.config['UPLOAD_FOLDER'], filename))
        img = cv2.imread('uploads/' + filename)

        if 'Denoise Image' in request.form['submit']:
            dst = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
            im = Image.fromarray(dst)
            im.save('uploads/d' + filename)
            return redirect(url_for('uploaded_file', filename="d" + filename))
        elif 'Detect Face' in request.form['submit']:
            faceCascade = cv2.CascadeClassifier('static/xml/haarcascade_frontalface_alt.xml')
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5,
                                                 minSize=(30, 30),
                                                 flags=cv2.CASCADE_SCALE_IMAGE)  # cv2.cv.CV_HAAR_SCALE_IMAGE was the OpenCV 2.x name
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            im = Image.fromarray(img)
            im.save('uploads/face' + filename)
            return redirect(url_for('uploaded_file', filename="face" + filename))
        elif 'Smoothen Image' in request.form['submit']:
            kernel = np.ones((5, 5), np.float32) / 25
            dst = cv2.filter2D(img, -1, kernel)
            im = Image.fromarray(dst)
            im.save('uploads/smooth' + filename)
            return redirect(url_for('uploaded_file', filename="smooth" + filename))
        elif 'Detect Blue Colour' in request.form['submit']:
            # Convert BGR to HSV
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            # define range of blue color in HSV
            lower_blue = np.array([110, 50, 50])
            upper_blue = np.array([130, 255, 255])
            # Threshold the HSV image to get only blue colors
            mask = cv2.inRange(hsv, lower_blue, upper_blue)
            # Bitwise-AND mask and original image
            res = cv2.bitwise_and(img, img, mask=mask)
            im = Image.fromarray(res)
            im.save('uploads/blue' + filename)
            return redirect(url_for('uploaded_file', filename="blue" + filename))
        elif 'Image Resize' in request.form['submit']:
            small = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)  # 'image' was undefined; the loaded 'img' is intended
            resized_image = cv2.resize(img, (100, 50))
            small = scipy.misc.imresize(img, 0.5)  # deprecated: removed in SciPy >= 1.3
            print(type(small))
            return ''
def upload():
    file = request.files['file']
    if file and allowed_file(file.filename):
        filename = file.filename
        file.save(os.path.join(imageProcessServer.config['UPLOAD_FOLDER'], filename))

        # denoise the upload and save the result with a 'd' prefix before redirecting to it
        img = cv2.imread('uploads/' + filename)
        dst = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
        im = Image.fromarray(dst)
        im.save('uploads/d' + filename)

        with grid_fs.new_file(filename=filename) as fp:
            fp.write(file)
            file_id = fp._id
        if grid_fs.find({"_id": file_id}) is not None:
            print(url_for('uploaded_file', filename="d" + filename))
            return redirect(url_for('uploaded_file', filename="d" + filename))
        else:
            return json.dumps({'status': 'Error occurred while saving file.'}), 500
Example #18
    def calcRoiHist(self, img, roi, color_lower_hue, color_lower_sat, color_lower_val, color_upper_hue,
                    color_upper_sat,
                    color_upper_val, collision_detect, dilation):

        # Noise Reduction
        if self.noiseReduction_on:
            img = cv2.fastNlMeansDenoisingColored(img, h=self.deNoise_val)

        roi_img = img[roi[0][1]:roi[1][1], roi[0][0]:roi[1][0]]
        track_window = (roi[0][0], roi[0][1], roi[1][0] - roi[0][0], roi[1][1] - roi[0][1])

        color_binary = self.map_binary_color(roi_img, color_lower_hue, color_lower_sat, color_lower_val,
                                             color_upper_hue,
                                             color_upper_sat, color_upper_val, dilation)

        roi_hist = cv2.calcHist([roi_img], [0], color_binary, [180], [0, 180])

        cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

        return roi_hist, track_window
def pre_process_image(image_path):
	"This function accepts path to an image as it's input and performs IP operations on it using OpenCV to preprocess it before performing OCR"
	
	rgb2gray(image_path)											#The RGB image will be changed into true grayscale, name will be changed to orig_gray.jpg
	gray_path = image_path.replace('.jpg', '_gray.jpg')				#path to the grayscale image 
	angle_rot = calc_angle(gray_path)								#calculate skew angle 
	print "Angle of rotation is: "
	print angle_rot
	
	if((angle_rot >= -1) and (angle_rot <= 1)):
		rotated_path = gray_path.replace('.jpg', '_rotated.jpg')
		os.rename(gray_path, rotated_path)										#Renaming the image as orig_rotated so that the code in OCR can work!

	else:																		#modulus(skew angle) is greater than 1 degree
		rotate_image(gray_path, angle_rot)								
		rotated_path = gray_path.replace('.jpg', '_rotated.jpg')
	
	img = cv2.imread(rotated_path)
	improved_img = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)    #Noise Removal
	
	improv_path =  rotated_path.replace('.jpg', '_improved.jpg')
	cv2.imwrite(improv_path, improved_img)
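A minimal call sketch for pre_process_image() above; the path is a placeholder, and rgb2gray, calc_angle, and rotate_image are assumed to be defined in the same module:

pre_process_image('scans/receipt.jpg')  # writes scans/receipt_gray_rotated_improved.jpg
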
    def testColorFilterVideo(self,c_target=np.uint8([[[153, 111, 98]]]),c_tolerance=30):
        # filter color in webcam video
        # (optional) parameters:
        #   c_target is the target color that we want to segment/detect
        #   c_tolerance is the tolerance of color range that we want to track (in H(SV) space)
        #   --> in effect, we track color in H-channel between c_target +/- c_tolerance
        cap = cv2.VideoCapture(0)

        flagScreenshotSaved = False

        while(1):
            # Take each frame
            _, frame = cap.read()
            # denoise
            frame = cv2.resize(frame,None,fx=0.3, fy=0.3, interpolation = cv2.INTER_LINEAR)
            frame = cv2.fastNlMeansDenoisingColored(frame,None,3,3,7,7)
            # Convert to HSV (note: cap.read() returns BGR frames; COLOR_RGB2HSV is kept
            # here because the target color below is converted the same way)
            hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
            # define the target color range in HSV
            hsv_targetcolor = cv2.cvtColor(c_target,cv2.COLOR_RGB2HSV)
            c_lower = np.array([np.maximum(hsv_targetcolor[0,0,0]-c_tolerance, 0), 50, 50])
            c_upper = np.array([np.minimum(hsv_targetcolor[0,0,0]+c_tolerance,179), 255, 255])
            # Threshold the HSV image to keep only the target color
            mask = cv2.inRange(hsv, c_lower, c_upper)
            # post-process mask
            #kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
            #opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
            # Bitwise-AND mask and original image
            res = cv2.bitwise_and(frame,frame, mask=mask)
            cv2.imshow('frame',frame)
            cv2.imshow('mask',mask)
            cv2.imshow('res',res)
            k = cv2.waitKey(5) & 0xFF
            if k == 27:
                break
        cap.release()
        cv2.destroyAllWindows()
Example #21
import numpy as np
import cv2
from matplotlib import pyplot as plt

video_capture = cv2.VideoCapture(0)

# Capture frame
ret, frame = video_capture.read()

dst = cv2.fastNlMeansDenoisingColored(frame,None,10,10,7,21)

gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # computed but unused in this snippet

# Display the resulting frame
cv2.imshow('Video', dst)

if cv2.waitKey(1) & 0xFF == ord('q'):
    exit(0)

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
def extract(imageName, face):
    im = cv2.imread(imageName)
    height, width = im.shape[:2]  # shape is (rows, cols), i.e. (height, width)
    print(width, height)
    # im = cv2.equalizeHist(im)
    # kernel = np.ones((5,5),np.float32)/25
    # dst = cv2.filter2D(im,-1,kernel)
    # im = cv2.GaussianBlur(im,(5,5),100)
    im = cv2.bilateralFilter(im, 9, 75, 75)
    # im = cv2.blur(im,(5,5))
    im = cv2.fastNlMeansDenoisingColored(im, None, 10, 10, 7, 24)

    # im = cv2.medianBlur(im,7)
    # cam = cv2.VideoCapture(0)
    # s, im = cam.read()

    position = {}
    # s, im = cam.read()
    hsv_img = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)  # HSV image

    # lower_white = np.array([70,70,190], dtype=np.uint8)
    # upper_white = np.array([150,130,250], dtype=np.uint8)
    lower_white = np.array([70, 20, 130], dtype=np.uint8)
    upper_white = np.array([180, 110, 255], dtype=np.uint8)

    frame_threshed1 = cv2.inRange(hsv_img, lower_white, upper_white)
    imgray1 = frame_threshed1
    cv2.imshow('white', frame_threshed1)
    ret, thresh1 = cv2.threshold(frame_threshed1, 127, 255, 0)

    contours1, hierarchy1 = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    areas = [cv2.contourArea(c) for c in contours1]
    for elem in range(len(areas)):
        areas[elem] = int(areas[elem])
        print(areas[elem])
    max_area = 0
    print('-' * 50)
    for elem in areas:
        if elem > max_area:
            max_area = elem
    # print max_index
    get = []
    # max_area = areas[max_index]
    for a in range(len(areas)):
        # print areas[a] - max_area
        if areas[a] - max_area in range(-1000, 1000) and areas[a] >= 1500:
            print(areas[a])
            get.append(contours1[a])
        else:
            pass
    # cnt=contours1[max_index]

    for elem in get:
        for t in elem:

            x, y, w, h = cv2.boundingRect(elem)
            # print x,
            # print y
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
            centroid_x = (x + x + w) // 2  # integer division keeps cv2 point coordinates int
            centroid_y = (y + y + h) // 2
            if centroid_x > 0 and centroid_x < 66:
                if centroid_y > 0 and centroid_y < 66:

                    position[face + '1'] = 'white'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '4'] = 'white'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '7'] = 'white'
            if centroid_x > 66 and centroid_x < 133:
                if centroid_y > 0 and centroid_y < 66:

                    position[face + '2'] = 'white'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '5'] = 'white'
                elif centroid_y > 133 and centroid_y < 200:
                    # position['white'].append(face+'8')
                    position[face + '8'] = 'white'
            if centroid_x > 133 and centroid_x < 200:
                if centroid_y > 0 and centroid_y < 66:
                    # position['white'].append(face+'3')
                    position[face + '3'] = 'white'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '6'] = 'white'
                elif centroid_y > 133 and centroid_y < 200:
                    # position['white'].append(face+'9')
                    position[face + '9'] = 'white'
            cv2.circle(im, (centroid_x, centroid_y), 2, (255, 0, 0), 2)
    """for cnt in contours1:
        x,y,w,h = cv2.boundingRect(cnt)
        #print x,
        #print y
        cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)"""

    # print '------------------------'

    # COLOR_MIN = np.array([15, 90, 130],np.uint8)		# HSV color code lower and upper bounds
    # COLOR_MAX = np.array([50, 160, 200],np.uint8)		# color yellow

    COLOR_MIN = np.array([15, 90, 130], np.uint8)  # HSV color code lower and upper bounds
    COLOR_MAX = np.array([60, 245, 245], np.uint8)  # color yellow

    frame_threshed = cv2.inRange(hsv_img, COLOR_MIN, COLOR_MAX)  # Thresholding image
    imgray = frame_threshed

    ret, thresh = cv2.threshold(frame_threshed, 127, 255, cv2.THRESH_BINARY)
    # thresh = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
    #            cv2.THRESH_BINARY,11,2)
    cv2.imshow('yellow', thresh)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    areas = [cv2.contourArea(c) for c in contours]
    for elem in range(len(areas)):
        areas[elem] = int(areas[elem])
        print(areas[elem])
    max_area = 0
    print('-' * 50)
    for elem in areas:
        if elem > max_area:
            max_area = elem
    # print max_index
    get = []
    # max_area = areas[max_index]
    for a in range(len(areas)):

        if areas[a] - max_area in range(-1000, 1000) and areas[a] >= 1500:
            print(areas[a])
            get.append(contours[a])


        else:
            pass
    # cnt=contours1[max_index]
    for elem in get:
        for t in elem:
            # print 'hey'
            x, y, w, h = cv2.boundingRect(elem)
            # print x,
            # print y
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
            centroid_x = (x + x + w) // 2
            centroid_y = (y + y + h) // 2
            cv2.circle(im, (centroid_x, centroid_y), 2, (255, 0, 0), 2)
            if centroid_x > 0 and centroid_x < 66:
                if centroid_y > 0 and centroid_y < 66:
                    position[face + '1'] = 'yellow'

                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '4'] = 'yellow'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '7'] = 'yellow'
            if centroid_x > 66 and centroid_x < 133:
                if centroid_y > 0 and centroid_y < 66:

                    position[face + '2'] = 'yellow'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '5'] = 'yellow'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '8'] = 'yellow'
            if centroid_x > 133 and centroid_x < 200:
                if centroid_y > 0 and centroid_y < 66:

                    position[face + '3'] = 'yellow'
                elif centroid_y > 66 and centroid_y < 133:
                    position[face + '6'] = 'yellow'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '9'] = 'yellow'

    print(type(contours))
    """for cnt in contours:
        x,y,w,h = cv2.boundingRect(cnt)
        #print x,
        #print y
        cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
        cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
        centroid_x = (x + x+w)/2
        centroid_y = (y + y+h)/2
        cv2.circle(im, (centroid_x, centroid_y), 2, (255,0,0), 2)"""

    # lower_blue = np.array([80,180,140], dtype=np.uint8)
    # upper_blue = np.array([140,245,205], dtype=np.uint8)

    lower_blue = np.array([80, 180, 190], dtype=np.uint8)
    upper_blue = np.array([120, 255, 255], dtype=np.uint8)

    frame_threshed3 = cv2.inRange(hsv_img, lower_blue, upper_blue)  # Thresholding image
    imgray3 = frame_threshed3
    ret, thresh3 = cv2.threshold(frame_threshed3, 127, 255, cv2.THRESH_TOZERO)  # flag 3 == THRESH_TOZERO

    cv2.imshow('blue', frame_threshed3)

    contours3, hierarchy3 = cv2.findContours(thresh3, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    areas = [cv2.contourArea(c) for c in contours3]
    for elem in range(len(areas)):
        areas[elem] = int(areas[elem])
    # print areas[elem]
    max_area = 0
    for elem in areas:
        if elem > max_area:
            max_area = elem
    # print max_index
    get = []
    print('-' * 50)
    for a in range(len(areas)):
        # print areas[a] - max_area
        if areas[a] - max_area in range(-1200, 1200) and areas[a] >= 1500:
            # print areas[a]
            get.append(contours3[a])
        else:
            pass
    # cnt=contours1[max_index]
    for elem in get:
        for t in elem:
            x, y, w, h = cv2.boundingRect(elem)
            # print x,
            # print y
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
            centroid_x = (x + x + w) // 2
            centroid_y = (y + y + h) // 2
            cv2.circle(im, (centroid_x, centroid_y), 2, (255, 0, 0), 2)
            if centroid_x > 0 and centroid_x < 66:
                if centroid_y > 0 and centroid_y < 66:
                    position[face + '1'] = 'blue'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '4'] = 'blue'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '7'] = 'blue'
            if centroid_x > 66 and centroid_x < 133:
                if centroid_y > 0 and centroid_y < 66:

                    position[face + '2'] = 'blue'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '5'] = 'blue'
                elif centroid_y > 133 and centroid_y < 200:
                    position[face + '8'] = 'blue'
            if centroid_x > 133 and centroid_x < 200:
                if centroid_y > 0 and centroid_y < 66:

                    position[face + '3'] = 'blue'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '6'] = 'blue'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '9'] = 'blue'

    """for cnt in contours3:
        x,y,w,h = cv2.boundingRect(cnt)
        #print x,
        #print y
        cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
        centroid_x = (x + x+w)/2
        centroid_y = (y + y+h)/2
        cv2.circle(im, (centroid_x, centroid_y), 2, (255,0,0), 2)"""

    # lower_orange = np.array([0, 130, 90],np.uint8)		# HSV color code lower and upper bounds
    # upper_orange = np.array([20, 210, 170],np.uint8)		# color orange
    lower_orange = np.array([5, 150, 150], np.uint8)  # HSV color code lower and upper bounds
    upper_orange = np.array([15, 235, 250], np.uint8)  # color orange

    frame_threshed2 = cv2.inRange(hsv_img, lower_orange, upper_orange)  # Thresholding image
    imgray2 = frame_threshed2
    ret, thresh2 = cv2.threshold(frame_threshed2, 127, 255, cv2.THRESH_TRUNC)  # flag 2 == THRESH_TRUNC
    cv2.imshow('Orange', frame_threshed2)
    contours2, hierarchy2 = cv2.findContours(thresh2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    areas = [cv2.contourArea(c) for c in contours2]
    for elem in range(len(areas)):
        areas[elem] = int(areas[elem])
    # print areas[elem]
    max_area = 0
    for elem in areas:
        if elem > max_area:
            max_area = elem
    # print max_index
    get = []
    print('-' * 50)
    for a in range(len(areas)):
        if areas[a] - max_area in range(-1000, 1000) and areas[a] >= 1500:

            # print areas[a]
            get.append(contours2[a])
        else:
            pass
    # cnt=contours1[max_index]
    for elem in get:
        for t in elem:
            x, y, w, h = cv2.boundingRect(elem)
            # print x,
            # print y
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
            centroid_x = (x + x + w) // 2
            centroid_y = (y + y + h) // 2
            cv2.circle(im, (centroid_x, centroid_y), 2, (255, 0, 0), 2)
            if centroid_x > 0 and centroid_x < 66:
                if centroid_y > 0 and centroid_y < 66:
                    position[face + '1'] = 'orange'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '4'] = 'orange'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '7'] = 'orange'
            if centroid_x > 66 and centroid_x < 133:
                if centroid_y > 0 and centroid_y < 66:

                    position[face + '2'] = 'orange'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '5'] = 'orange'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '8'] = 'orange'
            if centroid_x > 133 and centroid_x < 200:
                if centroid_y > 0 and centroid_y < 66:

                    position[face + '3'] = 'orange'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '6'] = 'orange'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '9'] = 'orange'

    """for cnt in contours2:
        x,y,w,h = cv2.boundingRect(cnt)
        #print x,
        #print y
        cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)"""

    # lower_green = np.array([60, 120, 80],np.uint8)		# HSV color code lower and upper bounds
    # upper_green = np.array([100, 170, 120],np.uint8)		# color orange
    lower_green = np.array([60, 110, 110], np.uint8)  # HSV color code lower and upper bounds
    upper_green = np.array([100, 220, 250], np.uint8)  # color green

    frame_threshed4 = cv2.inRange(hsv_img, lower_green, upper_green)  # Thresholding image
    imgray4 = frame_threshed4
    ret, thresh4 = cv2.threshold(frame_threshed4, 127, 255, 0)
    cv2.imshow('green', frame_threshed4)
    contours4, hierarchy4 = cv2.findContours(thresh4, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    areas = [cv2.contourArea(c) for c in contours4]
    for elem in range(len(areas)):
        areas[elem] = int(areas[elem])
    # print areas[elem]
    max_area = 0
    for elem in areas:
        if elem > max_area:
            max_area = elem
    # print max_index
    get = []
    # max_area = areas[max_index]
    for a in range(len(areas)):
        # print areas[a] - max_area
        if areas[a] - max_area in range(-1000, 1000) and areas[a] >= 1500:

            get.append(contours4[a])
        else:
            pass
    # cnt=contours1[max_index]
    for elem in get:
        for t in elem:
            x, y, w, h = cv2.boundingRect(elem)
            # print x,
            # print y
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
            centroid_x = (x + x + w) // 2
            centroid_y = (y + y + h) // 2
            cv2.circle(im, (centroid_x, centroid_y), 2, (255, 0, 0), 2)
            if centroid_x > 0 and centroid_x < 66:
                if centroid_y > 0 and centroid_y < 66:
                    position[face + '1'] = 'green'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '4'] = 'green'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '7'] = 'green'
            if centroid_x > 66 and centroid_x < 133:
                if centroid_y > 0 and centroid_y < 66:

                    position[face + '2'] = 'green'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '5'] = 'green'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '8'] = 'green'
            if centroid_x > 133 and centroid_x < 200:
                if centroid_y > 0 and centroid_y < 66:

                    position[face + '3'] = 'green'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '6'] = 'green'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '9'] = 'green'
    """for cnt in contours4:
        x,y,w,h = cv2.boundingRect(cnt)
        #print x,
        #print y
        cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
        centroid_x = (x + x+w)/2
        centroid_y = (y + y+h)/2
        cv2.circle(im, (centroid_x, centroid_y), 2, (255,0,0), 2)"""

    # lower_red = np.array([140, 120, 70],np.uint8)		# HSV color code lower and upper bounds
    # upper_red = np.array([210, 220, 170],np.uint8)		# color orange
    lower_red = np.array([120, 120, 140], np.uint8)  # HSV color code lower and upper bounds
    upper_red = np.array([180, 250, 200], np.uint8)  # color red

    frame_threshed5 = cv2.inRange(hsv_img, lower_red, upper_red)  # Thresholding image
    imgray5 = frame_threshed5
    ret, thresh5 = cv2.threshold(frame_threshed5, 127, 255, 0)
    cv2.imshow('red', frame_threshed5)
    contours5, hierarchy5 = cv2.findContours(thresh5, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    areas = [cv2.contourArea(c) for c in contours5]
    for elem in range(len(areas)):
        areas[elem] = int(areas[elem])
    # print areas[elem]
    max_area = 0
    for elem in areas:
        if elem > max_area:
            max_area = elem
    # print max_index
    get = []
    # max_area = areas[max_index]
    for a in range(len(areas)):
        if areas[a] - max_area in range(-1500, 1500) and areas[a] >= 1500:
            print(areas[a])

            get.append(contours5[a])
        else:
            pass
    # cnt=contours1[max_index]
    for elem in get:
        for t in elem:
            x, y, w, h = cv2.boundingRect(elem)
            # print x,
            # print y
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
            centroid_x = (x + x + w) // 2
            centroid_y = (y + y + h) // 2
            cv2.circle(im, (centroid_x, centroid_y), 2, (255, 0, 0), 2)
            if centroid_x > 0 and centroid_x < 66:
                if centroid_y > 0 and centroid_y < 66:
                    position[face + '1'] = 'red'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '4'] = 'red'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '7'] = 'red'
            if centroid_x > 66 and centroid_x < 133:
                if centroid_y > 0 and centroid_y < 66:

                    position[face + '2'] = 'red'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '5'] = 'red'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '8'] = 'red'
            if centroid_x > 133 and centroid_x < 200:
                if centroid_y > 0 and centroid_y < 66:

                    position[face + '3'] = 'red'
                elif centroid_y > 66 and centroid_y < 133:

                    position[face + '6'] = 'red'
                elif centroid_y > 133 and centroid_y < 200:

                    position[face + '9'] = 'red'

    """for cnt in contours5:
        x,y,w,h = cv2.boundingRect(cnt)
        #print x,
        #print y
        cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
        centroid_x = (x + x+w)/2
        centroid_y = (y + y+h)/2
        cv2.circle(im, (centroid_x, centroid_y), 2, (255,0,0), 2)"""

    cv2.line(im, (0, 66), (200, 66), (255, 0, 0), 2)
    cv2.line(im, (0, 133), (200, 133), (255, 0, 0), 2)
    cv2.line(im, (66, 0), (66, 200), (255, 0, 0), 2)
    cv2.line(im, (133, 0), (133, 200), (255, 0, 0), 2)
    # cv2.imshow("Show",im)
    cv2.imshow(imageName, im)
    cv2.imwrite(imageName + '_extracted.jpg', im)
    return position, im
Example #23
def codeocr(offset):
    global result    
    img=cv2.imread("img_source.png")
    dst=cv2.fastNlMeansDenoisingColored(img,None,30,30,7,21) # remove speckle noise
    ret,thresh=cv2.threshold(dst,127,255,cv2.THRESH_BINARY_INV)  # inverted black & white
    imgarr=cv2.cvtColor(thresh,cv2.COLOR_BGR2GRAY) # grayscale
#    plt.imshow(thresh)
#    plt.show()
    
#    print(imgarr.shape)
    height= imgarr.shape[0]  # height
    width = imgarr.shape[1]  # width
    
    start=offset   # tune after testing; offset is the margin kept on the left and right
    end=width-offset 
    
    # remove the interference (regression) curve
    imgarr[:,start:end]=0  # blank out everything between the left and right margins
    imagedata=np.where(imgarr==255) # find all the white pixels
    
    plt.scatter(imagedata[1],height-imagedata[0],s=100,color="red",label="Cluster")
    plt.ylim(0,height)
#    plt.show() # show the start and end points
    
    ploy_reg =PolynomialFeatures(degree=2) # build the features with a degree-2 polynomial
    X=np.array([imagedata[1]]) # get the X coordinates
    Y=height-imagedata[0]
    X_=ploy_reg.fit_transform(X.T) # transform the feature data
    regr=LinearRegression() # fit a linear regression
    regr.fit(X_,Y)
    
    X2=np.array([[i for i in range(0,width)]])
    X2_=ploy_reg.fit_transform(X2.T)
#    plt.plot(X2.T,regr.predict(X2_),color="blue",linewidth=30) # draw the regression curve
    
    grayimg=cv2.cvtColor(thresh,cv2.COLOR_BGR2GRAY) 
    for ele in np.column_stack([regr.predict(X2_).round(0),X2[0],] ):
        pos=height-int(ele[0])
        try:
            grayimg[pos-3:pos+3,int(ele[1])]=255-grayimg[pos-3:pos+3,int(ele[1])]
        except IndexError:
            pass
    
    cv2.imwrite("temp.png", grayimg)  #存檔             
    _, inv = cv2.threshold(grayimg, 150, 255, cv2.THRESH_BINARY_INV)  #轉為反相黑白
    for i in range(len(inv)):  #i為每一列
        for j in range(len(inv[i])):  #j為每一行
            if inv[i][j] == 255:  #顏色為白色
                count = 0 
                for k in range(-2, 3):
                    for l in range(-2, 3):
                        try:
                            if inv[i + k][j + l] == 255:  #若是白點就將count加1
                                count += 1
                        except IndexError:
                            pass
                if count <= 6:  #週圍少於等於6個白點
                    inv[i][j] = 0  #將白點去除    
            
    dilation = cv2.dilate(inv, (8,8), iterations=1)  #圖形加粗
    cv2.imwrite("final.png",dilation)
    
    #文字辨識 
    tools = pyocr.get_available_tools()
    if len(tools) == 0:
        print("No OCR tool found")
        sys.exit(1)
    tool = tools[0]  #取得可用工具
    
    result = tool.image_to_string(
        Image.open('final.png'),
        builder=pyocr.builders.TextBuilder()
    )
Example #24
def image_to_json(imgc,models):

  #remove noise
  imgc = cv2.fastNlMeansDenoisingColored(imgc)

  #get gray image
  gray=cv2.cvtColor(imgc, cv2.COLOR_BGR2GRAY)  # 6 is the numeric value of cv2.COLOR_BGR2GRAY
  #remove noise
  gray = cv2.fastNlMeansDenoising(gray,None,10,7,21)
  h,w=gray.shape

  #get binary image
  img= cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
              cv2.THRESH_BINARY,11,2)


  #initializing array for all the components of the form
  fields=[]


  #detect text
  tboxes=models["textDetector"].boxes(Image.fromarray(imgc))
  #recognize text
  for tbox in tboxes:
    x,y,w,h=map(int,tbox)
    new=img[y:y+h,x:x+w]
    res=models["textrec"].recognize(new)
    if res is not None:
      fields.append(classes.Label(res[0],tbox,float(res[1])))
  #calculate average text height, used as reference
  avg_t_h=mean([b[3] for b in tboxes])
  fields=textUtils.groupLabels(fields,avg_t_h) 


  #erasing text
  utils.erase(tboxes,img)


  #detect all components remaining on the form
  contours, hierarchy = cv2.findContours(255-img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
  boxes=sorted([cv2.boundingRect(c) for c in contours], key =lambda x:x[2],reverse=True )

  #cleaning
  if (0,0,w,h) in boxes:
    boxes.remove((0,0,w,h))
  temp=[]
  for b in boxes:
    if b[2]<=8 or b[2]*b[3]<150 :
      temp.append(b)
  boxes=[b for b in boxes if b not in temp]
  for b in boxes:
    l=[x for x in boxes if utils.is_inside(x,b,5)]
    boxes=[x for x in boxes if x not in l]

  #recognize components
  lines=[]
  for i in range(len(boxes)):
    x,y,w,h=boxes[i]
    res=models["widgetClassifier"].classify(img[y:y+h,x:x+w],avg_t_h)
    if res is None:
      continue
    
    Ftype=res[1] 

    if Ftype in ["textBox","dropDown","date"]:
      ls=[f for f in fields if f.fieldType == "Label" and utils.is_inside(f.getBbox(),boxes[i],avg_t_h/3)]
      if ls:
        fields=[f for f in fields if f not in ls]
        fields.append(classes.Button(boxes[i],textUtils.mergeLabels(ls)))
      else:
        if Ftype == "textBox":
          fields.append(classes.TextField(boxes[i]))
        elif Ftype == "dropDown":
          fields.append(classes.DropDown(boxes[i]))
        else:
          fields.append(classes.Date(boxes[i]))
    
    elif Ftype == "line":
      ls=[f for f in fields if f.fieldType == "Label" and widgets.is_underline(f.getBbox(),boxes[i],avg_t_h//2,avg_t_h//2)]
      if len(ls) != 0:
        fields=[f for f in fields if f not in ls]
        ls=textUtils.mergeLabels(ls).toTitle()
        ls.headerLine=classes.Line(boxes[i])
        fields.append(ls)
      else:
        lines.append(classes.Line(boxes[i]))

    elif Ftype == "checkBox":
      fields.append(classes.CheckBox(boxes[i]))
    
    elif Ftype == "radio":
      fields.append(classes.Radio(boxes[i]))
    
    elif Ftype == "table":
      fields.append(classes.Table(boxes[i]))
      
  # processing the widgets

  #collecting all the lines
  for l in lines[:]:  # iterate over a copy: matching lines are removed from 'lines' below
    # To recognize lines as fill-in-the blank text-box
    # comparing the line with adjacent text,
    # which is made one fourth it's height so as to match the line height
    textFieldLine=False
    for t in tboxes :
      bbox1=(t[0]+(t[2]*3)//4,t[1],t[2]//4,t[3]);
      bbox2=l.getBbox();
      if utils.is_left(bbox1,bbox2) or utils.is_right(bbox1,bbox2):
        textFieldLine=True
        break
    if textFieldLine:
      lines.remove(l)
      ls=list(l.getBbox())
      ls[3]=avg_t_h
      fields.append(classes.TextField(ls))
      

  lines.sort(key=lambda x:x.BoundingBox['Top'])
  # grouping lines into multiline textArea
  groups=[]
  while lines:
    ls=[lines.pop(0)]
    while True:
      temp=[line for line in lines if utils.is_above(ls[-1].getBbox(),line.getBbox(),avg_t_h)]
      if temp:
        ls.append(min(temp,key=lambda x:x.BoundingBox['Top'] - ls[-1].BoundingBox['Top']))
        lines.remove(ls[-1])
      else:
        break
    groups.append(ls)

  #form text area out of lines
  for group in groups:
    if len(group)==1:
      fields.append(group[0])
    else:
      fields.append(classes.TextArea(utils.getBbox(group),len(group)))



  for f in [f for f in fields if f.fieldType=="Table"]:
    x,y,w,h=f.getBbox()
    f.rows=len(widgets.getRows(img[y:y+h,x:x+w]))
    f.cols=len(widgets.getColums(img[y:y+h,x:x+w]))
    if f.rows == 0 or f.cols == 0:
      fields.remove(f)
  for f in [f for f in fields if f.fieldType=="Date"]:
    x,y,w,h=f.getBbox()
    f.blocks=len(widgets.getColums(img[y:y+h,x:x+w]))
    if f.blocks == 0:
      fields.remove(f)
    #collecting the row and column count in the table

  #rectifying the heights and sorting
  fields=utils.makeHeights(fields,avg_t_h/2)
  #association of related components
  components=utils.getComponents(fields,avg_t_h)
  #breaking of the form into sections
  sections=utils.getSections(components)

  new=classes.form(img.shape)
  new.sections=sections
  cv2.imwrite("result.jpg",new.printForm())
  return new.getJSON()
Example #25
def vectorize_image(image_np, destination, icc_profile):
    image = Image.fromarray(image_np).convert('RGB')
    width, height = image.size

    refColours = get_colors(image)
    #refColours = ([
    #	[86, 29, 37],
    #	[206, 129, 71],
    #	[236, 221, 123],
    #	[33, 161, 121],
    #	[4, 31, 30],
    #	[68, 94, 147],
    #	[229, 234, 250]
    #])

    pixels = image.load()

    for i in range(width):
        for j in range(height):
            mindist = distance(refColours[0], pixels[i, j])
            nearest = refColours[0]
            for index in range(1, len(refColours)):
                d = distance(refColours[index], pixels[i, j])
                if d < mindist:
                    mindist = d
                    nearest = refColours[index]
            pixels[i, j] = tuple(nearest)

    denoised = cv2.fastNlMeansDenoisingColored(np.array(image), None, 30, 10,
                                               7, 21)

    image = cv2.cvtColor(denoised, cv2.COLOR_BGR2LAB)
    image = image.reshape((image.shape[0] * image.shape[1], 3))
    clt = MiniBatchKMeans(n_clusters=32)
    labels = clt.fit_predict(image)
    quant = clt.cluster_centers_.astype("uint8")[labels]
    quant = quant.reshape((height, width, 3))
    quant_np = cv2.cvtColor(quant, cv2.COLOR_LAB2BGR)
    tf.disable_eager_execution()
    tf.reset_default_graph()

    input_photo = tf.placeholder(tf.float32, [1, None, None, 3])
    network_out = network.unet_generator(input_photo)
    final_out = guided_filter.guided_filter(input_photo,
                                            network_out,
                                            r=1,
                                            eps=5e-3)

    all_vars = tf.trainable_variables()
    gene_vars = [var for var in all_vars if 'generator' in var.name]
    saver = tf.train.Saver(var_list=gene_vars)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    sess.run(tf.global_variables_initializer())
    saver.restore(sess, tf.train.latest_checkpoint('cartoonize/saved_models'))

    batch_image = image_np.astype(np.float32) / 127.5 - 1
    batch_image = np.expand_dims(batch_image, axis=0)
    output = sess.run(final_out, feed_dict={input_photo: batch_image})
    output = (np.squeeze(output) + 1) * 127.5
    output = np.clip(output, 0, 255).astype(np.uint8)

    Image.fromarray(output).save(destination + '/background.jpg',
                                 'JPEG',
                                 optimize=True,
                                 quality=50,
                                 icc_profile=icc_profile.tobytes())
Example #26
    def findObjects(self, openCVImage):

        kernel = np.ones((5, 5), np.uint8)

        rawImage = openCVImage  # the body below refers to 'rawImage'
        deNoisedImage = cv2.fastNlMeansDenoisingColored(rawImage, None, 10, 10)
        ih, iw, ic = rawImage.shape
        icx, icy = iw // 2, ih // 2  # assumed image centre; 'icx'/'icy' are not defined anywhere else
        hsv = cv2.cvtColor(deNoisedImage, cv2.COLOR_BGR2HSV)
        hsvMedianBlur = cv2.medianBlur(hsv, 5)  # assumed: the masks below expect a median-blurred HSV image

        #color bounds

        #green
        low_green = np.array([35, 40, 40])
        high_green = np.array([80, 255, 255])
        greenMask = cv2.inRange(hsvMedianBlur, low_green, high_green)

        #red bounds 1
        low_red = np.array([0, 180, 30])
        high_red = np.array([10, 255, 255])
        redMask = cv2.inRange(hsvMedianBlur, low_red, high_red)

        #red bounds 2
        low_red2 = np.array([150, 180, 30])
        high_red2 = np.array([180, 255, 255])
        redMask2 = cv2.inRange(hsvMedianBlur, low_red2, high_red2)

        #red combination mask

        redMask3 = cv2.bitwise_or(redMask, redMask2)

        #yellow
        low_yellow = np.array([15, 80, 80])
        high_yellow = np.array([35, 255, 255])
        yellowMask = cv2.inRange(hsvMedianBlur, low_yellow, high_yellow)

        #pink
        low_pink = np.array([135, 100, 100])
        high_pink = np.array([180, 170, 255])
        pinkMask = cv2.inRange(hsvMedianBlur, low_pink, high_pink)

        #creation of a mask with all objects.
        totalMask = cv2.bitwise_or(greenMask, yellowMask)
        totalMask = cv2.bitwise_or(totalMask, redMask3)
        totalMask = cv2.bitwise_or(totalMask, pinkMask)

        #noise reduction on each mask
        erodedPinkMask = cv2.morphologyEx(pinkMask, cv2.MORPH_OPEN, kernel)
        erodedRedMask = cv2.morphologyEx(redMask3, cv2.MORPH_OPEN, kernel)
        erodedYellowMask = cv2.morphologyEx(yellowMask, cv2.MORPH_OPEN, kernel)
        erodedGreenMask = cv2.morphologyEx(greenMask, cv2.MORPH_OPEN, kernel)

        #calculation of the pixel center of each block in the image
        colors = np.array(["yellow", "green", "red", "pink"])
        maskArray = np.array(
            [erodedYellowMask, erodedGreenMask, erodedRedMask, erodedPinkMask])
        centers = np.array([["yellow", 0, 0], ["green", 0, 0], ["red", 0, 0],
                            ["pink", 0, 0]])
        cv2.waitKey(0)
        idx = 0
        for mask in maskArray:
            temp = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
            _, contoursMask, hierarchy = cv2.findContours(
                temp, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            for contour in contoursMask:
                area = cv2.contourArea(contour)
                if area > 300:
                    x, y, w, h = cv2.boundingRect(contour)
                    cv2.rectangle(mask, (x, y), (x + w, y + h), (255, 0, 255),
                                  2)
                    cx = x + (w / 2)
                    cy = y + (h / 2)
                    print(colors[idx])
                    print(cx, cy)
                    print(cx - icx, cy - icy)
                    centers[idx][1] = int(cx)
                    centers[idx][2] = int(cy)
            idx = idx + 1

        i = 0
        for x in centers:  # 'points' was undefined; the detected centres are assumed here
            (X, Y, Z) = vision_class.findXYZ(x)
            self.objectPositions[i][1] = X
            self.objectPositions[i][2] = Y
            #self.objectPositions[i][0] = Z
            i = i + 1

        return colors, centers
Example #27
def preprocess1(data):
	img = cv2.GaussianBlur(data, (5,5), 0) 
	img = cv2.bilateralFilter(img,9,75,75)
	img = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
	return img
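A sketch chaining the three smoothing stages above on one frame; the file name is a placeholder:

import cv2

frame = cv2.imread("frame.png")  # hypothetical input
smoothed = preprocess1(frame)    # Gaussian blur -> bilateral filter -> NL-means denoise
cv2.imwrite("frame_smoothed.png", smoothed)
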
Example #28
    def run(self):
        pos_frame = self.cam.get(cv2.CAP_PROP_POS_FRAMES)  # property 1
        frame_count = 0
        files = []
        for name in sorted(os.listdir(self.path_to_frames)):
            if fnmatch(name, "*.png"):
                #print(os.path.join(self.path_to_frames, name))
                files.append(str(os.path.join(self.path_to_frames, name)))
        files.sort(key=self.alphanum_key)
        numOfFiles = len(files)
        timePassed = 0
        numCubes = 0
        fileCount = 0
        features = []
        '''
        Remove the condition for 10 or 20 frames; it is only for testing purposes
        '''
        print("Total Frames " + str(self.cam.get(7)))
        # while True and frame_count < int(self.cam.get(7)):
        while frame_count < 10:
            ret, frame = self.cam.read()
            '''print('One Loop')
            print(frame_count)
            print(files[frame_count])'''
            if frame_count == fileCount and (fileCount + 5) <= int(
                    self.cam.get(cv2.CAP_PROP_FRAME_COUNT)):
                del features[:]
                timePassed += 1
                img_set = []
                img1 = cv2.cvtColor(cv2.imread(files[fileCount]),
                                    cv2.COLOR_BGR2GRAY)
                fileCount += 1
                img2 = cv2.cvtColor(cv2.imread(files[fileCount]),
                                    cv2.COLOR_BGR2GRAY)
                fileCount += 1
                img3 = cv2.cvtColor(cv2.imread(files[fileCount]),
                                    cv2.COLOR_BGR2GRAY)
                fileCount += 1
                img4 = cv2.cvtColor(cv2.imread(files[fileCount]),
                                    cv2.COLOR_BGR2GRAY)
                fileCount += 1
                img5 = cv2.cvtColor(cv2.imread(files[fileCount]),
                                    cv2.COLOR_BGR2GRAY)
                fileCount += 1
                img_set.extend((img1, img2, img3, img4, img5))
                # print(timePassed)
                resize_2020_image_set = []
                resize_4030_image_set = []
                resize_160120_image_set = []
                for image in img_set:
                    resize_2020_image_set.append(cv2.resize(image, (20, 20)))
                    resize_4030_image_set.append(cv2.resize(image, (40, 30)))
                    resize_160120_image_set.append(
                        cv2.resize(image, (160, 120)))
                resized_image_set = [
                    resize_2020_image_set, resize_4030_image_set,
                    resize_160120_image_set
                ]
                patches_all = [[], [], []]
                iterator = 0
                for images_set in resized_image_set:
                    for resized_img in images_set:
                        patch_list = []
                        patch = []
                        for start in range(0, len(resized_img[0]), 10):
                            count = 1
                            for row in resized_img:
                                patch.append(row[start:start + 10])
                                if (count == 10):
                                    count = 0
                                    patch_list.append(patch)
                                    patch = []
                                count += 1
                        patches_all[iterator].append(patch_list)
                    iterator += 1

                cubes = []

                for resolution_patch_set in patches_all:
                    for iterator in range(len(resolution_patch_set[0])):
                        p_one = resolution_patch_set[0][iterator]
                        p_two = resolution_patch_set[1][iterator]
                        p_three = resolution_patch_set[2][iterator]
                        p_four = resolution_patch_set[3][iterator]
                        p_five = resolution_patch_set[4][iterator]
                        cubes.append([p_one, p_two, p_three, p_four, p_five])
                numCubes += len(cubes)
                for cub in cubes:
                    sobelx = cv2.Sobel(np.array(cub),
                                       cv2.CV_64F,
                                       1,
                                       0,
                                       ksize=-1)
                    sobely = cv2.Sobel(np.array(cub),
                                       cv2.CV_64F,
                                       0,
                                       1,
                                       ksize=-1)
                    sobelt = cv2.Sobel(np.array(list(zip(*cub))),
                                       cv2.CV_64F,
                                       0,
                                       1,
                                       ksize=-1)
                    sobelt = list(zip(*sobelt))  # transpose back to (time, y, x)
                    feature = []
                    for time_value in range(5):
                        for y_value in range(10):
                            for x_value in range(10):
                                feature.append(
                                    sobelx[time_value][y_value][x_value])
                                feature.append(
                                    sobely[time_value][y_value][x_value])
                                feature.append(
                                    sobelt[time_value][y_value][x_value])
                    features.append(feature)
                #features = np.array(features)
                # print(features.shape)

            if ret:
                frame_vectors = []
                frame = cv2.fastNlMeansDenoisingColored(
                    frame, None, 10, 10, 7, 21)
                frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                vis = frame.copy()
                pos_frame = self.cam.get(1)
                if len(self.tracks) > 0:
                    img0, img1 = self.prev_gray, frame_gray
                    p0 = np.float32([tr[-1]
                                     for tr in self.tracks]).reshape(-1, 1, 2)
                    p1, st, err = cv2.calcOpticalFlowPyrLK(
                        img0, img1, p0, None, **lk_params)
                    p0r, st, err = cv2.calcOpticalFlowPyrLK(
                        img1, img0, p1, None, **lk_params)
                    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                    good = d < 1
                    for loop, point in zip(p0, self.points):
                        pa, st, err = cv2.calcOpticalFlowPyrLK(
                            img0, img1, loop, None, **lk_params)
                        p0a, st, err = cv2.calcOpticalFlowPyrLK(
                            img1, img0, pa, None, **lk_params)
                        if abs(loop - p0a).reshape(-1, 2).max(-1) < 1:
                            dst = spatial.distance.euclidean(loop, p0a)
                            new_Loop = loop.flatten()
                            vtr = [
                                new_Loop[0], new_Loop[1], dst, point.angle,
                                point.response
                            ]
                            while (len(vtr) < 1500):
                                vtr.append(0)
                            # self.vectors.append(vtr)
                            frame_vectors.append(vtr)
                    new_tracks = []
                    for tr, (x, y), good_flag in zip(self.tracks,
                                                     p1.reshape(-1, 2), good):
                        if not good_flag:
                            continue
                        tr.append((x, y))
                        if len(tr) > self.track_len:
                            del tr[0]
                        new_tracks.append(tr)
                        cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                    self.tracks = new_tracks
                    cv2.polylines(vis, [np.int32(tr) for tr in self.tracks],
                                  False, (0, 255, 0))
                    draw_str(vis, (20, 20),
                             'track count: %d' % len(self.tracks))

                if self.frame_idx % self.detect_interval == 0:
                    #mask = np.zeros_like(frame_gray)
                    mask = np.zeros_like(self.mask)
                    mask[:] = 255
                    for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                        cv2.circle(mask, (x, y), 5, 0, -1)
                    '''p = cv2.goodFeaturesToTrack(
                        frame_gray, mask=self.mask, **feature_params)'''
                    p, des = self.orb.detectAndCompute(frame_gray, self.mask)
                    if p is not None:
                        '''for x, y in np.float32(p).reshape(-1, 2):
                            self.tracks.append([(x, y)])'''
                        for keypoint in p:
                            x = keypoint.pt[0]
                            y = keypoint.pt[1]
                            self.tracks.append([(x, y)])
                            self.points.append(keypoint)

                self.frame_idx += 1
                self.prev_gray = frame_gray
                # print(len(frame_vectors))
                zeroes = []
                while (len(zeroes) < 1500):
                    zeroes.append(0)
                while (len(frame_vectors) < 2000):
                    frame_vectors.append(zeroes)
                while (len(features) < 2000):
                    features.append(zeroes)
                # print(len(frame_vectors))
                # frame_vectors=np.array(frame_vectors)
                # print(frame_vectors.shape)
                # featureVal=np.array(features)
                # print(featureVal.shape)
                self.vectors.append(
                    tuple((frame_vectors, features, self.label)))
                cv2.imshow('Frames', vis)
                frame_count += 1
                print('Tracking frame ' + str(frame_count))
            else:
                self.cam.set(1, pos_frame - 1)
                cv2.waitKey(1000)

            ch = cv2.waitKey(1)
            if ch == 27 or self.cam.get(1) == self.cam.get(7):
                break
        '''for a in self.vectorReturn():
            print(a.getxCoOrdinates(), a.getyCoOrdinates(), a.getDistance())
        '''
        #print (len(self.tracks))
        # print(len(self.vectors))
        return self.vectorReturn()
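The patch-cube feature extraction inside run() is easier to follow in isolation. A minimal sketch of the same idea, with np.gradient standing in for the Sobel calls (all names illustrative; five equal-size grayscale frames assumed):

import cv2
import numpy as np

def cube_features(frames):
    # stack five frames into a (5, 20, 20) cube at a fixed resolution
    cube = np.stack([cv2.resize(f, (20, 20)) for f in frames]).astype(np.float64)
    features = []
    for y0 in range(0, 20, 10):
        for x0 in range(0, 20, 10):
            c = cube[:, y0:y0 + 10, x0:x0 + 10]  # one (5, 10, 10) patch cube
            gt = np.gradient(c, axis=0)          # temporal derivative
            gy = np.gradient(c, axis=1)          # vertical derivative
            gx = np.gradient(c, axis=2)          # horizontal derivative
            # interleave gx/gy/gt per voxel -> 5*10*10*3 = 1500 values
            features.append(np.stack([gx, gy, gt], axis=-1).ravel())
    return features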
Example #29
                     color=color,
                     thickness=thickness)

    return image


###################################################################################################
start_time = time.time()

#with rescaling first
height, width, channels = img.shape
resized_image = cv2.resize(img, (500, int(height * 500 / width)))

# denoise the image for better results (SLOW!!!!!!)
if (noiseFilter > 0):
    resized_image = cv2.fastNlMeansDenoisingColored(resized_image, None,
                                                    noiseFilter)

if (HSVClustering == 1):
    resized_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2HSV)
Z = resized_image.reshape((-1, 3))

# convert to np.float32
Z = np.float32(Z)

# define criteria, number of clusters(K) and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret, labels, center = cv2.kmeans(Z, K, None, criteria, 10,
                                 cv2.KMEANS_RANDOM_CENTERS)
print("Kmeans clustering performed in %s seconds" % (time.time() - start_time))

# print clustered image for debug
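The debug step hinted at above can be reconstructed from the kmeans outputs; a minimal sketch, assuming the labels/center arrays produced by cv2.kmeans:

center = np.uint8(center)
quantized = center[labels.flatten()].reshape(resized_image.shape)
if (HSVClustering == 1):
    quantized = cv2.cvtColor(quantized, cv2.COLOR_HSV2BGR)  # back to BGR for display
cv2.imshow('clustered', quantized)
cv2.waitKey(0)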
Example #30
def get_video():
    f = frame_convert2.video_cv(freenect.sync_get_video()[0])
    return cv2.fastNlMeansDenoisingColored(f, None, 10, 10, 3, 3)
Example #31
        return [inputs[0][:, :, self.ystart:self.yend, self.xstart:self.xend]]


cv2.dnn_registerLayer('Crop', CropLayer)

# Load the model.
net = cv2.dnn.readNet(args.prototxt, args.caffemodel)

# load the input image and grab its dimensions
image = cv2.imread(args.input)
# image =cv2.equalizeHist(img)
# image = cv2.pyrMeanShiftFiltering(image1,10,20)
height, width, channel = image.shape

# we use fastNlMeansDenoisingColored to reduce the noise
noise_reduced_image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7,
                                                      21)
H, W = args.height, args.width
noise_reduced_image_resized = cv2.resize(noise_reduced_image, (W, H))  # dsize is (width, height)

# we keep the original ratio to the image to calculate the bounding box sizes
height_ratio = height / H
width_ratio = width / W

inp = cv2.dnn.blobFromImage(image,
                            scalefactor=1.0,
                            size=(args.width, args.height),
                            mean=(104.00698793, 116.66876762, 122.67891434),
                            swapRB=False,
                            crop=False)

net.setInput(inp)
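The snippet ends before the forward pass; a minimal continuation sketch, assuming the usual single-channel HED output of shape (1, 1, H, W) (output file name illustrative):

out = net.forward()
edges = out[0, 0]                           # edge-probability map in [0, 1]
edges = cv2.resize(edges, (width, height))  # back to the input image size
edges = (255 * edges).astype('uint8')
cv2.imwrite('edges.png', edges)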
Example #32
        img_saturation.save(os.path.join(saturation_folder, file))

        #Apply brightness transform
        img_pil = Image.open(path)  # PIL
        img_brightness = brightness_transform(img_pil)
        img_brightness.save(os.path.join(brightness_folder, file))

        # Apply hue transform
        img_pil = Image.open(path)  # PIL
        img_colour = color_transform(img_pil)
        img_colour.save(os.path.join(hue_folder, file))

        # Apply denoising for texture modification
        img_texture = cv2.fastNlMeansDenoisingColored(img_cv,
                                                      None,
                                                      templateWindowSize=7,
                                                      searchWindowSize=21,
                                                      h=texture_h,
                                                      hColor=texture_h)
        cv2.imwrite(os.path.join(texture_folder, file), img_texture)

        # Randomly shuffle tiles of image for shape modification
        img_cv = cv2.imread(path)  # read again in openCV
        img_shape = modify_shape(img_cv, A, w)
        cv2.imwrite(os.path.join(shape_folder, file), img_shape)

# Get folders
print(os.path.abspath(input_dir_test))
folders = next(os.walk(input_dir_test))[1]

# Loop through subfolders
for folder in folders:
Example #33
def denoisingColor(image, i):
    img = image

    dst = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)

    spasiSliku.spasiSliku("colorDenoising", "slika", i, dst)
Example #34
    def findMarkings(self):
            #flag=0
        for i in range(0,2):        
            if i == 0:    
                img = Image("images/temp_x1.jpg",0)
            else:
                img = Image("images/temp_y2.jpg",0) 

            if i==0:
                num_marking = int(img.width/self.dx+1)
            else:
                num_marking = int(img.height/self.dy+1)       
            #print str(img.width) +"  " + str(img.height)
           
            #if img.width/img.height<3 : #crop image 

            if img.width<70:
                #print "ok"
                img = img.resize(img.width*4,img.height*4)
                img_inv = img.invert()
                img_inv.scale(100,100)
                img_bin = img_inv.binarize()
                img_bin = img_bin.dilate(2)
                img_bin = img_bin.erode(1)
                #img_bin.erode().show()
            
            #print str(img.width) +"  " + str(img.height)
            #if img.width<400 :
                #img = img.resize(img.width*5,img.height*5)    
            
            else:
                img_inv = img.invert()
                img_inv.scale(100,100)
                img_bin = img_inv.binarize()


            if img.height<50:
                #print "ok"
                img = img.resize(img.width*4,img.height*4)
                img_inv = img.invert()
                img_inv.scale(100,100)
                img_bin = img_inv.binarize()
                img_bin = img_bin.dilate(2)
                img_bin = img_bin.erode(1)


            else:
                img_inv = img.invert()
                img_inv.scale(100,100)
                img_bin = img_inv.binarize()                




            """
            if flag==1:
                img_dilate = img_inv.dilate(2)
                img_erode = img_dilate.erode(2)
                img_erode.save("rot_1.jpg")
                """
            #elif flag!=1:
            img_bin.save("images/temp.jpg")

            img = cv2.imread("images/temp.jpg") 
            dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21) 
            

            marking = image_to_string(IMAGE.fromarray(dst),lang='eng')
            marking = marking.replace("\n"," ")
            marking = marking.split(" ")
            print marking
            marking = [j for j in marking if j != '']
            
            for j in range(len(marking)):
                print len(marking)
                marking[j] = marking[j].replace("O","0")
                #if marking[i][1]=="O":
                    #marking[i][1]="0"
            marking = [j for j in marking
                       if re.match(r"^\d+?\.?\d+?$", j) is not None]
            print len(marking)        
            for j in range(len(marking)):
                marking[j]=float(marking[j])

            marking=sorted(marking)
            print marking

            # a constant ratio between consecutive markings indicates a log scale;
            # compare with a tolerance rather than exact float equality
            self.isLog = abs(marking[1] / marking[0] -
                             marking[2] / marking[1]) < 1e-6
            
            """    
            if num_marking>len(marking) and self.isLog==False:
                if num_marking-len(marking)==2:
                    #print "diff is 2"
                    marking.append(marking[1]-2*marking[0])
                    marking.sort()
                    marking.append(marking[len(marking)-1]+(marking[1]-marking[0]))                
                    marking.sort()
                if num_marking-len(marking)==1:
                    marking.append(marking[len(marking)-1]+(marking[1]-marking[0]))
                    marking.sort()              
                #else:
                    #i=int(i)
                    
            if num_marking>len(marking) and self.isLog==True:
                if num_marking-len(marking)==2:
                    #print "diff is 2"
                    marking.append(marking[0]**2/marking[1])
                    marking.sort()
                    marking.append(marking[len(marking)-1]*(marking[1]/marking[0]))                
                    marking.sort()
                if num_marking-len(marking)==1:
                    marking.append(marking[len(marking)-1]*(marking[1]/marking[0]))
                    marking.sort()                      
                    """

            if self.isLog==True:
                marking.append(marking[0]**2/marking[1])
                marking.sort()
                marking.append(marking[len(marking)-1]*(marking[1]/marking[0]))                
                marking.sort()    


            if self.isLog==False:
                marking.append(2*marking[0]-marking[1])
                marking.sort()
                marking.append(marking[len(marking)-1]+(marking[1]-marking[0]))                
                marking.sort()        

            print marking
            mark_len = len(marking)        
            if i==0:
                self.minx=marking[0]
                self.maxx=marking[mark_len-1]            
            else:
                self.miny=marking[0]
                self.maxy=marking[mark_len-1]
Example #35
    def findColorNnumOfPlots(self):
        for i in range(len(self.textBoxImages)):
            #flag=0
            s='images/onlylabel_'+str(i+1)+'.png'
            img = Image(s,0)
            #print str(img.width) +"  " + str(img.height)
               
            #print str(img.width) +"  " + str(img.height)
            #if img.width<400 :
                #img = img.resize(img.width*5,img.height*5)    
            img_inv = img.invert()
            img_inv.scale(100,100)
            img_bin = img_inv.binarize()
            
            """
            if flag==1:
                img_dilate = img_inv.dilate(2)
                img_erode = img_dilate.erode(2)
                img_erode.save("rot_1.jpg")
                """

            #elif flag!=1:
            img_bin.save("images/rot_1.jpg")

            img = cv2.imread("images/rot_1.jpg") 
            dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21) 
            
            graphs = image_to_string(IMAGE.fromarray(dst),lang='eng')
            graphs = graphs.replace("\n",">%$ ")                               #formatting the string to get the list 
            graphs = graphs.split(">%$ ")                                      #of plots plotted in given graphs                               
            print graphs
            graphNamesList=graphs
            n=len(graphs)                                                      #number of plots in given graph
            img = Image(s,0)
            img = img.resize(img.width*3,img.height*3)                         #resizing the image to make it big enough for cropping
            #print height
            #img = img.crop(15,15,img.width,img.height)                   #removing the edges of the given graph description image
            height = (img.height)*1.0/n
            width = img.width 
            graphList=[]
            start = 0
            for i in range(0,n):                                               #cropping the image so as to get a single plot description 
                cropImg = img.crop(0, start, width, height)                   #in one image
                graphList.append(cropImg)
                start+=height
            
            graphList1 = graphList    
            #time.sleep(3)
            """
            graphName = []      
            for i in graphList1:                                               #getting the names of all the plots from the images cropped above
                #i = i.resize(i.width*4,i.height*4)
                i = i.invert()
                i.scale(100,100)
                i = i.binarize()
                #i = i.erode()
                i.save("temp.jpg")
                i = cv2.imread("temp.jpg")
                i = cv2.fastNlMeansDenoisingColored(i,None,10,10,7,21) 
                g = image_to_string(IMAGE.fromarray(i),lang='eng')
                print g
                print "\n"
                graphName.append(g)
                """
            
            
            graphColor = []
            for i in graphList:                                                #finding colors of plots of all the images cropped above
                i.save("images/temp.jpg")    
                #raw_input()
                imge = cv2.imread("images/temp.jpg",1)
                
                imge = cv2.fastNlMeansDenoisingColored(imge,None,10,10,7,21) 
                imge = cv2.cvtColor(imge, cv2.COLOR_BGR2RGB)
                #imge = cv2.cvtColor(imge, cv2.COLOR_RGB2HSV)
         
                # show our image
                plt.figure()
                #plt.axis("off")
                #plt.imshow(imge)
                imge = imge.reshape((imge.shape[0] * imge.shape[1], 3))
                n_clusters = 3                                                 #number of clusters in kmeans clustering
                clt = KMeans(n_clusters = 3)
                clt.fit(imge)
                hist = centroid_histogram(clt)
                bar,color = plot_colors(hist, n_clusters, clt.cluster_centers_)
                #bar = cv2.cvtColor(bar,cv2.COLOR_GRAY2RGB) 
                # show our color bart
                plt.figure()
                #plt.axis()
                #plt.imshow(bar)
                #plt.show()
                
                if color[0]>240 and color[1]>240 and color[2]>240:             
                    color = [10.00, 10.00, 10.00]
                
                color = list(rgb2hsv(color[0],color[1],color[2]))
                #color[1] = color[1]                                      #increasing the picture saturation and value of the image
                #color[2] = color[2]                                       #which got reduced due to processing
                #color = hsv2rgb(color[0],color[1],color[2])     
                print color
                graphColor.append(color)                                    
                
                
            for i in range(0,len(graphColor)):
                c = curve(graphColor[i],graphNamesList[i])
                #c.color(graphColor[i])
                #c.name(graphName[i])
                 
                self.curveList.append(c)
            #return graphColor, graphName
            #pass
            print graphNamesList
            print graphColor
            print self.curveList
Example #36
    def classify(self, vals):

        vals = cv2.fastNlMeansDenoisingColored(vals, None, 10, 10, 7, 21)
Example #37
__author__ = 'Vamshi'
import numpy as np
import cv2
from matplotlib import pyplot as plt

img = cv2.imread('16_left.jpeg')

dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)

plt.subplot(121),plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.subplot(122),plt.imshow(cv2.cvtColor(dst, cv2.COLOR_BGR2RGB))
plt.show()
Example #38
def preProcessLocation(imPath):
    #Load the original image
    sphere = True
    rawImage = cv2.imread(imPath)
    rawImage = cv2.fastNlMeansDenoisingColored(rawImage, None, 1, 1, 7, 21)
    # if sphere:
    #     norm = cv2.normalize(rawImage, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    #     cv2.imshow("origional", norm)
    #     cv2.waitKey(0)
    #     cv2.destroyAllWindows()

    brightImage = rawImage.copy()
    gray = cv2.cvtColor(brightImage, cv2.COLOR_BGR2GRAY)

    # Uncomment to see the original image

    cv2.imshow("origional", gray)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    #----------------------------------------
    # Find area of the image with the largest intensity value

    (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(gray)
    maxLoc = np.transpose(maxLoc)
    maxLoc = tuple([maxLoc[1], maxLoc[0]])

    print("Location of brightest pixel: ", maxLoc)
    # cv2.circle(brightImage, maxLoc, 1, (255, 0, 0), 2)

    print("Intensity: ", gray[maxLoc])

    # display where the brightest area of pixels is

    #     cv2.imshow("Brightest Spot", brightImage)
    #     cv2.waitKey(0)
    #     cv2.destroyAllWindows()
    #----------------------------------------
    # Preprocessing (using a bilateral filter)

    height = int(rawImage.shape[0])
    width = int(rawImage.shape[1])

    if gray[maxLoc] <= 50:
        print("Low intensity image - concentrating image contours...")
        kernel = np.ones((5, 5), np.uint8)
        bilateral_filtered_image = cv2.bilateralFilter(
            rawImage, 5, height, width)  # For dark images
        edge_detected_image = cv2.Canny(bilateral_filtered_image, 0, 10)
        edge_detected_image = cv2.dilate(edge_detected_image,
                                         kernel,
                                         iterations=3)
        edge_detected_image = cv2.medianBlur(edge_detected_image, 11)
        edge_detected_image = cv2.erode(edge_detected_image,
                                        kernel,
                                        iterations=2)
    else:
        bilateral_filtered_image = cv2.bilateralFilter(
            rawImage, 1, height, width)  # For bright images
        edge_detected_image = auto_canny(bilateral_filtered_image, True)

    # cv2.imshow('Bilateral', bilateral_filtered_image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    #----------------------------------------
    # Edge detection
    # 75 and 200 default min/max for canny edge detection

    print("Median: ", np.median(bilateral_filtered_image))
    print("Detecting islands.....")

    # if gray[maxLoc] <= 50:
    #     edge_detected_image = cv2.Canny(bilateral_filtered_image, 0, 10) # For dark images
    # else:
    #     edge_detected_image = cv2.Canny(bilateral_filtered_image, 10, 100) # For bright images

    corners = cv2.goodFeaturesToTrack(edge_detected_image,
                                      500,
                                      .0001,
                                      7,
                                      useHarrisDetector=True)
    corners = np.int0(corners)
    cornerList = []

    for i in corners:
        x, y = i.ravel()
        cornerList.append((x, y))
        # cv2.circle(brightImage, (x, y), 5, (255,0,0), 1)

    cv2.imshow('Edge detected image', edge_detected_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    #     shutil.rmtree('./Temp Images')
    #     os.mkdir("./Temp Images")
    #----------------------------------------
    # Finding Contours

    contours, _ = cv2.findContours(edge_detected_image, cv2.RETR_TREE,
                                   cv2.CHAIN_APPROX_SIMPLE)  # print(contours)

    contour_list = []
    for contour in contours:
        approx = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True),
                                  True)
        area = cv2.contourArea(contour)
        #     if area > 0:
        #         print("Pixel area: ", area)
        if ((len(approx) > 1) or (area > 5)):  # len 8, area 30 are default
            contour_list.append(contour)
    #----------------------------------------
    # convert the grayscale image to binary image
    # ret,thresh = cv2.threshold(gray,127,255,0)

    ret, thresh = cv2.threshold(gray, 0, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    lowThresh = 0.5 * ret

    # calculate moments of binary image
    os.chdir("./imageStore")
    imCount = 1
    for cnt in contour_list:
        M = cv2.moments(cnt)
        if int(M["m00"] != 0):
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])

            x, y, w, h = cv2.boundingRect(cnt)
            xCent = int((x + x + w) / 2)
            yCent = int((y + y + h) / 2)
            cv2.rectangle(rawImage, (x, y), (x + w, y + h), (0, 255, 0), 2)

            if (y - 1 == -1) or (x - 1 == -1):
                cropped = edge_detected_image[y:y + h + 1, x:x + w + 1]
                fileName = "edgeImage" + str(imCount) + ".png"
                # print(y, x)
            else:
                cropped = edge_detected_image[y - 1:y + h + 1, x - 1:x + w + 1]
                fileName = "croppedImage" + str(imCount) + ".png"


            # print(fileName)
            cv2.imwrite(fileName, cropped)
            imCount += 1

            # cv2.imshow('Cropped island hits', cropped)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()

            if classifyObject(fileName) == [1]:
                suppPath = '../suppTrain/hits'
                cv2.imwrite(os.path.join(suppPath, fileName), cropped)
                # cv2.rectangle(brightImage, (xCent, yCent), (xCent, yCent), (0,255,255),6) # Draw rectangle centers
                # cv2.circle(brightImage, (cX, cY), 2, (0, 0, 255), 2)   # Draw centers in relation to moments
                cv2.rectangle(brightImage, (x, y), (x + w, y + h), (0, 255, 0),
                              2)

            elif classifyObject(fileName) == [2]:
                suppPath = '../suppTrain/groups'
                cv2.imwrite(os.path.join(suppPath, fileName), cropped)
                cv2.rectangle(brightImage, (x, y), (x + w, y + h),
                              (0, 255, 255), 2)
                for corner in cornerList:
                    a = corner[0]
                    b = corner[1]
                    within = (x < a < x + w) and (y < b < y + h)
                    # if within:
                    # cv2.circle(brightImage, (a, b), 5, (255,0,0), 1)

            else:
                suppPath = '../suppTrain/negs'
                cv2.imwrite(os.path.join(suppPath, fileName), cropped)

    # cv2.imshow("Rects", rawImage)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    #----------------------------------------

    # Displaying Results
    cv2.imshow('Library Detected Image', rawImage)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    cv2.imshow('Objects Detected', brightImage)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #39
blur = cv2.blur(image, (7, 7))
cv2.imshow("Averag blur", blur)
cv2.waitKey(0)

gaussian_blur = cv2.GaussianBlur(image, (7, 7), 0)
cv2.imshow("Gaussian blur", gaussian_blur)
cv2.waitKey(0)

median_blur = cv2.medianBlur(image, 7)
cv2.imshow("Median blur", median_blur)
cv2.waitKey(0)

bilateral = cv2.bilateralFilter(image, 7, 75, 75)
cv2.imshow("Bilateral blur", bilateral)
cv2.waitKey(0)

cv2.destroyAllWindows()

# image de-noising
import cv2
import numpy as np

image = cv2.imread("images/crist2.jpg")
dst = cv2.fastNlMeansDenoisingColored(image, None, 6, 6, 7, 21)

cv2.imshow("Original Image", image)
cv2.waitKey(0)
cv2.imshow("Fast Means Denoising", dst)
cv2.waitKey(0)

cv2.destroyAllWindows()
Example #40
    ends = []
    bhvs = []

    #for imfile in glob.glob('data/out_'+file+'*.jpg'):
    for i in range(cnt + 1):
        try:

            imfile = 'out_' + file + '_' + str(i) + '.jpg'
            imfile = os.path.join('data', imfile)
            print("loading " + imfile + '...')
            img = cv2.imread(imfile)
            img = make_straight(img)

            img_start = img.copy()
            img_start = img_start[55:, 220:440]
            img_start = cv2.fastNlMeansDenoisingColored(
                img_start, None, 10, 10, 7, 21)
            #cv2.imshow('start',img_start)
            #cv2.waitKey(0)

            img_end = img.copy()
            img_end = img_end[55:, 440:660]
            img_end = cv2.fastNlMeansDenoisingColored(img_end, None, 10, 10, 7,
                                                      21)
            #plt.imshow(img_end)
            #plt.title('end')
            #plt.show()

            img_alphabet = img.copy()
            img_alphabet = img_alphabet[55:, 880:1100]
            img_alphabet = cv2.fastNlMeansDenoisingColored(
                img_alphabet, None, 10, 10, 7, 21)
Example #41
def acquire_images(cam, nodemap, nodemap_tldevice):
    """
    This function shows images from a device.

    :param cam: Camera to acquire images from.
    :param nodemap: Device nodemap.
    :param nodemap_tldevice: Transport layer device nodemap.
    :type cam: CameraPtr
    :type nodemap: INodeMap
    :type nodemap_tldevice: INodeMap
    :return: True if successful, False otherwise.
    :rtype: bool
    """

    print('*** IMAGE ACQUISITION ***\n')
    try:

        result = True
        centroids = None
        centroidsTouches = None
        stats = None
        screen_size = [0, 0]
        im2 = None  # extracted screen; set once 'y' has been pressed

        # Set acquisition mode to continuous
        #
        #  Retrieve enumeration node from nodemap

        # In order to access the node entries, they have to be casted to a pointer type (CEnumerationPtr here)
        node_acquisition_mode = PySpin.CEnumerationPtr(
            nodemap.GetNode('AcquisitionMode'))
        if not PySpin.IsAvailable(
                node_acquisition_mode) or not PySpin.IsWritable(
                    node_acquisition_mode):
            print(
                'Unable to set acquisition mode to continuous (enum retrieval). Aborting...'
            )
            return False, centroidsTouches, screen_size

        # Retrieve entry node from enumeration node
        node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName(
            'Continuous')
        if not PySpin.IsAvailable(
                node_acquisition_mode_continuous) or not PySpin.IsReadable(
                    node_acquisition_mode_continuous):
            print(
                'Unable to set acquisition mode to continuous (entry retrieval). Aborting...'
            )
            return False, centroidsTouches, screen_size

        # Retrieve integer value from entry node
        acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue(
        )

        # Set integer value from entry node as new value of enumeration node
        node_acquisition_mode.SetIntValue(acquisition_mode_continuous)

        print('Acquisition mode set to continuous...')

        #  Begin acquiring images
        cam.BeginAcquisition()

        print('Acquiring images...')

        #  Retrieve device serial number
        device_serial_number = ''
        node_device_serial_number = PySpin.CStringPtr(
            nodemap_tldevice.GetNode('DeviceSerialNumber'))
        if PySpin.IsAvailable(node_device_serial_number) and PySpin.IsReadable(
                node_device_serial_number):
            device_serial_number = node_device_serial_number.GetValue()
            print('Device serial number retrieved as %s...' %
                  device_serial_number)

        # Retrieve, convert, and show images
        cv2.namedWindow('im', cv2.WINDOW_NORMAL)
        print(
            "Please ensure you have calibration screen image downloaded. Then put it in full screen to get started."
        )
        print(
            "Please use keyboard :\n\t- press c to calibrate\n\t- press y to extract screen after calibration\n\t- press e to segment the screen after extracted\n\t- press q when you are done (with a correct segmentation).\nYou can extract a new screen (y) and segment it (e) as long as you are not satisfied with the result. Playing with screen brightness and camera focus may help."
        )
        while (1):
            try:

                #  Retrieve next received image
                image_result = cam.GetNextImage()

                #  Ensure image completion
                if image_result.IsIncomplete():
                    print('Image incomplete with image status %d ...' %
                          image_result.GetImageStatus())

                else:
                    # Retrieve image width and height
                    width = image_result.GetWidth()
                    height = image_result.GetHeight()

                    # Convert image to uint8 numpy array
                    row_bytes = float(len(image_result.GetData())) / width
                    rawFrame = np.array(image_result.GetData(),
                                        dtype="uint8").reshape(height, width)
                    # Convert image to BGR
                    im = cv2.cvtColor(rawFrame, cv2.COLOR_BAYER_BG2BGR)

                    # Display image in window 'im'
                    cv2.imshow('im', im)
                    k = cv2.waitKey(10) & 0xFF

                    if k == ord('q'):  # Press 'q' to exit
                        break

                    elif k == ord('c'):
                        print(
                            "Processing... Please wait... This will take less than a minute..."
                        )
                        # Denoise image
                        dst = cv2.fastNlMeansDenoisingColored(
                            im, None, 10, 10, 7, 21)
                        # Calibrate camera
                        centroids = CamCalibrate(dst)
                        # Retrieve screen size
                        screen_size = size(centroids)

                    elif k == ord('y') and centroids is not None:
                        # Extract screen only as im2 and display it
                        im2 = resizeScreen(im, centroids)
                        cv2.imshow('resized', im2)

                    elif k == ord('e') and im2 is not None:
                        # Segment im2.
                        markers, stats, centroidsTouches = segmentation(im2)
                        # Display resulting object markers
                        cv2.imshow('segmentation', markers)

                    #  Release image
                    image_result.Release()

            except PySpin.SpinnakerException as ex:
                print('Error: %s' % ex)
                return False, centroidsTouches, screen_size

        # Destroy all OpenCV windows when exiting the loop
        cv2.destroyAllWindows()

        # Select what are most likely graphical objects beyond those detected by segmentation
        centroidsTouches = getOnlyTouches(stats, centroidsTouches)
        # Convert graphical objects' coordinates to the red dot's basis.
        centroidsTouches = TouchCoordinates(centroidsTouches, centroids)

        #  End acquisition
        cam.EndAcquisition()

    except PySpin.SpinnakerException as ex:
        print('Error: %s' % ex)
        return False, centroidsTouches, screen_size

    return result, centroidsTouches, screen_size
Example #42
import cv2

image = cv2.imread(
    'C:\\Users\\ashua\\OneDrive\\Desktop\\Coding\\Python\\Modules\\OpenCV\\cat_resized.jpg'
)

dst = cv2.fastNlMeansDenoisingColored(image, None, 20, 20, 7, 15)
cv2.imshow('denoised h=20', dst)

dst = cv2.fastNlMeansDenoisingColored(image, None, 5, 5, 7, 15)
cv2.imshow('denoised h=5', dst)

cv2.waitKey(0)
Example #43
                                                                      all_positive_areas[inner_iteration]['height'])
    # draw the rest of the boxes
    for iterative in range(positive_boxes_count):
        if not all_positive_areas[iterative]['skip']:
            color = (0,255,0)
            cv2.rectangle(img_output, (all_positive_areas[iterative]['xmin'],all_positive_areas[iterative]['ymin']),
                          (all_positive_areas[iterative]['xmin'] + all_positive_areas[iterative]['width'],
                          all_positive_areas[iterative]['ymin'] + all_positive_areas[iterative]['height']), color, 2)



# Main function
if __name__ == "__main__":

    img = cv2.imread(sys.argv[1])
    dst = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
    cv2.imwrite("de_noised.jpg", dst)

    # normal detection
    input = 'de_noised.jpg'
    img = cv2.imread(input, 0)
    img_grey = cv2.imread(input, 0)
    img_output = cv2.imread(sys.argv[1], 1)
    height, width = img.shape


    twod_line_space = np.zeros((height, width,3), dtype=np.uint8)
    twod_circle_space = np.zeros((height, width, 3), dtype=np.uint8)

    dx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.int32)
    dy = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], np.int32)
Example #44
def cv_denoise(x):
    return cv2.fastNlMeansDenoisingColored(x, None, 3, 3, 7, 21)
Example #45
r   = 64

input   = args.input
param_h = args.smoothing_strength
search_size = args.search_window_size
patch_size  = args.patch_size
print   'param_h', param_h, 'search', search_size, 'patch', patch_size

img = cv2.imread(input)
h, w    = img.shape[:2]
img = cv2.resize(img, (w / 4, h / 4))
h, w    = img.shape[:2]
cv2.imwrite('result/resize.png', img)

print   'NLM...'
dst = cv2.fastNlMeansDenoisingColored(img, None, param_h, param_h, patch_size, search_size)

print   'NLM for each colors...'
dst_for_each_color  = nl_means_for_each_color(img, param_h, patch_size, search_size)

cv2.imwrite('result/result.png', dst)
cv2.imwrite('result/result_for_each_color.png', dst_for_each_color)

img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
dst_for_each_color  = cv2.cvtColor(dst_for_each_color, cv2.COLOR_BGR2RGB)

plt.figure(figsize = (10, 7.5))
plt.suptitle('Example of non-local means')

plt.subplot(231)
Example #46
if gray.all()==G.all():
    print(True)

cv2.imshow("final",G)
cv2.waitKey()

#k<n , k==2

G1 = np.bitwise_xor(S1,S2)+np.bitwise_xor(S2,S3)+np.bitwise_xor(S1,S3)
G1%=256
print(PSNR(gray, G1))
cv2.imshow("final_noise_image",G1)
cv2.imwrite("final_noise_image.jpg",G1)
cv2.waitKey()
converted_img = cv2.cvtColor(G1, cv2.COLOR_GRAY2BGR)
dst = cv2.fastNlMeansDenoisingColored(converted_img, None, 10, 10, 7, 15)
g = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
print(PSNR(gray, g))
cv2.imshow("final_denoise_image",g)
cv2.imwrite("final_after_denoise_image.jpg",g)
cv2.waitKey()
Example #47
def avoid_noise(path, filename):
    img = cv2.imread(path + filename)
    dst = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
    grayImage = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
    cv2.imwrite(path + 'cropped_graylevel_' + filename, grayImage)
    return None
Example #48
        for file_name in file_list:
            test_list = glob.glob(filePath + file_name + "/*")
            for image in test_list:
                # image = image
                print(image)
                lowlight(image)
                path_zero = "/media/ivan/Ivan/Final Applied CV/images/result/test_img/capt.jpg"
                img = cv2.imread(path_zero)
                out = simplest_cb(img, 5)
                correct_path = "/media/ivan/Ivan/Final Applied CV/images/result/test_img/correct.jpg"
                cv2.imwrite(correct_path, out)
                print("Correct written")

                img = cv2.imread(correct_path)
                img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255

                # start_wavelet=time.time()

                # denoise_img_wavelet = np.uint8(denoise_wavelet(img2, multichannel=True, rescale_sigma=True) * 255)
                # wavelet_path="/media/ivan/Ivan/Final Applied CV/images/result/test_img/wavelet.jpg"
                # cv2.imwrite(wavelet_path, cv2.cvtColor(denoise_img_wavelet, cv2.COLOR_RGB2BGR))
                # print("Denoise wavelet written,time: ", time.time()-start_wavelet)

                start_opencv = time.time()

                dst = cv2.fastNlMeansDenoisingColored(img, None, 5, 5, 3, 15)
                opencv_path = "/media/ivan/Ivan/Final Applied CV/images/result/test_img/opencv.jpg"
                cv2.imwrite(opencv_path, dst)
                print("Denoise opencv written,time: ",
                      time.time() - start_opencv)
Example #49
"""
denoising = cv2.fastNlMeansDenoising(img)
"""
Colour-image algorithm

fastNlMeansDenoisingColored(src[, dst[, h[, hColor[, templateWindowSize[, searchWindowSize]]]]]) -> dst
defaults: h=3, hColor=3, templateWindow=7, searchWindow=21

Recommended parameters
Noise sigma       | Patch size s | Search window | Decay parameter h
0 < sigma <= 25   | 3 x 3        | 21 x 21       | 0.55 * sigma
25 < sigma <= 55  | 5 x 5        | 35 x 35       | 0.40 * sigma
55 < sigma <= 100 | 7 x 7        | 35 x 35       | 0.35 * sigma
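A sketch of choosing the decay parameter from an estimated noise level per the table above (estimating sigma itself is out of scope; img and the value 30 are illustrative):

def h_for_sigma(sigma):
    if sigma <= 25:
        return 0.55 * sigma
    if sigma <= 55:
        return 0.40 * sigma
    return 0.35 * sigma

h = h_for_sigma(30)  # -> 12.0
dst = cv2.fastNlMeansDenoisingColored(img, None, h, h, 5, 35)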

"""
color = cv2.fastNlMeansDenoisingColored(img)
"""
Denoising methods for sequences of consecutive frames

1.
fastNlMeansDenoisingMulti(srcImgs, imgToDenoiseIndex, temporalWindowSize[, dst[, h[, templateWindowSize[, searchWindowSize]]]]) -> dst

2.
fastNlMeansDenoisingColoredMulti(srcImgs, imgToDenoiseIndex, temporalWindowSize[, dst[, h[, hColor[, templateWindowSize[, searchWindowSize]]]]]) -> dst

"""
denoising = cv2.fastNlMeansDenoisingMulti(srcImgs, imgToDenoiseIndex, temporalWindowSize)
denoising = cv2.fastNlMeansDenoisingColoredMulti(srcImgs, imgToDenoiseIndex, temporalWindowSize)
"""
Histogram equalization
Example #50
def enhance (img):
    img = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img
Example #51
    def findGradient(self):
        for i in range(0,2):
            if i==0:
                img = Image("images/temp_x1.jpg",0)
            else:
                img = Image("images/temp_y2.jpg",0)    
            img_inv = img.invert()
            img_inv.scale(100,100)
            img_bin = img_inv.binarize()

            resize=1
            if img_bin.width<70:
                resize = 1
                img_bin = img_bin.resize(img.width*4,img.height*resize)
            flag=0
            if img_bin.width<img_bin.height:
                flag = 1 
                img_bin = img_bin.rotate(-90,fixed = False)

            img_bin.save("images/temp.jpg")

            img = cv2.imread("images/temp.jpg")
            #h_img, w_img = img.shape[:2]
            #if w_img<h_img:
                 
            img = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21) 


            gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) # grayscale
            _,thresh = cv2.threshold(gray,150,255,cv2.THRESH_BINARY_INV) # threshold
            kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(2,2))
            dilated = cv2.dilate(thresh,kernel,iterations = 13) # dilate
            contours, hierarchy = cv2.findContours(dilated,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) # get contours
            height = np.size(img, 0)
            width = np.size(img, 1)

            index =1
            rect_dim=[]
            for contour in contours:
                # get rectangle bounding contour
                [x,y,w,h] = cv2.boundingRect(contour)
                y=y+h
                rect = [x,y,w,h]
                
                # discard areas that are too large
                if h>0.7*height and w>0.7*width:
                   continue
                
                # discard areas that are too small
                if h<height*0.02 or w<width*0.02:
                    continue
                #print rect
                cv2.rectangle(img,(x,y),(x+w,y-h),(255,0,255),2)
                rect_dim.append(x+w/2.0) 
            cv2.imwrite("images/contoured.jpg", img)     

            rect_dim.sort(reverse=True)
            pix_diff=[]
            #print len(rect_dim)
            #print rect_dim
            for i in range(0,len(rect_dim)-1):
                pix_diff.append(rect_dim[i]-rect_dim[i+1])
            print pix_diff                        
            print sum(pix_diff)/len(pix_diff)
            print sum(pix_diff[:-1])/len(pix_diff[:-1])
            if abs(sum(pix_diff)/len(pix_diff)-sum(pix_diff[:-1])/len(pix_diff[:-1]))>3:
                pix_diff.remove(pix_diff[-1])
            pix_avg=sum(pix_diff)/(len(pix_diff)*1.0)

            pix_avg = pix_avg/resize


            if flag==1:
                pix_avgy=pix_avg
                print "pix_avgy="+str(pix_avgy)
                self.dy = pix_avgy
            else:
                pix_avgx=pix_avg    
                print "pix_avgx="+str(pix_avgx)  
                self.dx = pix_avgx 

Example #52
import cv2 as cv
import numpy as np


def gaussian_noise(image):
    noise = np.zeros(image.shape, image.dtype)
    m = (15, 15, 15)
    s = (30, 30, 30)
    cv.randn(noise, m, s)
    dst = cv.add(image, noise)
    cv.imshow("gaussian noise", dst)
    return dst


src = cv.imread("D:/images/cos.jpg")
cv.imshow("input", src)
h, w = src.shape[:2]
src = gaussian_noise(src)

result1 = cv.blur(src, (5, 5))
cv.imshow("result-1", result1)

result2 = cv.GaussianBlur(src, (5, 5), 0)
cv.imshow("result-2", result2)

result3 = cv.medianBlur(src, 5)
cv.imshow("result-3", result3)

result4 = cv.fastNlMeansDenoisingColored(src, None, 15, 15, 10, 30)
cv.imshow("result-4", result4)

cv.waitKey(0)
cv.destroyAllWindows()
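To rank these filters numerically rather than by eye, a PSNR helper can compare each result against the clean image; this sketch assumes a copy was kept before the noise was added (e.g. clean = src.copy() right after imread):

def psnr(a, b):
    # peak signal-to-noise ratio in dB for 8-bit images
    mse = np.mean((a.astype(np.float64) - b.astype(np.float64)) ** 2)
    return 10 * np.log10(255.0 ** 2 / mse)

for name, result in [("blur", result1), ("gaussian", result2),
                     ("median", result3), ("nl-means", result4)]:
    print(name, psnr(clean, result))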
Example #53
def imgRemoveBackgroundGetMask(img, dieProfile, flag_clearHsv, flag_deNoise, flag_debug):
    """Remove the background from an image.
	See http://docs.opencv.org/master/db/d5c/tutorial_py_bg_subtraction.html#gsc.tab=0
	See https://techgimmick.wordpress.com/2015/03/11/background-subtraction-in-a-webcam-video-stream-using-emgucvopencv-wrapper-for-c/
	See http://stackoverflow.com/questions/10736933/frame-difference-noise
	See http://dsp.stackexchange.com/questions/11445/remove-background-from-image
	See http://www.thoughtfultech.co.uk/blog/simple-background-subtraction-for-loday.html
	See http://answers.opencv.org/question/17577/background-subtraction-from-a-still-image/
	See http://www.robindavid.fr/opencv-tutorial/chapter10-movement-detection-with-background.html
	See https://books.google.com/books?id=iNlOCwAAQBAJ&pg=PA179&lpg=PA179&dq=opencv+python+comparing+images+absdiff+smooth+threshold&source=bl&ots=iS-Ef_Toma&sig=K-aNUjZOEIOavkBsTOy6pPUoZCc&hl=en&sa=X&ved=0ahUKEwiG6MXPtKHKAhVHLmMKHf9UAB0Q6AEIPDAF#v=onepage&q=opencv%20python%20comparing%20images%20absdiff%20smooth%20threshold&f=false
	See http://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/
	"""

    # for difficulty try red on white die (white background problem)

    workingImage = dicerfuncs.copyCvImage(img)
    backgroundImage = dicerfuncs.getBackgroundImage(dieProfile)
    backgroundImage = dieProfile.cropImageIfAppropriate(backgroundImage)

    # new idea to try to remove shadows: convert both to HSV, then zero the V of both
    # this seems to help quite a lot, though it does kill dice that are the same or a similar (gray) colour (hue) as the background
    if (flag_clearHsv):
        # new idea
        workingImage = convertBgrToHsvAndZeroV(workingImage)
        backgroundImage = convertBgrToHsvAndZeroV(backgroundImage)
    # not sure this is needed
    elif (True):
        workingImage = dicerfuncs.convertBgrToColorSpace(workingImage, "LAB")
        backgroundImage = dicerfuncs.convertBgrToColorSpace(backgroundImage, "LAB")




    # 2/12/16 - to get rid of spurious background
    if (True):
        workingImage = cv2.blur(workingImage,(10,10))
        backgroundImage = cv2.blur(backgroundImage,(10,10))




    # dif - subtract background from foreground
    imgDiff = cv2.absdiff(workingImage, backgroundImage)
    if (flag_debug):
        dicerfuncs.cvImgShow("Background Diff", imgDiff, zoom=0.5)

    img_mask = dicerfuncs.copyCvImage(imgDiff)

    # test 1/16/16
    img_mask = cv2.cvtColor(img_mask, cv2.COLOR_BGR2GRAY)
    if (False and flag_debug):
        dicerfuncs.cvImgShow("Background GrayMask", img_mask)

    if (False):
        # try fixed level threshold
        threshlevel = 15
        img_mask = cv2.threshold(img_mask, threshlevel, 255, cv2.THRESH_BINARY)[1]
        dicerfuncs.cvImgShow("Test thresh manual", img_mask)
        return img_mask


    # 2/13/16 - erode to try to get rid of circular dif errors at container color boundaries
    if (False):
        # this doesn't seem to be much needed on a clean white background, nor does it do harm
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        img_mask = cv2.erode(img_mask, kernel, iterations=1)
        img_mask = cv2.dilate(img_mask, kernel, iterations=1)


    if (flag_deNoise):
        img_mask = cv2.fastNlMeansDenoising(img_mask, None, 3, 5, 7)

    img_mask = cv2.threshold(img_mask, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    # dicerfuncs.cvImgShow("Official Background MASK",img_mask)
    return (img_mask, imgDiff)

    # NOTHING USED BELOW HERE FOR NOW


    # test
    if flag_debug:
        if (flag_clearHsv):
            dicerfuncs.cvImgShow('HSV background diff', img_mask)
        else:
            dicerfuncs.cvImgShow('BGR background diff', img_mask)

    # colored denoising? (slow)
    if (False):
        img_mask = cv2.fastNlMeansDenoisingColored(img_mask, None, 10, 10, 7, 21)

    # now grayscale mask
    img_mask = cv2.cvtColor(img_mask, cv2.COLOR_BGR2GRAY)
    # test? (not very helpful)
    # dicerfuncs.cvImgShow('Background mask (grayscale)',img_mask)

    # erode (only if not using proper slow denoising function)
    if (not flag_deNoise):
        # this doesn't seem to be much needed on a clean white background, nor does it do harm
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        img_mask = cv2.erode(img_mask, kernel, iterations=1)

    # dilate to fill in holes (only if not using proper slow denoising)
    # ATTN: for white die on white background, we need to run this dilation in order to capture the large convex area
    # but for clearer dice, it expands it too much and we get too much background
    # so ideally we should dynamically adjust this until we get it right
    # maybe by iterating until we have 1 main clearly separated hull
    if (not flag_deNoise):
        # this makes the mask much bigger than it should be
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        img_mask = cv2.dilate(img_mask, kernel, iterations=1)

    # formal gray denoise (slow)
    # denoising does make it less sensitive to threshold binary so we can make that lower min
    if (flag_deNoise):
        img_mask = cv2.fastNlMeansDenoising(img_mask, None, 3, 7, 21)
    # img_mask = cv2.fastNlMeansDenoising(img_mask,None,3,3,7)


    # threshold -- it's scary sensitive to this..
    # 13 seems pretty darn good, but we have seen some near-cutoffs with the d6 die that is bonewhite with red letter, in which case 5 works well but gives bigger background area
    threshlevel = 10
    threshlevel = 5
    if (flag_deNoise):
        threshlevel = 5
    img_mask = cv2.threshold(img_mask, threshlevel, 255, cv2.THRESH_BINARY)[1]

    return (img_mask, imgDiff)
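convertBgrToHsvAndZeroV is referenced above but not shown; a plausible minimal sketch of the shadow-suppression trick the comments describe (illustrative, not necessarily the author's helper):

def convertBgrToHsvAndZeroV(img):
    # compare hue/saturation only; zeroing V discards illumination, so shadows
    # (which differ mostly in brightness) stop registering as differences
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hsv[:, :, 2] = 0
    return hsv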
Example #54
    def Denoised(self, image):
        """Take the input image and remove obvious noise, like moles or dirt"""
        denoised = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
        return denoised
Example #55
def scan():

    font = cv2.FONT_HERSHEY_SIMPLEX
    print("Scan license...")

    cam = cv2.VideoCapture(0)
    ch = 'continue'
    while ch != 'stop':
        ret, im = cam.read()
        cv2.imshow('im', im)
        if cv2.waitKey(1) == ord('q'):
            cv2.imwrite("demo.jpg", im)
            ch = 'stop'
            break

    cam.release()
    cv2.destroyAllWindows()

    from PIL import Image
    image = Image.open('demo.jpg')
    greyscale_image = image.convert('L')
    greyscale_image.save('demog.jpg')
    img = cv2.imread('demog.jpg')
    b, g, r = cv2.split(img)  # get b,g,r
    rgb_img = cv2.merge([r, g, b])
    dst = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
    b, g, r = cv2.split(dst)  # get b,g,r
    rgb_dst = cv2.merge([r, g, b])  # switch it to rgb
    cv2.imwrite('demogv.jpg', rgb_dst)
    try:
        from PIL import Image
    except ImportError:
        import Image
    import pytesseract
    licence = [
        'licencing', 'dl', 'valid', 'india', 'mcwg', 'lmv', 'name', 'till',
        'throughout', '16(2)', 'bg', 'doi', 'cov', 'dob'
    ]
    rc = [
        'reg', 'chassis', 'engine', 'mfr', 'class', 'registering', 'colour',
        'ownername', 's/w/d', 'model', 'body', 'wheel', 'base', '23A', 'mfg',
        'fuel', 'tax', 'seating', 'cc'
    ]
    rcCounter = 0
    licenceCounter = 0
    foo = pytesseract.image_to_string(
        Image.open('C:\\Users\\pc\\python programs\\demog.jpg'))
    x = open("demogv.txt", "w")
    for word in foo:
        if (
                word.isalpha() or word.isdigit() or word == '/' or word == '('
                or word == ')'
        ):  #if(word == ':' or word == '.' or word == '!' or word == '/' or word == '-' or word == '[' or word == '>' or word == '@' or word == '=' ):
            x.write("%s" % word)
        elif (word == " "):
            x.write("\n")
        else:
            x.write("\n")
            #x = open("licence.txt","w")
            #x.write("%s" % foo)
    x.close()
    file = open("demogv.txt", "r")
    for line in file:
        line = line.strip()
        line = line.lower()
        if line in licence:
            licenceCounter = licenceCounter + 1
            #print(licenceCounter)
        if line in rc:
            rcCounter = rcCounter + 1
            #print(" rc ",rcCounter)
    if licenceCounter > rcCounter:
        print(" \nTHE DOCUMENT IS A LICENSE ")
        chrome_path = 'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe'
        webbrowser.register('chrome', None,
                            webbrowser.BackgroundBrowser(chrome_path), 1)
        webbrowser.get('chrome').open_new_tab(
            'http://localhost/kpit/telematic_main.html')
    elif licenceCounter < rcCounter:
        print(" the document is a rc ")
    else:
        print(" THE DOCUMENT IS NOT A VALID LICENSE ")
    file.close()
    exit()
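
# The keyword-count comparison above generalizes to any pair of word lists. A
# minimal sketch of the same idea as a reusable helper (the function name and
# signature are assumptions, not from the original):
def classify_document(lines, licence_words, rc_words):
    """Return 'licence', 'rc', or 'unknown' based on keyword hits per line."""
    licence_hits = sum(1 for line in lines if line.strip().lower() in licence_words)
    rc_hits = sum(1 for line in lines if line.strip().lower() in rc_words)
    if licence_hits > rc_hits:
        return 'licence'
    if rc_hits > licence_hits:
        return 'rc'
    return 'unknown'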
Example #56
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
#path=r"C:/Users/chaimaelmejgari/Desktop/py/lena.png"
image = cv.imread(
    r"c:/Users/chaimaelmejgari/Desktop/py/lena_bruit_gaussien_alpha.jpg")

image = cv.cvtColor(image, code=cv.COLOR_BGR2RGB)
#dst =cv.fastNlMeansDenoising(image, None, 65, 5, 21)

dst = cv.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
# dst2=cv.cvtColor(dst,code=cv.COLOR_BGR2RGB)

plt.subplot(121), plt.imshow(image)
plt.subplot(122), plt.imshow(dst)
# plt.subplot(122), plt.imshow(dst2)
plt.show()
# imwrite expects BGR order, but image and dst were converted to RGB above,
# so convert back before saving to keep the colors correct on disk
cv.imwrite('nlmeans1_gauus_alpha.jpg', cv.cvtColor(dst, cv.COLOR_RGB2BGR))
cv.imwrite('nlmeans2_gauus_alpha.jpg', cv.cvtColor(image, cv.COLOR_RGB2BGR))
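
# A standalone reminder of the channel convention the conversions above rely
# on: OpenCV reads and writes BGR while matplotlib displays RGB, so a typical
# round trip looks like this (file names are illustrative):
import cv2 as cv

bgr = cv.imread('input.jpg')               # BGR, as stored on disk
rgb = cv.cvtColor(bgr, cv.COLOR_BGR2RGB)   # RGB for matplotlib display
cv.imwrite('output.jpg', cv.cvtColor(rgb, cv.COLOR_RGB2BGR))  # back to BGR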
Example #57
            if cv2.waitKey(1) == ord('8'):
                alpha = alpha - 1

            if cv2.waitKey(1) == ord('4'):
                beta = beta + 1

            if cv2.waitKey(1) == ord('5'):
                beta = beta - 1

            # main frame without the printed overlay, with denoising applied
            if cv2.waitKey(1) == ord('n'):
                denoising = 10
                while True:
                    conectado, frame = video.read()
                    frame = cv2.fastNlMeansDenoisingColored(
                        frame, None, denoising, 10, 3, 9)
                    frame = cv2.addWeighted(frame, alpha,
                                            np.zeros(frame.shape, frame.dtype),
                                            0, beta)

                    cv2.imshow('frame', frame)

                    # read the keyboard once per frame; separate waitKey()
                    # calls can each consume a keypress the other never sees
                    key = cv2.waitKey(1)

                    if key == ord('s'):
                        nl_cont_save = nl_cont_save + 1
                        cv2.imwrite('nl_pic_pict' + str(nl_cont_save) + '.jpg',
                                    frame)

                    if key == ord('r'):
                        alpha = 1
                        beta = 12
                        denoising = 10
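
# A minimal sketch of the same single-read key dispatch applied to the
# alpha/beta handlers at the top of this fragment (variable names carried
# over from the fragment; the 'q' quit key is an assumption):
while True:
    conectado, frame = video.read()
    if not conectado:
        break
    cv2.imshow('frame', frame)
    key = cv2.waitKey(1)   # one read per frame so no keypress is lost
    if key == ord('8'):
        alpha -= 1
    elif key == ord('4'):
        beta += 1
    elif key == ord('5'):
        beta -= 1
    elif key == ord('q'):
        break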
Example #58
import numpy as np
import cv2

img = cv2.imread('ba.jpg')

result = cv2.fastNlMeansDenoisingColored(img, None, 4, 4, 7, 21)


def adjust_gamma(img, gamma):
    # build a lookup table mapping the pixel values [0, 255] to
    # their adjusted gamma values
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0)**invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")

    # apply gamma correction using the lookup table
    res = cv2.LUT(img, table)
    return res


res = adjust_gamma(result, 1.3)

hsv = cv2.cvtColor(res, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
# use saturating arithmetic: plain "+=" on uint8 channels silently wraps
# around (v += 255 actually darkens each pixel by 1), so clip at 0/255 instead
v = cv2.add(v, 255)
h = cv2.subtract(h, 5)
s = cv2.add(s, 30)
final_hsv = cv2.merge((h, s, v))
image = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('res', image)
cv2.waitKey(0)
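
# A quick standalone demo of why the saturating calls above matter: plain
# numpy arithmetic on uint8 channels wraps around instead of clipping.
import numpy as np
import cv2

v = np.array([100, 200], dtype=np.uint8)
print(v + 100)                   # wraps: [200  44]
print(cv2.add(v, 100).ravel())   # saturates: [200 255]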
Example #59
def deNoise(image):
    # return the result: fastNlMeansDenoisingColored does not modify in place
    return cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
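
# A short usage sketch (the input file name is hypothetical):
import cv2

clean = deNoise(cv2.imread('frame.jpg'))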
Example #60
import glob

import cv2
import numpy as np


def image_loading(folder):
    count = 0
    resized_image = []
    new_images = []
    image_norm = []
    image_crop = []
    image_sizing = []
    images = [cv2.imread(file) for file in glob.glob(folder)]
    greyimg = []
    threshold = 25

    # converting to greyscale for cropping purposes
    for j in range(len(images)):
        greyimg.append(cv2.cvtColor(images[j], cv2.COLOR_BGR2GRAY))

    # cropping the image to get rid of extra black border
    for j in range(len(images)):
        hStart = 0
        hEnd = greyimg[j].shape[0]
        vStart = 0
        vEnd = greyimg[j].shape[1]

        # get row and column maxes for each row and column
        hMax = greyimg[j].max(1)
        vMax = greyimg[j].max(0)

        hDone_flag = False
        vDone_flag = False

        # go through the list of max and begin where the pixel value is greater
        # than the threshold
        for i in range(hMax.size):
            if not hDone_flag:
                if hMax[i] > threshold:
                    hStart = i
                    hDone_flag = True

            if hDone_flag:
                if hMax[i] < threshold:
                    hEnd = i
                    break

        for i in range(vMax.size):
            if not vDone_flag:
                if vMax[i] > threshold:
                    vStart = i
                    vDone_flag = True

            if vDone_flag:
                if vMax[i] < threshold:
                    vEnd = i
                    break
        image_crop.append(images[j][hStart:hEnd, vStart:vEnd])
        image_sizing.append(cv2.resize(image_crop[j], (512, 512)))

    # adding left and right images together
    for j in range(len(image_sizing)):
        if (j % 2) == 0:
            new_images.append(np.concatenate((image_sizing[j], image_sizing[j + 1]), axis=1))

    # resizing images to 128,128, colour normalisation and image de-noising
    for i in range(len(new_images)):
        # height, width = new_images[i].shape[:2]
        count += 1
        resized_image.append(cv2.resize(new_images[i], (512, 512)))
        image_norm.append(
            cv2.fastNlMeansDenoisingColored(cv2.normalize(resized_image[i], None, 0, 255, cv2.NORM_MINMAX)))
        # height0, width0 = resized_image.shape[:2]
    print('Total Training Images = ', count)
    return image_norm
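
# The row/column scan above can be written more compactly with numpy. A hedged
# sketch of the same crop (the helper name is an assumption): note it keeps
# everything between the first and last bright row/column, which can differ
# from the scan above when a dark band appears mid-image.
import numpy as np

def crop_black_border(grey, threshold=25):
    """Crop leading/trailing rows and columns whose max intensity <= threshold."""
    rows = np.where(grey.max(axis=1) > threshold)[0]
    cols = np.where(grey.max(axis=0) > threshold)[0]
    if rows.size == 0 or cols.size == 0:
        return grey  # nothing above threshold; leave the image unchanged
    return grey[rows[0]:rows[-1] + 1, cols[0]:cols[-1] + 1]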