Example #1
File: cartoon.py Project: yshao/cs6475
def cartoon(srcColor):
    srcGray=cv2.cvtColor(srcColor,cv2.COLOR_BGR2GRAY)
    print(srcGray.shape, srcColor.shape)
    cv2.medianBlur(srcGray,5,srcGray)

    mask=srcGray.copy().astype(np.uint8)
    edges=srcGray.copy().astype(np.uint8)

    ### sketch detection
    cv2.Laplacian(srcGray,cv2.CV_8U,edges,5)
    cv2.threshold(edges,60,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU,mask)
    outImg=srcColor.copy()
    tmp=outImg.copy()

    ### bilateral filtering ###
    rep = 10
    size = 9
    sigmaColor = 9
    sigmaSpace = 7
    for i in range(rep):

        cv2.bilateralFilter(outImg,size,sigmaColor,sigmaSpace,tmp)
        cv2.bilateralFilter(tmp,size,sigmaColor,sigmaSpace,outImg)

    output=cv2.bitwise_and(srcColor,srcColor,mask=mask)
    cv2.edgePreservingFilter(output,output)

    return output
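A minimal usage sketch for the function above (file names are placeholders; the imports mirror the cv2/np names the excerpt assumes):

import cv2
import numpy as np

srcColor = cv2.imread("input.jpg")   # load a BGR color image
output = cartoon(srcColor)
cv2.imwrite("cartoon_output.jpg", output)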
Example #2
  def test_process_whole_channels(self):
    obj1 = AlgBody()
    obj2 = AlgBody()
    obj2.integer_sliders[0].set_value(5)
    obj2.float_sliders[0].set_value(50.0)
    obj2.float_sliders[1].set_value(40.0)

    test_image = cv2.imread("NEFI1_Images/p_polycephalum.jpg")
    ref_image1 = cv2.bilateralFilter(test_image, 3, 30.0, 30.0)
    ref_image2 = cv2.bilateralFilter(test_image, 11, 50.0, 40.0)
    obj1.process(test_image)
    obj2.process(test_image)
    h,w,d = obj1.result.shape
    for i in range(h):
        for j in range(w):
            for k in range(d):
                test_val1 = obj1.result.item(i,j,k)
                test_val2 = obj2.result.item(i,j,k)
                ref_val1 = ref_image1.item(i,j,k)
                ref_val2 = ref_image2.item(i,j,k)
                diff_ksize3 = abs(test_val1-ref_val1)
                diff_ksize11 = abs(test_val2-ref_val2)
                # Less equal due to numerical issues when bilateral filter is processed on the whole color image
                self.assertLessEqual(diff_ksize3,20)
                self.assertLessEqual(diff_ksize11,20)
Example #3
    def test_greyscale(self):
      obj1 = AlgBody()
      obj2 = AlgBody()
      obj2.integer_sliders[0].set_value(5)
      obj2.float_sliders[0].set_value(50.0)
      obj2.float_sliders[1].set_value(40.0)

      test_img = cv2.imread("NEFI1_Images/p_polycephalum.jpg")
      test_image = cv2.cvtColor(test_img, cv2.COLOR_RGB2GRAY)
      ref_image1 = cv2.bilateralFilter(test_image, 3, 30.0, 30.0)
      ref_image2 = cv2.bilateralFilter(test_image, 11, 50.0, 40.0)

      input = [test_image]
      obj1.process(input)
      obj2.process(input)
      h,w = obj1.result["img"].shape
      for i in range(h):
          for j in range(w):
              test_val1 = obj1.result["img"].item(i,j)
              test_val2 = obj2.result["img"].item(i,j)
              ref_val1 = ref_image1.item(i,j)
              ref_val2 = ref_image2.item(i,j)
              diff_ksize3 = abs(test_val1-ref_val1)
              diff_ksize11 = abs(test_val2-ref_val2)
              # exact match expected: the numerical issues only arise when the filter runs on a whole color image
              self.assertEqual(diff_ksize3,0)
              self.assertEqual(diff_ksize11,0)
Example #4
def cartoonify(image):
    rows, cols = image.shape[0:2]
    try:
        img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    except cv2.error:
        img_gray = image
    image_median_blur = cv2.medianBlur(img_gray, 7)

    #mask = np.zeros_like(image)
    mask = np.zeros((rows, cols, 3), dtype=np.uint8)
    #edges = np.zeros_like(image)
    edges = np.zeros((rows, cols, 3), dtype=np.uint8)

    edges = cv2.Laplacian(image_median_blur, cv2.CV_8U, ksize=5)
    ret, mask = cv2.threshold(edges, 4, 255, cv2.THRESH_BINARY_INV)
    #mask = remove_pepper_noise(mask)
    sketch = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    repetition = 3
    cpy_image = image.copy()
    for i in range(repetition):
        size = 10
        sigmacolor = 20
        sigmaspace = 20
        tmp = cv2.bilateralFilter(cpy_image, size, sigmaColor=sigmacolor, sigmaSpace=sigmaspace)
        cpy_image = cv2.bilateralFilter(tmp, size, sigmaColor=sigmacolor, sigmaSpace=sigmaspace)
    return cpy_image
Example #5
 def apply_qt_elements_filtering(image):
     #
     # apply bilateral filter on IMAGE to smooth colors while keeping edges
     cv2.bilateralFilter(UtilityOperations.crop_to_720p(image), 3, 255, 50,
                         dst=MultiprocessOperations.shared_memory_image)
     #
     # crop elements from IMAGE
     MultiprocessOperations.wait_for_element_cropping()
     #
     # upload RED channels to GPU
     TheanoOperations.__shared_red.set_value(
         MultiprocessOperations.shared_memory_red_channel, borrow=True)
     #
     # upload GREEN channels to GPU
     TheanoOperations.__shared_green.set_value(
         MultiprocessOperations.shared_memory_green_channel, borrow=True)
     #
     # upload BLUE channels to GPU
     TheanoOperations.__shared_blue.set_value(
         MultiprocessOperations.shared_memory_blue_channel, borrow=True)
     #
     # download FILTERING result from GPU
     np.copyto(MultiprocessOperations.shared_memory_qt_filtered_elements,
               TheanoOperations.__apply_binary_filtering)
     #
     # apply IMAGE threshold CLASSIFICATION
     MultiprocessOperations.wait_for_qt_image_classification()
     return MultiprocessOperations.shared_memory_qt_filtered_elements
Example #6
def get_cars(im1, im2):

	img1 = cv2.imread(im1)
	img2 = cv2.imread(im2)

	# equ = cv2.equalizeHist(img1)
	# img1 = np.hstack((img1, equ))

	# clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
	# cl1 = clahe.apply(img1)

	# imshow(cl1)
	# imshow(img2)

	img1 = cv2.bilateralFilter(img1, 10, 75, 75)
	img2 = cv2.bilateralFilter(img2, 10, 75, 75)

	img = cv2.addWeighted(img1, 1, -img2, 1, 0)  # note: negating a uint8 image wraps modulo 256
	img = cv2.bilateralFilter(img, 10, 75, 75)

	gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

	kernel = np.ones((3, 3), np.uint8)
	opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN,kernel, iterations = 2)

	sure_bg = cv2.dilate(opening, kernel, iterations=3)

	cnts = cv2.findContours(sure_bg.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
	cnts = cnts[0] #if imutils.is_cv2() else cnts[1]

	return list(filter(lambda c: cv2.contourArea(c) > 3000, cnts))
Example #7
File: tools.py Project: Trineon/lisa
def smoothing(data, d=10, sigmaColor=10, sigmaSpace=10, sliceId=2):
    if data.ndim == 3:
        if sliceId == 2:
            for idx in range(data.shape[2]):
                data[:,:,idx] = cv2.bilateralFilter(data[:,:,idx], d=d, sigmaColor=sigmaColor, sigmaSpace=sigmaSpace)
        elif sliceId == 0:
            for idx in range(data.shape[0]):
                data[idx,:,:] = cv2.bilateralFilter(data[idx,:,:], d=d, sigmaColor=sigmaColor, sigmaSpace=sigmaSpace)
    else:
        data = cv2.bilateralFilter(data, d=d, sigmaColor=sigmaColor, sigmaSpace=sigmaSpace)
    return data
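A short sketch of calling the helper above on a synthetic 3-D stack; the shape and parameters are illustrative only, and sliceId=0 is used so each data[idx,:,:] slice is contiguous:

import cv2
import numpy as np

volume = (np.random.rand(8, 64, 64) * 255).astype(np.uint8)  # 8 slices of 64x64
smoothed = smoothing(volume, d=10, sigmaColor=10, sigmaSpace=10, sliceId=0)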
Example #8
def bilateralFilter(img, d, sigmaColor, sigmaSpace):

    imgread = cv2.imread(img)

    d = int(d)
    sigmaColor = int(sigmaColor)
    sigmaSpace = int(sigmaSpace)

    imgread = cv2.bilateralFilter(imgread, d, sigmaColor, sigmaSpace)

    return imgread
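The int() casts suggest the parameters arrive as strings (for example from a command line); a hedged usage sketch with a placeholder path:

filtered = bilateralFilter("input.jpg", "9", "75", "75")  # strings are cast to int inside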
Example #9
def Cartoon(img):
	cols, rows, dim = img.shape
	gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

	#remove Pixel noise
	gray_img = cv2.medianBlur(gray_img, 7)
	gray_img = cv2.resize(gray_img, (rows//2, cols//2))

	#create Mask and Edges
	mask = numpy.zeros((cols,rows,dim), numpy.uint8) 
	edges = numpy.zeros((cols,rows,3), numpy.uint8) 

	#creating Mask for Pencil Draw Effect
	edges = cv2.Laplacian(gray_img, cv2.CV_8U)
	ret, mask = cv2.threshold(edges, 8, 75, cv2.THRESH_BINARY_INV)

	#Shrink Image to Half the size for smaller scale of filtering
	#full resolution is slow, and not entirely needed
	small_image = cv2.resize(img, (rows//4, cols//4))

	#Bilateral Filtering for Cartoon Effect, 
	#enhance the edges and blurring the flat regions
	temp = numpy.zeros((cols//4, rows//4, 3), numpy.uint8)
	repetitions = 10
	size = 9
	sigmaColor = 9
	sigmaSpace = 7
	for i in range(repetitions):
		temp = cv2.bilateralFilter(small_image, size, sigmaColor, sigmaSpace)
		small_image = cv2.bilateralFilter(temp, size, sigmaColor, sigmaSpace)

	#resize image back to Normal
	back_size = cv2.resize(small_image, (rows//2, cols//2))
	mask = cv2.resize(mask, (rows//2, cols//2))
	res = numpy.zeros((cols//2, rows//2, dim), numpy.uint8)

	#apply Mask For Effect
	# First, manipulate (multiply, possibly)
	# your mask array so that the values you want (i.e.: those not masked
	# out) have value 1. Then, multiply your source image by your mask. That
	# will make the resulting image have the original pixels where the mask
	# == 1, and 0 (i.e.:black) where the mask == 0.

	# max_value = numpy.max(mask)
	# mask/=max_value
	# res = back_size*mask

	res1 = cv2.bitwise_and(back_size,back_size,mask = mask)
	res2 = cv2.bitwise_and(gray_img,gray_img,mask = mask)
	cv2.imshow('Cartoon', res1)
	cv2.imshow('Sketch', res2)
	cv2.waitKey(0)
	cv2.destroyAllWindows()
Example #10
def test(fname):
    img = cv2.imread(fname, 0)
    img = cv2.bilateralFilter(img, 9, 90, 16)
    img = cv2.GaussianBlur(img,(5,5),0)
    #binImg = np.zeros((img.shape[0], img.shape[1]), np.uint8)   
    binImg = cv2.adaptiveThreshold(img, 1, cv2.ADAPTIVE_THRESH_MEAN_C, 
                                   cv2.THRESH_BINARY, 55, -3)
    #cv2.bilateralFilter(binImg, 9, 90,16)
    #binImg = cv2.GaussianBlur(binImg, (3,3), 0)
    #ret, binImg = cv2.threshold(img, 35000, 1, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    plt.imshow(binImg, cmap = 'gray', interpolation = 'bicubic')
    plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
    plt.show()
Example #11
	def ext(self,img,classnum):
		#Hu-moments Extraction
		gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
		thresh=cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
		blur = cv2.bilateralFilter(thresh,15,80,80)
		Humom=cv2.HuMoments(cv2.moments(blur)).flatten()
		#RGB Means extraction
		means = cv2.mean(img)
		means = means[:3]
		#Histogram extraction
		hist = cv2.calcHist([img], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
		hist = hist.flatten()
		#Contour Hu
		gray2=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
		thresh=cv2.adaptiveThreshold(gray2,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
		blur = cv2.bilateralFilter(thresh,15,80,80)
		gray_lap = cv2.Laplacian(blur,cv2.CV_16S,ksize = 3,scale = 1,delta = 0)
		dst = cv2.convertScaleAbs(gray_lap)
		Humom2=cv2.HuMoments(cv2.moments(dst)).flatten()
		'''
		#Mode
		most_intensity=mode(img)[0][0]
		'''
		#Stats Extraction
		(means2, stds) = cv2.meanStdDev(img)
		stats = np.concatenate([means2, stds]).flatten()
		#Class appending
		Humom=np.append(Humom,classnum)
		means=np.append(means,classnum)
		hist=np.append(hist,classnum)
		stats=np.append(stats,classnum)
		Humom2=np.append(Humom2,classnum)
		#most_intensity=np.append(most_intensity,classnum)
		
		with open('HuMoments.csv', 'ab') as csvfile:
			spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
			spamwriter.writerow(Humom)
		'''
		with open('RGB.csv', 'ab') as csvfile:
			spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
			spamwriter.writerow(means)
		'''
		with open('MeanStdDev.csv', 'ab') as csvfile:
			spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
			spamwriter.writerow(stats)

		with open('ContourHu.csv', 'ab') as csvfile:
			spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
			spamwriter.writerow(Humom2)
Example #12
    def detect_screen(self, blur_pars=(21, 17, 17), draw_contours=True):
        gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.bilateralFilter(gray, *blur_pars)
        edged = cv2.Canny(gray, 30, 200)

        (_, contours, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        contours = sorted(contours, key=cv2.contourArea, reverse=True)[:5]
        screen_contour = []

        for c in contours:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)

            if len(approx) == 4:
                screen_contour = approx
                break

        if len(screen_contour):
            global global_rect

            coords = np.array([[i[0][0], i[0][1]] for i in screen_contour])
            if draw_contours:
                cv2.drawContours(self.frame, [screen_contour], -1, (0, 0, 255), 3)

            global_rect = self.order_points(coords)
Example #13
def otsuTwo(img, img_file, man_img, mask=None):

    # blur = cv2.GaussianBlur(img, (5,5),0)
    blur = cv2.bilateralFilter(img, 5, 100, 100)

    thresholds = multithresholdOtsu(blur,mask)
    th1 = thresholds[0]
    th2 = thresholds[1]


    if mask is None:
        ret, thresh1 = cv2.threshold(blur,th1,255,cv2.THRESH_BINARY)
        ret, thresh2 = cv2.threshold(blur,th2,255,cv2.THRESH_BINARY_INV)
    else:
        combined_img = cv2.bitwise_and(blur, blur, mask=mask)
        ret, thresh1 = cv2.threshold(combined_img,th1,255,cv2.THRESH_BINARY)
        ret, thresh2 = cv2.threshold(combined_img,th2,255,cv2.THRESH_BINARY_INV)

    out_img_o = cv2.bitwise_and(thresh1, thresh2, mask=None)
    out_info_o = "_otsu_%d-%d" % (th1, th2)
    out_str_o = out_info_o + '.png'
    out_file_o = re.sub(r'\.jpg', out_str_o, img_file)
    cv2.imwrite(out_file_o, out_img_o)
    t = evaluation.findTotals(out_img_o, man_img)
    with open('o2_all.txt', 'a') as f:
        f.write(img_file + " " + str(t[0]) + " " + str(t[1]) + " " + str(t[2]) + " " + str(t[3]) + "\n")
Example #14
def cartonify_image(image):
    """
    convert an inpuy image to a cartoon-like image
    Args:
       image: input PIL image

    Returns:
        out (numpy.ndarray): A grasycale or color image of dtype uint8, with
                             the shape of image
    """

    output = np.array(image)
    x, y, c = output.shape

    # noise removal while keeping edges sharp
    for i in range(c):
        output[:, :, i] = cv2.bilateralFilter(output[:, :, i], 5, 50, 50)

    #edges in an image using the Canny algorithm
    edge = cv2.Canny(output, 100, 200)
    #convert image into RGB color space
    output = cv2.cvtColor(output, cv2.COLOR_RGB2HSV)

    #histogram array
    hists = []

    #Compute the histogram of a set of data.
    #H
    hist, _ = np.histogram(output[:, :, 0], bins=np.arange(180+1))
    hists.append(hist)
    #S
    hist, _ = np.histogram(output[:, :, 1], bins=np.arange(256+1))
    hists.append(hist)
    #V
    hist, _ = np.histogram(output[:, :, 2], bins=np.arange(256+1))
    hists.append(hist)

    centroids = []
    for h in hists:
        centroids.append(kmeans_histogram(h))
    print("centroids: {0}".format(centroids))

    output = output.reshape((-1, c))
    for i in range(c):
        channel = output[:, i]
        index = np.argmin(np.abs(channel[:, np.newaxis] - centroids[i]), axis=1)
        output[:, i] = centroids[i][index]
    output = output.reshape((x, y, c))
    output = cv2.cvtColor(output, cv2.COLOR_HSV2RGB)

    # Retrieves contours from the binary image
    # RETR_EXTERNAL: retrieves only the extreme outer contours
    # CHAIN_APPROX_NONE= stores absolutely all the contour points
    contours, _ = cv2.findContours(edge,
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)

    # Draws contours outlines
    cv2.drawContours(output, contours, -1, 0, thickness=1)
    return output
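Per the docstring the input is a PIL image; a minimal sketch (the file name is a placeholder, and kmeans_histogram is defined elsewhere in the source project):

from PIL import Image

pil_img = Image.open("photo.jpg").convert("RGB")
cartoon_img = cartonify_image(pil_img)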
Example #15
def upload_file():

	if request.method == 'POST':

		file = request.files['file']
		response = {}

		if file and allowed_file(file.filename):
			now = datetime.now()
			filename = now.strftime('%Y-%m-%d-%H-%M-%S') + '_' + secure_filename(file.filename) 
			file.save(os.path.join(UPLOAD_FOLDER, filename))
			response['first_save'] = url_for('uploaded_file', filename=filename)
			#load image
			dir = os.curdir
			img = filename
			path = os.path.join(UPLOAD_FOLDER,img)
			raw_image = cv2.imread(path,0)
			
			#blur image to remove noise
			#sm_image = cv2.medianBlur(raw_image, 3)
			threshold = int(request.form['threshold'])
			sm_image = cv2.bilateralFilter(raw_image, 25, 50, 50)
			ret,bw_image = cv2.threshold(sm_image,threshold,255,cv2.THRESH_BINARY_INV)
			cv2.imwrite(os.path.join(TEMP_FOLDER, filename), bw_image)
			return jsonify({
				'url': 'static/temporary/'+filename,
				'error': 0,
				'threshold': threshold
			})

		return jsonify({'error': 1})

	else:
		return jsonify({'error': 1})
Example #16
def good_corners():
    img = cv2.imread('Z:/Cartographer/Test.png')
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    blur = cv2.bilateralFilter(gray, 7, 175, 175)
    corners = cv2.goodFeaturesToTrack(blur,20,0.01,10)
    corners = np.int0(corners)

    #make a set to keep track of the coordinates that are detected
    coordinates = []
    dict_gradient_maps = {}
    
    for i in corners:
        x,y = i.ravel()
        point = Point()
        point.x = x
        point.y = y
        coordinates.append(point)
        cv2.circle(img,(x,y),3,255,-1)

    while len(coordinates) > 2:
        smallest = find_smallest(coordinates)
        #if it's been part of a gradient check and the gradient coordinate list
        #is a good size, then the coordinates are removed from the list - they
        #don't belong in there anymore
        #we don't want to check the gradient for something we're checking against
        coordinates.remove(smallest)
        dict_gradient_maps[smallest] = gradients(coordinates)
        
    dict_gradient_maps = {key: value for key, value in dict_gradient_maps.items() if len(value) > 0}    
    plt.imshow(img),plt.show()
    print(dict_gradient_maps)
Example #17
    def SMGetSM(self, src):
        # definitions
        size = src.shape
        width  = size[1]
        height = size[0]
        # check
#        if(width != self.width or height != self.height):
#            sys.exit("size mismatch")
        # extracting individual color channels
        R, G, B, I = self.SMExtractRGBI(src)
        # extracting feature maps
        IFM = self.IFMGetFM(I)
        CFM_RG, CFM_BY = self.CFMGetFM(R, G, B)
        OFM = self.OFMGetFM(I)
        MFM_X, MFM_Y = self.MFMGetFM(I)
        # extracting conspicuity maps
        ICM = self.ICMGetCM(IFM)
        CCM = self.CCMGetCM(CFM_RG, CFM_BY)
        OCM = self.OCMGetCM(OFM)
        MCM = self.MCMGetCM(MFM_X, MFM_Y)
        # adding all the conspicuity maps to form a saliency map
        wi = pySaliencyMapDefs.weight_intensity
        wc = pySaliencyMapDefs.weight_color
        wo = pySaliencyMapDefs.weight_orientation
        wm = pySaliencyMapDefs.weight_motion
        SMMat = wi*ICM + wc*CCM + wo*OCM + wm*MCM
        # normalize
        normalizedSM = self.SMRangeNormalize(SMMat)
        normalizedSM2 = normalizedSM.astype(np.float32)
        smoothedSM = cv2.bilateralFilter(normalizedSM2, 7, 3, 1.55)
        self.SM = cv2.resize(smoothedSM, (width,height), interpolation=cv2.INTER_NEAREST)
        # return
        return self.SM
Example #18
def processCard(image_o,scale):
    #Scale image down so functions work better and turns to greyscale
    image = cv2.resize(image_o, (image_o.shape[1]//scale, image_o.shape[0]//scale))

    #Processing image to improve reliability of finding corners
    image = cv2.bilateralFilter(image, 5, 150, 50)
    imgray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)

    kernel = np.ones((5,5),np.uint8)

    imgray = cv2.morphologyEx(imgray,cv2.MORPH_OPEN,kernel)
    imgray = cv2.morphologyEx(imgray,cv2.MORPH_CLOSE,kernel)

    imgray = cv2.Canny(imgray,40,50)
    """ Ploting of image before and after processing
    plt.subplot(121)
    plt.imshow(cv2.cvtColor(image_o, cv2.COLOR_BGR2RGB))
    plt.title("Original")
    plt.axis("off")
    plt.subplot(122)
    plt.axis("off")
    plt.title("After Canny Edge")
    plt.imshow(imgray)
    plt.gray()
    plt.show()
    """

    return imgray
Example #19
 def circles(self,cv_image):
     cv_image=cv2.resize(cv_image,dsize=(self.screen['width'],self.screen['height']))
     #if self.blur:
     #    cv_image=cv2.GaussianBlur(cv_image,ksize=[5,5],sigmaX=0)
     
     channels=cv2.split(cv_image)
     channels[0] = cv2.equalizeHist(channels[0])
     channels[1] = cv2.equalizeHist(channels[1])
     #channels[2] = cv2.equalizeHist(channels[2])
     img = cv2.merge(channels, cv_image)
     img=cv2.bilateralFilter(img, -1, 5, 0.1)
     kern = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
     img=cv2.morphologyEx(img, cv2.MORPH_CLOSE, kern)
     hsvImg=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
     luvImg=cv2.cvtColor(img,cv2.COLOR_BGR2LUV)
     gauss = cv2.GaussianBlur(luvImg, ksize=(5,5), sigmaX=10)
     sharpened = cv2.addWeighted(luvImg, 1.5, gauss, -0.6, 0)  # unsharp-mask style enhancement; renamed to avoid shadowing built-in sum
     enhancedImg = cv2.medianBlur(sharpened, 3)
     ch=cv2.split(enhancedImg)
     mask = cv2.inRange(ch[2],self.highThresh[2],self.lowThresh[2])
     mask1=cv2.inRange(ch[1],self.highThresh[0],self.lowThresh[0])
     mask2=cv2.inRange(ch[2],self.highThresh[1],self.lowThresh[1])
     
    # cv2.imshow(mask)
     #cv2.imshow(mask1)
     #cv2.imshow(mask2)
     mask_out=cv2.cvtColor(mask,cv2.COLOR_GRAY2BGR)
     try:
         self.image_filter_pub.publish(self.bridge.cv2_to_imgmsg(mask_out, encoding="bgr8"))
     except CvBridgeError as e:
         rospy.logerr(e)
Example #20
def colorchange(pic):
    img = cv2.imread(pic)
    # n (noise-pixel count) and the colour triples (b, g, r) / (B, G, R) used below are module-level globals in the source project
    for k in range(n):
        i = int(numpy.random.random() * img.shape[1])  
        j = int(numpy.random.random() * img.shape[0]) 
        if img.ndim == 2:   
            img[j,i] = 255        
        elif img.ndim == 3:     
            img[j,i,0]= 255    
            img[j,i,1]= 255    
            img[j,i,2]= 255  
    #cv2.imwrite("th3.png", img, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])       
    dst=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 

    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    cl1 = clahe.apply(dst)
    #cv2.imwrite("cl1.png", cl1, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
    th4 = cv2.adaptiveThreshold(cl1,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,11,2)

    #th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            #cv2.THRESH_BINARY,11,2)

    cv2.imwrite("im2.png", th4, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
    cha=cv2.imread('im2.png')
    ori = cv2.imread(pic)

    
    cha[numpy.where((cha == [255,255,255]).all(axis = 2))] = [b,g,r]
    cha[numpy.where((cha == [0,0,0]).all(axis = 2))] =[B,G,R]

    dst= cv2.addWeighted(cha,0.7,ori,0.3,0) 
    new= cv2.bilateralFilter(dst,7,75,75) 
    return new
Example #21
def procesar(img):  # default is red
    bilBlur = cv2.bilateralFilter(img,7,75,75)
    hsv = cv2.cvtColor(bilBlur, cv2.COLOR_BGR2HSV)
    mask =  cv2.inRange(hsv,rango['control'][0],rango['control'][1])# + cv2.inRange(hsv,rango['piel'][0],rango['piel'][1]) + cv2.inRange(hsv,rango['rojo'][0],rango['rojo'][1]) 
    # only the control mask if the sum is commented out
    mk1 = cv2.inRange(hsv,rango['control'][0],rango['control'][1])# + cv2.inRange(hsv,rango['piel'][0],rango['piel'][1]) + cv2.inRange(hsv,rango['rojo'][0],rango['rojo'][1]) 
    return mask, mk1
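procesar reads HSV bounds from a module-level rango dict; a sketch of the assumed structure, with illustrative values only:

import cv2
import numpy as np

rango = {'control': (np.array([0, 70, 50]), np.array([10, 255, 255]))}
frame = cv2.imread("frame.jpg")     # placeholder input
mask, mk1 = procesar(frame)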
Example #22
	def cartoonizer(self, imgRGB):
		# "cv3" below is presumably an OpenCV alias in the source project (e.g. import cv2 as cv3)
		numDownSamples = 2		# number of downscaling steps
		numBilateralFilters = 7 # number of bilateral filtering steps

		# -- STEP 1 --
		# downsample image using Gaussian pyramid
		imgColor = imgRGB
		for i in range(numDownSamples):
			imgColor = cv3.pyrDown(imgColor)
			
		# repeatedly apply small bilateral filter instead of applying
		# one large filter
		for i in range(numBilateralFilters):
			imgColor = cv3.bilateralFilter(imgColor, 9, 9, 7)
			
		# upsample image to original size
		for i in range(numDownSamples):
			imgColor = cv3.pyrUp(imgColor)

		# -- STEPS 2 and 3 --
		# convert to grayscale and apply median blur
		imgGray = cv3.cvtColor(imgRGB, cv3.COLOR_RGB2GRAY)
		imgBlur = cv3.medianBlur(imgGray, 7)

		# -- STEP 4 --
		# detect and enhance edges
		imgEdge = cv3.adaptiveThreshold(imgBlur, 255, cv3.ADAPTIVE_THRESH_MEAN_C, cv3.THRESH_BINARY, 9, 2)

		# -- STEP 5 --
		# convert back to color so that it can be bit-ANDed with color image
		imgEdge = cv3.cvtColor(imgEdge, cv3.COLOR_GRAY2RGB)
		return cv3.bitwise_and(imgColor, imgEdge)
Example #23
File: nms.py Project: danilons/rcc8-1
def count_objects(image, pixel_mask):
    """
    Count objects according to a pixel_mask
    """
    img = np.zeros(image.shape, dtype=np.uint8)

    if pixel_mask not in image:
        return []

    x, y = np.where(image == pixel_mask)
    img[x, y] = 255.0

    # ensure it is a binarized image
    assert len(np.unique(img)) == 2

    ftr = cv2.bilateralFilter(img, 11, 17, 17)
    blocks = []
    contours, _ = cv2.findContours(ftr.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 50:
            x, y, w, h = cv2.boundingRect(cnt)
            blocks.append(np.array([y, x, y + h, x + w]))

    return nms(np.array(blocks, dtype=np.float32))
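A small sketch of driving count_objects with a synthetic label image (the nms helper it returns through is defined elsewhere in nms.py):

import numpy as np

labels = np.zeros((100, 100), dtype=np.uint8)
labels[20:60, 30:70] = 7            # one labelled region with value 7
boxes = count_objects(labels, 7)    # bounding boxes kept after nms()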
Example #24
	def filterBefore(self):
		print("Applying some bilateral Filter.")
		img = cv2.imread('Images/cheetahGray2.png')
		bilateral = cv2.bilateralFilter(img,9,75,75)
		cv2.imwrite('Images/cheetahBilateralFilterBefore.png',bilateral)
		self.newImage.save("Images/cheetahBilateralFilterBefore.png",self.imag.format)
		print("Bilateral Filter Done.")
Example #25
def main():

    # Specify image file (image #2 cooperates better)
    FILE = 'res/box-2.jpg'

    # Stream the image as a whole
    originalObservable = Observable.just(cv2.imread(FILE))

    # Manipulate the image to bring out the box
    imageObservable = (
        originalObservable
        .map(lambda img: cv2.bilateralFilter(img, 9, 75, 75))       # bilateral blur (denoise)
        .map(lambda img: cv2.Canny(img, 100, 100, apertureSize=3))  # detect the edges
    )

    # Detect any lines and represent them with [startPoint, endPoint] in list [lines]
    lines = (
        imageObservable
        # transform the stream into multiple numpy vectors (each line represented by [[x1, y1, x2, y2]])
        .flat_map(lambda img: cv2.HoughLinesP(img, 1, pi/180, 50, minLineLength=70, maxLineGap=40))
        .map(lambda wrapped_line: wrapped_line[0])                  # flatten line array into [x1, y1, x2, y2]
        .map(lambda lv, _: [Line((lv[0], lv[1]), (lv[2], lv[3]))])  # pack line vector into [((x1, y1), (x2, y2))]
        .reduce(lambda accumulated, line: accumulated + line, [])   # reduce all lines into one [line, line, ..]
    )

    # For debugging/visualization purposes only
    # http://reactivex.io/documentation/operators/zip.html
    lines.zip(originalObservable, lambda lines, image: (image, lines)).subscribe(ImageObserver())
Example #26
File: bw.py Project: killinit/drone
def get_contours(image, out):
    image = cv2.imread(image)
     
    # convert the image to grayscale, blur it, and find edges
    # in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.bilateralFilter(gray, 11, 17, 17)
    edged = cv2.Canny(gray, 30, 200)

    # find contours in the edged image, keep only the largest
    # ones, and initialize our screen contour
    (img, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:100]
    screenCnt = None

    # loop over our contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.01 * peri, True)

        # if our approximated contour has four points, then
        # we can assume that we have found our screen
        if len(approx) == 4:
            cv2.drawContours(image, [approx], -1, (0, 255, 0), 3)
    #cv2.imshow("Image", image)
    cv2.imwrite(out, image)
Example #27
def cartoonize_image(img, ds_factor=4, sketch_mode=False):
    # Convert image to grayscale
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Apply median filter to the grayscale image
    img_gray = cv2.medianBlur(img_gray, 7)

    # Detect edges in the image and threshold it
    edges = cv2.Laplacian(img_gray, cv2.CV_8U, ksize=5)
    ret, mask = cv2.threshold(edges, 100, 255, cv2.THRESH_BINARY_INV)

    # 'mask' is the sketch of the image
    if sketch_mode:
        return cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)

    # Resize the image to a smaller size for faster computation
    img_small = cv2.resize(img, None, fx=1.0 / ds_factor, fy=1.0 / ds_factor,
                           interpolation=cv2.INTER_AREA)
    num_repetitions = 10
    sigma_color = 5
    sigma_space = 7
    size = 5

    # Apply the bilateral filter to the image multiple times
    for i in range(num_repetitions):
        img_small = cv2.bilateralFilter(
            img_small, size, sigma_color, sigma_space)

    img_output = cv2.resize(img_small, None, fx=ds_factor, fy=ds_factor,
                            interpolation=cv2.INTER_LINEAR)
    dst = np.zeros(img_gray.shape)

    # Add the thick boundary lines to the image using 'AND' operator
    dst = cv2.bitwise_and(img_output, img_output, mask=mask)
    return dst
Example #28
def baseDetailSeparationBilateral(I_32F, sigma_space=5.0, sigma_range=0.1):
    B = cv2.bilateralFilter(I_32F, 0, sigma_range, sigma_space)
    #g_filter = GuidedFilter(I_32F, radius=sigma_space, epsilon=sigma_range)
    #B = g_filter.filter(I_32F)
    D = I_32F - B

    return B, D
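Base/detail separation is usually inverted by recombining the layers; a sketch assuming a float32 grayscale input scaled to [0, 1] (path is a placeholder):

import cv2
import numpy as np

I_32F = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.0
B, D = baseDetailSeparationBilateral(I_32F, sigma_space=5.0, sigma_range=0.1)
sharpened = np.clip(B + 2.0 * D, 0.0, 1.0)  # boost the detail layer for simple sharpening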
Example #29
def main():
	print("[ shape and colour classification  ]")

	# get image filname
	img_filename = sys.argv[1] if len(sys.argv) > 1 else "img1.png"

	# load image
	img = cv2.imread("images/%s" % img_filename)
	if img is None:
		print "image could not be loaded!"
		sys.exit()

	# extract shapes
	gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
	blur_img = cv2.bilateralFilter(gray_img, 10, 30, 30)
	thresh_img = cv2.threshold(blur_img, 60, 255, cv2.THRESH_BINARY)[1]

	# detect contours
	contours = cv2.findContours(thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]

	# detect shapes and colours
	shapes = ShapeDetector(contours).shapes
	colours = ColourDetector(contours, img).colours

	# draw results
	for cnt, c, s in zip(contours, colours, shapes):
			cx, cy = calc_center(cnt)
			cv2.circle(img, (cx,cy), 3, WHITE, -1)
			cv2.putText(img, c, (cx+5, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.4, WHITE, 1)
			cv2.putText(img, s, (cx+5, cy+12), cv2.FONT_HERSHEY_SIMPLEX, 0.4, WHITE, 1)

	# display resulting image
	cv2.imwrite("images/result.png", img)
	cv2.imshow("result", img)
	cv2.waitKey(0)
Example #30
def get_prepped_image(image, convertToGrayscale=False):
    if convertToGrayscale:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Removes noise while preserving edges
    filtered_image = cv2.bilateralFilter(image, 50, 50, 50)
    # Find edges in image
    return cv2.Canny(filtered_image, 30, 200)
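A quick sketch of the prep helper on a BGR frame; the path is a placeholder, and note that d=50 makes the bilateral filter quite slow on large images:

import cv2

frame = cv2.imread("frame.jpg")
edges = get_prepped_image(frame, convertToGrayscale=True)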
Example #31
def chess_corners(image):
    image = cv2.blur(image, (3, 3))
    ratio = image.shape[0] / 300.0
    orig = image.copy()
    image = imutils.resize(image, height=300)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #gray = cv2.equalizeHist(gray)
    gray = cv2.bilateralFilter(gray, 11, 17, 17)
    edged = cv2.Canny(gray, 50, 70)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    dilated = cv2.dilate(edged, kernel)
    #cv2.imshow('dil',dilated)
    _, cnts, _ = cv2.findContours(dilated.copy(), cv2.RETR_EXTERNAL,
                                  cv2.CHAIN_APPROX_SIMPLE)
    #(_,cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.07 * peri, True)
        if len(approx) == 4:
            screenCnt = approx
            break

    #cv2.drawContours(image, cnts, 0, (0, 255, 0), 3)
    #cv2.imshow('Image before transform',image)
    lu = (10000, 10000)
    ru = (0, 10000)
    ld = (10000, 0)
    rd = (0, 0)
    print('first contour')
    '''for i in range(len(cnts[0])):
            if cnts[0][i][0][0] <= lu[0] and cnts[0][i][0][1] <= lu[1]:
                lu = cnts[0][i][0]
            if cnts[0][i][0][0] >= ru[0] and cnts[0][i][0][1] <= ru[1]:
                ru = cnts[0][i][0]
            if cnts[0][i][0][0] <= ld[0]+3 and cnts[0][i][0][1] >= ld[1]:
                ld = cnts[0][i][0]
            if cnts[0][i][0][0] >= rd[0] and cnts[0][i][0][1] >= rd[1]:
                rd = cnts[0][i][0]'''

    minSum = 1000
    maxSum = 0
    minDiff = 1000
    maxDiff = -1000
    for i in range(len(cnts[0])):
        if cnts[0][i][0][0] + cnts[0][i][0][1] < minSum:
            lu = cnts[0][i][0]
            minSum = cnts[0][i][0][0] + cnts[0][i][0][1]
        if cnts[0][i][0][0] + cnts[0][i][0][1] > maxSum:
            rd = cnts[0][i][0]
            maxSum = cnts[0][i][0][0] + cnts[0][i][0][1]
        if cnts[0][i][0][0] - cnts[0][i][0][1] > maxDiff:
            ru = cnts[0][i][0]
            maxDiff = cnts[0][i][0][0] - cnts[0][i][0][1]
        if cnts[0][i][0][0] - cnts[0][i][0][1] < minDiff:
            ld = cnts[0][i][0]
            minDiff = cnts[0][i][0][0] - cnts[0][i][0][1]
    '''for i in range(len(cnts[0])):
            cv2.circle(image,tuple(cnts[0][i][0]),3,(0,0,255),-1)'''
    '''cv2.circle(image,tuple(cnts[0][0][0]),3,(0,0,255),-1)
    cv2.circle(image,tuple(cnts[0][len(cnts[0])//2][0]),3,(0,0,0),-1)
    cv2.circle(image,tuple(cnts[0][len(cnts)-1][0]),3,(255,255,255),-1)'''
    #t2=tuple(cnts[0][len(cnts[0])-2][0])
    '''cv2.circle(image,tuple(lu), 3, (0,0,0), -1)
    cv2.circle(image,tuple(ru),3, (255,255,255), -1)
    cv2.circle(image,tuple(ld), 3, (0,0,255), -1)
    cv2.circle(image,tuple(rd), 3, (255,0,0), -1)'''
    #cv2.imshow("dilated", dilated)
    M = cv2.getPerspectiveTransform(
        np.float32([lu, ru, ld, rd]),
        np.float32([(0, 0), (320, 0), (0, 320), (320, 320)]))
    image = cv2.warpPerspective(image, M, (320, 320))

    #cv2.imshow('first transform',image)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.bilateralFilter(gray, 11, 17, 17)
    edged = cv2.Canny(gray, 35, 60)
    #cv2.imshow('Second Canny',edged)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    dilated = cv2.dilate(edged, kernel)
    #cv2.imshow('Second dilated',dilated)
    #_, cnts, _ = cv2.findContours(dilated.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_CCOMP,
                                    cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    print(len(cnts))
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.07 * peri, True)
        if len(approx) == 4:
            screenCnt = approx
            break
    #cv2.drawContours(image, cnts, 0, (0, 255, 0), 3)
    minSum = 1000
    maxSum = 0
    minDiff = 1000
    maxDiff = -1000
    for i in range(len(cnts[0])):
        if cnts[0][i][0][0] + cnts[0][i][0][1] < minSum:
            lu = cnts[0][i][0]
            minSum = cnts[0][i][0][0] + cnts[0][i][0][1]
        if cnts[0][i][0][0] + cnts[0][i][0][1] > maxSum:
            rd = cnts[0][i][0]
            maxSum = cnts[0][i][0][0] + cnts[0][i][0][1]
        if cnts[0][i][0][0] - cnts[0][i][0][1] > maxDiff:
            ru = cnts[0][i][0]
            maxDiff = cnts[0][i][0][0] - cnts[0][i][0][1]
        if cnts[0][i][0][0] - cnts[0][i][0][1] < minDiff:
            ld = cnts[0][i][0]
            minDiff = cnts[0][i][0][0] - cnts[0][i][0][1]
    '''cv2.circle(image,tuple(lu), 3, (0,0,0), -1)
    cv2.circle(image,tuple(ru),3, (255,255,255), -1)
    cv2.circle(image,tuple(ld), 3, (0,0,255), -1)
    cv2.circle(image,tuple(rd), 3, (255,0,0), -1)'''
    #cv2.imshow('corn',image)
    M = cv2.getPerspectiveTransform(
        np.float32([lu, ru, ld, rd]),
        np.float32([(0, 0), (320, 0), (0, 320), (320, 320)]))
    #image = cv2.warpPerspective(image,M,(320,320))
    #cv2.imshow("dilated", dilated)
    #cv2.imshow('Second transform',image)
    return image, [lu, ru, ld, rd]
Example #32
imgCount = 0

#################################################################################
# Algorithm description
# 1. get edge: stair has linear edge properties, so, at first, use edge detection
# 2. save values of x axis: to vote, sum value of edge's x axis and is saved to set
# 3. draw high values of set: draw predicted stair's edge to image
#################################################################################

# get frame from video
while success:
    print('%d' % imgCount)

    # preprocess for detecting edges (blur), and detect edges
    blur = cv2.bilateralFilter(img, 4, 80, 80)
    edge = cv2.Canny(blur, 50, 100)

    # sum each edge along the x axis to search for the stair's edge ( algorithm part )
    edgeSum = []

    for i in range(0, height):
        if np.sum(edge[i]) == 0:
            voteStair[i] *= 0.95
        else:
            voteStair[i] = 0.2 * np.sum(edge[i]) + 0.8 * voteStair[i]
            edgeSum.append([voteStair[i], i])

    stair = []

    for i in range(0, height - edgeArea):
Example #33
def blur_image(img):
    blur = cv2.bilateralFilter(img, 9, 75, 75)
    return blur
Example #34
import cv2
import pytesseract
l = 0
a = []
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'
img = cv2.imread("C:\\Users\\admin\\Desktop\\1.jpg")
g = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
g = cv2.bilateralFilter(g, 250, 90, 190)
ret, thresh = cv2.threshold(g, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
t = pytesseract.image_to_string(thresh, lang="eng")
cv2.imshow("1", thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
print("THE NUMBER IS:", t)
for i in range(0, len(t)):
    if t[i] == 'T':
        l = i
        break
for j in range(l, len(t)):
    if t[j].isupper():
        a.append(t[j])
    elif t[j].isnumeric():
        a.append(t[j])
print(*a)
Example #35
first_tour()
vehicle.mode = VehicleMode("AUTO")
vehicle.commands.next = 0
while vehicle.commands.next <= 1:

    nextwaypoint = vehicle.commands.next

vehicle.mode = VehicleMode("GUIDED")

while True:
    ret, frame = cap.read()
    out.write(frame)
    if ret == True:
        # Filter red color
        cv2.imshow("frame", frame)
        frame = cv2.bilateralFilter(frame, 9, 75, 75)
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask1 = cv2.inRange(frame_hsv, (0, 70, 50), (10, 255, 255))
        mask2 = cv2.inRange(frame_hsv, (165, 70, 50), (180, 255, 255))
        mask = mask1 + mask2
        white_pixels = np.where(mask == 255)
        cX = np.average(white_pixels[1])
        cY = np.average(white_pixels[0])

        # Small noise elimination
        if len(white_pixels[0]) > 5000:
            # Object location detection
            img = np.zeros((480, 640, 1), np.uint8)
            cv2.circle(img, (int(cX), int(cY)),
                       85, (255, 255, 255),
                       thickness=-1,
Example #36
def main():
    cap = cv2.VideoCapture(1)

    i = 0
    write_code = 0

    while (True):

        ret, frame = cap.read()

        blurred_frame = cv2.GaussianBlur(frame.copy(), (5, 5), 0)
        hsv = cv2.cvtColor(blurred_frame, cv2.COLOR_BGR2HSV)

        # of red
        lower_red, upper_red = create_limits(0, 110, 110, 10, 255, 255)

        # of blue
        lower_blue, upper_blue = create_limits(110, 50, 50, 130, 255, 255)

        # of green
        lower_green, upper_green = create_limits(30, 50, 0, 100, 255, 150)

        # of yellow
        lower_yellow, upper_yellow = create_limits(0, 80, 70, 255, 255, 255)

        mask_red = create_mask(hsv, lower_red, upper_red)
        mask_blue = create_mask(hsv, lower_blue, upper_blue)
        mask_green = create_mask(hsv, lower_green, upper_green)
        mask_yellow = create_mask(hsv, lower_yellow, upper_yellow)

        if i % 200 == 0:

            # Image with one lego on tip plate
            # y and then x
            image = frame[50:460, 240:450]

            # create window normal size
            cv2.namedWindow("Original", cv2.WINDOW_NORMAL)
            # show image in the window
            cv2.imshow("Original", image)

            # convert to grayscale
            gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            # create window normal size
            cv2.namedWindow("Grayscale", cv2.WINDOW_NORMAL)
            # show image in the window
            cv2.imshow("Grayscale", gray)

            # noise removal but keeps edges
            # this filter is slower than most but it is very good at preserving edges
            refined_image = cv2.bilateralFilter(gray, 9, 75, 75)
            # thresholding using THRESH_OTSU
            # needs a bimodal image and finds a threshold in between the two peaks
            # pixels are set to either black or white depending on how they compare to the threshold
            ret, threshold_image = cv2.threshold(refined_image, 0, 255,
                                                 cv2.THRESH_OTSU)
            # create window normal size
            cv2.namedWindow("Threshold", cv2.WINDOW_NORMAL)
            # show image in the window
            cv2.imshow("Threshold", threshold_image)

            # Canny Edge detection
            canny_edge_detection = cv2.Canny(threshold_image, 250, 255)
            # kernel for dilation, 3x3
            kernel = np.ones((3, 3), np.uint8)
            dilated_image = cv2.dilate(canny_edge_detection,
                                       kernel,
                                       iterations=1)
            # create window normal size
            cv2.namedWindow("Edges", cv2.WINDOW_NORMAL)
            # show image in the window
            cv2.imshow("Edges", dilated_image)

            # find the contours
            contours, h = cv2.findContours(dilated_image, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)[:2]
            # sorted contours to get largest
            c = sorted(contours, key=cv2.contourArea, reverse=True)[:1]
            print(len(c))
            # draw the largest contour
            cv2.drawContours(image, c, 0, (0, 255, 0), 3)
            # cv2.drawContours(image, c, 1, (0,0,255), 3)
            # create window normal size
            cv2.namedWindow("contour", cv2.WINDOW_NORMAL)
            # show image in the window
            cv2.imshow("contour", image)

            # area in contour
            area = cv2.contourArea(c[0])

            # 2x2
            if area > 2500 and area < 4000:
                write_code = write_code + 2

            #2x4
            elif area > 5500 and area < 7000:
                write_code = write_code + 4

            contours_red, red_center, red_coordinates = find_contours(mask_red)
            contours_blue, blue_center, blue_coordinates = find_contours(
                mask_blue)
            contours_green, green_center, green_coordinates = find_contours(
                mask_green)
            contours_yellow, yellow_center, yellow_coordinates = find_contours(
                mask_yellow)

            if len(contours_red) > 0:
                write_code = write_code + 10

            elif len(contours_blue) > 0:
                # unnecessary
                write_code = write_code + 0

            # elif len(contours_green) > 0:
            # replace 0 with correct number
            #     write_code = write_code + 0
            #
            # elif len(contours_yellow) > 0:
            # replace 0 with correct number
            #     write_code = write_code + 0

        if write_code == 4:
            # serialPort.write("4")
            print("Blue 2x4")
        elif write_code == 2:
            # serialPort.write("2")
            print("Blue 2x2")
        elif write_code == 14:
            # serialPort.write("14")
            print("Red 2x4")
        elif write_code == 12:
            # serialPort.write("12")
            print("Red 2x2")
        i += 1
        write_code = 0
        # print(i)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Example #37
image = cv2.imread("Assets/image (1).jpg")

hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hsv_lower = np.array([0, 0, 30])  #Darker Red
hsv_upper = np.array([100, 65, 255])  #Brighter Red

mask = cv2.inRange(hsv_image, hsv_lower, hsv_upper)
final = cv2.bitwise_and(image, image,
                        mask=mask)  #Separate the nonRed part of Image

#Blur
kernel = np.ones((4, 4), np.float32) / 16
smoothImage = cv2.filter2D(final, -1, kernel)

#Gaussian
GaussianBlur = cv2.GaussianBlur(final, (15, 15), 0)

#Median Blur (Best)
median = cv2.medianBlur(final, 15)

#bilateral
bilateral = cv2.bilateralFilter(final, 10, 90, 90)

cv2.imshow("Final", final)
cv2.imshow("smoothImage", smoothImage)
cv2.imshow("GaussianBlur", GaussianBlur)
cv2.imshow("median", median)
cv2.imshow("bilateral", bilateral)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #38
def optic_flow_lk(img_a, img_b, k_size, k_type, sigma=1):
    """Computes optic flow using the Lucas-Kanade method.

    For efficiency, you should apply a convolution-based method.

    Note: Implement this method using the instructions in the lectures
    and the documentation.

    You are not allowed to use any OpenCV functions that are related
    to Optic Flow.

    Args:
        img_a (numpy.array): grayscale floating-point image with
                             values in [0.0, 1.0].
        img_b (numpy.array): grayscale floating-point image with
                             values in [0.0, 1.0].
        k_size (int): size of averaging kernel to use for weighted
                      averages. Here we assume the kernel window is a
                      square so you will use the same value for both
                      width and height.
        k_type (str): type of kernel to use for weighted averaging,
                      'uniform' or 'gaussian'. By uniform we mean a
                      kernel with the only ones divided by k_size**2.
                      To implement a Gaussian kernel use
                      cv2.getGaussianKernel. The autograder will use
                      'uniform'.
        sigma (float): sigma value if gaussian is chosen. Default
                       value set to 1 because the autograder does not
                       use this parameter.

    Returns:
        tuple: 2-element tuple containing:
            U (numpy.array): raw displacement (in pixels) along
                             X-axis, same size as the input images,
                             floating-point type.
            V (numpy.array): raw displacement (in pixels) along
                             Y-axis, same size and type as U.
    """
    #if blur:
    img_b = cv2.GaussianBlur(img_b, (13, 13), 0)
    img_b = cv2.bilateralFilter(img_b.astype(np.float32), 7, 85, 85)
    img_a = cv2.GaussianBlur(img_a, (13, 13), 0)
    img_a = cv2.bilateralFilter(img_a.astype(np.float32), 7, 85, 85)
    img_b = cv2.medianBlur(img_b, 5)
    img_a = cv2.medianBlur(img_a, 5)
    win = k_size
    #win=50#30#15#30 #15 for 1b in experiment.py **MAKE THIS AN INPUT**
    assert img_a.shape == img_b.shape
    I_x = np.zeros(img_a.shape)
    I_y = np.zeros(img_a.shape)
    I_t = np.zeros(img_a.shape)
    I_x[1:-1, 1:-1] = (img_a[1:-1, 2:] - img_a[1:-1, :-2]) / 2
    I_y[1:-1, 1:-1] = (img_a[2:, 1:-1] - img_a[:-2, 1:-1]) / 2
    I_t[1:-1, 1:-1] = img_a[1:-1, 1:-1] - img_b[1:-1, 1:-1]
    params = np.zeros(img_a.shape + (5, ))  #Ix2, Iy2, Ixy, Ixt, Iyt
    params[..., 0] = I_x * I_x  # I_x2
    params[..., 1] = I_y * I_y  # I_y2
    params[..., 2] = I_x * I_y  # I_xy
    params[..., 3] = I_x * I_t  # I_xt
    params[..., 4] = I_y * I_t  # I_yt
    del I_x, I_y, I_t
    cum_params = np.cumsum(np.cumsum(params, axis=0), axis=1)
    win_params = (cum_params[2 * win + 1:, 2 * win + 1:] -
                  cum_params[2 * win + 1:, :-1 - 2 * win] -
                  cum_params[:-1 - 2 * win, 2 * win + 1:] +
                  cum_params[:-1 - 2 * win, :-1 - 2 * win])
    det = win_params[..., 0] * win_params[..., 1] - win_params[..., 2]**2
    op_flow_u = np.zeros(img_a.shape)
    op_flow_v = np.zeros(img_a.shape)
    u = np.where(det != 0, (win_params[..., 1] * win_params[..., 3] -
                            win_params[..., 2] * win_params[..., 4]) / det, 0)
    v = np.where(det != 0, (win_params[..., 0] * win_params[..., 4] -
                            win_params[..., 2] * win_params[..., 3]) / det, 0)
    op_flow_u[win + 1:-1 - win, win + 1:-1 - win] = u[:-1, :-1]
    op_flow_v[win + 1:-1 - win, win + 1:-1 - win] = v[:-1, :-1]
    return (op_flow_u, op_flow_v)
Example #39
    tohsv(0, 255, 0)
    while (True):
        _, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        lower1, upper1 = np.array([160, 30, 30]), np.array([200, 255, 255])
        lower2, upper2 = np.array([85, 26, 12]), np.array([130, 255, 255])

        mask = cv2.inRange(hsv, lower1, upper1)
        # mask2=cv2.inRange(hsv,lower2,upper2)
        # mask=cv2.bitwise_or(mask1,mask2)
        res = cv2.bitwise_and(frame, frame, mask=mask)

        dst = cv2.filter2D(frame, -1, kernel2)
        blur = cv2.blur(res, (3, 3))
        gauss = cv2.GaussianBlur(res, (3, 3), 5)
        bil = cv2.bilateralFilter(res, 10, 75, 75)
        diff = cv2.subtract(gauss, bil)
        closing = cv2.morphologyEx(gauss, cv2.MORPH_CLOSE, kernel)
        opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)

        cv2.imshow('Gauss', gauss)
        cv2.imshow('Blur', blur)
        cv2.imshow('AVG', frame)
        # cv2.imshow('Bilateral',bil)
        # cv2.imshow('Gauss',closing)
        # cv2.imshow('Morphed',opening)

        key = cv2.waitKey(10)
        if key == 27:
            break
    cap.release()
Example #40
def loop():
    global newX, newY, oldX, oldY, listenClick, scrollMode, scrollBaseY, dragging, timeSinceFive, bgModel, bgCaptured, initialCalibrate, at, recording, timeSinceThree, numFours, isRecording
    _, frame = webcam.read()
    if not backgroundVersion:
        # flip image
        frame = cv2.flip(frame, 1)
        frame = cv2.resize(frame, (screenWidth, screenHeight))
        img = cv2.GaussianBlur(frame, (blurValue, blurValue), 0)
        imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # show frame
        # cv2.imshow('Blur', imgHSV)
        mask = cv2.inRange(imgHSV, lowerBound, upperBound)
        maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelOpen)
        maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelOpen)
        # show frame
        cv2.imshow('maskClose', maskClose)
        # show frame
        cv2.imshow('mask', mask)
    else:  # TODO Working here
        frame = cv2.bilateralFilter(frame, 5, 50, 100)  # use smoothing filter
        frame = cv2.flip(frame, 1)
        # TODO may mess up calculations
        frame = cv2.resize(frame, (screenWidth, screenHeight))
        if rightHanded:
            cv2.rectangle(frame, (int(boxX * frame.shape[1]), 0),
                          (frame.shape[1], int(boxY * frame.shape[0])),
                          (255, 0, 0), 2)
        else:
            cv2.rectangle(
                frame, (0, 0),
                (int(boxX * frame.shape[1]), int(boxY * frame.shape[0])),
                (255, 0, 0), 2)
        cv2.imshow('original', frame)
        cv2.waitKey(1)
        # print(bgCaptured)
        if bgCaptured:
            img = removeBG(frame)
            if rightHanded:
                img = img[0:int(boxY * screenHeight),
                          int(boxX * screenWidth):screenWidth]  # clip the ROI
            else:
                img = img[0:int(boxY * screenHeight), 0:screenWidth -
                          int(boxX * screenWidth)]  # clip the ROI
            cv2.imshow('mask', img)
            # Binarize
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            blur = cv2.GaussianBlur(gray, (blurValue, blurValue), 0)
            cv2.imshow('blur', blur)
            ret, thresh = cv2.threshold(blur, threshold, 255,
                                        cv2.THRESH_BINARY)
            cv2.imshow('ori', thresh)
            # maskOpen = thresh[0:0+int(screenHeight*boxY), int(screenWidth*boxX):int(screenWidth*boxX)+screenWidth]
            maskOpen = thresh
            maskOpen = cv2.resize(maskOpen,
                                  (screenWidth, screenHeight))  # TODO Here
            cv2.imshow('IMPORTANT', maskOpen)
        # TODO End of BG version
    if not backgroundVersion or (backgroundVersion and bgCaptured):
        _, conts, _ = cv2.findContours(
            cv2.resize(maskOpen, (screenWidth, screenHeight)),
            cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        maxx, maxy, maxh, maxw = [0, 0, 0, 0]
        ci = 0
        for i in range(len(conts)):
            x, y, w, h = cv2.boundingRect(conts[i])
            if h * w > maxh * maxw:
                maxx = x
                maxy = y
                maxw = w
                maxh = h
                ci = i
        # If worthy size
        if maxw * maxh > sizeThreshold:
            detect = True
            # Finger processing
            maxCont = conts[ci]
            hull = cv2.convexHull(maxCont)
            drawing = np.zeros(img.shape, np.uint8)
            cv2.drawContours(drawing, [maxCont], 0, (0, 255, 0), 2)
            cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
            # cv2.imshow("test", drawing)
            isFinishCal, cnt = calculateFingers(maxCont, drawing)
            numFing = cnt + 1
            if numFing == 1:
                numFing = calculateOneZero(maxCont, frame)
            pos = calculateHighestPoint(maxCont)
            # Check for one finger
            if inputQueue.qsize() == maxQueueSize:
                if inputQueue.get() == 4:
                    numFours -= 1
            inputQueue.put(numFing)
            if numFing == 4:
                numFours += 1
            if numFours > maxQueueSize - 2 and not isRecording:
                at.start()
                isRecording = True
                playsound.playsound(
                    'C:\\Users\\huytr\\PycharmProjects\\IronHand\\beep-02.wav',
                    True)
            if numFing == 0 and isRecording:
                transcript = at.stop()
                isRecording = False
                playsound.playsound(
                    'C:\\Users\\huytr\\PycharmProjects\\IronHand\\s2.wav',
                    True)
                if transcript is not None:
                    doCommand(transcript)
            print(numFing, pos, numFours)
            # Debug
            # print(pos)
            cv2.circle(frame, pos, 5, (0, 0, 255), -1)
            cv2.imshow('Point', frame)
            # Recalibrate
            # # print(cv2.contourArea(maxCont)/screenWidth/screenHeight)
            # if cv2.contourArea(maxCont)/screenWidth/screenHeight > .5 and backgroundVersion and initialCalibrate != 0:
            #     bgCaptured = False
            # initialCalibrate = 1
        else:
            detect = False
        if detect and numFing == 1:
            scaleX = gui.size()[0] / (xBoundHigh - xBoundLow)
            scaleY = gui.size()[1] / (yBoundHigh - yBoundLow)
            oldX = newX
            oldY = newY
            newX = (pos[0] - xBoundLow) * scaleX
            newY = (pos[1] - yBoundLow) * scaleY
            listenClick = True
            scrollMode = False
        elif detect and numFing == 2:
            # if not in scroll mode, store the base y and set scroll mode to true
            if not scrollMode:
                scrollMode = True
                scrollBaseY = pos[1]
            else:
                clicks = int((scrollBaseY - pos[1]))
                gui.scroll(clicks)
        elif detect and numFing >= 5:
            scrollMode = False
            listenClick = False
            timeSinceFive = time.time()
        elif detect and numFing == 0:
            scrollMode = False
            listenClick = False
            # if recording and time.time()-timeSinceThree > 0.5:
            #     recording = False
            #     transcript = at.stop()
            #     if transcript is not None:
            #         gui.typewrite(transcript)
            if time.time() - timeSinceFive < 0.5:
                gui.keyDown('ctrl')
                gui.press('w')
                gui.keyUp('ctrl')
        # elif detect and numFing == 4:
        #     scrollMode = False
        #     listenClick = False
        #     timeSinceThree = time.time()
        #     if not recording:
        #         recording = True
        #         at.start()
        else:
            scrollMode = False
            listenClick = False
Example #41
cap = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    res = cv2.resize(frame,
                     None,
                     fx=0.5,
                     fy=0.5,
                     interpolation=cv2.INTER_CUBIC)
    # Display original frame
    #cv2.imshow("Original video (0.5 resizing)", res)
    gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    #blurred = cv2.GaussianBlur(gray, (3, 3), 0)
    # Note: the bilateral filter runs on the color frame; `gray` is left unused here
    blurred = cv2.bilateralFilter(res, 9, 75, 75)
    edges = auto_canny(blurred)
    cv2.imshow("Edge detection (0.5 resizing)", edges)

    # Two-value signature (OpenCV 2.x/4.x); OpenCV 3.x also returns the image first
    (cnts, _) = cv2.findContours(edges.copy(), cv2.RETR_LIST,
                                 cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:4]
    # loop over the contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        if len(approx) == 4:
            screenCnt = approx
            break
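# The loop above calls auto_canny, which is not defined in this fragment. A
# common median-based implementation (a sketch; the original may differ):
import numpy as np
import cv2

def auto_canny(image, sigma=0.33):
    # Derive low/high Canny thresholds from the median pixel intensity
    v = np.median(image)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(image, lower, upper)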
Example #42
# (the loop header was lost when this fragment was extracted; reconstructed
#  here as a camera warm-up loop)
x = 0
while x < 10:
    ret, frame = cap.read()
    x += 1

x = 0
while x < 10:
    ret, frame = cap.read()
    img = fgbg.apply(frame, learningRate=0)  # 'fgbg' is a background subtractor created elsewhere
    x += 1
#gather gestures
desList = []
while len(desList) < numGestures:
    while True:
        ret, img = cap.read()
        cam = img
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.bilateralFilter(img, 9, 300, 150)
        img = cv2.GaussianBlur(img, (5, 5), 0)
        img = fgbg.apply(img, learningRate=0)
        img = cv2.morphologyEx(img, cv2.MORPH_ERODE, kernel)
        img = cv2.morphologyEx(img, cv2.MORPH_DILATE, kernel, iterations=2)
        img = cv2.threshold(img, 128, 255,
                            cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

        cv2.imshow('img', img)
        cv2.imshow('cam', cam)
        if (cv2.waitKey(1) & 0xFF == ord('q')):
            break
    cv2.destroyAllWindows()

    kp, des = detect.detectAndCompute(img, None)  # 'detect' is a feature detector (e.g. ORB/SIFT) created elsewhere
Example #43
def canny_face_(face, t1, t2):
    # bilateralFilter is not in-place; the original discarded its return value
    face = cv2.bilateralFilter(face, 5, color_para, space_para)
    res = cv2.Canny(face, t1, t2, L2gradient=True)
    return res
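# color_para and space_para are globals not shown in this fragment. A usage
# sketch with assumed values and a hypothetical input file:
color_para, space_para = 75, 75      # assumed values
face = cv2.imread('face.jpg', 0)     # hypothetical grayscale input
edges = canny_face_(face, 50, 150)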
def main():
    try:
        os.mkdir(os.path.join(args.dataset_path, "test_data"))
    except FileExistsError:
        pass

    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Initialize colors
    color_array = []
    for n in range(args.num_colors):
        hue = n / (args.num_colors - 1)
        saturation = 0.9
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        color_array.append((red, green, blue, 1))

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 512
    rt_args.max_bounce = 2
    rt_args.supersampling_enabled = False

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    camera = rtx.OrthographicCamera()

    # Worried whether enumerate will work properly here...
    original_data = c_gqn.data.Dataset(args.dataset_path)
    for i, subset in enumerate(original_data):
        iterator = c_gqn.data.Iterator(subset, batch_size=1)

        for j, data_indices in enumerate(iterator):
            _images, viewpoints, _original_images = subset[data_indices]

            images = []
            scene = build_scene(color_array)
            for viewpoint in viewpoints[0]:
                eye = tuple(viewpoint[0:3])

                center = (0, 0, 0)
                camera.look_at(eye, center, up=(0, 1, 0))

                renderer.render(scene, camera, rt_args, cuda_args,
                                render_buffer)

                # Convert to sRGB
                image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
                image = np.uint8(image * 255)
                image = cv2.bilateralFilter(image, 3, 25, 25)

                images.append(image)

            view_radius = 3
            angle_rad = 0
            original_images = []
            for _ in range(args.frames_per_rotation):
                eye = rotate_viewpoint(angle_rad)
                eye = tuple(view_radius * (eye / np.linalg.norm(eye)))
                center = (0, 0, 0)
                camera.look_at(eye, center, up=(0, 1, 0))

                renderer.render(scene, camera, rt_args, cuda_args,
                                render_buffer)

                # Convert to sRGB
                original_image = np.power(np.clip(render_buffer, 0, 1),
                                          1.0 / 2.2)
                original_image = np.uint8(original_image * 255)
                original_image = cv2.bilateralFilter(original_image, 3, 25, 25)

                original_images.append(original_image)
                angle_rad += 2 * math.pi / args.frames_per_rotation

            np.save(
                os.path.join(args.dataset_path, "test_data",
                             str(i) + "_" + str(j) + ".npy"),
                [images, original_images])
            print('saved:  ' + str(i) + "_" + str(j) + ".npy")
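# Both render loops gamma-encode the linear render buffer to 8-bit sRGB with a
# 1/2.2 power curve before the light bilateral denoise. A reusable helper
# capturing that step (a sketch, not part of the original script):
import numpy as np

def to_srgb8(linear_rgb):
    """Clip linear radiance to [0, 1], gamma-encode, and quantize to uint8."""
    return np.uint8(np.power(np.clip(linear_rgb, 0, 1), 1.0 / 2.2) * 255)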
def gaussian_blur(img, kernel_size):
    """Despite the name, applies an edge-preserving bilateral filter, not a Gaussian."""
    return cv2.bilateralFilter(img, kernel_size, 75, 75)
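# Unlike a Gaussian blur, the bilateral filter weights neighbors by color
# similarity as well as distance, so edges survive the smoothing. A minimal
# side-by-side comparison (assuming a hypothetical input.jpg):
import cv2

img = cv2.imread('input.jpg')                # hypothetical input image
gauss = cv2.GaussianBlur(img, (9, 9), 0)     # smooths everything, including edges
bilat = cv2.bilateralFilter(img, 9, 75, 75)  # smooths flat regions, keeps edges
cv2.imshow('gaussian', gauss)
cv2.imshow('bilateral', bilat)
cv2.waitKey(0)
cv2.destroyAllWindows()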
Example #46
    if (idx > ignoreFrame):
        original = gray.copy()

        gray[0:50] = 0
        gray[-40:] = 0
        canny = original.copy()

        # ------------------------------------------------------------
        # cartoonise
        for _ in xrange(num_down):
            gray = cv2.pyrDown(gray)
        #gray = cv2.equalizeHist(gray)
        #gray = cv2.GaussianBlur(gray,(5,5),0)
        for _ in xrange(num_bilateral):
            gray = cv2.bilateralFilter(gray, d=9, sigmaColor=11, sigmaSpace=90)

        for _ in xrange(num_down):
            gray = cv2.pyrUp(gray)

        #gray = cv2.cvtColor(gray, cv2.COLOR_RGB2GRAY)
        gray = cv2.medianBlur(gray, 15)

        edge = cv2.adaptiveThreshold(gray,
                                     255,
                                     cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY,
                                     blockSize=9,
                                     C=2)
        output = cv2.bitwise_and(gray, edge)
Example #47
    lower_white = np.array([0, 0, 230])
    upper_white = np.array([180, 25, 255])
    lower_color = np.array([0,80,50])
    upper_color = np.array([20,100,100])
    lower_red = np.array([150,150,50])
    upper_red = np.array([180,255,150])


    mask1 = cv2.inRange(hsv, np.array([0, 150, 100]), np.array([1, 255, 255]))
    mask2 = cv2.inRange(hsv, np.array([178, 150, 100]), np.array([180, 255, 255]))
    mask = mask1 | mask2

    # Attempting Green
    # mask = cv2.inRange(hsv, np.array([50,100,100]), np.array([65,255,255]))

    mask = cv2.bilateralFilter(mask, 10, 40, 40)
    mask = cv2.blur(mask,(5,5))

    res = cv2.bitwise_and(frame,frame,mask=mask)
    mask = cv2.blur(mask,(20,20))
    # Getting a contour and the center of the contour
    im2,contours,hierarchy = cv2.findContours(mask, 1, 2)  # 1 = cv2.RETR_LIST, 2 = cv2.CHAIN_APPROX_SIMPLE (OpenCV 3.x signature)
    try:
        if frame_num >= 0:
            cnt = contours[0]
            M = cv2.moments(cnt)
            # print(M['m10']/M['m00'])
            cx = int(M['m10']/M['m00'])
            # print(cx)
            cy = int(M['m01']/M['m00'])
            # print(cy)
Example #48
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
cl1 = clahe.apply(dst)
th4 = cv2.adaptiveThreshold(cl1,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,11,2)

cv2.imwrite("im2.png", th4, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
im=cv2.imread('im2.png')
ori = cv2.imread('me.png')

im[np.where((im == [255,255,255]).all(axis = 2))] = [180,105,255]
im[np.where((im == [0,0,0]).all(axis = 2))] = [155,54,255]


#imline[0:rows,0:cols] = [142,29,255]

dst2 = cv2.addWeighted(im,0.7,ori,0.3,0)
blur= cv2.bilateralFilter(dst2,9,75,75)
#med= cv2.medianBlur(dst,5)
#gauss= cv2.GaussianBlur(dst,(5,5),0)

#kernel = np.ones((5,5),np.float32)/25
#blur = cv2.filter2D(dst,-1,kernel)

#cv2.imshow('lip',dst2)
cv2.imshow('lip2',blur)


cv2.imwrite("./2c.jpg", blur, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #49
# per-channel gain and offset: new = pixel * gain + offset
import cv2
import numpy as np
img = cv2.imread('../imgs/4.jpg', 1)
imgInfo = img.shape
height = imgInfo[0]
width = imgInfo[1]
cv2.imshow('src', img)
dst = np.zeros((height, width, 3), np.uint8)
for i in range(0, height):
    for j in range(0, width):
        (b, g, r) = img[i, j]
        bb = int(b * 1.3) + 10
        gg = int(g * 1.2) + 15

        if bb > 255:
            bb = 255
        if gg > 255:
            gg = 255

        dst[i, j] = (bb, gg, r)
cv2.imshow('dst', dst)
cv2.waitKey(0)

# Bilateral filtering
import cv2
img = cv2.imread('1.png', 1)
cv2.imshow('src', img)
dst = cv2.bilateralFilter(img, 15, 35, 35)
cv2.imshow('dst', dst)
cv2.waitKey(0)
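# The per-pixel loop in the first snippet is very slow in Python. The same
# per-channel gain/offset can be done in one vectorized NumPy pass (a sketch
# equivalent to the loop above):
import cv2
import numpy as np

img = cv2.imread('../imgs/4.jpg', 1)
b, g, r = cv2.split(img)
# Scale, offset, clip to 255, and truncate back to uint8, matching the loop
bb = np.clip(b.astype(np.float32) * 1.3 + 10, 0, 255).astype(np.uint8)
gg = np.clip(g.astype(np.float32) * 1.2 + 15, 0, 255).astype(np.uint8)
dst = cv2.merge((bb, gg, r))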
Example #50
def passport_ocr(path,thresh,rescale,average):
	image = cv2.imread(path)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)


	if thresh == "thresh":
	    gray = cv2.threshold(gray, 0, 255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
	elif thresh == "adaptive":
	    gray = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2)


	if rescale == "linear":
	    gray = cv2.resize(gray, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
	elif rescale == "cubic":
	    gray = cv2.resize(gray, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)


	if average == "blur":
	    gray = cv2.medianBlur(gray, 3)
	elif average == "bilateral":
	    gray = cv2.bilateralFilter(gray, 9, 75, 75)
	elif average == "gauss":
	    gray = cv2.GaussianBlur(gray, (5,5), 0)

	cv2.imwrite("."+path.split('.')[1]+"_gray"+"."+path.split('.')[2],gray)
	text = pytesseract.image_to_string(gray, lang = 'eng')


	name = None
	fname = None
	dob = None
	pan = None
	nameline = []
	dobline = []
	panline = []
	text0 = []
	text1 = []


	lines = text.split('\n')
	for lin in lines:
		s = lin.strip()
		s = s.replace('\n', '')
		text1.append(s)

	text1 = list(filter(None, text1))


	lineno = -1

	for wordline in text1:
		xx = wordline.split(' ')
		if ([w for w in xx if re.search('(INCOMETAXDEPARWENT @|mcommx|INCOME|TAX|GOW|GOVT|GOVERNMENT|OVERNMENT|VERNMENT|DEPARTMENT|EPARTMENT|PARTMENT|ARTMENT|INDIA|NDIA)$', w)]):
			text1 = list(text1)
			lineno = text1.index(wordline)
			break

	text0 = text1[lineno+1:]

	for wordline in text0:
		wordline=str(wordline)
		xx = wordline.split(' ')
		for w in xx:
			print("ii",w)
			if re.search('(Pormanam|Number|umber|Account|ccount|count|Permanent|ermanent|manent|wumm)', w):
				lineno = text0.index(wordline)
				break
	if(lineno==0):
		pat=0
	else:
		pat=1
	data=pre_cleaning(text0,pat)
	print("hey",data,text0,pat,lineno)

	return data
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 18 14:17:20 2019

@author: te122613
"""

# import libraries opencv, matplotlib and numpy
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt

# read image
img2 = cv.imread('bilateral1.jpg')

# blur image
bilateral = cv.bilateralFilter(img2, 9, 200, 200)

# plot settings (convert BGR to RGB so matplotlib displays true colors)
plt.subplot(121), plt.imshow(cv.cvtColor(bilateral, cv.COLOR_BGR2RGB)), plt.title('Bilateral')
plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(cv.cvtColor(img2, cv.COLOR_BGR2RGB)), plt.title('Original')
plt.xticks([]), plt.yticks([])
# show plots
plt.show()
import cv2
import sys

image='../../images/'+sys.argv[1]

img=cv2.imread(image, cv2.IMREAD_UNCHANGED)

blur=cv2.bilateralFilter(img,9,75,75)

tmp=sys.argv[1]
ext=sys.argv[2]
name=tmp[:len(tmp)-len(ext)-1]
output=name+'_blur_bilat'+'.'+ext
target='../../images/'+output

if cv2.imwrite(target, blur):
    print(output, end='')
else:
    print('failed', end='')
import numpy as np
import cv2

# read images
ThreshS = cv2.imread("ThreshS.jpg", 0)
ThreshB = cv2.imread("ThreshB.jpg", 0)

# Apply bilateral filtering
ThreshS = cv2.bilateralFilter(ThreshS, 3, 150, 150)
ThreshB = cv2.bilateralFilter(ThreshB, 3, 150, 150)

# Erode the edges
kernel = np.ones((3, 3), 'uint8')
imgS_ERODE = cv2.erode(ThreshS, kernel, iterations=1)
imgB_ERODE = cv2.erode(ThreshB, kernel, iterations=1)

# Find Contours
contoursS, hierarchyS = cv2.findContours(imgS_ERODE, cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
contoursB, hierarchyB = cv2.findContours(imgB_ERODE, cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
# • Contours: the output where each detected contour is a vector of points.
# • Hierarchy: an optional output vector storing the topology of the image,
#   i.e. the relations between the contours.
# • Image: the input binary image.
# • Mode: the method used to retrieve the contours:
#     - RETR_EXTERNAL: retrieves only the external contours.
#     - RETR_LIST: retrieves all the contours without establishing a hierarchy.
#     - RETR_CCOMP: retrieves all the contours with two levels of hierarchy,
#       external and holes; if another object sits inside a hole, it is put at
#       the top of the hierarchy.
#     - RETR_TREE: retrieves all the contours, creating a full hierarchy.
# • Method: the approximation used to store the contours' shapes:
#     - CHAIN_APPROX_NONE: applies no approximation and stores all points.
#     - CHAIN_APPROX_SIMPLE: compresses horizontal, vertical, and diagonal
#       segments, keeping only their end points.
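# A quick illustration of the retrieval modes (a minimal sketch on a synthetic
# ring image; OpenCV 4.x two-value return signature assumed):
import cv2
import numpy as np

ring = np.zeros((200, 200), np.uint8)
cv2.circle(ring, (100, 100), 80, 255, -1)   # filled disc...
cv2.circle(ring, (100, 100), 40, 0, -1)     # ...with a hole punched in it

for mode in (cv2.RETR_EXTERNAL, cv2.RETR_LIST, cv2.RETR_CCOMP, cv2.RETR_TREE):
    cnts, hierarchy = cv2.findContours(ring.copy(), mode,
                                       cv2.CHAIN_APPROX_SIMPLE)
    print(mode, len(cnts))  # RETR_EXTERNAL finds 1 contour; the others find 2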
import cv2
import numpy as np
#from scipy import ndimage 
#img = cv2.imread('camaron_caja_negra.tiff',0)
img = cv2.imread('0000000002.tiff',0)

size = np.size(img)
skel = np.zeros(img.shape,np.uint8)

bilateral_filtered_image = cv2.bilateralFilter(img, 5, 35, 150)
ret,threshold = cv2.threshold(bilateral_filtered_image,24,255,0)

cv2.imshow("threshold",threshold)
cv2.waitKey(0)
cv2.imwrite("camarones_ocvthreshold_39.tiff",threshold)
cv2.destroyAllWindows()

element = cv2.getStructuringElement(cv2.MORPH_CROSS,(5,5))
done = False
count=0
while not done:
    eroded = cv2.erode(threshold, element)
    temp = cv2.dilate(eroded, element)
    temp = cv2.subtract(threshold, temp)
    skel = cv2.bitwise_or(skel, temp)
    threshold = eroded.copy()

    zeros = size - cv2.countNonZero(threshold)

    if zeros == size:
        done = True
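# Each pass erodes the image and ORs the pixels lost at that step into skel;
# when nothing is left to erode, skel holds the morphological skeleton. A short
# display sketch (assumed continuation; not in the original fragment):
cv2.imshow("skeleton", skel)
cv2.waitKey(0)
cv2.destroyAllWindows()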
Example #55
    lower_red = np.array([100,100,100])
    upper_red = np.array([255,200,250])


    mask= cv2.inRange(hsv,lower_red, upper_red)    # if not in the range=0, else 1
    res = cv2.bitwise_and(frame, frame, mask = mask) # show frame wherever mask is 1 in the
                                                     #region

    kernel = np.ones((15,15),np.float32)/255   # note: /225 would make this a true 15x15 mean
    smoothed = cv2.filter2D(res,-1,kernel)

    blur = cv2.GaussianBlur(res,(15,15),0)

    median = cv2.medianBlur(res,15)

    bilateral = cv2.bilateralFilter(res, 15, 75, 75)

    cv2.imshow('frame',frame)
    #cv2.imshow('mask',mask)
    cv2.imshow('res',res)
    cv2.imshow('blur',blur)
    cv2.imshow('median',median)
    cv2.imshow('smoot',smoothed)

    k=cv2.waitKey(5) & 0xFF

    if k==27:
        break

cv2.destroyAllWindows()
cap.release()
Example #56
def chess_corners_HSV(image, corn=None):

    global corners
    #corners = corn
    # note: despite the names, hue 30-40 is a yellow range in OpenCV HSV
    lower_red = np.array([30, 100, 150])
    upper_red = np.array([40, 225, 255])

    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(image, image, mask=mask)

    #cv2.imshow('frame', image)
    #cv2.imshow('mask', mask)
    #cv2.imshow('res', res)

    cor = cv2.findNonZero(mask)

    lu = (10000, 10000)
    ru = (0, 10000)
    ld = (10000, 0)
    rd = (0, 0)
    minSum = 1000
    maxSum = 0
    minDiff = 1000
    maxDiff = -1000

    for i in range(len(cor)):
        if cor[i][0][0] + cor[i][0][1] < minSum:
            lu = cor[i][0]
            minSum = cor[i][0][0] + cor[i][0][1]
        if cor[i][0][0] + cor[i][0][1] > maxSum:
            rd = cor[i][0]
            maxSum = cor[i][0][0] + cor[i][0][1]
        if cor[i][0][0] - cor[i][0][1] > maxDiff:
            ru = cor[i][0]
            maxDiff = cor[i][0][0] - cor[i][0][1]
        if cor[i][0][0] - cor[i][0][1] < minDiff:
            ld = cor[i][0]
            minDiff = cor[i][0][0] - cor[i][0][1]

    if corners is None:
        corners = []
        corners.append(lu)
        corners.append(ru)
        corners.append(ld)
        corners.append(rd)
    '''cv2.circle(image, tuple(lu), 3, (0, 0, 0), -1)
    cv2.circle(image, tuple(ru), 3, (255, 255, 255), -1)
    cv2.circle(image, tuple(ld), 3, (0, 0, 255), -1)
    cv2.circle(image, tuple(rd), 3, (255, 0, 0), -1)
    cv2.imshow('edges',image)'''

    #cv2.imshow("final", image)

    M = cv2.getPerspectiveTransform(
        np.float32(corners),
        np.float32([(0, 0), (320, 0), (0, 320), (320, 320)]))
    image = cv2.warpPerspective(image, M, (320, 320))

    #cv2.imshow('first transform',image)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.bilateralFilter(gray, 11, 17, 17)
    edged = cv2.Canny(gray, 35, 60)
    # cv2.imshow('Second Canny',edged)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    dilated = cv2.dilate(edged, kernel)
    # cv2.imshow('Second dilated',dilated)
    # _, cnts, _ = cv2.findContours(dilated.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_CCOMP,
                                    cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    print(len(cnts))
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.07 * peri, True)
        if len(approx) == 4:
            screenCnt = approx
            break
    # cv2.drawContours(image, cnts, 0, (0, 255, 0), 3)
    minSum = 1000
    maxSum = 0
    minDiff = 1000
    maxDiff = -1000
    for i in range(len(cnts[0])):
        if cnts[0][i][0][0] + cnts[0][i][0][1] < minSum:
            lu = cnts[0][i][0]
            minSum = cnts[0][i][0][0] + cnts[0][i][0][1]
        if cnts[0][i][0][0] + cnts[0][i][0][1] > maxSum:
            rd = cnts[0][i][0]
            maxSum = cnts[0][i][0][0] + cnts[0][i][0][1]
        if cnts[0][i][0][0] - cnts[0][i][0][1] > maxDiff:
            ru = cnts[0][i][0]
            maxDiff = cnts[0][i][0][0] - cnts[0][i][0][1]
        if cnts[0][i][0][0] - cnts[0][i][0][1] < minDiff:
            ld = cnts[0][i][0]
            minDiff = cnts[0][i][0][0] - cnts[0][i][0][1]
    '''cv2.circle(image,tuple(lu), 3, (0,0,0), -1)
    cv2.circle(image,tuple(ru),3, (255,255,255), -1)
    cv2.circle(image,tuple(ld), 3, (0,0,255), -1)
    cv2.circle(image,tuple(rd), 3, (255,0,0), -1)'''
    # cv2.imshow('corn',image)
    #M = cv2.getPerspectiveTransform(np.float32([lu, ru, ld, rd]), np.float32([(0, 0), (320, 0), (0, 320), (320, 320)]))
    # image = cv2.warpPerspective(image,M,(320,320))
    #cv2.imshow("dilated", dilated)
    #cv2.imshow('Second transform', image)
    return image, corners
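# Both corner-picking loops above use the standard sum/difference heuristic:
# the top-left corner minimizes x+y, bottom-right maximizes it, top-right
# maximizes x-y, and bottom-left minimizes it. A compact NumPy equivalent
# (a sketch, assuming points shaped (N, 1, 2) as returned by
# findNonZero/findContours):
import numpy as np

def order_corners(pts):
    pts = pts.reshape(-1, 2)
    s = pts.sum(axis=1)         # x + y
    d = pts[:, 0] - pts[:, 1]   # x - y
    lu = pts[np.argmin(s)]      # top-left
    rd = pts[np.argmax(s)]      # bottom-right
    ru = pts[np.argmax(d)]      # top-right
    ld = pts[np.argmin(d)]      # bottom-left
    return lu, ru, ld, rd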
Example #57
import cv2
import numpy as np

image = cv2.imread("di_in_cache.jpeg")

gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray_image = cv2.bilateralFilter(gray_image, 11, 17, 17)
edged = cv2.Canny(gray_image, 30, 200)

# OpenCV 3.x three-value signature; 4.x returns only (contours, hierarchy)
_, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_TREE,
                              cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
candidates = []

for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) > 2:
        candidates.append(approx)

mask = np.zeros_like(gray_image)
cv2.drawContours(mask, candidates, 0, (255, 255, 255), -1)  # index 0 fills only the first candidate; -1 would draw all

#cv2.imshow("mask", mask)

out = np.zeros_like(gray_image)
out[mask == 255] = gray_image[mask == 255]

#cv2.imshow("woo", out)
ret, filter_image = cv2.threshold(gray_image, 0, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)
Example #58
    def segmentation(self, _ImgInput):
        '''
            Separate the characters from the license plate
            ...
            Parameters
                ----------
                _ImgInput : image of the license plate
                ----------
            Returns
                ----------
                List of the characters found on the plate
                ----------
        '''
        charactersFound = []
        imgDraw = np.array([])
        imgBinary = np.array([])
        # Pre-process the image
        # Convert the plate image to grayscale
        img_gray = cv2.cvtColor(_ImgInput, cv2.COLOR_BGR2GRAY)
        # Filter the image:
        img_filter = img_gray.copy()
        if self.gaussFilter != 0:
            img_filter = cv2.GaussianBlur(img_gray,
                                          (self.gaussFilter, self.gaussFilter),
                                          1)
        if self.bilaFilter != 0:
            img_filter = cv2.bilateralFilter(img_gray, self.bilaFilter, 75, 75)
        # Threshold the image
        img_binary = cv2.threshold(img_filter, self.threshold, 255,
                                   cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

        # Segment the characters
        plate = img_binary.copy()
        if (self.typePlate == 0):  # handle long (single-row) plates
            charactersFound, imgDraw = segment_characters_from_plate(
                _ImageInput=plate,
                _RatioMin=self.ratioMin,
                _RatioMax=self.ratioMax,
                _CropPadding=self.cropPadding)
            return charactersFound, imgDraw, img_binary

        if (self.typePlate == 1):  # handle short (two-row) plates
            upper_charactersFound = []
            lower_charactersFound = []
            imgDrawUp = np.array([])
            imgDrawLow = np.array([])

            # Split the plate into upper and lower halves and adjust their size
            utilitiesOpenCV = UtilitiesOpenCV()
            plate_upper = plate[0:int(plate.shape[0] / 2), 0:plate.shape[1]]
            plate_lower = plate[int(plate.shape[0] / 2):plate.shape[0],
                                0:plate.shape[1]]

            img_resize_upper = plate_upper.copy()
            img_resize_lower = plate_lower.copy()

            if self.sizeHeight != 0:
                img_resize_upper = utilitiesOpenCV.resize_height(
                    _ImgInput=img_resize_upper, _SizeHeight=self.sizeHeight)
                img_resize_lower = utilitiesOpenCV.resize_height(
                    _ImgInput=img_resize_lower, _SizeHeight=self.sizeHeight)
            if self.sizeWidth != 0:
                img_resize_upper = utilitiesOpenCV.resize_width(
                    _ImgInput=img_resize_upper, _SizeWidth=self.sizeWidth)
                img_resize_lower = utilitiesOpenCV.resize_width(
                    _ImgInput=img_resize_lower, _SizeWidth=self.sizeWidth)
            if self.border != 0:
                img_resize_upper = cv2.copyMakeBorder(
                    img_resize_upper,
                    top=self.border,
                    bottom=self.border,
                    left=self.border,
                    right=self.border,
                    borderType=cv2.BORDER_REPLICATE)
                img_resize_lower = cv2.copyMakeBorder(
                    img_resize_lower,
                    top=self.border,
                    bottom=self.border,
                    left=self.border,
                    right=self.border,
                    borderType=cv2.BORDER_REPLICATE)

            # Extract the characters from the upper and lower halves
            upper_charactersFound, imgDrawUp = segment_characters_from_plate(
                _ImageInput=img_resize_upper,
                _RatioMin=self.ratioMin,
                _RatioMax=self.ratioMax,
                _CropPadding=self.cropPadding)
            lower_charactersFound, imgDrawLow = segment_characters_from_plate(
                _ImageInput=img_resize_lower,
                _RatioMin=self.ratioMin,
                _RatioMax=self.ratioMax,
                _CropPadding=self.cropPadding)

            if (len(upper_charactersFound) > 0
                    and len(lower_charactersFound) > 0):
                charactersFound = upper_charactersFound + lower_charactersFound
                imgDraw = np.concatenate((imgDrawUp, imgDrawLow), axis=0)
            return charactersFound, imgDraw, img_binary
        return charactersFound, imgDraw, img_binary
def get_bags(frame,
             center_x=center_x,
             center_y=center_y,
             roi_width=roi_width,
             roi_height=roi_height,
             cool_box=None):
    g_kernel = 3
    bi_kernel = 4
    bi_area = 100
    min_area = 1700
    max_area = 45000
    LOW_edge = 50
    HIGH_edge = 139
    current_filter = 2
    lowH = 25
    lowS = 6
    lowV = 25
    upH = 25
    upS = 255
    upV = 255
    thresh = 42000
    lowblueH = 110
    lowblueS = 50
    lowblueV = 50
    upperblueH = 130
    upperblueS = 255
    upperblueV = 255
    bluethresh = 10000

    gauss_args = [g_kernel, g_kernel]
    bilat_args = [bi_kernel, bi_area, bi_area]

    # cv2.imshow("uncropped image", frame)
    orig = frame.copy()
    # np.asscalar is deprecated in newer NumPy; float(...) or .item() is equivalent
    roi = frame[int(np.asscalar(center_y - roi_height /
                                2)):int(np.asscalar(center_y +
                                                    roi_height / 2)),
                int(np.asscalar(center_x - roi_width /
                                2)):int(np.asscalar(center_x + roi_width / 2))]
    now = time.time()

    cv2.imshow('roi', roi)

    image = roi

    edged = []
    # Apply filters

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Contrast Limited Adaptive Histogram Equalization
    clahe = cv2.createCLAHE()
    cl1 = clahe.apply(gray)

    # gaussian blur
    # gauss = cv2.GaussianBlur(gray, (3, 3), 0)
    gauss = cv2.GaussianBlur(gray, (gauss_args[0], gauss_args[1]), 0)

    # global Histogram Equalization
    global_histeq = cv2.equalizeHist(gray)

    # bilateralFilter
    bilat = cv2.bilateralFilter(gray, bilat_args[0], bilat_args[1],
                                bilat_args[2])

    # total list of individual filters
    filtered = [gray, global_histeq, gauss, bilat]

    # perform Canny edge detection on all filters
    for i in range(len(filtered)):
        edged.append(cv2.Canny(filtered[i], LOW_edge, HIGH_edge))
        edged[i] = cv2.dilate(edged[i], None, iterations=1)
        edged[i] = cv2.erode(edged[i], None, iterations=1)
        # cv2.imshow("dilated" + str(i) + str(num), edged[i])
    cv2.imshow("dilated" + str(3), edged[current_filter])

    # findContours
    index = current_filter
    cnts = cv2.findContours(edged[index].copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    if len(cnts) != 0:

        # (cnts, _) = contours.sort_contours(cnts)
        orig = image.copy()
        big_box = find_box(cnts, max_area, image)

        bags = []
        for c in cnts:
            if cv2.contourArea(c) < min_area or cv2.contourArea(c) > max_area:
                continue

            type, min_rect = detect_type(big_box, orig, c, lowH, lowS, lowV,
                                         upH, upS, upV, thresh, lowblueH,
                                         lowblueS, lowblueV, upperblueH,
                                         upperblueS, upperblueV, bluethresh)
            # important!!!!
            # print(type)
            if type != 'box':  # note: 'type' shadows the builtin, as in the original
                bags.append([type, min_rect])
        cv2.imshow("orig", orig)
        return bags, big_box, orig
Example #60
def bi_demo(image):
    # d=0 lets OpenCV derive the filter diameter from sigmaSpace
    dst = cv.bilateralFilter(image, 0, 100, 2)
    cv.imshow('bi_demo', dst)
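# A usage sketch for bi_demo (hypothetical input file; assumes the `cv` alias
# used above, i.e. `import cv2 as cv`):
import cv2 as cv

img = cv.imread('lena.jpg')  # hypothetical input image
bi_demo(img)
cv.waitKey(0)
cv.destroyAllWindows()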