Example #1
def bwCompare(img, src):
	# compares a strictly black/white image
	# to a strictly black/white source
	# returns a percentage of match
	img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
	src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)

	# we want to compare black areas
	# black - off, white - on
	# so it's easier to invert, then invert again at end
	img = invert(img)
	src = invert(src)

	(imgRows, imgCols) = img.shape
	(srcRows, srcCols) = src.shape

	# resize the images
	cmpHeight = min(imgRows, srcRows)
	cmpWidth = min(imgCols, srcCols)

	cmpImg = cv2.resize(img, (cmpWidth, cmpHeight))
	cmpSrc = cv2.resize(src, (cmpWidth, cmpHeight))
	#cmpImg = np.resize(cmpImg, (cmpHeight, cmpWidth))
	#cmpSrc = np.resize(cmpSrc, (cmpHeight, cmpWidth))

	comparison = cv2.bitwise_and(cmpImg, cmpSrc)
	similar1 = cv2.bitwise_xor(invert(comparison), cmpSrc)
	similar2 = cv2.bitwise_xor(invert(comparison), cmpImg)
	similar = (np.mean(similar1) + np.mean(similar2)) / 2
	return similar / 255, cmpImg, cmpSrc, comparison
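
A minimal usage sketch for bwCompare (not from the original source): `invert` is assumed to be a thin wrapper around cv2.bitwise_not, and the file names are placeholders.

import cv2

def invert(img):
    # assumed helper: the snippet above calls invert() without defining it
    return cv2.bitwise_not(img)

img = cv2.imread("candidate.png")   # placeholder paths; BGR, mostly black/white images
src = cv2.imread("reference.png")
score, cmpImg, cmpSrc, comparison = bwCompare(img, src)
print("match: {:.1%}".format(score))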
Example #2
    def getPoly(self):
        
        self.image = self.image[300:]  # drop the top 300 rows ([0:][300:] in the original)
        imgray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        ret,thresh = cv2.threshold(imgray,0,255,0)
        
        #cv2.bitwise_not(thresh)
        height = thresh.shape[0]
        width = thresh.shape[1]
        tempImage = np.copy(thresh)
        fillPoint = None
        for x in range(height - 1, 0, -1):
            currPixel = thresh[x][width // 2]
            if currPixel != 0:
                fillPoint = (width // 2, x)
                print(fillPoint)
                break
        dim = (height + 2, width + 2)
        mask = np.zeros(dim, dtype=np.uint8)
        
        #Produces nothing if the fill point is used
        #If (0, 0) is used it fills in noise
        cv2.floodFill(thresh, mask, (0, 0), 255)
        cv2.imshow("filledImage", thresh)

        #removes most noise from the thresholded image
        noiseRemoved = cv2.bitwise_xor(thresh, tempImage)
        
        #Dilates in order to remove more noise
        cv2.dilate(noiseRemoved, np.ones((4,4), dtype=np.uint8), noiseRemoved, (-1, -1), 1)
        
        cv2.imshow("f", noiseRemoved)
Example #3
def logical_xor(img1, img2, device, debug=None):
    """Join two images using the bitwise XOR operator.

    Inputs:
    img1   = image object1, grayscale
    img2   = image object2, grayscale
    device = device number. Used to count steps in the pipeline
    debug  = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device = device number
    merged = joined image

    :param img1: numpy array
    :param img2: numpy array
    :param device: int
    :param debug: str
    :return device: int
    :return merged: numpy array
    """

    device += 1
    merged = cv2.bitwise_xor(img1, img2)
    if debug == 'print':
        print_image(merged, (str(device) + '_xor_joined.png'))
    elif debug == 'plot':
        plot_image(merged, cmap='gray')
    return device, merged
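
A hedged usage sketch for logical_xor above; the toy masks are illustrative, and passing debug=None avoids the external print_image/plot_image helpers.

import cv2
import numpy as np

a = np.zeros((50, 50), dtype=np.uint8)
b = np.zeros((50, 50), dtype=np.uint8)
a[:, :25] = 255   # left half white
b[:25, :] = 255   # top half white

device = 0
device, merged = logical_xor(a, b, device, debug=None)
# merged is white wherever exactly one of a/b is white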
Example #4
def doCapture(name_video, name_class, name_lesson, period):

	if not os.path.isdir("./" + name_class + "/"):
		os.mkdir("./" + name_class + "/")
	if not os.path.isdir("./" + name_class + "/" + name_lesson + "/"):
		os.mkdir("./" + name_class + "/" + name_lesson + "/")

	camera = cv2.VideoCapture(name_video)
	_, frame1 = camera.read()
	_, frame2 = camera.read()
	temp = cv2.bitwise_xor(frame1, frame1)  # XOR with itself: an all-zero canvas the size of a frame
	
	cc = list()
	picCount = 0
	framecount = 0
	
	while True:
		frame1 = frame2
		for i in range(frm):  # frm (frame stride) and fps are settings defined elsewhere in the original script
			_, frame2 = camera.read()

		if not _:
			break
		
		gray1 = gray(frame1)
		gray2 = gray(frame2)
		dframe = cv2.absdiff(gray1, gray2)
		
		(_, mask) = cv2.threshold(dframe, 5, 255, cv2.THRESH_BINARY)
		mask = cv2.dilate(mask, None, iterations = 6)

		contours, hierarchy = cv2.findContours(mask, 1, 2) 

		cc.append(contours)
		if len(cc) > 5: cc.pop(0)
		
		for contours in cc:
			for c in contours:
				x, y, w, h = cv2.boundingRect(c)
				cv2.rectangle(mask, (x, y), (x + w, y + h), 255, -1)
		
		new = cv2.bitwise_and(frame2, frame2, mask = cv2.bitwise_not(mask))
		old = cv2.bitwise_and(temp, temp, mask = mask)
		temp = cv2.add(new, old)
		
		cv2.imshow('original video', frame1)
		cv2.imshow('delta frame', dframe)
		cv2.imshow('mask', mask)
		cv2.imshow('record', temp)

		framecount = framecount + 1
		if framecount == (fps * period // frm):  # integer division keeps the comparison exact
			cv2.imwrite((name_class+'/'+name_lesson+'/'+'save{0}.jpg').format(picCount), temp)
			framecount = 0
			picCount = picCount + 1
			print('Captured...')
			
		cv2.waitKey(1)
		
	camera.release()
Example #5
File: views.py Project: tocttou/djtest
    def XOR(self):
        image_path1 = Filter.save_image(
            self.parents_node[0]['key'],
            self.dir_id,
            self.parents_node[0]['src']
        )
        image_path2 = Filter.save_image(
            self.parents_node[1]['key'],
            self.dir_id,
            self.parents_node[1]['src']
        )
        if image_path1 and image_path2:
            temp_save_path = '/tmp/{}/{}.{}'.format(
                self.dir_id,
                self.node_data_array[self.node_index]['key'],
                image_path1[1]
            )

            # function goes here
            img1 = cv2.imread(image_path1[0])
            img2 = cv2.imread(image_path2[0])
            img = cv2.bitwise_xor(img1, img2)
            # function ends here

            return self.make_new_src(
                temp_save_path,
                image_path1[1],
                img
            )
        else:
            return False
Example #6
File: test_cuda.py Project: ArkaJU/opencv
    def test_cudaarithm_logical(self):
        npMat1 = (np.random.random((128, 128)) * 255).astype(np.uint8)
        npMat2 = (np.random.random((128, 128)) * 255).astype(np.uint8)

        cuMat1 = cv.cuda_GpuMat()
        cuMat2 = cv.cuda_GpuMat()
        cuMat1.upload(npMat1)
        cuMat2.upload(npMat2)

        self.assertTrue(np.allclose(cv.cuda.bitwise_or(cuMat1, cuMat2).download(),
                                         cv.bitwise_or(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.bitwise_and(cuMat1, cuMat2).download(),
                                         cv.bitwise_and(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.bitwise_xor(cuMat1, cuMat2).download(),
                                         cv.bitwise_xor(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.bitwise_not(cuMat1).download(),
                                         cv.bitwise_not(npMat1)))

        self.assertTrue(np.allclose(cv.cuda.min(cuMat1, cuMat2).download(),
                                         cv.min(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.max(cuMat1, cuMat2).download(),
                                         cv.max(npMat1, npMat2)))
Example #7
	def setBorderTestClickImage(self, imageListList):

		borderList=[]
		borderImg=[]
		
		for imageList in imageListList:
			tmp0 = None
			for image in imageList:
				tmp1 = self.processImage(cv2.imread(image))
				tmp1 = cv2.bitwise_xor(self.image_ori,tmp1)
				if tmp0 is None:
					tmp0 = tmp1
				else:
					tmp0 = cv2.bitwise_or(tmp0,tmp1)
			borderImg.append(tmp0)

		borderList.append(self.detectBorder(borderImg[0],["y0","y1"]))
		borderList.append(self.detectBorder(borderImg[1],["x0","x1"]))
		borderList.append(self.detectBorder(borderImg[2],["y1"]))
		borderList.append(self.detectBorder(borderImg[3],["x0"]))

		self.x0=borderList[3]["x0"]
		self.y0=borderList[0]["y0"]
		self.x1=borderList[1]["x1"]
		self.y1=borderList[2]["y1"]
		self.cellW=borderList[1]["x1"]-borderList[1]["x0"]
		self.cellH=borderList[0]["y1"]-borderList[0]["y0"]
		
		self.puzzleWidth=(self.x1-self.x0+(self.cellW//2))//self.cellW
		self.puzzleHeight=(self.y1-self.y0+(self.cellH//2))//self.cellH

		self.cellXList=[]
		self.cellXmList=[]
		for xx in range(self.puzzleWidth):
			tmp = (self.x0*(self.puzzleWidth-xx)+self.x1*xx)//self.puzzleWidth
			self.cellXList.append(tmp)
			self.cellXmList.append(tmp+self.cellW//2)
		self.cellYList=[]
		self.cellYmList=[]
		for yy in range(self.puzzleHeight):
			tmp = (self.y0*(self.puzzleHeight-yy)+self.y1*yy)//self.puzzleHeight
			self.cellYList.append(tmp)
			self.cellYmList.append(tmp+self.cellH//2)
		
		self.puzzle={}
		self.puzzle["width"]=self.puzzleWidth
		self.puzzle["height"]=self.puzzleHeight
		self.puzzle["cellListList"]=[]
		for yy in range(self.puzzleHeight):
			cellList=[]
			for xx in range(self.puzzleWidth):
				cellDetectPointList = self.getCellDetectPointList(xx,yy)
				v=0
				u=1
				for cellDetectPoint in cellDetectPointList:
					if self.image_ori[cellDetectPoint["y"],cellDetectPoint["x"]] >= 0x7f:
						v |= u
					u <<= 1
				cellList.append(v)
			self.puzzle["cellListList"].append(cellList)
Example #8
def main():
    basePath = "../data/"

    imageFileOne = basePath + "4.1.04.tiff"
    imageFileTwo = basePath + "4.1.05.tiff"

    imageOne = cv2.imread(imageFileOne, 1)
    imageTwo = cv2.imread(imageFileTwo, 1)

    imageOneRGB = cv2.cvtColor(imageOne, cv2.COLOR_BGR2RGB)
    imageTwoRGB = cv2.cvtColor(imageTwo, cv2.COLOR_BGR2RGB)

    negativeImage = cv2.bitwise_not(imageOneRGB)
    andImage = cv2.bitwise_and(imageOneRGB, imageTwoRGB)
    orImage = cv2.bitwise_or(imageOneRGB, imageTwoRGB)
    xorImage = cv2.bitwise_xor(imageOneRGB, imageTwoRGB)

    imageNames = [imageOneRGB, imageTwoRGB, negativeImage, andImage, orImage, xorImage]
    imageTitles = ["Image One", "Image Two", "Negative", "AND", "OR", "XOR"]

    for i in range(6):
        plt.subplot(2, 3, i + 1)
        plt.imshow(imageNames[i])
        plt.title(imageTitles[i])
        plt.xticks([])
        plt.yticks([])

    plt.show()
Example #9
    def object_comparisson(self, roi):
        # Bitwise comparison against the original image.
        # Compares only inside the mask area, leaving 0s where the pixels
        # match and 255s where they differ.
        xor = cv2.bitwise_xor(self.object_roi(), roi, mask=self.object_mask())

        # Count the nonzero (mismatched) pixels; the lowest count is the best match
        return cv2.countNonZero(xor)
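
The same masked-XOR scoring can be tried standalone; a small sketch with toy patches (all names here are illustrative assumptions):

import cv2
import numpy as np

patch_a = np.zeros((10, 10), dtype=np.uint8)
patch_b = patch_a.copy()
patch_b[2:5, 2:5] = 255                        # introduce a small difference
mask = np.full((10, 10), 255, dtype=np.uint8)  # compare everywhere

xor = cv2.bitwise_xor(patch_a, patch_b, mask=mask)
print(cv2.countNonZero(xor))  # 9 differing pixels; 0 would mean a perfect match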
Example #10
	def transform(self, img):
		transformation = FireMask()
		res = transformation.transform(img)
		if self.previousImg is not None:
			mask = cv2.bitwise_xor(res, self.previousImg)
		else:
			mask = res
		self.previousImg = res
		return mask
Example #11
def logical_xor(img1, img2, device, debug=False):
    # Join two images using the bitwise XOR operator
    # img1, img2 = image objects, grayscale
    # device = device number. Used to count steps in the pipeline
    # debug = True/False. If True, print image
    device += 1
    merged = cv2.bitwise_xor(img1, img2)
    if debug:
        print_image(merged, (str(device) + "_xor_joined.png"))
    return device, merged
Example #12
def get_mask(character, border_limit):

    mask = np.zeros(character.shape, np.uint8)

    upper = character[0:22, 0:128]
    # upper_rgb = character_rgb[0:22, 0:128]
    lower = character[42:64, 0:128]
    # lower_rgb = character_rgb[42:64, 0:128]



    ret, thresh_upper = cv2.threshold(upper, 127, 255, 0)
    contours_upper, hierarchy = cv2.findContours(thresh_upper,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    ret, thresh_lower = cv2.threshold(lower, 127, 255, 0)
    contours_lower, hierarchy = cv2.findContours(thresh_lower,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contours_upper:
        # Height coordinates of the contour
        val = 128
        for cn in cnt:
            if cn[:, 1][0] < val:
                val = cn[:, 1][0]

        if val > (22 - border_limit):
            if cv2.contourArea(cnt) < 20 or 0 < cv2.arcLength(cnt, False) < 15:
                # cv2.drawContours(upper_rgb, [cnt], 0, (0, 255, 0), -1)
                cv2.drawContours(mask, [cnt], 0, 255, -1)

    for cnt in contours_lower:
       # Height coordinates of the contour
        val1 = 0
        for cn in cnt:

            if cn[:, 1][0] > val1:
                val1 = cn[:, 1][0]

        if val1 < border_limit:
            if cv2.contourArea(cnt) < 20 or 0 < cv2.arcLength(cnt, False) < 15:
                # cv2.drawContours(lower_rgb,[cnt],0,(0,255,0),-1)
                cv2.drawContours(mask[42:64, 0:128], [cnt], 0, 255, -1)



    character = cv2.bitwise_xor(character, mask)


    # cv2.imshow("char", character)
    # # cv2.imshow("upper", lower)
    # # # cv2.imshow("mask", mask1)
    # cv2.imwrite("C:/Users/Naleen/Desktop/New folder (2)/character.jpg", character)
    # # cv2.imwrite("C:/Users/Naleen/Desktop/New folder (2)/lower.jpg", lower)
    # # cv2.imwrite("C:/Users/Naleen/Desktop/New folder (2)/mask.jpg", mask)
    # #
    # cv2.waitKey(0)
    return character
Example #13
    def compareImage(self, img1, img2): # compare the images
        minDiff = 12000

        diffImg = cv2.bitwise_xor(img1, img2)
        kernel = numpy.ones((5,5),numpy.uint8)
        diffImg = cv2.erode(diffImg, kernel, iterations = 1)
        diff = cv2.countNonZero(diffImg)
        if diff < minDiff:
            return None
        else:
            return diff
Example #14
    def comparisson(self, roi):
        # Bitwise comparison against the original image.
        # Compares only inside the mask area, leaving 0s where the pixels
        # match and 255s where they differ.
        past_obj_roi = self._descriptors['frame']

        mask = self._descriptors['mask']

        xor = cv2.bitwise_xor(past_obj_roi, roi, mask=mask)

        # Count the nonzero (mismatched) pixels; the lowest count is the best match
        return cv2.countNonZero(xor)
Example #15
def motion_detect(frame):
    previous_frame = previous_frame_manager.get(len(frame[0]), (None, None,))[0]
    previous_previous_frame = previous_frame_manager.get(len(frame[0]), (None, None,))[1]
    return_frame = None
    if previous_previous_frame is not None:
        d1 = cv2.absdiff(frame, previous_frame)
        d2 = cv2.absdiff(previous_frame, previous_previous_frame)
        return_frame = cv2.bitwise_xor(d1, d2)
    previous_previous_frame = previous_frame
    previous_frame = frame
    previous_frame_manager[len(frame[0])] = (previous_frame, previous_previous_frame, )
    return return_frame
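
A minimal, self-contained version of the double-difference idea above; the frame source (camera index 0) and the display loop are illustrative assumptions, not part of the original:

import cv2

cap = cv2.VideoCapture(0)              # any frame source will do
ok, f0 = cap.read()
ok, f1 = cap.read()
ok, f2 = cap.read()
while ok:
    d1 = cv2.absdiff(f2, f1)
    d2 = cv2.absdiff(f1, f0)
    motion = cv2.bitwise_xor(d1, d2)   # keeps changes present in only one diff
    cv2.imshow("motion", motion)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    f0, f1 = f1, f2
    ok, f2 = cap.read()
cap.release()
cv2.destroyAllWindows()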
Example #16
def compute_similarity(img1, img2):
    rest = cv2.bitwise_xor(img1, img2)
    height, width = img1.shape
    pixels = height * width
    diffs = 0
    for x in range(width):
        for y in range(height):
            if rest[y, x] != 0:
                diffs += 1

    similarity = (pixels - diffs) / float(pixels)

    return similarity
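
The per-pixel loop above can be replaced by a single count; a sketch of the equivalent vectorized computation (the function name is hypothetical):

import cv2

def compute_similarity_fast(img1, img2):
    rest = cv2.bitwise_xor(img1, img2)
    # countNonZero tallies the differing pixels in one call
    return 1.0 - cv2.countNonZero(rest) / float(rest.size)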
Example #17
	def callback(self,data):
		try:
			cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
		except CvBridgeError as e:
			print(e)

		cv_image = cv2.flip(cv_image,0)
		self.h,self.w,self._ = cv_image.shape
		self.roi_w = 30
		self.roi_h = 30

		# Rectangle for ROI
		cv2.rectangle(cv_image,(int(self.w/2.0 - self.roi_w/2.0), self.h - self.roi_h),(int(self.w/2.0 + self.roi_w/2.0), self.h),(0,0,255),3) 
		self.image = cv_image
		self.roi_norm = self.get_roi(cv_image)
		self.hsv_roi = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
		self.dst = cv2.calcBackProject([self.hsv_roi],[0,1],self.roi_norm,[0,180,0,256],1)
		self.term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )

		# Now convolute with circular disc
		self.disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
		cv2.filter2D(self.dst,-1,self.disc,self.dst)

		# OTSU Algorithm
		self.ret,self.thresh = cv2.threshold(self.dst,127,192,0+cv2.THRESH_OTSU)
		self.thresh_copy = self.thresh.copy()

		#Performing Erosion and Dilation
		self.kernel = np.ones((9,9),np.uint8)
		self.thresh = cv2.erode(self.thresh, self.kernel, iterations = 1)
		self.thresh = cv2.dilate(self.thresh, self.kernel, iterations=1)

		self.thresh = cv2.merge((self.thresh,self.thresh,self.thresh))
		self.roi_norm = cv2.GaussianBlur(self.dst,(5,5),0)
		self.res = cv2.bitwise_xor(self.image,self.thresh)

		# Code to draw the contours and fill them in
		self.contours, self.hierarchy = cv2.findContours(self.thresh_copy, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
		self.cnts = sorted(self.contours, key = cv2.contourArea, reverse = True)[:5]
		cv2.drawContours(self.res, self.cnts, -1,(0,255,0),2)
		cv2.fillPoly(self.res,self.contours,(100,200,100))

		cv2.imshow('frame',self.res)
		cv2.waitKey(3)
		
		try:
			self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
		except CvBridgeError as e:
			print(e)

		rospy.loginfo('callback triggered')
Example #18
def arithmeticOps():
    """
    Performs different arithmetic operations on two input images
    :return:
    """
    day = cv2.imread("*****@*****.**")
    h1,w1 = day.shape[:2]
    night = cv2.imread("*****@*****.**")
    night = cv2.resize(night, (w1,h1)) #Resize the image

    plt.subplot(2,4,1)
    plt.imshow(day)
    plt.title("Mumbai@Day")

    plt.subplot(2,4,2)
    plt.imshow(night)
    plt.title("Mumbai@Night")


    plt.subplot(2,4,3)
    added = cv2.add(day, night)
    plt.imshow(added)
    plt.title("Addition")

    plt.subplot(2,4,4)
    subtract = cv2.subtract(day, night)
    plt.imshow(subtract)
    plt.title("Subtract")

    plt.subplot(2,4, 5)
    bitwise_and = cv2.bitwise_and(night,day)
    plt.imshow(bitwise_and)
    plt.title("bitwise_and")

    plt.subplot(2,4, 6)
    bitwise_not = cv2.bitwise_not(night)
    plt.imshow(bitwise_not)
    plt.title("bitwise_not")

    plt.subplot(2,4, 7)
    bitwise_or = cv2.bitwise_or(night,day)
    plt.imshow(bitwise_or)
    plt.title("bitwise_or")

    plt.subplot(2,4, 8)
    bitwise_xor = cv2.bitwise_xor(night,day)
    plt.imshow(bitwise_xor)
    plt.title("bitwise_xor")
    plt.show()
Example #19
    def object_comparisson(self, roi):
        """
        Assuming the part of the object we are looking for is left white,
        compare the amount of white between the saved object and the object
        currently being observed.
        """
        # Compute the mask of the image patch being examined
        roi_mask = self.calculate_mask(roi)

        # Bitwise comparison against the original image.
        # Compares only inside the mask area, leaving 0s where the pixels
        # match and 255s where they differ.
        xor = cv2.bitwise_xor(self.object_mask(), roi_mask, mask=self.object_mask())

        # Count the nonzero (mismatched) pixels; the lowest count is the best match
        return cv2.countNonZero(xor)
Example #20
def _get_motion_detection_frame(curr_min2, curr_min1, frame):
        d1 = cv2.absdiff(frame, curr_min1)
        d2 = cv2.absdiff(curr_min1, curr_min2)
        motion_detection_frame = cv2.bitwise_xor(d1, d2)

        # cv2.imshow('frame', frame)
        # cv2.imshow('curr_min1', curr_min1)
        # cv2.imshow('curr_min2', curr_min2)
        # cv2.imshow('d1', d1)
        # cv2.imshow('d2', d2)

        # time.sleep(1)
        # Remove any single white dots (background movements)
        motion_detection_frame  = cv2.erode(motion_detection_frame, KERNEL, iterations = 1)
        #  motion_detection_frame  = cv2.dilate(motion_detection_frame, KERNEL, iterations = 1)
        return motion_detection_frame
Example #21
    def bitwiseOp(self):
        # read image
        im = cv2.imread(self.Image)

        # first, let's draw a rectangle
        rectangle = np.zeros((300, 300), dtype = "uint8")
        cv2.rectangle(rectangle, (25, 25), (275, 275), 255, -1)
        cv2.imshow("Rectangle", rectangle)

        # secondly, let's draw a circle
        circle = np.zeros((300, 300), dtype = "uint8")
        cv2.circle(circle, (150, 150), 150, 255, -1)
        cv2.imshow("Circle", circle)

        # A bitwise 'AND' is only True when both rectangle and circle have
        # a value that is 'ON.' Simply put, the bitwise AND function
        # examines every pixel in rectangle and circle. If both pixels
        # have a value greater than zero, that pixel is turned 'ON' (i.e
        # set to 255 in the output image). If both pixels are not greater
        # than zero, then the output pixel is left 'OFF' with a value of 0.
        bitwiseAnd = cv2.bitwise_and(rectangle, circle)
        cv2.imshow("AND", bitwiseAnd)
        cv2.waitKey(0)

        # A bitwise 'OR' examines every pixel in rectangle and circle. If
        # EITHER pixel in rectangle or circle is greater than zero, then
        # the output pixel has a value of 255, otherwise it is 0.
        bitwiseOr = cv2.bitwise_or(rectangle, circle)
        cv2.imshow("OR", bitwiseOr)
        cv2.waitKey(0)

        # The bitwise 'XOR' is identical to the 'OR' function, with one
        # exception: both rectangle and circle are not allowed to BOTH
        # have values greater than 0.
        bitwiseXor = cv2.bitwise_xor(rectangle, circle)
        cv2.imshow("XOR", bitwiseXor)
        cv2.waitKey(0)

        # Finally, the bitwise 'NOT' inverts the values of the pixels. Pixels
        # with a value of 255 become 0, and pixels with a value of 0 become
        # 255.
        bitwiseNot = cv2.bitwise_not(circle)
        cv2.imshow("NOT", bitwiseNot)

        cv2.waitKey(0)
        return
Example #22
def test_carpet(input_path):
    img = cv2.imread(input_path, cv2.IMREAD_GRAYSCALE)
    cv2.threshold(img, thresh=0, maxval=255,
                  type=cv2.THRESH_OTSU | cv2.THRESH_BINARY,
                  dst=img)
    # cv2.imshow('img', img)
    _, b1 = largest_blob(img)
    _, b2 = largest_contour_blob(img)
    cv2.imshow('b1', b1)
    cv2.imshow('b2', b2)
    cv2.imshow('db', cv2.bitwise_xor(b1, b2))
    cv2.waitKey()
    cv2.destroyAllWindows()
    return  # early exit: the exploratory code below is never reached

    # ret, blob = largest_blob(img)
    ret, blob = True, img
    if ret:
        # cv2.imshow('blob', blob)

        ker = cv2.getStructuringElement(cv2.MORPH_RECT, ksize=(11, 11))
        eroded = cv2.morphologyEx(blob, op=cv2.MORPH_ERODE, kernel=ker)
        eroded = cv2.copyMakeBorder(eroded, 10, 10, 10, 10,
                                    cv2.BORDER_CONSTANT,
                                    value=0)

        # cv2.imshow('erode', eroded)

        ret, eroded = largest_contour_blob(eroded)
        # cv2.imshow('blob_erode', eroded)

        dilated = cv2.morphologyEx(eroded, op=cv2.MORPH_DILATE, kernel=ker)
        # cv2.imshow('blob_dilate', dilated)

        ker = cv2.getStructuringElement(cv2.MORPH_RECT, ksize=(101, 101))
        closed = cv2.morphologyEx(dilated, op=cv2.MORPH_CLOSE, kernel=ker)
        cv2.imshow(input_path + '_blob_closed', closed)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #23
def logical_xor(bin_img1, bin_img2):
    """Join two images using the bitwise XOR operator.

    Inputs:
    bin_img1   = Binary image data to be compared to bin_img2
    bin_img2   = Binary image data to be compared to bin_img1

    Returns:
    merged     = joined binary image

    :param bin_img1: numpy.ndarray
    :param bin_img2: numpy.ndarray
    :return merged: numpy.ndarray
    """

    params.device += 1
    merged = cv2.bitwise_xor(bin_img1, bin_img2)
    if params.debug == 'print':
        print_image(merged, os.path.join(params.debug_outdir, str(params.device) + '_xor_joined.png'))
    elif params.debug == 'plot':
        plot_image(merged, cmap='gray')
    return merged
Example #24
def diffImg(t0, t1, t2, img):
  width = len(img[0])
  height = len(img)
  number_of_changes = 0
  d1 = cv2.absdiff(t2, t1)
  d2 = cv2.absdiff(t1, t0)
  result = cv2.bitwise_xor(d1, d2)
  cv2.threshold(result, 40, 255, cv2.THRESH_BINARY, result)
  min_x = width
  max_x = 0
  min_y = height
  max_y = 0
  
  roi = None  # stays None if no motion is found

  # search for the coordinates that bound the detected motion
  for i in range(0,height,10):
      for j in range(0,width,10):
          if result[i,j]==255:
              number_of_changes=number_of_changes+1
              if min_x > j:
                  min_x = j
              if max_x < j:
                  max_x = j
              if min_y > i:
                  min_y = i
              if max_y < i:
                  max_y = i
  if number_of_changes>0:
    if min_x-10 > 0:
      min_x -= 10
    if min_y-10 > 0:
      min_y -= 10
    if max_x+10 < width-1:
      max_x += 10
    if max_y+10 < height-1:
      max_y += 10
    roi = img[min_y:max_y, min_x:max_x]       # crop the motion region from the frame

  return roi  # returns the cropped motion region (None when nothing moved)
Example #25
def getIrisUsingThreshold(gray,pupil):
    ''' Given a gray level image and pupil cluster return a list of iris locations(threshold for iris)'''
    gray2=np.copy(gray)
    #Resize for faster performance
    smallI = cv2.resize(gray, (40,40))
    M,N = smallI.shape
    #Generate coordinates in a matrix
    X,Y = np.meshgrid(range(M),range(N))
    #Make coordinates and intensity into one vectors
    z = smallI.flatten()
    x = X.flatten()
    y = Y.flatten()
    O = len(x)
    #make a feature vectors containing (x,y,intensity)
    features = np.zeros((O,3))
    features[:,0] = z
    features[:,1] = y/2  # divide so that the distance of position weighs less than intensity
    features[:,2] = x/2
    features = np.array(features,'f')
    # cluster data
    centroids,variance = kmeans(features,3)
    centroids.sort(axis = 0) # Sorting clusters according to intensity (ascending)
    irisPupilCluster = centroids[1]

    #   inverted threshold irisPupilCluster (pupil and iris white)
    val,binIrisPupil = cv2.threshold(gray2, irisPupilCluster[0], 255, cv2.THRESH_BINARY_INV)
    #   normal threshold pupilCluster (pupil black, iris white)
    val,binPupil =  cv2.threshold(gray2, pupil, 255, cv2.THRESH_BINARY)

    irisPupilCluster=cv2.cvtColor(binIrisPupil, cv2.COLOR_GRAY2RGB)
    binPupil=cv2.cvtColor(binPupil, cv2.COLOR_GRAY2RGB)

    #   bitwise xor (exactly one should be white - result iris black)
    iris=cv2.bitwise_xor(irisPupilCluster,binPupil)
    #   invert the colors of the resulting image (black to white iris)
    iris=255-iris
    return iris
Example #26
blank = np.zeros((400,400), dtype='uint8')

rect = cv.rectangle(blank.copy(), (30,30), (370,370), 255, -1)
cir = cv.circle(blank.copy(), (200,200), 200, 255, -1)

cv.imshow('rectangle', rect)
cv.imshow('circle', cir)


#1. AND
bitand = cv.bitwise_and(rect, cir)
cv.imshow('AND', bitand)

#2. OR
bitor = cv.bitwise_or(rect, cir)
cv.imshow('OR', bitor)

#3. XOR(non-intersecting regions of the two figures)
bitxor = cv.bitwise_xor(rect, cir)
cv.imshow('XOR', bitxor)

#4. NOT
bitnot = cv.bitwise_not(rect)
cv.imshow('NOT', bitnot)

cv.waitKey(0)
Example #27
def bit_xor():
    # XOR the original image with a copy whose fingerprint area was painted
    # black (matching pixels come out black, differing pixels white)
    go_xor = cv2.bitwise_xor(draw_circle()[1], frame)
    # apply a median blur so only the fingerprint area is smoothed
    go_xor = cv2.medianBlur(go_xor, 3)
    return go_xor
Example #28
        gray = cv2.GaussianBlur(gray, (3, 3), 0)

        mask = cv2.inRange(hsv, lower, upper)

        fundo = cv2.bitwise_and(fundo, fundo, mask=mask)

        res = cv2.bitwise_and(frame, frame, mask=mask)
        res = cv2.medianBlur(res, 5)

        norm = cv2.threshold(cv2.cvtColor(res, cv2.COLOR_BGR2GRAY), 50, 255,
                             1)[1]
        norm = np.invert(norm)
        norm = cv2.dilate(norm, None, iterations=1)

        edged = cv2.erode(norm, None, iterations=1)
        res2 = cv2.bitwise_xor(frame, cv2.cvtColor(edged, cv2.COLOR_GRAY2BGR))
        res2 = cv2.bitwise_or(frame, res2)

        final1 = cv2.hconcat([frame, hsv])
        final2 = cv2.hconcat([fundo, fundo + res2])
        final3 = cv2.vconcat([final1, final2])

        cv2.imshow('Resultado', final3)

        c = cv2.waitKey(1)
        if c == ord('q'):
            break

    cv2.destroyAllWindows()
Example #29
def lineCleaner(pageCut, lineInterval, numberPage):
    pageCopy = pageCut.copy()

    for i in range(0, len(lineInterval)):
        if i == 0:
            lineSection = pageCopy[lineInterval[0][0]:lineInterval[i + 1][0], :]
        elif i == len(lineInterval) - 1:
            lineSection = pageCopy[lineInterval[-2][1]:lineInterval[-1][1], :]
        else:
            lineSection = pageCopy[lineInterval[i - 1][1]:lineInterval[i + 1][0] + 15, :]
        #if i == 0:
        #    lineSection = pageCopy[0:lineInterval[i + 1][0], :]
        #else:
        #    lineSection = pageCopy[lineInterval[i - 1][1]:lineInterval[i + 1][0]+20, :]

        lineCopy = lineSection.copy()
        output = cv.connectedComponentsWithStats(lineCopy)
        prop = regionprops(output[1])
        if i < len(lineInterval) - 1:
            limitSet = int((lineInterval[i + 1][0] - lineInterval[i][1]) / 4)
        else:
            limitSet = int((lineInterval[i][0] - lineInterval[i][1]) / 4)
        boundingBox = []

        for x in range(0, len(prop)):
            py, px = prop[x].centroid

            limitLine = lineInterval[i][1] + limitSet if not i else \
                lineInterval[i][1] + limitSet - lineInterval[i - 1][1]

            #if i == 0:
            #    limitLine = lineInterval[i][1] + limitSet
            #else:
            #    limitLine = lineInterval[i][1] + limitSet - lineInterval[i - 1][1]

            if py >= limitLine:
                currentSection = lineCopy[prop[x].bbox[0]:prop[x].bbox[2],
                                          prop[x].bbox[1]:prop[x].bbox[3]]
                convexHull = prop[x].convex_image
                X, Y = convexHull.shape
                for m in range(X):
                    for n in range(Y):
                        if convexHull[m][n] == 1:
                            currentSection[m][n] = 0
            else:
                boundingBox.append(prop[x].bbox)

        if i == 0:
            pageCopy[0:lineInterval[1, 0], :] = cv.bitwise_xor(
                pageCopy[0:lineInterval[1, 0], :], lineCopy)
        elif i == len(lineInterval) - 1:
            pageCopy[lineInterval[i - 1, 1]:lineInterval[i, 1], :] = cv.bitwise_xor(
                pageCopy[lineInterval[i - 1, 1]:lineInterval[i, 1], :], lineCopy)
        else:
            pageCopy[lineInterval[i - 1][1]:lineInterval[i + 1, 0] + 15, :] = cv.bitwise_xor(
                pageCopy[lineInterval[i - 1][1]:lineInterval[i + 1, 0] + 15, :], lineCopy)

        #Setting words inside the line
        wordInLine(boundingBox, lineCopy, i, numberPage)

        #Write in folder, all lines in page
        linePath = './lines/line_' + str(i + 1) + '.png'
        cv.imwrite(linePath, lineCopy)
Example #30
def line_detection(img_ori, img_gray, iterations=3):
    """
    Summary line.
    using series of cv2 methods, detect if there's combination of lines that possible create a image then decide wether that image is a table or not by counting the inner rectangle.

    Parameters:
    img_ori : return from cv2.imread
    img_gray : return from cv2.imread from grayscale
    iterations (int) : iteration to do erode and dilatation  

    Return:
    tables : list of list of integer, contain boundingboxes of table detected with format of [[x1,y1,w1,h1],[x2,y2,w2,h2],...]
    non_table : list of list of integer, contain boundingboxes of non_table detected with format of [[x1,y1,w1,h1],[x2,y2,w2,h2],...]
    """
    filename = ''
    df_boxes_outer_all = pd.DataFrame()
    df_line_horizontals = pd.DataFrame(columns=['filename', 'x1', 'x2', 'y'])
    df_line_verticals = pd.DataFrame(columns=['filename', 'y1', 'y2', 'x'])
    image = img_ori
    img = image
    gray = img_gray
    height_, width_ = gray.shape
    # threshold the image to a binary image
    thresh, img_bin = cv2.threshold(gray, 128, 255,
                                    cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    img_bin = 255 - img_bin  # invert the image
    kernel_len = np.array(img).shape[1] // 100
    # Defining a vertical kernel to detect all vertical lines of the image
    ver_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_len))
    # Defining a horizontal kernel to detect all horizontal lines of the image
    hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_len, 1))
    # A 2x2 kernel for general erosion
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
    # Use vertical kernel to detect and save the vertical lines in a jpg
    image_1 = cv2.erode(img_bin, ver_kernel, iterations=iterations)
    vertical_lines = cv2.dilate(image_1, ver_kernel, iterations=iterations)
    # Use horizontal kernel to detect and save the horizontal lines in a jpg
    image_2 = cv2.erode(img_bin, hor_kernel, iterations=iterations)
    horizontal_lines = cv2.dilate(image_2, hor_kernel, iterations=iterations)
    # Eroding and thresholding the vertical lines
    img_v = cv2.erode(~vertical_lines, kernel, iterations=iterations)
    thresh, img_v = cv2.threshold(img_v, 128, 255,
                                  cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    kernel = np.ones((2, 2), np.uint8)
    img_v = cv2.erode(img_v, kernel, iterations=iterations)
    # Eroding and thresholding the horizontal lines
    img_h = cv2.erode(~horizontal_lines, kernel, iterations=2)
    thresh, img_h = cv2.threshold(img_h, 128, 255,
                                  cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    gray = img_h
    # All Lines
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    minLineLength = 100
    lines = cv2.HoughLinesP(image=edges,
                            rho=0.02,
                            theta=np.pi / 500,
                            threshold=10,
                            lines=np.array([]),
                            minLineLength=minLineLength,
                            maxLineGap=100)
    if lines is None:
        lines_detected = False
    else:
        lines_detected = True
    horizontal_detected = False
    if (lines_detected):
        tolerance = 5
        # Horizontal Only
        horizontal_lines = [
            list(line[0]) for line in lines
            if (abs(line_angle(line[0])) > 180 - tolerance) and (
                abs(line_angle(line[0])) < 180 + tolerance)
        ]
        horizontal_detected = len(horizontal_lines) > 0
        if (horizontal_detected):
            df_horizontal = pd.DataFrame(horizontal_lines,
                                         columns=['x1', 'y1', 'x2', 'y2'])
            x1x2 = [
                list(x)
                for x in df_horizontal.apply(switchHigherLowerHorizontal,
                                             axis=1)
            ]
            df_horizontal[['x1', 'x2']] = x1x2
            df_horizontal.sort_values(['y1', 'x1'], inplace=True)
            df_horizontal.reset_index(drop=True, inplace=True)
            y_th = 20
            separate_line_index = df_horizontal[
                df_horizontal.diff()['y1'] > y_th].index.tolist()
            separate_line_index = [0] + separate_line_index + [
                df_horizontal.shape[0] - 1
            ]
            line_index = []
            for i in range(len(separate_line_index) - 1):
                for j in range(separate_line_index[i],
                               separate_line_index[i + 1]):
                    line_index.append(i)
            line_index_df = pd.DataFrame(line_index, columns=['line_index'])
            df_h = pd.concat([line_index_df, df_horizontal], axis=1)
            df_h.fillna(method='ffill', inplace=True)
            df_h_sort = pd.DataFrame(columns=df_h.columns)
            indexes = df_h['line_index'].unique()
            for index in indexes:
                df_temp = df_h[df_h['line_index'] == index].sort_values('x1')
                df_h_sort = pd.concat([df_h_sort, df_temp], axis=0)
            df_h = df_h_sort
            df_h.reset_index(drop=True, inplace=True)
            h_lines = list(df_h['line_index'].unique())
            line_no = 1
            df_line_no = pd.DataFrame(columns=['line_no'])
            for h_line in h_lines:
                line_no_list = []
                df_line_no_temp = pd.DataFrame(columns=['line_no'])
                df_temp = df_h[df_h['line_index'] == h_line]
                df_temp_x_sort = df_temp.sort_values('x1').reset_index(
                    drop=True)
                max_x = df_temp_x_sort['x2'][0]
                min_column_width = 200
                for i in range(df_temp_x_sort.shape[0]):
                    if (df_temp_x_sort['x1'][i] <= max_x + min_column_width):
                        line_no_list.append(line_no)
                        if (max_x < df_temp_x_sort['x2'][i]):
                            max_x = df_temp_x_sort['x2'][i]
                    else:
                        line_no += 1
                        line_no_list.append(line_no)
                        max_x = df_temp_x_sort['x2'][i]
                df_line_no_temp['line_no'] = line_no_list
                df_line_no = pd.concat([df_line_no, df_line_no_temp], axis=0)
                line_no += 1
            df_line_no.reset_index(drop=True, inplace=True)
            df_h_final = pd.concat([df_h, df_line_no], axis=1)
            line_no = list(df_h_final['line_no'].unique())
            img_temp = img
            df_line_horizontal = pd.DataFrame(
                columns=['filename', 'x1', 'x2', 'y'])
            for line in line_no:
                x1 = df_h_final[df_h_final['line_no'] == line]['x1'].min()
                x2 = df_h_final[df_h_final['line_no'] == line]['x2'].max()
                y = int(df_h_final[df_h_final['line_no'] == line]['y1'].mean())
                cv2.line(img_temp, (x1, y), (x2, y), (0, 0, 255), 3,
                         cv2.LINE_AA)
                df_line_horizontal.loc[df_line_horizontal.shape[0]] = [
                    filename, x1, x2, y
                ]
            df_line_horizontals = pd.concat(
                [df_line_horizontals, df_line_horizontal], axis=0)
            df_line_horizontals.reset_index(inplace=True, drop=True)

    img = image
    gray = img_v
    # All Lines
    edges = cv2.Canny(gray, 225, 250, apertureSize=3)
    minLineLength = 50
    lines = cv2.HoughLinesP(image=edges,
                            rho=0.02,
                            theta=np.pi / 500,
                            threshold=10,
                            lines=np.array([]),
                            minLineLength=minLineLength,
                            maxLineGap=100)
    # Vertical Only
    tolerance = 5
    vertical_detected = False
    if lines is None:
        lines_detected = False
    else:
        lines_detected = True
    if (lines_detected):
        vertical_lines = [
            list(line[0]) for line in lines
            if (abs(line_angle(line[0])) > 90 - tolerance) and (
                abs(line_angle(line[0])) < 90 + tolerance)
        ]
        vertical_detected = len(vertical_lines) > 0
        if (vertical_detected):
            vertical_detected = len(lines) > 0
            df_vertical = pd.DataFrame(vertical_lines,
                                       columns=['x1', 'y1', 'x2', 'y2'])
            y1y2 = [
                list(x)
                for x in df_vertical.apply(switchHigherLowerVertical, axis=1)
            ]
            df_vertical[['y1', 'y2']] = y1y2
            df_vertical.sort_values(['x1', 'y2'], inplace=True)
            df_vertical.reset_index(drop=True, inplace=True)
            x_th = 20
            separate_line_index = df_vertical[
                df_vertical.diff()['x1'] > x_th].index.tolist()
            separate_line_index = [0] + \
                separate_line_index+[df_vertical.shape[0]-1]
            line_index = []
            for i in range(len(separate_line_index) - 1):
                for j in range(separate_line_index[i],
                               separate_line_index[i + 1]):
                    line_index.append(i)
            line_index_df = pd.DataFrame(line_index, columns=['line_index'])
            df_v = pd.concat([line_index_df, df_vertical], axis=1)
            df_v.fillna(method='ffill', inplace=True)
            df_v_sort = pd.DataFrame(columns=df_v.columns)
            indexes = df_v['line_index'].unique()
            for index in indexes:
                df_temp = df_v[df_v['line_index'] == index].sort_values('y2')
                df_v_sort = pd.concat([df_v_sort, df_temp], axis=0)
            df_v = df_v_sort
            df_v.reset_index(drop=True, inplace=True)
            v_lines = list(df_v['line_index'].unique())
            line_no = 1
            df_line_no = pd.DataFrame(columns=['line_no'])
            for v_line in v_lines:
                line_no_list = []
                df_line_no_temp = pd.DataFrame(columns=['line_no'])
                df_temp = df_v[df_v['line_index'] == v_line]
                df_temp_y_sort = df_temp.sort_values('y2').reset_index(
                    drop=True)
                max_y = df_temp_y_sort['y1'][0]
                min_row_width = 100
                for i in range(df_temp_y_sort.shape[0]):
                    if (df_temp_y_sort['y2'][i] <= max_y + min_row_width):
                        line_no_list.append(line_no)
                        if (max_y < df_temp_y_sort['y1'][i]):
                            max_y = df_temp_y_sort['y1'][i]
                    else:
                        line_no += 1
                        line_no_list.append(line_no)
                        max_y = df_temp_y_sort['y1'][i]
                df_line_no_temp['line_no'] = line_no_list
                df_line_no = pd.concat([df_line_no, df_line_no_temp], axis=0)
                line_no += 1
            df_line_no.reset_index(drop=True, inplace=True)
            df_v_final = pd.concat([df_v, df_line_no], axis=1)
            line_no = list(df_v_final['line_no'].unique())
            img_temp = img
            df_line_vertical = pd.DataFrame(
                columns=['filename', 'y1', 'y2', 'x'])
            for line in line_no:
                y1 = int(df_v_final[df_v_final['line_no'] == line]['y1'].max())
                y2 = int(df_v_final[df_v_final['line_no'] == line]['y2'].min())
                x = int(df_v_final[df_v_final['line_no'] == line]['x1'].mean())
                cv2.line(img_temp, (x, y1), (x, y2), (0, 0, 255), 3,
                         cv2.LINE_AA)
                df_line_vertical.loc[df_line_vertical.shape[0]] = [
                    filename, y1, y2, x
                ]
            df_line_verticals = pd.concat(
                [df_line_verticals, df_line_vertical], axis=0)
            df_line_verticals.reset_index(inplace=True, drop=True)

    img = image
    # Horizontal Line
    if (horizontal_detected):
        for i in range(df_line_horizontal.shape[0]):
            df_temp = df_line_horizontal.loc[i]
            x1, x2, y = df_temp[['x1', 'x2', 'y']].values
            cv2.line(img, (x1, y), (x2, y), (0, 0, 255), 3, cv2.LINE_AA)
    # Vertical Line
    if (vertical_detected):
        for i in range(df_line_vertical.shape[0]):
            df_temp = df_line_vertical.loc[i]
            y1, y2, x = df_temp[['y1', 'y2', 'x']].values
            cv2.line(img, (x, y1), (x, y2), (0, 0, 255), 3, cv2.LINE_AA)

    blank_image = np.zeros(shape=list(image.shape), dtype=np.uint8)
    blank_image.fill(255)
    df_line_horizontal = df_line_horizontals[df_line_horizontals['filename'] ==
                                             filename]
    df_line_vertical = df_line_verticals[df_line_verticals['filename'] ==
                                         filename]
    df_line_horizontal.reset_index(drop=True, inplace=True)
    df_line_vertical.reset_index(drop=True, inplace=True)
    for i in range(df_line_horizontal.shape[0]):
        df_temp = df_line_horizontal.loc[i]
        x1, x2, y = df_temp[['x1', 'x2', 'y']].values
        cv2.line(blank_image, (x1, y), (x2, y), (0, 0, 0), 3, cv2.LINE_AA)
    for i in range(df_line_vertical.shape[0]):
        df_temp = df_line_vertical.loc[i]
        y1, y2, x = df_temp[['y1', 'y2', 'x']].values
        cv2.line(blank_image, (x, y1), (x, y2), (0, 0, 0), 3, cv2.LINE_AA)

    # find the contours of rectangle from the line outline
    img_vh = cv2.cvtColor(blank_image, cv2.COLOR_BGR2GRAY)
    img = img_gray
    bitxor = cv2.bitwise_xor(img, img_vh)
    # Detect contours for following box detection
    contours = cv2.findContours(img_vh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    # Sort all the contours by top to bottom.
    contours, boundingBoxes = sort_contours(contours, method="top-to-bottom")

    # Retrieve Cell Position
    # Creating a list of heights for all detected boxes
    heights = [boundingBoxes[i][3]
               for i in range(len(boundingBoxes))]  # Get mean of heights
    mean = np.mean(heights)
    BoundingBoxes = [[filename] + list(boundingBox)
                     for boundingBox in list(boundingBoxes)]
    df_boxes = pd.DataFrame(BoundingBoxes,
                            columns=['filename', 'x', 'y', 'w', 'h'])
    df_boxes_copy = df_boxes.copy()
    h_max = 0.95 * img.shape[0]
    h_min = height_ // 50
    w_min = width_ // 50
    df_boxes_content = df_boxes[(df_boxes['h'] < h_max)
                                & (df_boxes['h'] > height_ // 100) &
                                (df_boxes['w'] > width_ // 100)]
    content_index = df_boxes_content.index
    # Table Detection
    df_boxes = df_boxes[(df_boxes['h'] < h_max) & (df_boxes['h'] > h_min) &
                        (df_boxes['w'] > w_min)]
    boxes_index = df_boxes.index
    # Remove cell inside another cell
    skip_inside_box_index_from_zero = []
    skip_inside_box_index = []
    for i in range(df_boxes.shape[0] - 1):
        if i not in skip_inside_box_index_from_zero:
            for j in range(i + 1, df_boxes.shape[0]):
                A = df_boxes.values[i][1:]
                B = df_boxes.values[j][1:]
                if (check_B_inside_A(A, B)):
                    skip_inside_box_index_from_zero.append(j)
                    skip_inside_box_index.append(boxes_index[j])
                elif (check_B_inside_A(B, A)):
                    skip_inside_box_index_from_zero.append(i)
                    skip_inside_box_index.append(boxes_index[i])
    df_boxes_outer = df_boxes[~df_boxes.index.isin(skip_inside_box_index)]
    df_boxes_outer_all = pd.concat([df_boxes_outer_all, df_boxes_outer],
                                   axis=0)
    df_boxes_final = df_boxes_outer
    table = []
    non_table = []
    count_table = 0
    count_nontable = 0
    # count the inner rectangle of each outer box
    for i in range(df_boxes_outer.shape[0]):
        df_temp = df_boxes_outer.values[i]
        # save image
        x = df_temp[1]
        y = df_temp[2]
        w = df_temp[3]
        h = df_temp[4]
        ############### COUNT INNER RECT FOR EACH OUTER BOX ############
        start_index = df_boxes_outer.index[i]
        if (i == df_boxes_outer.shape[0] - 1):
            end_index = content_index[-1]
        else:
            end_index = df_boxes_outer.index[i + 1]
        scan_index = [
            content for content in content_index
            if content > start_index and content < end_index
        ]
        rects_inside_number = 0
        for index in scan_index:
            A = df_boxes_outer.values[i][1:]
            B = df_boxes_content.loc[index].values[1:]
            if (check_B_inside_A(A, B)):
                rects_inside_number += 1
        threshold_table = 5  # if inner_rect>threshold_table -> table, vice versa
        if (rects_inside_number >= threshold_table):
            table.append([])
            table[count_table] = [int(x), int(y), int(w), int(h)]
            count_table += 1
        else:
            non_table.append([])
            non_table[count_nontable] = [int(x), int(y), int(w), int(h)]
            count_nontable += 1
    return (table, non_table)
Example #31
def processTables(image, count):

    img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #thresholding the image to a binary image
    #thresh,img_bin = cv2.threshold(img,128,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    #imgWarpGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    #img_bin= cv2.adaptiveThreshold(img, 255, 1, 1, 7, 2)
    img_bin = cv2.adaptiveThreshold(~img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 91, -2)
    #img_bin = cv2.bitwise_not(img_bin)
    horizontal = img_bin.copy()
    vertical = img_bin.copy()
    # Specify size on horizontal axis
    scale = my_utils.valScale()
    #print(horizontal.shape[0])
    #print("X")
    #print(horizontal.shape[1])

    horizontalsize = int(horizontal.shape[1] / scale)
    # Create structure element for extracting horizontal lines through morphology operations
    horizontalStructure = cv2.getStructuringElement(cv2.MORPH_RECT,
                                                    (horizontalsize, 1))
    # Apply morphology operations
    horizontal = cv2.erode(horizontal, horizontalStructure, (-1, -1))
    horizontal = cv2.dilate(horizontal, horizontalStructure, (-1, -1))
    #dilate(horizontal, horizontal, horizontalStructure, Point(-1, -1)); // expand horizontal lines
    #Show extracted horizontal lines
    #imshow("horizontal", horizontal)
    #Specify size on vertical axis
    verticalsize = int(vertical.shape[0] / scale)
    # Create structure element for extracting vertical lines through morphology operations
    verticalStructure = cv2.getStructuringElement(cv2.MORPH_RECT,
                                                  (1, verticalsize))
    #Apply morphology operations
    vertical = cv2.erode(vertical, verticalStructure, (-1, -1))
    vertical = cv2.dilate(vertical, verticalStructure, (-1, -1))
    #dilate(vertical, vertical, verticalStructure, Point(-1, -1)); // expand vertical lines
    # Show extracted vertical lines
    #imshow("vertical", vertical);

    # create a mask which includes the tables
    mask = horizontal + vertical
    newsize = vertical.shape[0] / vertical.shape[1]
    #print(newsize)
    cv2.imwrite("scanned/mask" + str(count) + ".jpg", mask)
    #maskRes = cv2.resize(mask, ( 800,round(newsize*800)))
    #cv2.imshow("TABLE MASK", maskRes)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
    img_vh = cv2.addWeighted(vertical, 0.5, horizontal, 0.5,
                             0.0)  # Eroding and thresholding the image
    img_vh = cv2.erode(~img_vh, kernel, iterations=2)
    thres = my_utils.valTableTrackbars()
    thresh, img_vh = cv2.threshold(img_vh, thres[0], thres[1],
                                   cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    bitxor = cv2.bitwise_xor(img, img_vh)
    bitnot = cv2.bitwise_not(bitxor)
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    print(hierarchy)
    #image = cv2.drawContours(image, contours, -1, (0,255,0), 3)
    #joints = cv2.bitwise_and(horizontal, vertical)
    #jointsRes = cv2.resize(joints, ( 800,round(newsize*800)))
    #cv2.imshow("JOINTS",jointsRes)
    #contours, hierarchy = cv2.findContours(mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    imgWithContours = img.copy()
    key = cv2.waitKey(1)
    if key == ord('q'):
        cv2.destroyAllWindows()
        sys.exit()
    if key == ord('c'):
        # cv2.rectangle(stackedImage, ((int(stackedImage.shape[1] / 2) - 230), int(stackedImage.shape[0] / 2) + 50),(1100, 350), (0, 255, 0), cv2.FILLED)
        #cv2.putText(stackedImage, "Se esta procesando la tabla!", (int(stackedImage.shape[1] / 2) - 200, int(stackedImage.shape[0] / 2)),cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 255), 5, cv2.LINE_AA)
        #cv2.imshow("PROCESAMIENTO DE TABLA",stackedImage)
        # Sort all the contours by top to bottom.
        #contours, boundingBoxes = sort_contours(contours, method="top-to-bottom")

        #Creating a list of heights for all detected boxes
        #heights = [boundingBoxes[i][3] for i in range(len(boundingBoxes))]

        #Get mean of heights
        #mean = np.mean(heights)

        #Create list box to store all boxes in
        box = []
        # Get position (x,y), width and height for every contour and show the contour on image
        for c in contours:
            x, y, w, h = cv2.boundingRect(c)
            if (w < 2000 and h < 700):
                #print("box1:"+x+','+w+','+y+','+h)
                image = cv2.rectangle(image, (x, y), (x + w, y + h),
                                      random_color(), -1)
                box.append([x, y, w, h])
                cv2.imshow("CELL DETECTED", img[y:y + h, x:x + w])
                cv2.waitKey(0)
            else:

                image = cv2.rectangle(image, (x, y), (x + w, y + h),
                                      random_color(), 3)
                print("box1:" + str(x) + ',' + str(y) + ',' + str(w) + ',' +
                      str(h))
                print("Esto no es celda")
        imageRsz = cv2.resize(image, (800, round(newsize * 800)))
        cv2.imwrite("scanned/cells" + str(count) + ".jpg", image)
        cv2.imshow("CELLS", imageRsz)

    # SAVE IMAGE WHEN 's' key is pressed
    if key == ord('a'):
        # cv2.rectangle(stackedImage, ((int(stackedImage.shape[1] / 2) - 230), int(stackedImage.shape[0] / 2) + 50),(1100, 350), (0, 255, 0), cv2.FILLED)
        #cv2.putText(stackedImage, "Se esta procesando la tabla!", (int(stackedImage.shape[1] / 2) - 200, int(stackedImage.shape[0] / 2)),cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 255), 5, cv2.LINE_AA)
        #cv2.imshow("PROCESAMIENTO DE TABLA",stackedImage)
        # Sort all the contours by top to bottom.
        contours, boundingBoxes = sort_contours(contours,
                                                method="top-to-bottom")

        #Creating a list of heights for all detected boxes
        heights = [boundingBoxes[i][3] for i in range(len(boundingBoxes))]

        #Get mean of heights
        mean = np.mean(heights)

        #Create list box to store all boxes in
        box = []
        # Get position (x,y), width and height for every contour and show the contour on image
        for c in contours:
            x, y, w, h = cv2.boundingRect(c)
            if (w < 2000 and h < 700 and w > 10):
                #print("box1:"+x+','+w+','+y+','+h)
                image = cv2.rectangle(image, (x, y), (x + w, y + h),
                                      (0, 255, 0), 2)
                box.append([x, y, w, h])
                cv2.imshow("CELL DETECTED", img[y:y + h, x:x + w])
                cv2.waitKey(0)

        plotting = plt.imshow(image, cmap='gray')
        plt.show()

        #Creating two lists to define row and column in which cell is located
        row = []
        column = []
        j = 0
        print("Ordenando las cajas por filas y columnas....")
        #Sorting the boxes to their respective row and column
        for i in range(len(box)):

            if (i == 0):
                column.append(box[i])
                previous = box[i]

            else:
                if (box[i][1] <= previous[1] + mean / 2):
                    column.append(box[i])
                    previous = box[i]

                    if (i == len(box) - 1):
                        row.append(column)

                else:
                    row.append(column)
                    column = []
                    previous = box[i]
                    column.append(box[i])

        print(column)
        print(row)

        print("Calculando el numero de celdas....")
        #calculating maximum number of cells
        countcol = 0
        for i in range(len(row)):
            countcol = len(row[i])
            if countcol > countcol:
                countcol = countcol

        #Retrieving the center of each column
        center = [
            int(row[i][j][0] + row[i][j][2] / 2) for j in range(len(row[i]))
            if row[0]
        ]

        center = np.array(center)
        center.sort()
        print(center)
        #Regarding the distance to the columns center, the boxes are arranged in respective order

        finalboxes = []
        for i in range(len(row)):
            lis = []
            for k in range(countcol):
                lis.append([])
            for j in range(len(row[i])):
                diff = abs(center - (row[i][j][0] + row[i][j][2] / 4))
                minimum = min(diff)
                indexing = list(diff).index(minimum)
                lis[indexing].append(row[i][j])
            finalboxes.append(lis)

        print("Reconociendo el texto en cada una de las celdas....")
        #from every single image-based cell/box the strings are extracted via pytesseract and stored in a list
        outer = []
        for i in range(len(finalboxes)):
            for j in range(len(finalboxes[i])):
                inner = ''
                if (len(finalboxes[i][j]) == 0):
                    outer.append(' ')
                else:
                    for k in range(len(finalboxes[i][j])):
                        x, y, w, h = finalboxes[i][j][k]
                        finalimg = bitnot[y:y + h, x:x + w]
                        kernel = cv2.getStructuringElement(
                            cv2.MORPH_RECT, (2, 1))
                        border = cv2.copyMakeBorder(finalimg,
                                                    2,
                                                    2,
                                                    2,
                                                    2,
                                                    cv2.BORDER_CONSTANT,
                                                    value=[255, 255])
                        resizing = cv2.resize(border,
                                              None,
                                              fx=2,
                                              fy=2,
                                              interpolation=cv2.INTER_CUBIC)
                        dilation = cv2.dilate(resizing, kernel, iterations=1)
                        erosion = cv2.erode(dilation, kernel, iterations=2)

                        out = pytesseract.image_to_string(erosion, lang='spa')
                        if (len(out) == 0):
                            out = pytesseract.image_to_string(erosion,
                                                              config='--psm 3',
                                                              lang='spa')
                        inner = inner + " " + out
                    outer.append(inner)

        #Creating a dataframe of the generated OCR list
        arr = np.array(outer)
        dataframe = pd.DataFrame(arr.reshape(len(row), countcol))
        print(dataframe)
        data = dataframe.style.set_properties(**{'text-align': 'left'})
        #Converting it into an Excel file
        data.to_excel("scanned/output.xlsx")

    #my_utils.drawRectangle(imgBigContour,biggest,2)
    imageArray = ([
        cv2.resize(img, (fixedwidthImg, fixedheightImg)),
        cv2.resize(img_bin, (fixedwidthImg, fixedheightImg)),
        cv2.resize(vertical, (fixedwidthImg, fixedheightImg)),
        cv2.resize(horizontal, (fixedwidthImg, fixedheightImg))
    ], [
        cv2.resize(mask, (fixedwidthImg, fixedheightImg)),
        cv2.resize(img_vh, (fixedwidthImg, fixedheightImg)),
        cv2.resize(bitxor, (fixedwidthImg, fixedheightImg)),
        cv2.resize(image, (fixedwidthImg, fixedheightImg))
    ])

    #cv2.imwrite("scanned/Original"+str(count)+".jpg",img)
    #cv2.imwrite("scanned/Warp-Prespective"+str(count)+".jpg",imgWarpColored)
    # LABELS FOR DISPLAY
    lables = [["Original", "Binary", "Vertical", "Horizontal"],
              ["Mask", "BITXOR", "BITXOR", "Contours"]]

    stackedImage = my_utils.stackImages(imageArray, 0.75, lables)
    cv2.imshow("PROCESAMIENTO DE TABLA", stackedImage)
예제 #32
0
import cv2
import numpy as np

# create two blank single-channel images (the (200, 400) shape is an assumption inferred from the slices below)
img1 = np.zeros((200, 400), dtype=np.uint8)
img2 = np.zeros((200, 400), dtype=np.uint8)

img1[:, :200] = 255  # right half stays black (0), left half white (255)
img2[100:200, :] = 255  # top stays black (0), bottom white (255)

print(img1)
print(img2)
cv2.imshow('img1', img1)
cv2.imshow('img2', img2)

# -- ② bitwise operations
# white (255) where the 255 regions of both images overlap
bitAnd = cv2.bitwise_and(img1, img2)
# white wherever either image is 255
bitOr = cv2.bitwise_or(img1, img2)
# overlapping regions become 0, non-overlapping regions 255
bitXor = cv2.bitwise_xor(img1, img2)
# inverse of img1
bitNot = cv2.bitwise_not(img1)

cv2.imshow('bitand', bitAnd)
cv2.imshow('bitOr', bitOr)
cv2.imshow('bitXor', bitXor)
cv2.imshow('bitNot', bitNot)

cv2.waitKey(0)
cv2.destroyAllWindows()
# #--③ display the results with matplotlib
# imgs = {'img1':img1, 'img2':img2, 'and':bitAnd,
#           'or':bitOr, 'xor':bitXor, 'not(img1)':bitNot}
# for i, (title, img) in enumerate(imgs.items()):
#     plt.subplot(3,2,i+1)
#     plt.title(title)
#     plt.imshow(img, 'gray')
#     plt.xticks([]); plt.yticks([])
# plt.show()
예제 #33
0
import cv2
import numpy as np

img1 = np.zeros((512,512,3), np.uint8)
cv2.rectangle(img1, (200,0), (300,100), (255,255,255), -1)  # drawn in place; the returned alias was unused
img2 = cv2.imread("lena.jpg")

#bitAnd = cv2.bitwise_and(img2,img1)
#bitAnd = cv2.bitwise_or(img2,img1)
#bitAnd = cv2.bitwise_not(img2,img1)
bitAnd = cv2.bitwise_xor(img2,img1)

cv2.imshow("img1",img1)
cv2.imshow("img2", img2)
cv2.imshow("bitAnd", bitAnd)

cv2.waitKey(0)
cv2.destroyAllWindows()
예제 #34
0
import cv2
import numpy as np

img1 = np.zeros((250, 500, 3), np.uint8)
img2 = np.zeros((250, 500, 3), np.uint8)
img3 = cv2.imread("images/messi5.jpg")

img1 = cv2.rectangle(img1, (200, 0), (300, 100), (255, 255, 255), -1)
img2 = cv2.rectangle(img2, (250, 0), (500, 250), (255, 255, 255), -1)

and_op = cv2.bitwise_and(img1, img2)
or_op = cv2.bitwise_or(img1, img2)
xor_op = cv2.bitwise_xor(img1, img2)
not_op = cv2.bitwise_not(img1)

cv2.imshow("img1", img1)
cv2.imshow("img2", img2)
# cv2.imshow("and_op", and_op)
# cv2.imshow("or_op", or_op)
# cv2.imshow("xor_op", xor_op)
# cv2.imshow("not_op", not_op)
cv2.imshow("not_messi", cv2.bitwise_not(img3))

cv2.waitKey(0)
cv2.destroyAllWindows()
예제 #35
0

# Bitwise operators
blank=np.zeros((500,500),dtype='uint8')
rect=cv2.rectangle(blank.copy(),(100,100),(400,400),255,-1)
# cv2.imshow('Rectangle',rect)
circ=cv2.circle(blank.copy(),(250,250),200,255,-1)
# cv2.imshow('Circle',circ)
# AND (intersection)
btwand=cv2.bitwise_and(rect,circ)
# cv2.imshow('Bitwise AND',btwand)
# OR (Union)
btwor=cv2.bitwise_or(rect,circ)
# cv2.imshow('Bitwise OR',btwor)
# XOR (symmetric difference: non-overlapping regions)
btwxor=cv2.bitwise_xor(rect,circ)
# cv2.imshow('Bitwise XOR',btwxor)
# NOT (inversion)
btwnot=cv2.bitwise_not(rect)
# cv2.imshow('Bitwise NOT',btwnot)


# Masking
blank=np.zeros(df.shape[:2],dtype='uint8')
circ=cv2.circle(blank.copy(),(df.shape[1]//2,df.shape[0]//2),100,255,-1)
rect=cv2.rectangle(blank.copy(),(100,100),(df.shape[1]//2,df.shape[0]//2),255,-1)
mask=cv2.bitwise_xor(rect,circ)
# cv2.imshow('Mask',mask)
masked=cv2.bitwise_and(df,df,mask=mask)
# cv2.imshow('Masked',masked)
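The mask= keyword used in the last line above trips people up: it is not a third operand, it restricts where the operation is applied, and everything outside the mask comes out black. A small self-contained sketch (shapes invented for illustration):

import cv2
import numpy as np

img = np.random.randint(0, 256, (6, 6, 3), dtype=np.uint8)
mask = np.zeros((6, 6), dtype=np.uint8)
mask[2:4, 2:4] = 255
# src & src == src, so AND-ing img with itself under a mask simply keeps
# the pixels inside the mask and zeros the rest
masked = cv2.bitwise_and(img, img, mask=mask)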
예제 #36
0
success, frame = cap.read()
last = simp(frame)
caught=False
dim=(640,480)
codec = cv2.VideoWriter_fourcc('D','I','V','X')  # modern spelling of the legacy cv2.cv.CV_FOURCC
mdetect = cv2.VideoWriter('output.avi',codec, 24.0, dim)

while True:
    success, frame = cap.read()
    if success != True:
        break

    scaled = simp(frame)
    #frame = cv2.resize(frame, (640,480), interpolation=cv2.INTER_NEAREST)
    diff = cv2.bitwise_xor(scaled, last)

    _, thresh = cv2.threshold(diff, 127, 255, cv2.THRESH_BINARY)
    mag=np.count_nonzero(thresh)
    
    cv2.imshow("diff", cv2.resize(thresh, (640,480)))
    if mag > 30: 
        if not caught:
            print "changed", mag
            caught=True
            cv2.imshow("changed", frame)
        mdetect.write(frame)
    else:
        #print "reset"
        caught=False
    #last=scaled.copy()
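One caveat about using bitwise_xor as a frame differencer, as this example does: XOR measures bit disagreement, not intensity change, so a one-level brightness step across a bit boundary can XOR to 255. cv2.absdiff is the usual alternative; a tiny comparison sketch:

import cv2
import numpy as np

prev = np.full((4, 4), 127, dtype=np.uint8)
curr = np.full((4, 4), 128, dtype=np.uint8)
# 127 ^ 128 = 0b01111111 ^ 0b10000000 = 255, although the change is tiny
print(cv2.bitwise_xor(prev, curr)[0, 0])  # 255
print(cv2.absdiff(prev, curr)[0, 0])      # 1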
예제 #37
0
import cv2

img1 = cv2.imread('1.png')
img2 = cv2.imread('2.png')
cv2.imshow('1', img1)
cv2.imshow('2', img2)

# mask is an optional 8-bit single-channel array; pixels are processed only where mask != 0

cv2.imshow('and', cv2.bitwise_and(img1, img2, mask=None))
cv2.imshow('or', cv2.bitwise_or(img1, img2, mask=None))
cv2.imshow('xor', cv2.bitwise_xor(img1, img2, mask=None))
cv2.imshow('not', cv2.bitwise_not(img1))

k = cv2.waitKey(0) & 0xFF
if (k == 27):
    cv2.destroyAllWindows()
예제 #38
0
import cv2
import numpy as np

image1 = cv2.imread('flag.png')
image2 = cv2.imread('lemur.png')
dest_xor = cv2.bitwise_xor(image1, image2, mask=None)
cv2.imshow('Bitwise XOR', dest_xor)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
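XOR is its own inverse (a ^ k ^ k == a), so running the operation above a second time with the same second image restores the first one. A minimal round-trip sketch on synthetic data:

import cv2
import numpy as np

a = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
key = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
encrypted = cv2.bitwise_xor(a, key)
restored = cv2.bitwise_xor(encrypted, key)
assert np.array_equal(restored, a)  # XOR-ing twice with the same key is the identity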
예제 #39
0
    def compare(self, pattern, images, settings):
        frame_height, frame_width = images[COLOR_IMAGE].shape[:2]
        frame_mask = np.zeros((frame_height, frame_width), np.uint8)
        pattern_height, pattern_width = pattern.image.shape[0:2]
        selected_split = settings.selected_split()
        points = selected_split.split(images) if selected_split else [
            (0, frame_height, 0, frame_width)
        ]
        for y1, y2, x1, x2 in points:
            pcb_gray = np.array(images[GRAY_IMAGE][y1:y2, x1:x2])
            pcb_bin = np.array(images[BIN_IMAGE][y1:y2, x1:x2])
            pcb_height, pcb_width = pcb_gray.shape[0:2]

            pcb_keypoints, pcb_descriptors = self.extract_key_points_and_descriptors(
                pcb_gray)
            matches = self.extract_matches(pattern.descriptors,
                                           pcb_descriptors)
            #img3 = cv2.drawMatches(pattern.image,pattern.keypoints,pcb_bin,pcb_keypoints,matches, flags=2, outImg = None)
            #plt.imshow(img3), plt.show()
            pattern_points = np.float32([
                pattern.keypoints[m.queryIdx].pt for m in matches
            ]).reshape(-1, 2)
            pcb_points = np.float32([
                pcb_keypoints[m.trainIdx].pt for m in matches
            ]).reshape(-1, 2)

            M_pcb_translate = np.float32([[1, 0, x1], [0, 1, y1]])
            M_pcb_to_pattern = self.ransac(pcb_points,
                                           pattern_points,
                                           iters=1000,
                                           maxerror=2)
            M_pattern_to_pcb = cv2.invertAffineTransform(M_pcb_to_pattern)
            M_xor_to_frame = self.add_affine_transform(M_pattern_to_pcb,
                                                       M_pcb_translate)

            transformed_pcb = cv2.warpAffine(pcb_bin, M_pcb_to_pattern,
                                             (pattern_width, pattern_height))
            _, transformed_pcb = cv2.threshold(transformed_pcb, 127, 255,
                                               cv2.THRESH_BINARY)
            #plt.subplot(1,3,1)
            #plt.title("Orig"), plt.imshow(pcb_bin, 'gray', interpolation='none')
            #plt.subplot(1,3,2)
            #plt.title("transformed_pcb"), plt.imshow(transformed_pcb, 'gray', interpolation='none')
            #plt.subplot(1,3,3)
            #plt.title("pattern"), plt.imshow(pattern.image, 'gray', interpolation='none'), plt.show()

            xor_img = cv2.bitwise_xor(transformed_pcb, pattern.image)
            frame_mask = cv2.bitwise_or(
                cv2.warpAffine(xor_img, M_xor_to_frame,
                               (frame_width, frame_height)), frame_mask)

        _, frame_mask = cv2.threshold(frame_mask, 127, 255, cv2.THRESH_BINARY)

        #plt.title("After xor"), plt.imshow(frame_mask, 'gray', interpolation='none'), plt.show()

        frame_mask = self.morphology_opening(frame_mask, (20, 20))
        #plt.title("After morphology"), plt.imshow(frame_mask, 'gray', interpolation='none'), plt.show()

        images[OUT_IMAGE] = BIN_IMAGE
        images[BIN_IMAGE] = frame_mask
        return images
예제 #40
0
import numpy as np
import cv2 as cv

blank = np.zeros((400, 400), dtype='uint8')

rectangle = cv.rectangle(blank.copy(), (30, 30), (370, 370), 255, -1)
circle = cv.circle(blank.copy(), (200, 200), 200, 255, -1)

bitAND = cv.bitwise_and(rectangle, circle)
bitXOR = cv.bitwise_xor(rectangle, circle)
bitNOT = cv.bitwise_not(rectangle)
# OR available as cv.bitwise_or.

cv.imshow("bitAND", bitAND)
cv.imshow("bitXOR", bitXOR)
cv.imshow("bitNOT", bitNOT)
cv.waitKey(0)
예제 #41
0
def get_current_value(path):
    t1 = cv2.getTickCount()
    img = cv2.imread(path)
    '''
        This function should be run using a test image in order to calibrate the range available to the dial as well as the
        units.  It works by first finding the center point and radius of the gauge.  Then it draws lines at hard coded intervals
        (separation) in degrees.  It then prompts the user to enter position in degrees of the lowest possible value of the gauge,
        as well as the starting value (which is probably zero in most cases but it won't assume that).  It will then ask for the
        position in degrees of the largest possible value of the gauge. Finally, it will ask for the units.  This assumes that
        the gauge is linear (as most probably are).
        It will return the min value with angle in degrees (as a tuple), the max value with angle in degrees (as a tuple),
        and the units (as a string).
    '''
    #img = cv2.imread('gauge-%s.%s' %(gauge_number, file_type))
    #cv2.imshow("Image", img)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    print("img shape = %s" % (img.shape, ))
    height, width = img.shape[:2]
    img_output = img.copy()

    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  #convert to gray scale
    #cv2.imwrite(os.path.dirname(path) + '/' + os.path.splitext(os.path.basename(path))[0] + '-gray.jpg', img_gray)

    # Set threshold
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    bright = np.average(v)
    threshold_bin = 0.24 * bright + 68
    img_black = np.zeros(img.shape[0:2], dtype=np.uint8)
    img_white = np.full(img.shape[0:2], 255, dtype=np.uint8)
    th, img_bin = cv2.threshold(img_gray, threshold_bin, 255,
                                cv2.THRESH_BINARY_INV)
    cv2.imwrite(
        os.path.dirname(path) + '/' +
        os.path.splitext(os.path.basename(path))[0] + '-thresholded.jpg',
        img_bin)

    #detect circles
    #restricting the search from 35-48% of the possible radius gives fairly good results across different samples.  Remember that
    #these are pixel values which correspond to the possible radius search range.
    # cv2.HoughCircles(img, method, dp, minDist[, circles[, param1[, param2[, minRadius[, maxRadius]]]]]) → circles

    circles = cv2.HoughCircles(img_gray, cv2.HOUGH_GRADIENT, 1,
                               int(width * 0.3), np.array([]), 100, 50,
                               int(width * 0.2), int(width * 0.5))
    #circles = cv2.HoughCircles(img_gray, cv2.HOUGH_GRADIENT, 1, int(height*0.1), np.array([]), 100, 50, int(height*0.35), int(height*0.45))
    ###circles = cv2.HoughCircles(img_gray, cv2.HOUGH_GRADIENT, 1, 20, np.array([]), 100, 50, int(height * 0.35), int(height * 0.48))
    # average found circles, found it to be more accurate than trying to tune HoughCircles parameters to get just the right one
    print("circles: %s" % (circles, ))

    # ensure at least some circles were found (HoughCircles returns None when it finds nothing)
    if circles is not None:
        a, b, c = circles.shape
        # convert the (x, y) coordinates and radius of the circles to integers
        circles = np.round(circles[0, :]).astype("int")

        # loop over the (x, y) coordinates and radius of the circles
        for (x, y, r) in circles:
            # draw the circle in the output image, then draw a rectangle
            # corresponding to the center of the circle
            cv2.circle(img_output, (x, y), r, (0, 0, 255), 1)
            cv2.rectangle(img_output, (x - 1, y - 1), (x + 1, y + 1),
                          (0, 0, 255), cv2.FILLED)
        #x,y,r = avg_circles(circles, b)
        # b=1 => take the first circle found
        x = circles[0][0]
        y = circles[0][1]
        r = circles[0][2]
        # draw circle with center
        cv2.circle(img_output, (x, y), r, (0, 0, 255), 4)  # draw circle
        cv2.circle(img_output, (x, y), 2, (0, 0, 255),
                   cv2.FILLED)  # draw center of circle

    # refine the gauge center coordinates by searching for a small-radius circle near the center already found
    x_min = x - 150
    y_min = y - 150
    img_crop = img_gray[y - 150:y + 150, x - 150:x + 150]
    #cv2.imwrite(os.path.dirname(path) + '/' + os.path.splitext(os.path.basename(path))[0] + '-crop.jpg', img_crop)
    small_circles = cv2.HoughCircles(img_crop, cv2.HOUGH_GRADIENT, 1, 100,
                                     np.array([]), 50, 20, 20, 150)
    # ensure at least some small circles were found
    if small_circles is not None:
        # convert the (x, y) coordinates and radius of the circles to integers
        small_circles = np.round(small_circles[0, :]).astype("int")

        # loop over the (x, y) coordinates and radius of the circles
        for (x0, y0, r0) in small_circles:
            # draw the circle in the output image, then draw a rectangle
            # corresponding to the center of the circle
            cv2.circle(img_output, (x_min + x0, y_min + y0), r0, (0, 255, 0),
                       1)
            cv2.rectangle(img_output, (x_min + x0 - 1, y_min + y0 - 1),
                          (x_min + x0 + 1, y_min + y0 + 1), (0, 255, 0),
                          cv2.FILLED)
        # b=1 => take the first circle found
        x0 = small_circles[0][0]
        y0 = small_circles[0][1]
        r0 = small_circles[0][2]

        # draw small circle with center
        x0 = x_min + x0
        y0 = y_min + y0
        cv2.circle(img_output, (x0, y0), r0, (0, 255, 0), 4)  # draw circle
        cv2.circle(img_output, (x0, y0), 3, (0, 255, 0),
                   cv2.FILLED)  # draw center of circle

    #cv2.imwrite(os.path.dirname(path) + '/' + os.path.splitext(os.path.basename(path))[0] + '-circles.jpg', img_output)

    ##################################
    ### NEEDLE DETECTION
    ##################################
    ArrowL = 0
    ArrowAngle1 = 0
    Arrow_x1 = 0
    Arrow_y1 = 0
    Arrow_x2 = 0
    Arrow_y2 = 0
    img_Arrow = img.copy()  # color image
    angle_step = 0.5  # step of 0.5 degrees
    sector_r1 = []
    sector_r2 = []
    for alfa in np.arange(0, 360, angle_step):
        temp = np.array(  # build the sector polygon
            [[
                x0 + 3 * np.cos(alfa * np.pi / 180) -
                r * np.sin(alfa * np.pi / 180), y0 +
                3 * np.sin(alfa * np.pi / 180) + r * np.cos(alfa * np.pi / 180)
            ],
             [
                 x0 - 3 * np.cos(alfa * np.pi / 180) -
                 r * np.sin(alfa * np.pi / 180),
                 y0 - 3 * np.sin(alfa * np.pi / 180) +
                 r * np.cos(alfa * np.pi / 180)
             ],
             [
                 x0 - 3 * np.cos(alfa * np.pi / 180),
                 y0 - 3 * np.sin(alfa * np.pi / 180)
             ],
             [
                 x0 + 3 * np.cos(alfa * np.pi / 180),
                 y0 + 3 * np.sin(alfa * np.pi / 180)
             ]]
            # [[x0 - r * np.sin((alfa + angle_step) * np.pi / 180),y0 + r*np.cos((alfa + angle_step) * np.pi / 180)],
            #  [x0 - r * np.sin(alfa*np.pi/180),y0 + r*np.cos(alfa*np.pi/180)],
            #  [x0, y0]]
        ).reshape((-1, 1, 2)).astype(np.int32)
        img_temp = cv2.drawContours(
            img_black.copy(), [temp], 0, (255, 255, 255),
            thickness=cv2.FILLED)  # black image with only 1 white sector
        img_intersection = cv2.bitwise_and(img_bin, img_temp.astype(np.uint8))
        lines = cv2.HoughLinesP(img_intersection,
                                1,
                                np.pi / 180,
                                127,
                                np.array([]),
                                minLineLength=20,
                                maxLineGap=1)
        if (lines is not None and
                len(lines) > 0):  # if at least one line was found in the intersection
            for line in np.array(lines):
                x1, y1, x2, y2 = line[0]
                dist01 = dist_2_pts(
                    x0, y0, x1, y1)  # dist from center of circle to point 1
                dist02 = dist_2_pts(
                    x0, y0, x2, y2)  # dist from center of circle to point 2
                dist12 = dist_2_pts(x1, y1, x2, y2)  # line length
                # make dist01 the smaller of the two (closest to the center); it makes the math easier
                if (dist01 > dist02):
                    dist01, dist02 = dist02, dist01
                    x1, y1, x2, y2 = x2, y2, x1, y1
                # check if line is within an acceptable range
                if (dist02 - dist01 >= 0.90 * dist12):
                    if (dist12 > ArrowL):
                        ArrowL = dist12
                        ArrowAngle1 = alfa
                        Arrow_x1 = x0
                        Arrow_y1 = y0
                        Arrow_x2 = x2
                        Arrow_y2 = y2
    ArrowL = dist_2_pts(x0, y0, Arrow_x2, Arrow_y2)
    #cv2.line(img_Arrow, (Arrow_x1, Arrow_y1), (Arrow_x2, Arrow_y2), (0, 0, 255), 2)
    #take the arc tan of y/x to find the angle
    res = np.arctan(
        np.divide(np.abs(float(Arrow_x2 - x0)), np.abs(float(Arrow_y2 - y0))))

    res = np.rad2deg(res)
    if (Arrow_x2 - x0) < 0 and (Arrow_y2 - y0) > 0:  #in quadrant I
        angle_1 = res
    if (Arrow_x2 - x0) < 0 and (Arrow_y2 - y0) < 0:  #in quadrant II
        angle_1 = 180 - res
    if (Arrow_x2 - x0) > 0 and (Arrow_y2 - y0) < 0:  #in quadrant III
        angle_1 = 180 + res
    if (Arrow_x2 - x0) > 0 and (Arrow_y2 - y0) > 0:  #in quadrant IV
        angle_1 = 360 - res
    cv2.line(img_Arrow, (x0, y0),
             (x0 - int(ArrowL * np.sin(angle_1 * 3.14 / 180)),
              y0 + int(ArrowL * np.cos(angle_1 * 3.14 / 180))), (0, 0, 255), 1)

    # refine the angle angle_1
    angle_step_presize = 0.1
    for i in np.arange(angle_1 - 5.0, angle_1 + 5.0, angle_step_presize):
        # sample an arc at radius Arrow_r1*ArrowL (~0.6*ArrowL)
        r1 = img_bin[int(y0 + Arrow_r1 * ArrowL * np.cos(i * np.pi / 180)),
                     int(x0 - Arrow_r1 * ArrowL * np.sin(i * np.pi / 180))]
        sector_r1.append(r1)
        img_Arrow[int(y0 + Arrow_r1 * ArrowL * np.cos(i * np.pi / 180)),
                  int(x0 - Arrow_r1 * ArrowL * np.sin(i * np.pi / 180))] = [
                      0, 0, 255
                  ]
        # sample an arc at radius Arrow_r2*ArrowL (~0.85*ArrowL)
        r2 = img_bin[int(y0 + Arrow_r2 * ArrowL * np.cos(i * np.pi / 180)),
                     int(x0 - Arrow_r2 * ArrowL * np.sin(i * np.pi / 180))]
        sector_r2.append(r2)
        img_Arrow[int(y0 + Arrow_r2 * ArrowL * np.cos(i * np.pi / 180)),
                  int(x0 - Arrow_r2 * ArrowL * np.sin(i * np.pi / 180))] = [
                      0, 0, 255
                  ]
    #plt.plot(sector_r1)
    #plt.plot(sector_r2)

    # scan sector_r1 / sector_r2 for the longest run of white (255) pixels
    maxL_r1, maxL_r2 = (0, 0)
    maxstartL_r1, maxstartL_r2 = (0, 0)
    maxstopL_r1, maxstopL_r2 = (0, 0)
    curL_r1, curL_r2 = (0, 0)
    curstartL_r1, curstartL_r2 = (0, 0)
    curstopL_r1, curstopL_r2 = (0, 0)
    for k in range(1, len(sector_r1), 1):  # len(sector_r1) = len(sector_r2)
        if sector_r1[k] == 255 and sector_r1[k - 1] == 0:  # start of a white run
            curstartL_r1 = k
            curL_r1 = 1
        if sector_r1[k] == 255 and sector_r1[k - 1] == 255:  # run continues
            curL_r1 = curL_r1 + 1
        if sector_r1[k] == 0 and sector_r1[k - 1] == 255:  # end of the run
            curstopL_r1 = k - 1
            if (curL_r1 > maxL_r1):
                maxL_r1 = curL_r1
                maxstartL_r1 = curstartL_r1
                maxstopL_r1 = curstopL_r1
        if sector_r2[k] == 255 and sector_r2[k - 1] == 0:  # start of a white run
            curstartL_r2 = k
            curL_r2 = 1
        if sector_r2[k] == 255 and sector_r2[k - 1] == 255:  # run continues
            curL_r2 = curL_r2 + 1
        if sector_r2[k] == 0 and sector_r2[k - 1] == 255:  # end of the run
            curstopL_r2 = k - 1
            if (curL_r2 > maxL_r2):
                maxL_r2 = curL_r2
                maxstartL_r2 = curstartL_r2
                maxstopL_r2 = curstopL_r2

    angle_r1 = angle_1 - 5.0 + (maxstartL_r1 +
                                maxstopL_r1) * 0.5 * angle_step_presize
    cv2.line(img_Arrow, (x0, y0),
             (x0 - int(ArrowL * np.sin(angle_r1 * 3.14 / 180)),
              y0 + int(ArrowL * np.cos(angle_r1 * 3.14 / 180))), (0, 255, 0),
             1)

    angle_r2 = angle_1 - 5.0 + (maxstartL_r2 +
                                maxstopL_r2) * 0.5 * angle_step_presize
    cv2.line(img_Arrow, (x0, y0),
             (x0 - int(ArrowL * np.sin(angle_r2 * 3.14 / 180)),
              y0 + int(ArrowL * np.cos(angle_r2 * 3.14 / 180))), (255, 0, 0),
             1)

    if (np.abs(angle_r1 - angle_r2) >= 0.5):
        # angle_r1 is true
        if (np.abs(angle_r1 - angle_1) < 0.5):
            final_angle = 0.5 * (angle_r1 + angle_1)
        else:
            final_angle = angle_r1
    else:
        # angle_r2 is true
        if (np.abs(angle_r2 - angle_1) < 0.5):
            final_angle = 0.5 * (angle_r2 + angle_1)
        else:
            final_angle = angle_r2

    cv2.line(img_Arrow, (x0, y0),
             (x0 - int(r * np.sin(final_angle * 3.14 / 180)),
              y0 + int(r * np.cos(final_angle * 3.14 / 180))), (255, 0, 255),
             1)
    cv2.imwrite(
        os.path.dirname(path) + '/' +
        os.path.splitext(os.path.basename(path))[0] + '-Arrow.jpg', img_Arrow)

    ##################################
    ### FINDING THE SCALE MINIMUM AND MAXIMUM
    ##################################
    # form sectors and triangles
    img_sectors = img_bin.copy()
    angle_step = 0.3
    sectors = []
    for alfa in np.arange(0, 360, angle_step):
        temp1 = np.array(
            [[
                x0 - minCtrRadius * ArrowL * np.sin(alfa * np.pi / 180),
                y0 + minCtrRadius * ArrowL * np.cos(alfa * np.pi / 180)
            ],
             [
                 x0 - minCtrRadius * ArrowL * np.sin(
                     (alfa + angle_step) * np.pi / 180),
                 y0 + minCtrRadius * ArrowL * np.cos(
                     (alfa + angle_step) * np.pi / 180)
             ],
             [
                 x0 - maxCtrRadius * ArrowL * np.sin(
                     (alfa + angle_step) * np.pi / 180),
                 y0 + maxCtrRadius * ArrowL * np.cos(
                     (alfa + angle_step) * np.pi / 180)
             ],
             [
                 x0 - maxCtrRadius * ArrowL * np.sin(alfa * np.pi / 180),
                 y0 + maxCtrRadius * ArrowL * np.cos(alfa * np.pi / 180)
             ]]).reshape((-1, 1, 2)).astype(np.int32)
        # temp2 = np.array(
        #                 [[x0+3*np.cos(alfa*np.pi/180)-maxCtrRadius*r*np.sin(alfa*np.pi/180),y0+3*np.sin(alfa*np.pi/180)+maxCtrRadius*r*np.cos(alfa*np.pi/180)],
        #                 [x0-3*np.cos(alfa*np.pi/180)-maxCtrRadius*r*np.sin(alfa*np.pi/180),y0-3*np.sin(alfa*np.pi/180)+maxCtrRadius*r*np.cos(alfa*np.pi/180)],
        #                 [x0-3*np.cos(alfa*np.pi/180),y0-3*np.sin(alfa*np.pi/180)],
        #                 [x0+3*np.cos(alfa*np.pi/180),y0+3*np.sin(alfa*np.pi/180)]]
        # [[x0, y0],
        # [x0-maxCtrRadius*r*np.sin((alfa+angle_step)*np.pi / 180),y0+maxCtrRadius*r*np.cos((alfa+angle_step)*np.pi / 180)],
        # [x0-maxCtrRadius*r*np.sin(alfa*np.pi/180),y0+maxCtrRadius*r*np.cos(alfa*np.pi/180)]]
        # ).reshape((-1, 1, 2)).astype(np.int32)
        sectors.append(temp1)
        # triangles.append(temp2)
        cv2.drawContours(img_sectors, [temp1], 0, (255, 0, 0), 1)
        # cv2.drawContours(img_triangles, [temp2], 0, (255, 0, 0), 1)
    cv2.imwrite(
        os.path.dirname(path) + '/' +
        os.path.splitext(os.path.basename(path))[0] + '-sectors.jpg',
        img_sectors)
    # cv2.imwrite(os.path.dirname(path) + '/' + os.path.splitext(os.path.basename(path))[0] + '-triangles.jpg', img_triangles)

    # find lines in the annulus (torus) between radii minCtrRadius and maxCtrRadius
    # build the annulus mask
    img_circle1 = np.zeros(img.shape[0:2], dtype=np.uint8)
    img_circle2 = np.zeros(img.shape[0:2], dtype=np.uint8)

    cv2.circle(img_circle1, (x0, y0), int(ArrowL * maxCtrRadius),
               (255, 255, 255), cv2.FILLED)  # outer white circle
    cv2.circle(img_circle2, (x0, y0), int(ArrowL * minCtrRadius),
               (255, 255, 255), cv2.FILLED)  # inner white circle
    img_tor = cv2.bitwise_xor(img_circle1, img_circle2)
    img_tor = cv2.bitwise_and(img_bin, img_tor)
    #cv2.imwrite(os.path.dirname(path) + '/' + os.path.splitext(os.path.basename(path))[0] + '-tor.jpg', img_tor)

    ###########################
    ## find all scale tick lines in the torus and compute their average length
    ###########################
    minLineLength = 20
    maxLineGap = 1
    img_Lines = img.copy()
    lines = cv2.HoughLinesP(img_tor, 3, np.pi / 180, 127, np.array([]),
                            minLineLength, maxLineGap)
    if (lines is None or len(lines) == 0):
        return
    for line in np.array(lines):
        x1, y1, x2, y2 = line[0]
        cv2.line(img_Lines, (x1, y1), (x2, y2), (0, 0, 255), 1)
    #cv2.imwrite(os.path.dirname(path) + '/' + os.path.splitext(os.path.basename(path))[0] + '-lines.jpg', img_Lines)
    scale_lines = []
    scale_lengths = []
    # from all detected lines, keep those that belong to the scale ticks
    for i in range(0, len(lines)):
        for x1, y1, x2, y2 in lines[i]:
            dist01 = dist_2_pts(x0, y0, x1,
                                y1)  # dist from center of circle to point 1
            dist02 = dist_2_pts(x0, y0, x2,
                                y2)  # dist from center of circle to point 2
            dist12 = dist_2_pts(x1, y1, x2, y2)  # line length
            # make dist01 the smaller of the two (closest to the center); it makes the math easier
            if (dist01 > dist02):
                dist01, dist02 = dist02, dist01
            # check if line is within an acceptable range
            if (dist02 - dist01 >= 0.90 * dist12):
                scale_lines.append([x1, y1, x2, y2])
                scale_lengths.append(dist12)
    img_ScaleLines = img.copy()
    for i in range(0, len(scale_lines)):
        x1, y1, x2, y2 = scale_lines[i]
        cv2.line(img_ScaleLines, (x1, y1), (x2, y2), (0, 0, 255), 2)
    #cv2.imwrite(os.path.dirname(path) + '/' + os.path.splitext(os.path.basename(path))[0] + '-ScaleLines.jpg', img_ScaleLines)
    ScaleLineLengthAv = np.average(scale_lengths)

    # intersect each torus sector (and triangle) with the binarized source image
    scale_min_angle = 0
    scale_max_angle = 0
    img_All_intersections = np.zeros(img.shape[0:2], dtype=np.uint8)
    ListNumberOfSectorWP = []
    sector_min_number_of_wp = 80  # lower bound on the white-pixel count in a sector
    sector_max_number_of_wp = 0  # maximum white-pixel count seen in any sector
    for j, s in enumerate(sectors):
        if j > (angle_blank / angle_step) and j < (
            (360 - angle_blank) / angle_step):
            # search for the scale minimum and maximum
            img_temp = cv2.drawContours(
                img_black.copy(),
                sectors,
                j, (255, 255, 255),
                thickness=cv2.FILLED)  # black image with only 1 white sector
            #cv2.imwrite(os.path.dirname(path) + '/' + os.path.splitext(os.path.basename(path))[0] + '-temp.jpg',img_temp)
            img_intersection = cv2.bitwise_and(
                img_bin, img_temp.astype(np.uint8)
            )  # find intersection between binary image and black image with only 1 white sector
            #cv2.imwrite(os.path.dirname(path) + '/' + os.path.splitext(os.path.basename(path))[0] + '-intersection.jpg', img_intersection)

            lines = cv2.HoughLinesP(img_intersection, 3, np.pi / 180, 127,
                                    np.array([]), minLineLength, maxLineGap)
            if (lines is not None and len(lines) >
                    0):  # if at least one line was found in the intersection
                #line_lengths = []
                maxL = 0
                for line in np.array(lines):
                    x1, y1, x2, y2 = line[0]
                    #line_lengths.append(dist_2_pts(x1, y1, x2, y2))
                    tempL = dist_2_pts(x1, y1, x2, y2)
                    if tempL > maxL:
                        maxL = tempL
                #LineLengthAv = np.average(line_lengths)
                if maxL > ScaleLineLengthAv:  # if this sector contains scale lines
                    number_wp = np.sum(img_intersection == 255)
                    if number_wp > sector_min_number_of_wp:  # check number of white pixels
                        img_All_intersections = cv2.bitwise_or(
                            img_All_intersections, img_intersection)
                        ListNumberOfSectorWP.append(number_wp)
                        if number_wp > sector_max_number_of_wp:
                            sector_max_number_of_wp = number_wp
                else:
                    ListNumberOfSectorWP.append(0)
            else:
                ListNumberOfSectorWP.append(0)

        else:
            ListNumberOfSectorWP.append(0)

    #plt.plot(range(0, len(ListNumberOfSectorWP)), ListNumberOfSectorWP)
    #plt.plot(range(0, len(ListNumberOfSectorWP)), [0.5 * sector_max_number_of_wp] * len(ListNumberOfSectorWP))

    # scale min and max = the first and last peak in ListNumberOfSectorWP with amplitude > 0.5*sector_max_number_of_wp
    for i in range(2, len(ListNumberOfSectorWP) - 3, 1):
        if  ListNumberOfSectorWP[i] >= ListNumberOfSectorWP[i-1] and ListNumberOfSectorWP[i] >= ListNumberOfSectorWP[i-2] and ListNumberOfSectorWP[i] >= ListNumberOfSectorWP[i+1] \
                and ListNumberOfSectorWP[i] > ListNumberOfSectorWP[i+2] and ListNumberOfSectorWP[i] > 0.5*sector_max_number_of_wp:
            if ListNumberOfSectorWP[i] - ListNumberOfSectorWP[i - 1] < sector_min_number_of_wp \
                    and ListNumberOfSectorWP[i] - ListNumberOfSectorWP[i + 1] < sector_min_number_of_wp:  # flat peak on both sides
                scale_min_angle = i * angle_step + (angle_step / 2)
            elif ListNumberOfSectorWP[i] - ListNumberOfSectorWP[i - 1] < sector_min_number_of_wp \
                    and ListNumberOfSectorWP[i] - ListNumberOfSectorWP[i + 1] > sector_min_number_of_wp:  # flat on the left
                scale_min_angle = i * angle_step
            elif ListNumberOfSectorWP[i] - ListNumberOfSectorWP[i - 1] > sector_min_number_of_wp \
                    and ListNumberOfSectorWP[i] - ListNumberOfSectorWP[i + 1] < sector_min_number_of_wp:  # flat on the right
                scale_min_angle = i * angle_step + angle_step
            else:  # sharp peak
                scale_min_angle = i * angle_step + (angle_step / 2)
            break

    for i in range(len(ListNumberOfSectorWP) - 3, 2, -1):
        if ListNumberOfSectorWP[i] >= ListNumberOfSectorWP[i - 1] and ListNumberOfSectorWP[i] >= ListNumberOfSectorWP[i - 2] and ListNumberOfSectorWP[i] >= ListNumberOfSectorWP[i + 1] \
                and ListNumberOfSectorWP[i] >= ListNumberOfSectorWP[i + 2] and ListNumberOfSectorWP[i] > 0.5 * sector_max_number_of_wp:
            if ListNumberOfSectorWP[i] - ListNumberOfSectorWP[i - 1] < sector_min_number_of_wp \
                    and ListNumberOfSectorWP[i] - ListNumberOfSectorWP[i + 1] < sector_min_number_of_wp:  # flat peak on both sides
                scale_max_angle = i * angle_step + (angle_step / 2)
            elif ListNumberOfSectorWP[i] - ListNumberOfSectorWP[i - 1] < sector_min_number_of_wp \
                    and ListNumberOfSectorWP[i] - ListNumberOfSectorWP[i + 1] > sector_min_number_of_wp:  # flat on the left
                scale_max_angle = i * angle_step
            elif ListNumberOfSectorWP[i] - ListNumberOfSectorWP[i - 1] > sector_min_number_of_wp \
                    and ListNumberOfSectorWP[i] - ListNumberOfSectorWP[i + 1] < sector_min_number_of_wp:  # flat on the right
                scale_max_angle = i * angle_step + angle_step
            else:  # sharp peak
                scale_max_angle = i * angle_step + angle_step
            break

    cv2.imwrite(
        os.path.dirname(path) + '/' +
        os.path.splitext(os.path.basename(path))[0] + '-AllIntersections.jpg',
        img_All_intersections)

    t2 = cv2.getTickCount()
    time = (t2 - t1) / cv2.getTickFrequency()
    print('Algorithm runtime: ' + str(time) + ' seconds')
    return x0, y0, r, scale_min_angle, scale_max_angle, final_angle, time
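The function above calls a dist_2_pts helper (and reads module-level constants Arrow_r1, Arrow_r2, minCtrRadius, maxCtrRadius and angle_blank) that are not shown in this snippet. A minimal sketch of the Euclidean-distance helper it presumably uses:

import numpy as np

def dist_2_pts(x1, y1, x2, y2):
    # Euclidean distance between (x1, y1) and (x2, y2)
    return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)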
예제 #42
0
                cv2.MORPH_RECT, (1, verticalsize))
            vertical = cv2.erode(vertical, verticalStructure)
            vertical = cv2.dilate(vertical, verticalStructure)

            # Then, add the horizontal and vertical edge image using bitwise_or
            grid = cv2.bitwise_or(horizontal, vertical)
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
            grid = cv2.dilate(grid, kernel)

            # The grid we obtain might be a bit thicker than the original grid
            # so remove the unwanted thickness using bitwise_and
            grid = cv2.bitwise_and(grid, sudoku)

            # Finally, subtract the grid from our sudoku image and obtain
            # an image with just numbers
            num = cv2.bitwise_xor(sud_c, grid)

            # Obtain the corners of our top-down sudoku with respect to
            # the order of the coordinates obtained during the perspective transform
            if (full_coords[0][0])**2 + (full_coords[0][1])**2 < (
                    full_coords[1][0])**2 + (full_coords[1][1])**2:
                sud_coords = np.array([[0, 0], [0, num.shape[0]],
                                       [num.shape[1], num.shape[0]],
                                       [num.shape[1], 0]])
            else:
                sud_coords = np.array([[num.shape[1], 0], [0, 0],
                                       [0, num.shape[0]],
                                       [num.shape[1], num.shape[0]]])
            """
            num_2 = cv2.cvtColor(num, cv2.COLOR_GRAY2BGR)
            """
예제 #43
0
def mask_image(img, square, opacity=0.80):
    overlay = img.copy()
    cv2.fillPoly(overlay, [square], (255, 255, 255))
    inverse_overlay = cv2.bitwise_not(overlay)
    img2 = cv2.bitwise_xor(inverse_overlay, img)
    cv2.addWeighted(img2, opacity, img, 1 - opacity, 0, img)
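The net effect of mask_image is to wash out everything outside square toward white while leaving the square region untouched (inside the square, img is blended with itself). A hypothetical usage sketch, with the file name and corner points invented:

import cv2
import numpy as np

img = cv2.imread('photo.jpg')  # hypothetical input image
square = np.array([[50, 50], [200, 50], [200, 200], [50, 200]])  # invented corners
mask_image(img, square, opacity=0.80)  # blends in place via addWeighted
cv2.imshow('highlighted', img)
cv2.waitKey(0)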
예제 #44
0
#!/usr/bin/python

# Standard imports
import cv2
import numpy as np
import os
import sys
import time

im1 = '00500_snap_RGB.bmp'
im2 = '00501_snap_RGB.bmp'

im1 = cv2.imread(im1)
im2 = cv2.imread(im2)

diff = cv2.bitwise_xor(im1, im2)

cv2.imshow('diff', diff)
cv2.waitKey(0)
예제 #45
0
import cv2 as cv
import numpy as np


blank = np.zeros((400, 400), dtype='uint8')
rectangle = cv.rectangle(blank.copy(), (30,30), (370, 370), 255, -1)
circle = cv.circle(blank.copy(), (200, 200), 200, 255, -1)

cv.imshow("rectangle", rectangle)
cv.imshow("circle", circle)

# bitwise AND
bitwise_and = cv.bitwise_and(circle, rectangle)
cv.imshow("AND", bitwise_and)
 
# bitwise OR
bitwise_or = cv.bitwise_or(circle, rectangle)
cv.imshow("OR", bitwise_or)
 
# bitwise XOR

bitwise_xor = cv.bitwise_xor(circle, rectangle)
cv.imshow("XOR", bitwise_xor)

# bitwise NOT

bitwise_not = cv.bitwise_not(rectangle)
cv.imshow("NOT", bitwise_not)

cv.waitKey(0)
예제 #46
0
def find_ground(img, border, sea, cloud):
    output = cv2.bitwise_xor(img, border)
    output = cv2.bitwise_xor(output, sea)
    output = cv2.bitwise_xor(output, cloud)
    return output
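Note that this chained XOR only behaves like mask subtraction when border, sea and cloud are disjoint subsets of img: a pixel covered by two of the masks would be toggled off and then back on. A toy check (values invented):

import cv2
import numpy as np

img = np.full((4, 4), 255, dtype=np.uint8)  # everything starts as "ground"
border = np.zeros_like(img); border[0, :] = 255
sea = np.zeros_like(img); sea[3, :] = 255
cloud = np.zeros_like(img)  # empty mask: XOR with zeros is a no-op
ground = find_ground(img, border, sea, cloud)
print(ground)  # rows 0 and 3 cleared; overlapping masks would re-set pixels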
예제 #47
0
def merge_mask_list(mask_list,
                    pred_mask,
                    blk: TextBlock = None,
                    pred_thresh=30,
                    text_window=None,
                    filter_with_lines=False,
                    refine_mode=REFINEMASK_INPAINT):
    mask_list.sort(key=lambda x: x[1])
    linemask = None
    if blk is not None and filter_with_lines:
        linemask = np.zeros_like(pred_mask)
        lines = blk.lines_array(dtype=np.int64)
        for line in lines:
            line[..., 0] -= text_window[0]
            line[..., 1] -= text_window[1]
            cv2.fillPoly(linemask, [line], 255)
        linemask = cv2.dilate(linemask,
                              np.ones((3, 3), np.uint8),
                              iterations=3)

    if pred_thresh > 0:
        e_size = 1
        element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                            (2 * e_size + 1, 2 * e_size + 1),
                                            (e_size, e_size))
        pred_mask = cv2.erode(pred_mask, element, iterations=1)
        _, pred_mask = cv2.threshold(pred_mask, 60, 255, cv2.THRESH_BINARY)
    connectivity = 8
    mask_merged = np.zeros_like(pred_mask)
    for ii, (candidate_mask, xor_sum) in enumerate(mask_list):
        num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(
            candidate_mask, connectivity, cv2.CV_16U)
        for label_index, stat, centroid in zip(range(num_labels), stats,
                                               centroids):
            if label_index != 0:  # skip background label
                x, y, w, h, area = stat
                if w * h < 3:
                    continue
                x1, y1, x2, y2 = x, y, x + w, y + h
                label_local = labels[y1:y2, x1:x2]
                label_coordinates = np.where(label_local == label_index)
                tmp_merged = np.zeros_like(label_local, np.uint8)
                tmp_merged[label_coordinates] = 255
                tmp_merged = cv2.bitwise_or(mask_merged[y1:y2, x1:x2],
                                            tmp_merged)
                xor_merged = cv2.bitwise_xor(tmp_merged,
                                             pred_mask[y1:y2, x1:x2]).sum()
                xor_origin = cv2.bitwise_xor(mask_merged[y1:y2, x1:x2],
                                             pred_mask[y1:y2, x1:x2]).sum()
                if xor_merged < xor_origin:
                    mask_merged[y1:y2, x1:x2] = tmp_merged

    if refine_mode == REFINEMASK_INPAINT:
        mask_merged = cv2.dilate(mask_merged,
                                 np.ones((5, 5), np.uint8),
                                 iterations=1)
    # fill holes
    num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(
        255 - mask_merged, connectivity, cv2.CV_16U)
    sorted_area = np.sort(stats[:, -1])
    if len(sorted_area) > 1:
        area_thresh = sorted_area[-2]
    else:
        area_thresh = sorted_area[-1]
    for label_index, stat, centroid in zip(range(num_labels), stats,
                                           centroids):
        x, y, w, h, area = stat
        if area < area_thresh:
            x1, y1, x2, y2 = x, y, x + w, y + h
            label_local = labels[y1:y2, x1:x2]
            label_coordinates = np.where(label_local == label_index)
            tmp_merged = np.zeros_like(label_local, np.uint8)
            tmp_merged[label_coordinates] = 255
            tmp_merged = cv2.bitwise_or(mask_merged[y1:y2, x1:x2], tmp_merged)
            xor_merged = cv2.bitwise_xor(tmp_merged, pred_mask[y1:y2,
                                                               x1:x2]).sum()
            xor_origin = cv2.bitwise_xor(mask_merged[y1:y2, x1:x2],
                                         pred_mask[y1:y2, x1:x2]).sum()
            if xor_merged < xor_origin:
                mask_merged[y1:y2, x1:x2] = tmp_merged
    return mask_merged
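The acceptance test used in both loops above is the same idea: a candidate component is merged only when the union lowers the XOR disagreement with pred_mask. In miniature, with toy arrays:

import cv2
import numpy as np

pred = np.zeros((4, 4), np.uint8)
pred[:, :2] = 255                     # what the model predicts
merged = np.zeros_like(pred)          # mask accepted so far
candidate = np.zeros_like(pred)
candidate[:, 0] = 255                 # connected component under test

tmp = cv2.bitwise_or(merged, candidate)
xor_merged = cv2.bitwise_xor(tmp, pred).sum()     # disagreement if we accept
xor_origin = cv2.bitwise_xor(merged, pred).sum()  # disagreement if we keep the old mask
if xor_merged < xor_origin:  # accepting the candidate reduces disagreement
    merged = tmp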
예제 #48
0
# In order to find the local maxima, "distance" is subtracted from
# the result of the dilation of "distance". All the peaks keep the
# same value.

peaks = cv2.dilate(distance, kernel, iterations = 3)
ThrObjects = cv2.dilate(ThrObjects , kernel, iterations = 3)

# Now all the peaks should be exactly 0
peaks = peaks - distance

#and the non-peaks 255
junk, peaks = cv2.threshold(peaks, 0, 255, cv2.THRESH_BINARY)
peaks = peaks.astype('uint8')

peaks = cv2.bitwise_xor(peaks,ThrObjects)
peaks = cv2.dilate(peaks, kernel, iterations = 1)

# In order to map the peaks, findContours() is used.
# The results are stored in "contours".

contours,hierarchy = (cv2.findContours(peaks,
    cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE))


if len(contours)>0:
    moms = []
    centers = []
    circles = []
    i = 0
    x = 0
예제 #49
0
File: iris.py  Project: henriquehr/AI
def get_iris(image, x, y, r):
    r += 3
    r_orig = r
    result = image.copy()
    colors = []
    coords = []
    image = cv2.equalizeHist(image)
    x_R = 0
    for i in range(30, 90):
        point_r = np.zeros((image.shape[0], image.shape[1]), np.uint8)
        point_l = np.zeros((image.shape[0], image.shape[1]), np.uint8)
        x_R = r + i
        coords.append(x_R)
        '''
            Instead of circles, use rectangles shifted horizontally to reduce the noise from the other parts of the eye
        '''
        cv2.rectangle(point_r, (x + r + i - 2, y - 11), (x + r + i, y + 11),
                      (255, 0, 23),
                      thickness=1)
        cv2.rectangle(point_l, (x - r - i - 2, y - 11), (x - r - i, y + 11),
                      (255, 0, 23),
                      thickness=1)
        masked_data_1 = cv2.bitwise_and(image, image, mask=point_r)
        masked_data_2 = cv2.bitwise_and(image, image, mask=point_l)
        points = cv2.bitwise_xor(masked_data_1, masked_data_2)
        avg_color_per_row = cv2.mean(points)
        avg_color = cv2.mean(avg_color_per_row)
        colors.append(avg_color[0])
        #img = cv2.bitwise_xor(image, points)
        #stack = np.hstack((img,points))
        #cv2.imshow("circles", stack)
        #cv2.waitKey()
    '''
        Find the outer border of the iris probably in the worst possible way
    '''
    '''past = 0
    now = 0
    save = []
    save_idx = []
    for i in xrange(len(colors) - 1):
        c_1 = colors[i]
        c_2 = colors[i+1]
        now += c_1 + c_2
        if now < past:
            save.append(now)
            save_idx.append(i)
            r = coords[i]
        past = now
        now = 0
    max = 0
    max_idx = 0
    for i in xrange(len(save) - 1):
        c = save[i] - save[i-1]
        if max < c:
            max = c
            max_idx = save_idx[i]
    r = coords[max_idx] '''
    r = coords[55]  # fixed size because of hamming distance
    '''
        Extract the iris
    '''
    mask = np.zeros((result.shape[0], result.shape[1]), np.uint8)
    cv2.circle(mask, (x, y), (r), (255, 255, 255), r_orig - r)
    result = cv2.bitwise_and(result, result, mask=mask)
    nsamples = 360
    samples = np.linspace(0, 2.0 * np.pi, nsamples)[:-1]
    r_start = r - r_orig
    polar = np.zeros((r_start, nsamples))
    for i_r in range(r_start):
        for t in samples:
            x_pos = (i_r + r_orig) * np.cos(t) + x
            y_pos = (i_r + r_orig) * np.sin(t) + y
            if x_pos < result.shape[1] and y_pos < result.shape[0]:
                polar[int(i_r)][int(t * nsamples / 2.0 /
                                    np.pi)] = result[int(y_pos)][int(x_pos)]
            else:
                polar[int(i_r)][int(t * nsamples / 2.0 / np.pi)] = 0
    return result, polar
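The manual radius/angle sampling loop at the end of get_iris can usually be replaced by OpenCV's built-in linear polar remap. A sketch on a synthetic image (assumes OpenCV >= 3.4 for cv2.warpPolar; its rows map to angle and columns to radius, hence the transpose to match the (radius, angle) layout built above):

import cv2
import numpy as np

img = np.zeros((200, 200), dtype=np.uint8)
cv2.circle(img, (100, 100), 80, 255, 2)  # stand-in for the iris image
unwrapped = cv2.warpPolar(img, (80, 360), (100, 100), 80, cv2.WARP_POLAR_LINEAR)
polar = cv2.transpose(unwrapped)  # shape (80, 360): radius down, angle across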
예제 #50
0
def match_score(sample, target):
    AND = cv2.bitwise_and(sample, target)
    XOR = cv2.bitwise_xor(sample, target)

    w = 1 - (cv2.countNonZero(target) / 312)  # weight computed but unused by the score below
    return cv2.countNonZero(AND) - cv2.countNonZero(XOR)
예제 #51
0
import cv2
img = cv2.imread('lena.jpg', 1)
k = cv2.imread('jas.jpg',1)
f= img[:]
img = cv2.bitwise_xor(img,k)
cv2.imshow('image', img)
cv2.imshow('im',f)
k = cv2.waitKey(0)

if k == 27:
    cv2.destroyAllWindows()
elif k==ord('s'):
    cv2.imwrite('lena-copy.jpg',img)
예제 #52
0
 grow = grow + 10
 grow1 = grow1 + 10
 rect[0] = rect[0]
 rect[1] = rect[1]
 rect[2] = rect[2] + grow1
 rect[3] = rect[3] + grow
 masking = np.zeros(org.shape[:2], np.uint8)
 bgdModel1 = np.zeros((1, 65), np.float64)
 fgdModel1 = np.zeros((1, 65), np.float64)
 rect = (rect[0], rect[1], rect[2], rect[3])
 print(faces)
 cv2.grabCut(org, masking, rect, bgdModel1, fgdModel1, 5,
             cv2.GC_INIT_WITH_RECT)
 maskgrow = np.where(((masking == 0)), 0, 1).astype('uint8')
 modimg = org * maskgrow[:, :, np.newaxis]
 grown = cv2.add(cv2.bitwise_xor(modimg, detectimg, mask=maskgrow),
                 detectimg)
 cv2.imshow('img', grown)
 height = maskgrow.shape
 print(height)
 gray1 = cv2.cvtColor(grown, cv2.COLOR_BGR2GRAY)
 facegrow = face_cascade.detectMultiScale3(gray1,
                                           1.02,
                                           2,
                                           400,
                                           maxSize=(500, 500),
                                           outputRejectLevels=True)
 prev = curr
 i = 0
 j = 0
 wlist = []
예제 #53
0
def isolateForegroundFace(img, imgMask, maskHull, dieProfile, flag_debug):
    """We are passed a grayscale (bw really) img, where white is our foreground labels, and maskHull is our die shape.
	Now we want to extract the likely single face image.
	For d12+ this may mean extracting the region close to center, for d6 it may just be the whole thing.
	"""


    # params
    flag_denoise = False

    # default if we find nothing is just return what we were passed (ie everything is foreground)
    imgFace = img
    faceMask = imgMask
    faceRegion = maskHull
    faceCentroid = dicerfuncs.computeDieFaceCenter(maskHull, dieProfile)

    # denoise?
    if (flag_denoise):
        # img = cv2.fastNlMeansDenoising(img,None,3,5,7)
        img = cv2.fastNlMeansDenoising(img, None, 5, 7, 21)


    # first step would be to guess the shape and face info IFF not explicitly stated in dieProfile
    # ATTN: todo


    # prepare a helper mask for foreground extraction, AND do some masking of the main img to black out stuff that is NOT ALLOWED in foreground
    (img, extractionMask) = dieProfile.makeForegroundExtractionMask(img, faceMask, maskHull)


    # this get_useFullDieImage is also checked in makeForegroundExtractionMask where it will return the image itself, usually unchanged
    if (dieProfile.get_useFullDieImage()):
        return (img, extractionMask, maskHull)


    if (dieProfile.get_diecenter()):
        # just use foreground focus dont choose contours
        # mask it
        img = cv2.bitwise_and(extractionMask, img)
        return (img, extractionMask, maskHull)


    # now find all regions (note we use RETR_LIST not RETR_EXTERNAL)
    imgRegions = dicerfuncs.copyCvImage(img)
    # imgDummy, contours, hierarchy = cv2.findContours(imgRegions,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    imgDummy, contours, hierarchy = cv2.findContours(imgRegions, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if contours is None:
        return (imgFace, faceMask, faceRegion)

    # test - show contours on face
    if (flag_debug):
        imgProcessed = dicerfuncs.ConvertBwGrayToBGR(img)
        cv2.drawContours(imgProcessed, contours, -1, (255, 0, 255), 1)
        dicerfuncs.cvImgShow("Face contours", imgProcessed)

    if (False):
        # simplify all contours?
        simplifyContours(contours, imgMask)


    # test extraction mask
    if (flag_debug):
        imgTest = cv2.bitwise_xor(img, extractionMask)
        dicerfuncs.cvImgShow("FocusMask", imgTest)

    # now merge the APPROPRIATE face regions


    # start by finding the closest region to center point where we think the die face should be
    # this should be fairly fast
    contourIndex_closest = None
    contour_closest = None
    closestCentroid = None
    contourDistance_closest = 9999999.9
    for i, cnt in enumerate(contours):
        if (False):
            hull = cv2.convexHull(cnt)
            closestPoint = dicerfuncs.findHullMomentCentroid(hull)
            dist = calcPointPointDistance(faceCentroid, contourCentroid)
        else:
            (dist, closestPoint) = findClosestContourPointDistance(faceCentroid, cnt)
        if (dist < contourDistance_closest):
            # new closest
            contourDistance_closest = dist
            contourIndex_closest = i
            contour_closest = cnt
            closestCentroid = closestPoint

    if (contour_closest is None):
        # no closest contour -- I'm not sure how this can get here
        return (imgFace, faceMask, faceRegion)

    # do we need this?
    # hull_closest = cv2.convexHull(contour_closest)


    # merge nearby ones given closest starting contour
    # ATTN: there are different ways we could compute "proximity"
    # a gross one would be centroid distance, but this is not ideal for merging die labels since we probably care most about the DOTS next to 6s and 9s, in which case proximity probably should be the closest two points
    # this takes time unfortunately
    allpoints = contour_closest
    ignoreContours = list()
    ignoreContours.append(contourIndex_closest)

    while (True):
        didMerge = False
        for i, cnt in enumerate(contours):
            if (i in ignoreContours):
                continue

            # it might be nice to be able to do a QUICK reject
            # hull = cv2.convexHull(cnt)
            # contourCentroid = dicerfuncs.findHullMomentCentroid(cnt)
            # quickdist = calcPointPointDistance(closestCentroid, contourCentroid)
            # if (quickdist > quickRejectCentroidDistance):
            #	continue

            (accept, reject) = checkShouldMergeContour(dieProfile, allpoints, cnt, faceCentroid, closestCentroid,
                                                       extractionMask)
            if (accept):
                # merge it in
                didMerge = True
                ignoreContours.append(i)
                allpoints = np.vstack((allpoints, cnt))
            elif (reject):
                # permanently reject it
                didMerge = False
                ignoreContours.append(i)
            else:
                # leave it for next loop
                pass

        if (not didMerge):
            break

    faceContour = allpoints

    # is it already convex? if not i think we want convex
    faceContour = cv2.convexHull(faceContour)

    # ATTN: todo

    # now mask off this new face region
    (imgFace, faceMask) = maskImageGivenHull(img, faceContour)

    # now return it
    return (imgFace, faceMask, faceContour)
예제 #54
0
import numpy as np
import cv2
rectangle = np.zeros((300, 300), dtype="uint8")
cv2.rectangle(rectangle, (25, 25), (275, 275), 255, -1)
cv2.imshow("Rectangle", rectangle)
cv2.waitKey(0)

circle = np.zeros((300, 300), dtype="uint8")
cv2.circle(circle, (150, 150), 150, 255, -1)
cv2.imshow("Circle", circle)
cv2.waitKey(0)

bitwiseAnd = cv2.bitwise_and(rectangle, circle)
cv2.imshow("AND", bitwiseAnd)
cv2.waitKey(0)

bitwiseOr = cv2.bitwise_or(rectangle, circle)
cv2.imshow("OR", bitwiseOr)
cv2.waitKey(0)

bitwiseXor = cv2.bitwise_xor(rectangle, circle)
cv2.imshow("XOR", bitwiseXor)
cv2.waitKey(0)

bitwiseNot = cv2.bitwise_not(circle)
cv2.imshow("NOT", bitwiseNot)
cv2.waitKey(0)
예제 #55
0
img = cv.imread("04_short_10t.jpg")
img_ori_gray = cv.cvtColor(img_ori, cv.COLOR_BGR2GRAY)
img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

# --------------- binarization experiments
# - manual threshold √√ ---- from the grayscale histogram, take the valley between the first two peaks as the threshold // a PCB has three brightness levels: pads, coated traces, coated substrate
ret0, th2=cv.threshold(img_ori_gray, 45, 255, cv.THRESH_BINARY_INV)
ret1, th3=cv.threshold(img_gray, 45, 255, cv.THRESH_BINARY)
#h3_ori = copy.copy(th3)
# ------------------------------------------------------------------------------------------------------
# --- Otsu's method????
#ret2,th2 = cv.threshold(img_ori_gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
#ret3,th3 = cv.threshold(img_gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
# ------------------------------------------------------------------------------------------

img_result1 = cv.bitwise_xor(th2,th3)

img_result2 = cv.bitwise_not(img_result1)
kernel_dilate = np.ones((3,3),np.uint8)
kernel_erode = np.ones((3,3),np.uint8)

#img_result1 = cv.medianBlur(img_result1,7)

# opening: compensates the edges of detected defect points, and removes some noise along the way
img_result1 = cv.bitwise_not(img_result1)
img_result1 = cv.erode(img_result1,kernel_erode,iterations= 1)
img_result1 = cv.dilate(img_result1,kernel_dilate,iterations= 1)



oflaw = cv.bitwise_and(img_result1,th3, mask=img_result1)
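The XOR-plus-morphology pattern in this example is the core of binary defect inspection: XOR of two aligned, binarized images marks only the disagreeing pixels, and an opening drops single-pixel speckle while keeping blob-sized defects. A self-contained toy version:

import cv2
import numpy as np

ref = np.zeros((16, 16), np.uint8)
ref[4:12, 4:12] = 255                 # reference pattern
test = ref.copy()
test[0:3, 0:3] = 255                  # a blob-sized defect
test[13, 13] = 255                    # single-pixel noise
defects = cv2.bitwise_xor(ref, test)  # disagreeing pixels only
kernel = np.ones((3, 3), np.uint8)
opened = cv2.morphologyEx(defects, cv2.MORPH_OPEN, kernel)  # keeps the blob, drops the speck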
예제 #56
0
    #cv2.imwrite("horizontal" + str(i) +".jpg",horizontal_lines)
    #Plot the generated image
    #plotting = plt.imshow(image_2,cmap='gray')
    #plt.show()
    if not np.all((vertical_lines == 0)) and not np.all(
        (horizontal_lines == 0)):
        print("found a table")
        # Combine horizontal and vertical lines in a new third image, with both having same weight.
        img_vh = cv2.addWeighted(vertical_lines, 0.5, horizontal_lines, 0.5,
                                 0.0)
        #Eroding and thesholding the image
        img_vh = cv2.erode(~img_vh, kernel, iterations=2)
        thresh, img_vh = cv2.threshold(img_vh, 128, 255,
                                       cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        # cv2.imwrite("img_vh.jpg", img_vh)
        bitxor = cv2.bitwise_xor(img, img_vh)
        bitnot = cv2.bitwise_not(bitxor)
        #Plotting the generated image
        #plotting = plt.imshow(bitnot,cmap='gray')
        #plt.show()

        # Detect contours for following box detection
        contours, hierarchy = cv2.findContours(img_vh, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)

        def sort_contours(cnts, method="left-to-right"):
            # initialize the reverse flag and sort index
            reverse = False
            i = 0
            # handle if we need to sort in reverse
            if method == "right-to-left" or method == "bottom-to-top":
                reverse = True
            # sort by y-coordinate when ordering top-to-bottom
            if method == "top-to-bottom" or method == "bottom-to-top":
                i = 1
            # sort contours by the chosen coordinate of their bounding boxes
            boundingBoxes = [cv2.boundingRect(c) for c in cnts]
            (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                                key=lambda b: b[1][i],
                                                reverse=reverse))
            return (cnts, boundingBoxes)
예제 #57
0
cv2.namedWindow('Canny')
cv2.createTrackbar('min', 'Canny', min_value, 256, nothing)
cv2.createTrackbar('max', 'Canny', max_value, 256, nothing)
cv2.createTrackbar('hough_thresh', 'Canny', hough_thresh, 200, nothing)
cv2.createTrackbar('theta_range', 'Canny', 20, 200, nothing)

while (1):
    min_value = cv2.getTrackbarPos('min', 'Canny')
    max_value = cv2.getTrackbarPos('max', 'Canny')
    hough_thresh = cv2.getTrackbarPos('hough_thresh', 'Canny')
    theta_range = cv2.getTrackbarPos('theta_range', 'Canny')
    edges = cv2.Canny(img.copy(), min_value, max_value)
    edges = np.multiply(edges, mask)
    # erase vertical lines: XOR cancels edge pixels shared with the
    # precomputed vertical-line image (see the sketch after this example)
    edges = cv2.bitwise_xor(edges, getVertical())
    # erase horizontal lines
    # edges = cv2.bitwise_xor(edges, getHorizontal(horiz_bins))
    cv2.imshow('edges', edges)
    houghLines(edges, hough_thresh, max(theta_range, 1))
    key = cv2.waitKey(1)
    if key == ord("n"):
        path_index = min(path_index + 1, 22)
        img = getImage(image_path_left, image_path_right, path_index)
        cv2.imshow('original', img)
    elif key == ord("b"):
        path_index = max(path_index - 1, 0)
        img = getImage(image_path_left, image_path_right, path_index)
        cv2.imshow('original', img)
    elif key == ord("e"):
        break
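
# getVertical is defined elsewhere in this script. A plausible reconstruction
# (illustrative sketch, not the original; the 1x25 kernel size is an assumed
# choice): keep only long vertical strokes by eroding, then dilating, with a
# tall thin structuring element.
def getVerticalSketch(edge_img):
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 25))
    vertical = cv2.erode(edge_img, kernel)
    return cv2.dilate(vertical, kernel)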
예제 #58
0
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt

# img is an input image loaded earlier in the original script (shown with a
# gray colormap below, so presumably grayscale)
# cv.Laplacian takes the image, an output datatype, and a kernel size;
# CV_64F (64-bit float) is used because the white-to-black transitions in the
# image produce negative slopes that an unsigned type would clip

laplas = cv.Laplacian(img, cv.CV_64F, ksize=1)
# take the absolute value of the Laplacian and convert it back to unsigned
# 8-bit integers for display
laplas = np.uint8(np.absolute(laplas))
# cv.Sobel takes the image, a datatype, and the x/y derivative orders:
# dx=1, dy=0 selects the X direction; dx=0, dy=1 the Y direction
sobelX = cv.Sobel(img, cv.CV_64F, 1, 0)
sobelY = cv.Sobel(img, cv.CV_64F, 0, 1)

# convert these values to unsigned 8-bit integers
sobelX = np.uint8(np.absolute(sobelX))
sobelY = np.uint8(np.absolute(sobelY))
combineX_Y_OR = cv.bitwise_or(sobelX, sobelY)
combineX_Y_XOR = cv.bitwise_xor(sobelX, sobelY)
combineX_Y_AND = cv.bitwise_and(sobelX, sobelY)
# bitwise_not takes one input; a second positional argument would be a dst
# buffer (the original passed sobelY there, silently overwriting it)
combineX_Y_NOT = cv.bitwise_not(sobelX)

titles = [
    'Original', 'Laplacian', 'SobelX', 'SobelY', 'combineX_Y_OR',
    'combineX_Y_XOR', 'combineX_Y_AND', 'combineX_Y_NOT'
]
images = [
    img, laplas, sobelX, sobelY, combineX_Y_OR, combineX_Y_XOR, combineX_Y_AND,
    combineX_Y_NOT
]

for i in range(8):
    plt.subplot(3, 3, i + 1)
    plt.imshow(images[i], 'gray')
    plt.title(titles[i])
plt.show()
예제 #59
0
import numpy as np
import cv2

rectangle = np.zeros((300, 300), dtype = "uint8")
cv2.rectangle(rectangle, (25, 25), (275, 275), 255, -1)
# cv2.imshow("Rectangle", rectangle)
# cv2.waitKey(0)

circle = np.zeros((300, 300), dtype = "uint8")
cv2.circle(circle, (150, 150), 150, 255, -1)
# cv2.imshow("Circle", circle)
# cv2.waitKey(0)

bitwise_and = cv2.bitwise_and(rectangle, circle)
cv2.imshow("AND", bitwise_and)
cv2.waitKey(0)

bitwise_or = cv2.bitwise_or(rectangle, circle)
cv2.imshow("OR", bitwise_or)
cv2.waitKey(0)

bitwise_xor = cv2.bitwise_xor(rectangle, circle)
cv2.imshow("XOR", bitwise_xor)
cv2.waitKey(0)

bitwise_not = cv2.bitwise_not(circle)
cv2.imshow("NOT", bitwise_not)
cv2.waitKey(0)

예제 #60
0
File: image.py Project: leowenyang/cvman
def bitwiseXOR(img1, img2, mask):
    # per-pixel XOR of img1 and img2; the mask selects which output pixels
    # are computed
    return cv2.bitwise_xor(img1, img2, mask=mask)
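
# A usage sketch for the wrapper above (synthetic inputs; the mask restricts
# which output pixels are computed):
import numpy as np
import cv2

a = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(a, (20, 20), (180, 180), 255, -1)
b = np.zeros((200, 200), dtype=np.uint8)
cv2.circle(b, (100, 100), 90, 255, -1)
roi = np.zeros((200, 200), dtype=np.uint8)
cv2.circle(roi, (100, 100), 60, 255, -1)
diff = bitwiseXOR(a, b, roi)  # white where exactly one input is set, inside roi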