import cv2
import imutils
from imutils import contours

def returnPoints(image):
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
	cl1 = clahe.apply(gray)
	
	thresh = cv2.Canny(cl1, 50, 100)
	thresh = cv2.dilate(thresh, None, iterations=3)
	thresh = cv2.erode(thresh, None, iterations=3)
	cv2.bitwise_not(thresh, thresh)

	cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
		cv2.CHAIN_APPROX_SIMPLE)
	cnts = imutils.grab_contours(cnts)
	(cnts, _) = contours.sort_contours(cnts)
	pixelsPerMetric = None
	for c in cnts:
		# skip contours whose area is outside the expected 300-400 px band
		area = cv2.contourArea(c)
		if area < 300 or area > 400:
			continue
 
		# compute the rotated bounding box of the contour
		'''orig = image.copy()
		box = cv2.minAreaRect(c)
		box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
		box = np.array(box, dtype="int")

		box = perspective.order_points(box)
		cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
 
		# loop over the original points and draw them
		for (x, y) in box:
			cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)'''
		M = cv2.moments(c)
		cX = int(M["m10"] / M["m00"]) if M["m00"] != 0 else int(M["m10"])
		cY = int(M["m01"] / M["m00"]) if M["m00"] != 0 else int(M["m01"])
		# draw the contour; (cX, cY) is its centroid
		cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
		
		#return orig
	return image
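A minimal usage sketch for returnPoints (the file names are hypothetical, not from the original):

import cv2

# hypothetical input; any BGR image with contours in the 300-400 px^2 band
image = cv2.imread("parts.jpg")
annotated = returnPoints(image)
cv2.imwrite("parts_annotated.jpg", annotated)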
Example #2
import numpy as np
import imutils
import cv2
from imutils.contours import sort_contours
from tensorflow.keras.models import load_model

print("[INFO] loading handwriting OCR model...")
model = load_model('handwriting.model')

image = cv2.imread('images/umbc_address.png')  # test images
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)

edged = cv2.Canny(blurred, 30, 150)
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sort_contours(cnts, method="left-to-right")[0]

chars = []

for c in cnts:

    (x, y, w, h) = cv2.boundingRect(c)

    if 5 <= w <= 150 and 15 <= h <= 120:

        roi = gray[y:y + h, x:x + w]
        thresh = cv2.threshold(roi, 0, 255,
                               cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        (tH, tW) = thresh.shape

        # scale the glyph along its larger dimension so it fits a 32x32 input
        if tW > tH:
            thresh = imutils.resize(thresh, width=32)
        else:
            thresh = imutils.resize(thresh, height=32)
        chars.append((thresh, (x, y, w, h)))
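The source cuts off after the resize; the usual next steps pad each glyph to the 32x32 input the network expects and classify the batch. A sketch of that tail, assuming a Keras model trained on 32x32 grayscale characters and the digit/letter label order (both assumptions):

padded = []
for (glyph, bbox) in chars:
    (gH, gW) = glyph.shape
    dX = max(0, (32 - gW) // 2)
    dY = max(0, (32 - gH) // 2)
    # pad to 32x32 and scale pixel values to [0, 1]
    glyph = cv2.copyMakeBorder(glyph, dY, dY, dX, dX,
                               cv2.BORDER_CONSTANT, value=0)
    glyph = cv2.resize(glyph, (32, 32)).astype("float32") / 255.0
    padded.append((np.expand_dims(glyph, axis=-1), bbox))

preds = model.predict(np.array([g for (g, _) in padded]))
labels = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"  # assumed label order
for (p, (x, y, w, h)) in zip(preds, [b for (_, b) in padded]):
    print("{} @ ({}, {})".format(labels[np.argmax(p)], x, y))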
Example #3
import json
import math
from ftplib import FTP

import cv2
import imutils
import numpy as np
from imutils import contours
from matplotlib import pyplot as plt

# order_points_old comes from the surrounding project (not shown here)


def on_message(client, userdata, message):
    #print("Message Received: "+message.payload.decode())
    test = json.loads(message.payload.decode())
    print(test["input"])
    data_input = test["input"].lower()
    print(data_input)
    if data_input == "take picture":
        video_capture = cv2.VideoCapture(0)
        # Check success
        #if not video_capture.isOpened():
        #  raise Exception("Could not open video device")
        # Read picture. ret === True on success
        ret, frame = video_capture.read()
        # Close device
        video_capture.release()

        #from matplotlib import pyplot as plt
        frameRGB = frame[:, :, ::-1]  # BGR => RGB
        #plt.imshow(frameRGB)
        # note: cv2.imwrite expects BGR, so the saved file has swapped channels
        cv2.imwrite('scanfoto.jpg', frameRGB)
        print("test photo created")

    if data_input == "scan":
        # load our input image, convert it to grayscale, and blur it slightly
        image = cv2.imread("scanfoto.jpg")
        #image = frameRGB
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # perform edge detection, then perform a dilation + erosion to
        # close gaps in between object edges
        edged = cv2.Canny(gray, 50, 100)
        edged = cv2.dilate(edged, None, iterations=1)
        edged = cv2.erode(edged, None, iterations=1)

        # find contours in the edge map
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)

        # sort the contours from left-to-right and initialize the bounding box
        # point colors
        (cnts, _) = contours.sort_contours(cnts)
        colors = ((0, 0, 255), (240, 0, 159), (255, 0, 0), (255, 255, 0))

        #to get nparray in JSONfile
        class NumpyEncoder(json.JSONEncoder):
            def default(self, obj):
                if isinstance(obj, np.ndarray):
                    return obj.tolist()
                return json.JSONEncoder.default(self, obj)

        #clear json file
        #open('jason.txt', 'w').close()

        # loop over the contours individually
        for (i, c) in enumerate(cnts):
            # if the contour is not sufficiently large, ignore it
            #if cv2.contourArea(c) < 100:
            #	continue

            # compute the rotated bounding box of the contour, then
            # draw the contours
            box1 = cv2.minAreaRect(c)
            box1 = cv2.cv.BoxPoints(
                box1) if imutils.is_cv2() else cv2.boxPoints(box1)
            box1 = np.array(box1, dtype="int")
            cv2.drawContours(image, [box1], -1, (0, 255, 0), 2)

            # show the original coordinates
            json_dump = json.dumps(
                {
                    "objectNumber": "{}".format(i + 1),
                    'arrayCorners': box1,
                },
                cls=NumpyEncoder)
            print(json_dump)

            print("Object #{}:".format(i + 1))
            print(box1)

            # order the points in the contour such that they appear
            # in top-left, top-right, bottom-right, and bottom-left
            # order, then draw the outline of the rotated bounding
            # box
            rect = order_points_old(box1)

            print("test 2")
    if data_input == "scan difference":
        # Set code here
        print("test 3")
    if data_input == "outline":
        frame = cv2.imread("scanfoto.jpg")
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        #lower_blue = np.array([13,150,38])
        #lower_blue = np.array([10,20,38])  # intermediate piece
        lower_blue = np.array([10, 240, 38])  # (normally use this one)
        #lower_blue = np.array([10,40,38])  # (normally use this one)

        upper_blue = np.array([100, 255, 255])  # normally this one

        mask = cv2.inRange(hsv, lower_blue, upper_blue)
        res = cv2.bitwise_and(frame, frame, mask=mask)
        plt.imshow(res)
        cv2.imwrite('foto-kleur.png', res)

        foto_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        foto_preprocessed = cv2.GaussianBlur(foto_gray, (5, 5), 0)
        _, foto_binary = cv2.threshold(foto_preprocessed, 130, 255,
                                       cv2.THRESH_BINARY)

        # invert the mask to get the photo region
        # (this overwrites the thresholded foto_binary computed above)
        foto_binary = cv2.bitwise_not(mask)
        plt.imshow(cv2.cvtColor(foto_binary, cv2.COLOR_GRAY2RGB))
        cv2.imwrite('foto-binary.png', mask)

        rest = cv2.bitwise_and(foto_binary, foto_binary, mask=mask)

        # remove noise with a morphological closing
        morph_kernel = np.ones((2, 2), np.uint8)
        coins_morph = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, morph_kernel)

        plt.imshow(cv2.cvtColor(coins_morph, cv2.COLOR_GRAY2RGB))
        cv2.imwrite('foto-zwawit.png', coins_morph)

        # Converting the image to grayscale.
        #gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)

        # Using the Canny filter to get contours
        edges = cv2.Canny(coins_morph, 20, 30)
        # Using the Canny filter with different parameters
        edges_high_thresh = cv2.Canny(coins_morph, 60, 120)
        # Stacking the images to print them together
        # For comparison
        images = np.hstack((coins_morph, edges, edges_high_thresh))
        # Display the resulting frame
        plt.imshow(edges)
        plt.imshow(edges_high_thresh)
        #cv2.imshow('frame', edges)
        cv2.imwrite('outlineGcode.png', edges_high_thresh)
        plt.imshow(edges)
        witzwart = np.invert(edges)
        plt.imshow(witzwart)
        cv2.imwrite('outlineGcodeconvert.png', witzwart)

        print("test 3 outline gemaakt")
    if data_input == "create g-code":
        #domain name or server ip:
        ftp = FTP('ftp.pxl-ea-ict.be')
        ftp.login(user='******',
                  passwd='password')  # user anonymous, passwd anonymous@

        #ftp.cwd('/outlineGcode.gcode')
        def grabFile():

            filename = 'outlineGcode.gcode'

            localfile = open(filename, 'wb')
            ftp.retrbinary('RETR ' + filename, localfile.write, 2048)

            ftp.quit()
            localfile.close()

        grabFile()
        #session = FTP('ftp.pxl-ea-ict.be','*****@*****.**','p1qeLfqpZpoK')
        #file = open('outlineGcode.gcode','rb')                  # file to send
        #session.storbinary('STOR outlineGcode.gcode', file)     # send the file
        #file.close()                                    # close file and FTP
        #session.quit()
        print("test 7 gcode overgedragen")
    if data_input == "object detect":
        image = cv2.imread("scanfoto.jpg")
        #image = frameRGB
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # perform edge detection, then perform a dilation + erosion to
        # close gaps in between object edges
        edged = cv2.Canny(gray, 50, 100)
        edged = cv2.dilate(edged, None, iterations=1)
        edged = cv2.erode(edged, None, iterations=1)

        # find contours in the edge map
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)

        # sort the contours from left-to-right and initialize the bounding box
        # point colors
        (cnts, _) = contours.sort_contours(cnts)
        colors = ((0, 0, 255), (240, 0, 159), (255, 0, 0), (255, 255, 0))

        #to get nparray in JSONfile
        class NumpyEncoder(json.JSONEncoder):
            def default(self, obj):
                if isinstance(obj, np.ndarray):
                    return obj.tolist()
                return json.JSONEncoder.default(self, obj)

        #clear json file
        open('jason.txt', 'w').close()
        json_1 = "{[\"objects\""
        with open('jason.txt', 'a') as f:
            json.dumps(json_1, f)
        print("{[\"objects\":[", file=open("jason.txt", "a"))

        # Set code here
        # Set code here
        for (i, c) in enumerate(cnts):
            # compute the rotated bounding box of the contour, then
            # draw the contours
            box = cv2.minAreaRect(c)
            box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(
                box)
            box = np.array(box, dtype="int")
            cv2.drawContours(image, [box], -1, (0, 255, 0), 2)

            # determine the midpoint of the box diagonal
            x1 = box[0][0]
            x2 = box[2][0]
            y1 = box[0][1]
            y2 = box[2][1]
            midpunt = ((x1 + x2) / 2, (y1 + y2) / 2)
            #make line from coordinates
            myradians = math.atan2((box[0][1]) - (box[1][1]),
                                   (box[0][0]) - (box[1][0]))
            mydegrees = math.degrees(myradians)
            #if (angle < 0) { angle += 2 * M_PI; }

            # order the points in the contour such that they appear
            # in top-left, top-right, bottom-right, and bottom-left
            # order, then draw the outline of the rotated bounding
            # box
            rect = order_points_old(box)
            json_dump = json.dumps(
                {
                    "objectNumber": "{}".format(i + 1),
                    'arrayCorners': box,
                    'angle': mydegrees,
                    'centrepoint': midpunt,
                },
                cls=NumpyEncoder)
            with open('jason.txt', 'a') as f:
                f.write(json_dump + "\n")
            print(json_dump)
        with open('jason.txt', 'a') as f:
            f.write("]}")
        session = FTP('username', 'password')
        file = open('jason.txt', 'rb')  # file to send
        session.storbinary('STOR jason.txt', file)  # send the file
        file.close()  # close file and FTP
        session.quit()
        print("test 5 objectdetect")
    if data_input == "find angle and centre point":
        image = cv2.imread("scanfoto.jpg")
        #image = frameRGB
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # perform edge detection, then perform a dilation + erosion to
        # close gaps in between object edges
        edged = cv2.Canny(gray, 50, 100)
        edged = cv2.dilate(edged, None, iterations=1)
        edged = cv2.erode(edged, None, iterations=1)

        # find contours in the edge map
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)

        # sort the contours from left-to-right and initialize the bounding box
        # point colors
        (cnts, _) = contours.sort_contours(cnts)
        colors = ((0, 0, 255), (240, 0, 159), (255, 0, 0), (255, 255, 0))

        #to get nparray in JSONfile
        class NumpyEncoder(json.JSONEncoder):
            def default(self, obj):
                if isinstance(obj, np.ndarray):
                    return obj.tolist()
                return json.JSONEncoder.default(self, obj)

        #clear json file
        open('jason.txt', 'w').close()
        # Set code here
        for (i, c) in enumerate(cnts):
            # compute the rotated bounding box of the contour, then
            # draw the contours
            box = cv2.minAreaRect(c)
            box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(
                box)
            box = np.array(box, dtype="int")
            cv2.drawContours(image, [box], -1, (0, 255, 0), 2)

            # determine the midpoint of the box diagonal
            x1 = box[0][0]
            x2 = box[2][0]
            y1 = box[0][1]
            y2 = box[2][1]
            midpunt = ((x1 + x2) / 2, (y1 + y2) / 2)
            #make line from coordinates
            myradians = math.atan2((box[0][1]) - (box[1][1]),
                                   (box[0][0]) - (box[1][0]))
            mydegrees = math.degrees(myradians)
            #if (angle < 0) { angle += 2 * M_PI; }

            # order the points in the contour such that they appear
            # in top-left, top-right, bottom-right, and bottom-left
            # order, then draw the outline of the rotated bounding
            # box
            rect = order_points_old(box)
            json_dump = json.dumps(
                {
                    "objectNumber": "{}".format(i + 1),
                    'angle': mydegrees,
                    'centrepoint': midpunt,
                },
                cls=NumpyEncoder)
            print(json_dump, file=open("jason.txt", "a"))
        print(json_dump)
        print("test 4 print middel and corners")
Example #4
# loop over the contours
for c in cnts:
    # compute the bounding box of the contour, then use the
    # bounding box to derive the aspect ratio
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)

    # in order to label the contour as a question, region
    # should be sufficiently wide, sufficiently tall, and
    # have an aspect ratio approximately equal to 1
    if w >= 20 and h >= 20 and ar >= 0.9 and ar <= 1.1:
        questionCnts.append(c)

# sort the question contours top-to-bottom, then initialize
# the total number of correct answers
questionCnts = contours.sort_contours(questionCnts, method="top-to-bottom")[0]
correct = 0

# each question has 5 possible answers, so loop over the
# questions in batches of 5
for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
    # sort the contours for the current question from
    # left to right, then initialize the index of the
    # bubbled answer
    cnts = contours.sort_contours(questionCnts[i:i + 5])[0]
    bubbled = None

    # loop over the sorted contours
    for (j, c) in enumerate(cnts):
        # construct a mask that reveals only the current
        # "bubble" for the question
Example #5
import cv2
import imutils
import numpy as np
from imutils import contours

# gray_img / egg_image come from earlier in the source script
# bw_img1 = cv2.adaptiveThreshold(gray_img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,25,5)
# bw_img2  = cv2.adaptiveThreshold(gray_img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,25,5)
edged = cv2.Canny(gray_img, 50, 10) #cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)

# cv2.namedWindow("edged",0)
# cv2.resizeWindow("edged",480,640)
# cv2.imshow("edged",bw_img1)
cv2.imshow("edged", edged)
# cv2.waitKey(0)

cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

(cnts, _) = contours.sort_contours(cnts)
pixelsPerMetric = None
# result_img = cv2.drawContours(egg_image, cnts, -1, (0, 0, 255), 2)

# loop over the contours individually

dst = np.zeros(edged.shape, dtype=np.uint8)
rad = []
for c in cnts:
    # if the contour is not sufficiently large, ignore it
    if cv2.contourArea(c) < 1800:
        continue
    # fill the contour into the mask before measuring it
    dst = cv2.drawContours(dst, [c], -1, 255, thickness=cv2.FILLED)
    maxdist = 0
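The loop stops short in the source at maxdist = 0; the usual continuation finds the largest inscribed circle of each contour with cv2.pointPolygonTest. A sketch of that continuation (the 4 px sampling stride is an assumption):

    # scan the filled contour for the interior point farthest from the edge
    center = None
    for px in range(0, dst.shape[1], 4):
        for py in range(0, dst.shape[0], 4):
            d = cv2.pointPolygonTest(c, (float(px), float(py)), True)
            if d > maxdist:
                maxdist = d
                center = (px, py)
    if center is not None:
        rad.append(maxdist)  # radius of the largest inscribed circle
        cv2.circle(dst, center, int(maxdist), 128, 2)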
Example #6
import cv2
import numpy as np
from imutils import contours, perspective
from scipy.spatial import distance as dist


# midpoint, order_points, compare_lists, distance_to_camera and angle_change
# are project helpers (a sketch of two of them follows this function)
def grab_contour(threshold_image):
    # compute an edge map (unused below) and find the external contours
    edge = cv2.Canny(threshold_image, 75, 200)
    cnts = cv2.findContours(threshold_image.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    new_swap_list = []
    # sort the contours from left-to-right and initialize the 'pixels per metric' calibration variable
    (cnts, _) = contours.sort_contours(cnts)
    leftmost_contour = None

    center_points, areas, distances, corners, three_areas = [], [], [], [], []
    testing = []
    testing2 = []
    # three_contours = []
    known_width = 7.6
    focal_length = 295
    # print("<<< Grab Contours >>>")
    # cv2.circle(threshold_image, (277,375), 1, (100, 100, 100), thickness=7, lineType=8, shift=0)
    # cv2.imshow("leftmost circle 277,375", threshold_image)
    # cv2.circle(threshold_image, (276,501), 1, (100, 100, 100), thickness=7, lineType=8, shift=0)
    # cv2.imshow("leftmost circle 276,501", threshold_image)
    # cv2.circle(threshold_image, (157,375), 1, (100, 100, 100), thickness=7, lineType=8, shift=0)
    # cv2.imshow("leftmost circle 157,375", threshold_image)
    # cv2.circle(threshold_image, (156,501), 1, (100, 100, 100), thickness=7, lineType=8, shift=0)
    # cv2.imshow("leftmost circle 156,501", threshold_image)
    # get the four largest contour areas
    for i, c in enumerate(cnts):
        area = cv2.contourArea(c)
        three_areas.append(area)
    # pair each area with its contour and sort, largest first
    sorteddata = sorted(zip(three_areas, cnts),
                        key=lambda x: x[0],
                        reverse=True)
    tessss = []
    # Four largest contours' coordinates
    compare_list = [
        sorteddata[0][1][0][0], sorteddata[1][1][0][0], sorteddata[2][1][0][0],
        sorteddata[3][1][0][0]
    ]
    first, second, third, fourth = compare_lists([compare_list])
    tessss.append(first)
    tessss.append(second)
    tessss.append(third)
    tessss.append(fourth)

    # print(">?>?", np.argsort(tessss))
    for i in np.argsort(tessss):
        new_swap_list.append(sorteddata[i][1])
    # print("123123", new_swap_list)
    # if first == 0:
    #     new_swap_list.append(sorteddata[0][1])
    #     if second == 1:
    #         new_swap_list.append(sorteddata[1][1])
    #         if third == 2:
    #             new_swap_list.append(sorteddata[2][1])
    #             new_swap_list.append(sorteddata[3][1])
    #         else:
    #             new_swap_list.append(sorteddata[3][1])
    #             new_swap_list.append(sorteddata[2][1])
    #     elif second == 2:
    #         new_swap_list.append(sorteddata[2][1])
    #         new_swap_list.append(sorteddata[1][1])
    #     elif second == 3:
    #         if third == 2:
    #             new_swap_list.append(sorteddata[1][1])
    #             new_swap_list.append(sorteddata[3][1])
    #             new_swap_list.append(sorteddata[2][1])
    # elif second == 0:
    #     new_swap_list.append(sorteddata[1][1])
    #     if first == 1:
    #         new_swap_list.append(sorteddata[0][1])
    #         new_swap_list.append(sorteddata[2][1])
    #     elif first == 2:
    #         new_swap_list.append(sorteddata[2][1])
    #         new_swap_list.append(sorteddata[0][1])
    # elif third == 0:
    #     new_swap_list.append(sorteddata[2][1])
    #     if first == 1:
    #         new_swap_list.append(sorteddata[0][1])
    #         new_swap_list.append(sorteddata[1][1])
    #     elif first == 2:
    #         new_swap_list.append(sorteddata[1][1])
    #         new_swap_list.append(sorteddata[0][1])
    #     elif first == 3:
    #         new_swap_list.append(sorteddata[1][1])
    #         new_swap_list.append(sorteddata[0][1])
    # elif fourth == 0:
    #     new_swap_list.append(sorteddata[3][1])
    #     if first == 1:
    #         new_swap_list.append(sorteddata[0][1])
    #         if second == 2:
    #             new_swap_list.append(sorteddata[1][1])
    #             new_swap_list.append(sorteddata[2][1])
    #     elif first == 2:
    #         new_swap_list.append(sorteddata[0][1])
    #         if second == 1:
    #             new_swap_list.append(sorteddata[1][1])
    #             new_swap_list.append(sorteddata[2][1])

    # if minust[1] < 0:  #
    #     new_swap_list.append(sorteddata[1][1]) # leftmost = 2nd largest area
    #     new_swap_list.append(sorteddata[0][1])
    # else:
    #     new_swap_list.append(sorteddata[0][1])
    #     new_swap_list.append(sorteddata[1][1])
    # print("sorted data11[1]1", sorteddata[0][1])
    # print("new_swap_list:", new_swap_list)
    # find the nth largest contour [n-1][1], in this case 2
    # three_contours.append(sorteddata[0][1])
    # three_contours.append(sorteddata[1][1])
    # secondlargestcontour = sorteddata[1][1]
    print(">>new_swap_list", new_swap_list)
    # print(">>two_contours", two_contours)
    for c in new_swap_list:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        area = cv2.contourArea(c)

        if cv2.contourArea(c) < 100:
            continue

        box = approx
        box = np.squeeze(box)

        # order the points in the contour and draw outlines of the rotated bounding box
        box = order_points(box)

        print("box 1111", box)
        box = perspective.order_points(box)
        testing.append(box)
        # print("box 2222:", box)
        (x, y, w, h) = cv2.boundingRect(c)

        # compute area
        area = cv2.contourArea(c)
        areas.append(area)

        # compute center points
        M = cv2.moments(c)
        if M["m00"] != 0:
            cx = int(M["m10"] / M["m00"])
            cy = int(M["m01"] / M["m00"])
        else:
            cx, cy = 0, 0

        center = (cx, cy)
        center_points.append(center)

        c_x = np.average(box[:, 0])
        c_y = np.average(box[:, 1])

        # compute corners from contour image
        # four_corners = corners_from_contour(threshold_image, c)
        corners.append(box)
        # print("corners", corners)

        # compute and return the distance from the maker to the camera
        distances.append(distance_to_camera(known_width, focal_length, w))

        if leftmost_contour is None:
            (tl, tr, br, bl) = box
            (tlblX, tlblY) = midpoint(tl, bl)
            (trbrX, trbrY) = midpoint(tr, br)

            # compute the Euclidean distance between the midpoints, then construct the reference object
            d = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
            leftmost_contour = (box, (c_x, c_y), d / 7.5)
            # first_box = box
            continue
    print("leftmost contour:", leftmost_contour)
    print("corners", corners)
    print("corners[1]", corners[1][0][0])
    # cv2.circle(threshold_image, (corners[1][0][0], corners[1][0][1]), 1, (100, 100, 100), thickness=10, lineType=8, shift=0)

    # cent = midpoint(center_points[1], center_points[2])
    # print("center of center:, ", cent)
    print("box testing", testing)
    testing2.append(testing[1])
    print("testing[1]", testing[1][0][0])
    testing2.append(testing[2])
    testing2.append(testing[3])
    testing2.append(testing[0])
    print("testing2", testing2)
    cv2.circle(threshold_image, (testing[1][0][0], testing[1][0][1]),
               1, (100, 100, 100),
               thickness=10,
               lineType=8,
               shift=0)
    cv2.imshow("First corner?", threshold_image)
    for i in range(0, 4):
        print("w1", corners[i][2][0] - corners[i][3][0])
        print("w2", corners[i][1][0] - corners[i][0][0])
        print("h1", corners[i][3][1] - corners[i][0][1])
        print("h2", corners[i][2][1] - corners[i][1][1])
        print("top angle:", angle_change(corners[i][3], corners[i][2]))
        print("top reverse:", angle_change(corners[i][2], corners[i][3]))
        print("bottom angle:", angle_change(corners[i][1], corners[i][0]))
        print("bottom reverse:", angle_change(corners[i][0], corners[i][1]))
        print("left angle:", angle_change(corners[i][3], corners[i][0]))
        print("left reverse:", angle_change(corners[i][0], corners[i][3]))
        print("right angle:", angle_change(corners[i][2], corners[i][1]))
        print("right reverse:", angle_change(corners[i][1], corners[i][2]))
    # cv2.circle(threshold_image, (int(cent[0]), int(cent[1])), 1, (100, 100, 100), thickness=7, lineType=8, shift=0)
    # print("leftmost_contour[0][0]", leftmost_contour[0][0])
    cv2.circle(threshold_image,
               tuple(leftmost_contour[0][0]),
               1, (100, 100, 100),
               thickness=10,
               lineType=8,
               shift=0)

    return leftmost_contour, center_points, areas, distances, corners
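grab_contour() leans on several project helpers; two of them (midpoint and distance_to_camera) are simple enough to sketch under the usual assumptions (plain coordinate averaging, triangle-similarity distance):

def midpoint(ptA, ptB):
    # average two (x, y) points
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)


def distance_to_camera(known_width, focal_length, per_width):
    # triangle similarity: distance = (known width * focal length) / pixel width
    return (known_width * focal_length) / per_width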
Example #7
import cv2
import numpy as np
from imutils import contours


def test(frame, K, weights, parameters, div, color, r):
    def gaussian(data, mean, cov):
        # multivariate normal density, evaluated row-wise on `data`
        det_cov = np.linalg.det(cov)
        cov_inv = np.linalg.inv(cov)
        diff = np.matrix(data - mean)

        N = (2.0 * np.pi) ** (-len(data[1]) / 2.0) * (1.0 / (det_cov ** 0.5)) *\
            np.exp(-0.5 * np.sum(np.multiply(diff * cov_inv, diff), axis=1))

        return N
    
    test_image = frame
    nx = test_image.shape[0]
    ny = test_image.shape[1]
    img = test_image
    ch = img.shape[2]
    img = np.reshape(img, (nx*ny,ch))
    
    #weights = np.load('weights_o.npy')
    #parameters = np.load('parameters_o.npy')
    prob = np.zeros((nx*ny,K))
    likelihood = np.zeros((nx*ny,K))
    
    for cluster in range(K):
        prob[:, cluster:cluster + 1] = weights[cluster] * gaussian(
            img, parameters[cluster]['mean'], parameters[cluster]['cov'])

    # total likelihood across clusters
    likelihood = prob.sum(1)
       
    
    probabilities = np.reshape(likelihood,(nx,ny))
    
    probabilities[probabilities>np.max(probabilities)/div] = 255
    
    
    
    output = np.zeros_like(frame)
    output[:,:,0] = probabilities
    output[:,:,1] = probabilities
    output[:,:,2] = probabilities
    blur = cv2.GaussianBlur(output,(3,3),5)
    #cv2.imshow("out",output)
    edged = cv2.Canny(blur,50,255 )
    
    cnts,h = cv2.findContours(edged, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    (cnts_sorted, boundingBoxes) = contours.sort_contours(cnts, method="left-to-right")
    
    hull = cv2.convexHull(cnts_sorted[0])
    (x,y),radius = cv2.minEnclosingCircle(hull)
    
    if radius > r:
        cv2.circle(test_image, (int(x), int(y) - 1), int(radius + 1), color, 4)

    #cv2.imshow("Final output",test_image)
    return test_image
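A hypothetical call for test(), matching the commented-out .npy loads inside the function (the file names, cluster count, and tuning values are assumptions):

import cv2
import numpy as np

frame = cv2.imread("frame.png")  # assumed input frame
weights = np.load("weights_o.npy", allow_pickle=True)
parameters = np.load("parameters_o.npy", allow_pickle=True)
# K clusters, divisor for the probability cut, circle color, minimum radius
out = test(frame, K=3, weights=weights, parameters=parameters,
           div=3.0, color=(0, 255, 0), r=10)
cv2.imwrite("detected.png", out)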
	help="whether or not the new order points should should be used")
args = vars(ap.parse_args())

image = cv2.imread("Res/X_windows.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)

edged = cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)

cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
	cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

(cnts, _) = contours.sort_contours(cnts)
colors = ((0, 0, 255), (240, 0, 159), (255, 0, 0), (255, 255, 0))

for (i, c) in enumerate(cnts):

	if cv2.contourArea(c) < 100:
		continue

	box = cv2.minAreaRect(c)
	box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
	box = np.array(box, dtype="int")
	cv2.drawContours(image, [box], -1, (0, 255, 0), 2)

	# show the original coordinates
	#print("Object #{}:".format(i + 1))
Example #9
import math

import cv2
import imutils
import numpy as np
from imutils.contours import sort_contours
from imutils.perspective import four_point_transform

DEBUG = False  # assumed module-level flag used below

# vertical_boxes_filter, horizontal_boxes_filter and show_wait_destroy are
# helpers from the surrounding project
def extract_rows_columns(gray_image):
    inverted = cv2.bitwise_not(gray_image)
    blurred = cv2.GaussianBlur(inverted, (5, 5), 0)

    height, width = gray_image.shape

    thresholded = cv2.threshold(blurred, 128, 255,
                                cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

    # A vertical kernel of (1 x kernel_length), which will detect all the vertical lines in the image.
    vertical_kernel_height = math.ceil(height * 0.3)
    verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT,
                                                (1, vertical_kernel_height))

    # A horizontal kernel of (kernel_length x 1), which will help to detect all the horizontal lines in the image.
    horizontal_kernel_width = math.ceil(width * 0.3)
    hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT,
                                            (horizontal_kernel_width, 1))

    # Morphological operation to detect vertical lines from an image
    img_temp1 = cv2.erode(thresholded, verticle_kernel, iterations=3)
    verticle_lines_img = cv2.dilate(img_temp1, verticle_kernel, iterations=3)
    vertical_contours = imutils.grab_contours(
        cv2.findContours(verticle_lines_img.copy(), cv2.RETR_EXTERNAL,
                         cv2.CHAIN_APPROX_SIMPLE))
    # Sort all the contours left to right.
    (vertical_contours,
     vertical_bounding_boxes) = sort_contours(vertical_contours,
                                              method="left-to-right")

    filtered_vertical_bounding_boxes = list(
        filter(lambda x: vertical_boxes_filter(x, height),
               vertical_bounding_boxes))

    # Morphological operation to detect horizontal lines from an image
    img_temp2 = cv2.erode(thresholded, hori_kernel, iterations=3)
    horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=3)
    horizontal_contours = imutils.grab_contours(
        cv2.findContours(horizontal_lines_img.copy(), cv2.RETR_EXTERNAL,
                         cv2.CHAIN_APPROX_SIMPLE))

    horizontal_contours, horizontal_bounding_boxes = sort_contours(
        horizontal_contours, method="top-to-bottom")

    filtered_horizontal_bounding_boxes = list(
        filter(lambda x: horizontal_boxes_filter(x, width),
               horizontal_bounding_boxes))

    if DEBUG:
        color_image = cv2.cvtColor(gray_image.copy(), cv2.COLOR_GRAY2BGR)
        cv2.drawContours(color_image, vertical_contours, -1, (0, 255, 0), 2)
        cv2.drawContours(color_image, horizontal_contours, -1, (255, 0, 0), 2)

        # for filtered_horizontal_bounding_box in filtered_horizontal_bounding_boxes:
        #     x,y,w,h = filtered_horizontal_bounding_box
        #     cv2.rectangle(color_image,(x,y),(x+w,y+h),(0,255,255),2)
        #
        # for filtered_vertical_bounding_box in filtered_vertical_bounding_boxes:
        #     x,y,w,h = filtered_vertical_bounding_box
        #     cv2.rectangle(color_image,(x,y),(x+w,y+h),(0,255,255),2)

        show_wait_destroy("horizontal_vertical_contours", color_image)

    extracted_rows_columns = []

    for idx_h, horizontal_bounding_box in enumerate(
            filtered_horizontal_bounding_boxes):
        if idx_h == 0:
            continue
        hx_p, hy_p, hw_p, hh_p = filtered_horizontal_bounding_boxes[
            idx_h - 1]  #previous horizontal box
        hx_c, hy_c, hw_c, hh_c = horizontal_bounding_box

        extracted_columns = []
        for idx_v, vertical_bounding_box in enumerate(
                filtered_vertical_bounding_boxes):
            if idx_v == 0:
                continue
            vx_p, vy_p, vw_p, vh_p = filtered_vertical_bounding_boxes[
                idx_v - 1]  #previous vertical box
            vx_c, vy_c, vw_c, vh_c = vertical_bounding_box
            table_cell = gray_image[hy_p:hy_c + hh_c, vx_p:vx_c + vw_c]

            blurred = cv2.GaussianBlur(table_cell, (5, 5), 0)
            #cv2.rectangle(color_image,(vx_p,hy_p),(vx_c+vw_c,hy_c+hh_c),(255,0,0),2)

            thresholded = cv2.threshold(blurred, 128, 255,
                                        cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

            cell_contours = imutils.grab_contours(
                cv2.findContours(thresholded, cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE))
            cell_contours = sorted(cell_contours,
                                   key=cv2.contourArea,
                                   reverse=True)

            rect = cv2.minAreaRect(cell_contours[0])
            box = cv2.boxPoints(rect)
            box = np.array(box, dtype="int")
            extracted = four_point_transform(
                table_cell.copy(),
                box.reshape(4, 2))[1:-1, 1:-1]  #remove 1 px from each side
            ret, extracted = cv2.threshold(extracted, 165, 255,
                                           cv2.THRESH_BINARY)
            extracted_columns.append(extracted)

            # cv2.drawContours(color_image, [contours[0]], -1, (0,255,0), 3)

        extracted_rows_columns.append(extracted_columns)

    #show_wait_destroy("horizontal_lines_img",color_image)
    return extracted_rows_columns
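A hypothetical call for extract_rows_columns, cropping every detected table cell out of a scanned grayscale page (the file names are assumptions):

import cv2

page = cv2.imread("table_scan.png", cv2.IMREAD_GRAYSCALE)  # assumed input
for r, row in enumerate(extract_rows_columns(page)):
    for c, cell in enumerate(row):
        cv2.imwrite("cell_{}_{}.png".format(r, c), cell)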
Example #10
    # note: relies on cv2, imutils.contours and the project's Segment module
    def digitalrec(image_org):
        number_List = []
        # image_org = cv2.imread(image)
        # image_org = image.copy()

        # hsv
        # hsv = cv.cvtColor(image_org, cv.COLOR_BGR2HSV)
        # lower_hsv = np.array([156, 43, 46]) #156
        # upper_hsv = np.array([180, 255, 255]) # 180
        # mask = cv.inRange(hsv, lowerb=lower_hsv, upperb=upper_hsv)
        # dst = cv.bitwise_and(image_org, image_org, mask=mask)

        # convert the image to grayscale
        image_gray = cv2.cvtColor(image_org, cv2.COLOR_RGB2GRAY)

        meanvalue = image_gray.mean()  # meanvalue + 65
        # print("meanvalue",meanvalue)

        ret, image_bin = cv2.threshold(image_gray, 230, 255,
                                       cv2.THRESH_BINARY)  #220
        # cv2.imshow("image_bin",image_bin)

        gray_res = cv2.resize(image_bin,
                              None,
                              fx=1,
                              fy=1,
                              interpolation=cv2.INTER_CUBIC)

        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        # remove white specks (opening)
        Open_img = cv2.morphologyEx(gray_res,
                                    cv2.MORPH_OPEN,
                                    kernel,
                                    iterations=1)
        # close gaps between line segments (closing)
        Closed_img = cv2.morphologyEx(Open_img,
                                      cv2.MORPH_CLOSE,
                                      kernel,
                                      iterations=5)

        #kernel = np.ones((3,3), np.uint8)
        #Closed_img = cv2.dilate(Closed_img, kernel, iterations = 1)

        # cv2.imshow("gray_res",gray_res)
        # cv2.imshow("Open_img",Open_img)
        # cv2.imshow("Closed_img",Closed_img)
        # cv2.waitKey()
        # cv2.destroyAllWindows()

        try:
            try:
                # findContours returns 2 or 3 values depending on the
                # OpenCV version
                res = cv2.findContours(Closed_img, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
                cnts = res[0] if len(res) == 2 else res[1]
                cnts, boundingBoxes = contours.sort_contours(
                    cnts, method="left-to-right")
            except Exception as e:
                print(e)

            for i in range(0, len(cnts)):
                x, y, w, h = cv2.boundingRect(cnts[i])
                cv2.rectangle(Closed_img, (x, y), (x + w, y + h), (255, 0, 0),
                              2)
                # print("x : ",x,"y : ",y,"w : ",w,"h : ",h)
                # Height > Width
                if h > w and h / w > 2.5 and h > 45:
                    # print("if x : ",x,"y : ",y,"w : ",w,"h : ",h)
                    cv2.rectangle(Closed_img, (x - 20, y), (x + w + 2, y + h),
                                  (255, 0, 0), 2)
                    Spimg = Closed_img[y + 2:y + h - 2, x - 20:x + w]
                    # cv2.imwrite("Result/Result_img/Contours1.jpg",Spimg)
                    number = Segment.TubeIdentification(Spimg)
                    #cv2.imwrite("Result/Result_img/Line1.jpg",Spimg)
                    if number != -1:
                        number_List.append(number)
                        # cv2.rectangle(image_org, (x-80,y), (x+w+2,y+h), (153,153,0), 2)
                        cv2.putText(image_org, str(number), (x - 20, y - 5),
                                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255),
                                    3, cv2.LINE_AA)

                elif h > w and h > 45:
                    cv2.rectangle(Closed_img, (x, y), (x + w, y + h),
                                  (255, 0, 0), 2)
                    # print("elif x : ",x,"y : ",y,"w : ",w,"h : ",h)
                    Spimg = Closed_img[y + 2:y + h - 2, x + 2:x + w - 2]
                    # cv2.imwrite("Result/Result_img/ContoursD"+str(i)+".jpg",Spimg)
                    number = Segment.TubeIdentification(Spimg)
                    #cv2.imwrite("Result/Result_img/Line9.jpg",Spimg)
                    if number != -1:
                        number_List.append(number)
                        # cv2.rectangle(image_org, (x,y), (x+w,y+h), (153,153,0), 2)
                        cv2.putText(image_org, str(number), (x, y - 5),
                                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255),
                                    3, cv2.LINE_AA)  # 4,5
                else:
                    pass

            # cv2.imshow("ctoimg",Closed_img)
            # cv2.imshow("ctos_img",image_org)
            # # cv2.imwrite("./image_org1.jpg",image_org)
            # # # cv2.imwrite("./Spimg.jpg",Spimg)
            # cv2.waitKey()
            # cv2.destroyAllWindows()
            if len(number_List) == 3:
                return Closed_img, image_org, int(
                    str(number_List[0]) + str(number_List[1]) +
                    str(number_List[2]))
            else:
                return Closed_img, image_org, 0
        except Exception as e:
            print(e)
            return Closed_img, image_org, number_List
Example #11
import cv2
import imutils
import numpy as np
from imutils import contours


def performOCR(image):
    ref = cv2.imread('ocr_a_reference')
    ref = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)
    ref = cv2.threshold(ref, 10, 255, cv2.THRESH_BINARY_INV)[1]

    # find contours in the OCR-A image (i.e., the outlines of the digits),
    # sort them from left to right, and initialize a dictionary to map
    # digit name to the ROI
    refCnts = cv2.findContours(ref.copy(), cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)
    refCnts = imutils.grab_contours(refCnts)
    refCnts = contours.sort_contours(refCnts, method="left-to-right")[0]
    digits = {}

    # loop over the OCR-A reference contours
    for (i, c) in enumerate(refCnts):
        # compute the bounding box for the digit, extract it, and resize
        # it to a fixed size
        (x, y, w, h) = cv2.boundingRect(c)
        roi = ref[y:y + h, x:x + w]
        roi = cv2.resize(roi, (57, 88))

        # update the digits dictionary, mapping the digit name to the ROI
        digits[i] = roi

    # initialize a rectangular (wider than it is tall) and square
    # structuring kernel
    rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 3))
    sqKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))

    # load the input image, resize it, and convert it to grayscale
    image = cv2.bitwise_not(image)
    image = imutils.resize(image, width=900)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # apply a tophat (whitehat) morphological operator to find light
    # regions against a dark background (i.e., the credit card numbers)
    tophat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, rectKernel)

    # compute the Scharr gradient of the tophat image, then scale
    # the rest back into the range [0, 255]
    gradX = cv2.Sobel(tophat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
    gradX = np.absolute(gradX)
    (minVal, maxVal) = (np.min(gradX), np.max(gradX))
    gradX = (255 * ((gradX - minVal) / (maxVal - minVal)))
    gradX = gradX.astype("uint8")

    #cv2.imshow("Image", image)
    #cv2.waitKey(0)

    # apply a closing operation using the rectangular kernel to help
    # close gaps in between credit card number digits, then apply
    # Otsu's thresholding method to binarize the image
    gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)
    thresh = cv2.threshold(gradX, 0, 255,
                           cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

    # apply a second closing operation to the binary image, again
    # to help close gaps between credit card number regions
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, sqKernel)
    #cv2.imshow("Image", image)
    #cv2.waitKey(0)

    # find contours in the thresholded image, then initialize the
    # list of digit locations
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    locs = []
    cv2.imshow("Image", image)
    cv2.waitKey(0)

    # loop over the contours
    for (i, c) in enumerate(cnts):
        # compute the bounding box of the contour, then use the
        # bounding box coordinates to derive the aspect ratio
        (x, y, w, h) = cv2.boundingRect(c)
        ar = w / float(h)

        # since credit cards use a fixed-size font with 4 groups
        # of 4 digits, we can prune potential contours based on the
        # aspect ratio
        if ar > 1.5 and ar < 1.75:
            # contours can further be pruned on minimum/maximum width
            # and height
            if (w > 10 and w < 60) and (h > 10 and h < 30):
                # append the bounding box region of the digits group
                # to our locations list
                locs.append((x, y, w, h))

    # sort the digit locations from left-to-right, then initialize the
    # list of classified digits
    locs = sorted(locs, key=lambda x: x[0])
    output = []

    # loop over the 4 groupings of 4 digits
    for (i, (gX, gY, gW, gH)) in enumerate(locs):
        # initialize the list of group digits
        groupOutput = []

        # extract the group ROI of 4 digits from the grayscale image,
        # then apply thresholding to segment the digits from the
        # background of the credit card
        group = gray[gY - 5:gY + gH + 5, gX - 5:gX + gW + 5]
        group = cv2.threshold(group, 0, 255,
                              cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

        # detect the contours of each individual digit in the group,
        # then sort the digit contours from left to right
        digitCnts = cv2.findContours(group.copy(), cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)
        digitCnts = imutils.grab_contours(digitCnts)
        digitCnts = contours.sort_contours(digitCnts,
                                           method="left-to-right")[0]

        # loop over the digit contours
        for c in digitCnts:
            # compute the bounding box of the individual digit, extract
            # the digit, and resize it to have the same fixed size as
            # the reference OCR-A images
            (x, y, w, h) = cv2.boundingRect(c)
            roi = group[y:y + h, x:x + w]
            roi = cv2.resize(roi, (57, 88))

            # initialize a list of template matching scores
            scores = []

            # loop over the reference digit name and digit ROI
            for (digit, digitROI) in digits.items():
                # apply correlation-based template matching, take the
                # score, and update the scores list
                result = cv2.matchTemplate(roi, digitROI, cv2.TM_CCOEFF)
                (_, score, _, _) = cv2.minMaxLoc(result)
                scores.append(score)

            # the classification for the digit ROI will be the reference
            # digit name with the *largest* template matching score
            groupOutput.append(str(np.argmax(scores)))
            #print(np.argmax(scores))
            #print(scores)

        # draw the digit classifications around the group
        cv2.rectangle(image, (gX - 5, gY - 5), (gX + gW + 5, gY + gH + 5),
                      (0, 0, 255), 2)
        cv2.putText(image, "".join(groupOutput), (gX, gY - 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
        cv2.imshow("Image", image)
        cv2.waitKey(0)

        # update the output digits list
        output.extend(groupOutput)

    # display the output credit card information to the screen
    #print("Credit Card Type: {}".format(FIRST_NUMBER[output[0]]))
    print("Rack Number: {}".format("".join(output)))
    cv2.imshow("Image", image)
    cv2.waitKey(0)
Example #12
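number_recognition() below relies on a DIGITS_LOOKUP table the listing never defines; the standard seven-segment mapping (segments ordered top, top-left, top-right, center, bottom-left, bottom-right, bottom) is a reasonable assumption:

# assumed seven-segment decoding table (1 = segment on)
DIGITS_LOOKUP = {
    (1, 1, 1, 0, 1, 1, 1): 0,
    (0, 0, 1, 0, 0, 1, 0): 1,
    (1, 0, 1, 1, 1, 0, 1): 2,
    (1, 0, 1, 1, 0, 1, 1): 3,
    (0, 1, 1, 1, 0, 1, 0): 4,
    (1, 1, 0, 1, 0, 1, 1): 5,
    (1, 1, 0, 1, 1, 1, 1): 6,
    (1, 0, 1, 0, 0, 1, 0): 7,
    (1, 1, 1, 1, 1, 1, 1): 8,
    (1, 1, 1, 1, 0, 1, 1): 9
}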
import cv2
import imutils
import numpy as np
from imutils import contours
from imutils.perspective import four_point_transform


def number_recognition():
    # load the example image
    image = cv2.imread("1.jpg")

    # pre-process the image by resizing it, converting it to
    # grayscale, blurring it, and computing an edge map
    image = imutils.resize(image, height=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(blurred, 50, 200, 255)

    # find contours in the edge map, then sort them by their
    # size in descending order
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    displayCnt = None

    # loop over the contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        # if the contour has four vertices, then we have found
        # the thermostat display
        if len(approx) == 4:
            displayCnt = approx
            break

    # extract the thermostat display, apply a perspective transform
    # to it
    warped = four_point_transform(gray, displayCnt.reshape(4, 2))
    output = four_point_transform(image, displayCnt.reshape(4, 2))

    # threshold the warped image, then apply a series of morphological
    # operations to cleanup the thresholded image

    # https://docs.opencv.org/3.4.0/d7/d4d/tutorial_py_thresholding.html

    # threshold = 3
    # kernel = 4
    # transform = 7

    thresh11 = cv2.threshold(warped, 0, 255,
                             cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    thresh12 = cv2.threshold(warped, 0, 255,
                             cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    thresh21 = cv2.adaptiveThreshold(warped,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
                cv2.THRESH_BINARY,11,2)
    thresh22 = cv2.adaptiveThreshold(warped,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
                cv2.THRESH_BINARY_INV,11,2)
    thresh31 = cv2.adaptiveThreshold(warped,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
                cv2.THRESH_BINARY,11,2)
    thresh32 = cv2.adaptiveThreshold(warped,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
                cv2.THRESH_BINARY_INV,11,2)

    thresh_list = {
        "thresh11": thresh11,
        "thresh12": thresh12,
        "thresh21": thresh21,
        "thresh22": thresh22,
        "thresh31": thresh31,
        "thresh32": thresh32,
    }

    # start from the raw thresholded variants; the morphological variants
    # are added below
    images_list = dict(thresh_list)

    # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 5))

    # Rectangular Kernel
    # kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(5,5))

    # Elliptical Kernel
    # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))

    # Cross-shaped Kernel
    # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(5,5))

    kernel = np.ones((5, 5), np.uint8)

    for thresh in thresh_list:
        images_list["erode" + thresh] = cv2.erode(thresh_list[thresh],
                                                  kernel,
                                                  iterations=1)
        images_list["dilate" + thresh] = cv2.dilate(thresh_list[thresh],
                                                    kernel,
                                                    iterations=1)
        images_list["opening" + thresh] = cv2.morphologyEx(
            thresh_list[thresh], cv2.MORPH_OPEN, kernel)
        images_list["closing" + thresh] = cv2.morphologyEx(
            thresh_list[thresh], cv2.MORPH_CLOSE, kernel)
        images_list["gradient" + thresh] = cv2.morphologyEx(
            thresh_list[thresh], cv2.MORPH_GRADIENT, kernel)
        images_list["tophat" + thresh] = cv2.morphologyEx(
            thresh_list[thresh], cv2.MORPH_TOPHAT, kernel)
        images_list["blackhat" + thresh] = cv2.morphologyEx(
            thresh_list[thresh], cv2.MORPH_BLACKHAT, kernel)

    # for image in images_list:
    #     cv2.imshow("1", images_list[image])
    #     print(image)
    # cv2.waitKey(0)

    for image in images_list:
        cnts = cv2.findContours(images_list[image].copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        digitCnts = []

        # loop over the digit area candidates
        for c in cnts:
            # compute the bounding box of the contour
            (x, y, w, h) = cv2.boundingRect(c)

            # if the contour is sufficiently large, it must be a digit
            if w >= 15 and (h >= 30 and h <= 40):
                digitCnts.append(c)

        # sort the contours from left-to-right, then initialize the
        # actual digits themselves
        try:
            digitCnts = contours.sort_contours(digitCnts,
                                               method="left-to-right")[0]
            digits = []

            # loop over each of the digits
            for c in digitCnts:
                # extract the digit ROI
                (x, y, w, h) = cv2.boundingRect(c)
                roi = images_list[image][y:y + h, x:x + w]

                # compute the width and height of each of the 7 segments
                # we are going to examine
                (roiH, roiW) = roi.shape
                (dW, dH) = (int(roiW * 0.25), int(roiH * 0.15))
                dHC = int(roiH * 0.05)

                # define the set of 7 segments
                segments = [
                    ((0, 0), (w, dH)),  # top
                    ((0, 0), (dW, h // 2)),  # top-left
                    ((w - dW, 0), (w, h // 2)),  # top-right
                    ((0, (h // 2) - dHC), (w, (h // 2) + dHC)),  # center
                    ((0, h // 2), (dW, h)),  # bottom-left
                    ((w - dW, h // 2), (w, h)),  # bottom-right
                    ((0, h - dH), (w, h))  # bottom
                ]
                on = [0] * len(segments)

                # loop over the segments
                for (i, ((xA, yA), (xB, yB))) in enumerate(segments):
                    # extract the segment ROI, count the total number of
                    # thresholded pixels in the segment, and then compute
                    # the area of the segment
                    segROI = roi[yA:yB, xA:xB]
                    total = cv2.countNonZero(segROI)
                    area = (xB - xA) * (yB - yA)

                    # if the total number of non-zero pixels is greater than
                    # 50% of the area, mark the segment as "on"
                    if total / float(area) > 0.5:
                        on[i] = 1

                # lookup the digit and draw it on the image
                digit = DIGITS_LOOKUP[tuple(on)]
                digits.append(digit)
        except Exception:
            digits = 0

        scale_percent = 600  # percent of original size
        width = int(images_list[image].shape[1] * scale_percent / 100)
        height = int(images_list[image].shape[0] * scale_percent / 100)
        dim = (width, height)
        # resize image
        resized = cv2.resize(images_list[image],
                             dim,
                             interpolation=cv2.INTER_AREA)

        cv2.imwrite("after_images/" + image + '.png', resized)

        images_list[image] = None
        images_list[image] = digits

    return images_list
Example #13
    def read_numbers(x, y, w, h, max_digits=5):
        """ Method to OCR numbers from the `screen` image (defined in the
            enclosing scope). Returns int.
        """
        text = []

        crop = screen[y: y + h, x: x + w]
        crop = cv2.resize(crop, None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC)

        # binarize with Otsu's threshold
        thresh = cv2.threshold(crop, 0, 255, cv2.THRESH_OTSU)[1]
        # cv2.imwrite('thresh1.png', thresh)

        # find contours in the thresholded image
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        cnts = grab_contours(cnts)
        cnts = contours.sort_contours(cnts, method="left-to-right")[0]
        if len(cnts) > max_digits:
            return 0

        # loop over each digit
        for c in cnts:
            scores = []

            # compute the bounding box of the contour
            (x, y, w, h) = cv2.boundingRect(c)

            # draw boxes, for debugging
            # cv2.rectangle(thresh, (x, y), (x + w, y + h), (255, 255, 255), 2)
            # cv2.imshow("crop", thresh)
            # cv2.waitKey()

            # extract the ROI
            roi = thresh[y: y + h, x: x + w]
            # cv2.imwrite(f"{v}.png", roi)
            # cv2.imshow("crop", roi)
            # cv2.waitKey()

            # compute the padding needed for the width and the height
            row, col = roi.shape[:2]

            width = round(abs((50 - col)) / 2) + 5
            height = round(abs((94 - row)) / 2) + 5

            # pad the ROI borders
            resized = cv2.copyMakeBorder(
                roi, top=height, bottom=height, left=width, right=width, borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0]
            )

            # cv2.imshow("resized", resized)
            # cv2.waitKey()

            for x in range(0, 10):
                template = cv2.imread("assets/number/{}.png".format(x), 0)
                result = cv2.matchTemplate(resized, template, cv2.TM_CCOEFF_NORMED)
                (_, score, _, _) = cv2.minMaxLoc(result)
                scores.append(score)

            # take the index of the highest score
            text.append(str(numpy.argmax(scores)))

        text = "".join(text)
        return int(text)
Example #14
import cv2
import imutils
import numpy as np
from imutils import contours, perspective

# ShapeDetector, determinateArea and the mm scale constant come from the
# surrounding project
def calculate_areas():
    widthObject = float(input("Enter the length of the object: "))
    image = cv2.imread("Pieces/imagenes/pieces.jpg")
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    cv2.imshow("Gray", gray)
    edged = cv2.Canny(gray, 50, 100)
    edged = cv2.dilate(edged, None, iterations=1)
    edged = cv2.erode(edged, None, iterations=1)
    # Show the image
    cv2.imshow("Edges", edged)
    cv2.waitKey(0)
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    (cnts, _) = contours.sort_contours(cnts, method="left-to-right")
    sd = ShapeDetector()
    generalArea = 0
    # Get the area of the object
    for c in cnts:
        if cv2.contourArea(c) < 200:
            continue
        box = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
        box = np.array(box, dtype="int")
        # Order the contour points so that they appear in the following
        # order: top-left corner, top-right corner, bottom-left corner
        # and bottom-right corner.
        box = perspective.order_points(box)
        # Top-left corner
        cXSI = box[0, 0]
        cYSI = box[0, 1]
        # Top-right corner
        cYSD = box[1, 1]
        # Bottom-right corner
        cXID = box[2, 0]
        cYID = box[2, 1]
        # Bottom-left corner
        cXII = box[0, 0]
        width2 = cXID - cXII
        height2 = cYID - cYSD
        height2 += cYSI
        width2 += cXSI
        roi = gray[int(cYSI - 5):int(height2 + 5),
                   int(cXSI - 5):int(width2 + 5)]
    cv2.imshow("ROI", roi)
    blurred = cv2.GaussianBlur(roi, (3, 3), 0)
    ret, thresh = cv2.threshold(blurred, 0, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    ret, threshInv = cv2.threshold(blurred, 0, 255,
                                   cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Thresh Inv", threshInv)

    negativeArea = determinateArea(threshInv, -1, (0, 0, 0), image)
    normalArea = determinateArea(thresh, -1, (255, 255, 255), image)
    newW = roi.shape[1] - 1
    ppmm = newW / (widthObject * mm)
    height = widthObject / (roi.shape[1] / roi.shape[0])
    print(roi.shape)
    print("Height: ", height)
    areaPixels = roi.shape[0] * roi.shape[1]
    areaMM = ppmm**2
    print("PPmm: ", ppmm)
    cv2.drawContours(image, [box.astype("int")], -1, (255, 255, 0), 2)
    generalArea = normalArea + negativeArea
    irregularArea = generalArea - negativeArea
    print("Area en mm: ", areaMM)
    print("Area en pixeles: ", areaPixels / areaMM)
    print("Area Total: ", generalArea / areaMM)
    print("Area Negativa: ", negativeArea / areaMM)
    print("Irregular area: ", irregularArea / areaMM)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
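
# The unit arithmetic above is easy to misread: ppmm is pixels per mm, so a
# pixel *area* converts to mm^2 by dividing by ppmm squared. A worked example
# (all values illustrative):
ppmm_demo = 4.0                       # 4 px per mm along one axis
area_px_demo = 1600.0                 # a 40 x 40 px region
print(area_px_demo / ppmm_demo ** 2)  # 100.0 mm^2, i.e. a 10 x 10 mm square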
Exemple #15
def measure_dim(loc):

    # ap = argparse.ArgumentParser()
    # ap.add_argument("-i", "--image", required = True, help="path to input image")
    # ap.add_argument("-w", "--width", type=float, required=True, help="width of the object")
    # args = vars(ap.parse_args())
    image = cv2.imread(loc)
    # image = cv2.imread(args["image"])
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (7, 7), 0)

    edged = cv2.Canny(gray, 50, 100)
    edged = cv2.dilate(edged, None, iterations=1)
    edged = cv2.erode(edged, None, iterations=1)

    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    (cnts, _) = contours.sort_contours(cnts)
    pixelsPerMetric = None

    for c in cnts:
        if cv2.contourArea(c) < 100:
            continue
        orig = image.copy()
        box = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
        box = np.array(box, dtype="int")
        box = perspective.order_points(box)
        cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 255), 2)

    # note: box and orig here hold the values from the last contour above
    for (x, y) in box:
        cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)

    (tl, tr, br, bl) = box
    (tltrX, tltrY) = midpoint(tl, tr)
    (blbrX, blbrY) = midpoint(bl, br)
    (tlblX, tlblY) = midpoint(tl, bl)
    (trbrX, trbrY) = midpoint(tr, br)

    #Draw the midpoints on the image
    cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)

    # draw lines between the midpoints
    cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
             (255, 255, 255), 2)
    cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
             (255, 255, 255), 2)
    #compute the Euclidean distance between midpoints
    dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
    dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
    # initialize pixelsPerMetric if it has not been established yet
    if pixelsPerMetric is None:
        pixelsPerMetric = dB / 750
    dimA = dA / pixelsPerMetric
    dimB = dB / pixelsPerMetric
    # compute and draw the final object size
    cv2.putText(orig, "{:.1f} feet".format(dimA * 10),
                (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
                0.65, (255, 0, 0), 2)
    cv2.putText(orig, "{:.1f} feet".format(dimB),
                (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX, 0.65,
                (255, 0, 0), 2)
    area = dimA * dimB
    dims = area
    print(f'The dims: {dims}')
    return dims
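
# measure_dim assumes a midpoint helper; the same one-liner that a later
# example on this page defines inline works here:
def midpoint(ptA, ptB):
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)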
Exemple #16
	"T", "U", "A", "D"]
 
# load the reference MICR image from disk, convert it to grayscale,
# and threshold it, such that the digits appear as *white* on a
# *black* background
ref = cv2.imread(args["reference"])
ref = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)
ref = imutils.resize(ref, width=400)
ref = cv2.threshold(ref, 0, 255, cv2.THRESH_BINARY_INV |
	cv2.THRESH_OTSU)[1]
# find contours in the MICR image (i.e., the outlines of the
# characters) and sort them from left to right
refCnts = cv2.findContours(ref.copy(), cv2.RETR_EXTERNAL,
	cv2.CHAIN_APPROX_SIMPLE)
refCnts = imutils.grab_contours(refCnts)
refCnts = contours.sort_contours(refCnts, method="left-to-right")[0]
# extract the digits and symbols from the list of contours, then
# initialize a dictionary to map the character name to the ROI
refROIs = extract_digits_and_symbols(ref, refCnts,
	minW=10, minH=20)[0]
chars = {}
 
# loop over the reference ROIs
for (name, roi) in zip(charNames, refROIs):
	# resize the ROI to a fixed size, then update the characters
	# dictionary, mapping the character name to the ROI
	roi = cv2.resize(roi, (36, 36)) 
	chars[name] = roi
# initialize a rectangular kernel (wider than it is tall) along with
# an empty list to store the output of the check OCR
rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 7))
# loop over the contours
for c in cnts:
	# compute the bounding box of the contour, then use the
	# bounding box to derive the aspect ratio
	(x, y, w, h) = cv2.boundingRect(c)
	ar = w / float(h)

	# in order to label the contour as a question, the region
	# should be sufficiently wide, sufficiently tall, and
	# have an aspect ratio approximately equal to 1
	if w >= 20 and h >= 20 and ar >= 0.9 and ar <= 1.1:
		questionCnts.append(c)

# sort the question contours top-to-bottom, then initialize
# the total number of correct answers
questionCnts = contours.sort_contours(questionCnts,
	method="top-to-bottom")[0]
correct = 0

# each question has 5 possible answers, so loop over the
# questions in batches of 5
for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
	# sort the contours for the current question from
	# left to right, then initialize the index of the
	# bubbled answer
	cnts = contours.sort_contours(questionCnts[i:i + 5])[0]
	bubbled = None

	# loop over the sorted contours
	for (j, c) in enumerate(cnts):
		# construct a mask that reveals only the current
		# "bubble" for the question
def image_callback(ros_image):
    print('got an image')
    global bridge, midpoint

    #convert ros_image into an opencv image

    frame0 = bridge.imgmsg_to_cv2(ros_image, "bgr8")
    frame1 = bridge.imgmsg_to_cv2(ros_image, "bgr8")
    frame2 = bridge.imgmsg_to_cv2(ros_image, "bgr8")
    frame3 = bridge.imgmsg_to_cv2(ros_image, "bgr8")

    (h, w, d) = frame1.shape
    # print(frame1.shape)
    pts = deque(maxlen=args["buffer"])
    counter = 0
    (dX, dY) = (0, 0)
    direction = ""

    time.sleep(1.0 / 42)
    cv2.rectangle(frame0, (0, 0), (25, 25), (0, 0, 255), 2)

    diff_size = cv2.absdiff(frame0, frame3)

    diff_coordinates = cv2.absdiff(frame1, frame2)

    #image processing for coordinates
    gray = cv2.cvtColor(diff_coordinates, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=1)

    # cv2.imshow("imaged dilated", dilated.copy())
    # cv2.imshow("frame1 copy", frame1.copy())

    # image processing for SIZE
    gray_size = cv2.cvtColor(diff_size, cv2.COLOR_BGR2GRAY)
    blur_size = cv2.GaussianBlur(gray_size, (5, 5), 0)
    _, thresh_size = cv2.threshold(blur_size, 20, 255, cv2.THRESH_BINARY)
    dilated_size = cv2.dilate(thresh_size, None, iterations=1)

    # cv2.imshow("dilated size", dilated_size)

    edged = cv2.Canny(diff_size, 50, 100)
    edged = cv2.dilate(edged, None, iterations=1)
    edged = cv2.erode(edged, None, iterations=1)

    # cv2.imshow("edged copy", edged.copy())

    # HERE THERE ARE TWO CONTOUR CAPTURING METHODS. CON FOR THE COORDINATES AND CNTS FOR THE SIZE

    _, con, _ = cv2.findContours(dilated, cv2.RETR_TREE,
                                 cv2.CHAIN_APPROX_SIMPLE)

    # this handles the contours for the size. note the contours need to be sorted so that we can use our square as ref

    # compared edged to dilated_size and dilated_size performs better
    cnts = cv2.findContours(dilated_size, cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    (cnts, _) = contours.sort_contours(cnts)

    # cv2.imshow("image edged,", edged)

    pixelsPerMetric = None

    for (i, c) in enumerate(cnts):

        if cv2.contourArea(c) < 250:
            continue

        box = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)

        for (x, y) in box:
            cv2.circle(frame1, (int(x), int(y)), 2, (0, 0, 255), -1)

        (tl, tr, br, bl) = box
        (tltrX, tltrY) = midpoint(tl, tr)
        (blbrX, blbrY) = midpoint(bl, br)

        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)

        cv2.circle(frame1, (int(tltrX), int(tltrY)), 1, (255, 0, 0), -1)
        cv2.circle(frame1, (int(blbrX), int(blbrY)), 1, (255, 0, 0), -1)
        cv2.circle(frame1, (int(tlblX), int(tlblY)), 1, (255, 0, 255), -1)
        cv2.circle(frame1, (int(trbrX), int(trbrY)), 1, (255, 0, 255), -1)

        dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))  # height in pixels
        dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))  # width in pixels

        if pixelsPerMetric is None:
            # pixelsPerMetric = dB / args["width"]
            pixelsPerMetric = 0.22  # the top-right reference contour is about a quarter inch square; this was found by measuring ~106 px per inch
        dimA = dA / pixelsPerMetric  # pixels divided by the approximate pixels-per-inch of the reference contour
        dimB = dB / pixelsPerMetric

        appx_area = dA * dB

        cv2.putText(frame1, '{:.1f}" px in x'.format(dimB),
                    (int(tltrX - 15), int(tltrY - 10)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)
        cv2.putText(frame1, '{:.1f}" px in y'.format(dimA),
                    (int(trbrX - 120), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
                    0.4, (0, 0, 255), 1)
        cv2.putText(frame1, "{:.1f} pxsq".format(appx_area),
                    (int(trbrX - 140), int(trbrY + 20)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)

        # below is for coordinates
    if len(con) > 0:

        c = max(con, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        if radius > 0:

            cv2.circle(frame1, center, 1, (0, 0, 255), -1)
            pts.appendleft(center)

    for c in con:
        M = cv2.moments(c)

        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])

        ex = (Decimal(cX) - Decimal(frame_x)) / Decimal(frame_x)
        why = -1 * (Decimal(cY) - Decimal(frame_y)) / Decimal(frame_y)

        (x, y, w, h) = cv2.boundingRect(c)

        if cv2.contourArea(c) < 250:
            continue
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame1, 'UTEP: {}'.format('DETECTED'), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 69, 255), 3)
        cv2.putText(frame1, "dx: {},        dy: {}".format(ex, why),
                    (10, frame1.shape[0] - 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 255, 0), 3)
        cv2.putText(frame1, "dx: {},        dy: {}".format(cX, cY),
                    (10, frame1.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 255, 0), 3)

        for i in np.arange(1, len(pts)):
            if pts[i - 1] is None or pts[i] is None:
                continue

            if counter >= 10 and i == 1 and pts[-10] is not None:
                dX = pts[-10][0] - pts[i][0]
                dY = pts[-10][1] - pts[i][1]
                (dirX, dirY) = ("", "")

                if np.abs(dX) > 20:
                    dirX = "east" if np.sign(dX) == 1 else "west"

                if np.abs(dY) > 20:
                    dirY = "Nort" if np.sign(dY) == 1 else "south"

                if dirX != "" and dirY != "":
                    direction = "{}-{}.format" (dirY, dirX)

                else:
                    direction = dirX if dirX != "" else dirY
            thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 1.5)
            cv2.line(frame1, pts[i - 1], pts[i], (255, 0, 0), thickness)
    # out.write(frame1)
    cv2.imshow('feed', frame1)

    frame1 = frame2
    frame0 = frame3
    frame2 = bridge.imgmsg_to_cv2(ros_image, "bgr8")
    frame3 = bridge.imgmsg_to_cv2(ros_image, "bgr8")
    #
    # cv2.imshow("image window", frame1)
    cv2.waitKey(3)
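
# The callback above interleaves motion detection, sizing and drawing; its
# motion core is plain frame differencing. A minimal sketch of that core,
# assuming two consecutive BGR frames (names are illustrative):
import cv2

def motion_mask(frame_a, frame_b):
    diff = cv2.absdiff(frame_a, frame_b)              # per-pixel difference
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)          # suppress sensor noise
    _, mask = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    return cv2.dilate(mask, None, iterations=1)       # fill small gaps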
Exemple #19
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

# loop over the (unsorted) contours and label them
for (i, c) in enumerate(cnts):
    orig = contours.label_contour(orig, c, i, color=(240, 0, 159))

# show the original image
cv2.imshow("Original", orig)

# loop over the sorting methods
for method in ("left-to-right", "right-to-left", "top-to-bottom",
               "bottom-to-top"):
    # sort the contours
    (cnts, boundingBoxes) = contours.sort_contours(cnts, method=method)
    clone = image.copy()

    # loop over the sorted contours and label them
    for (i, c) in enumerate(cnts):
        sortedImage = contours.label_contour(clone, c, i, color=(240, 0, 159))

    # show the sorted contour image
    cv2.imshow(method, sortedImage)

# wait for a keypress
cv2.waitKey(0)

# grab the list of image paths
from imutils import paths
Exemple #20
	image = cv2.imread(imagePath)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	gray = cv2.copyMakeBorder(gray, 20, 20, 20, 20,
		cv2.BORDER_REPLICATE)

	# threshold the image to reveal the digits
	thresh = cv2.threshold(gray, 0, 255,
		cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

	# find contours in the image, keeping only the four largest ones,
	# then sort them from left-to-right
	cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
		cv2.CHAIN_APPROX_SIMPLE)
	cnts = imutils.grab_contours(cnts)
	cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:4]
	cnts = contours.sort_contours(cnts)[0]

	# initialize the output image as a "grayscale" image with 3
	# channels along with the output predictions
	output = cv2.merge([gray] * 3)
	predictions = []

	# loop over the contours
	for c in cnts:
		# compute the bounding box for the contour then extract the
		# digit
		(x, y, w, h) = cv2.boundingRect(c)
		roi = gray[y - 5:y + h + 5, x - 5:x + w + 5]

		# pre-process the ROI, then classify it
		roi = preprocess(roi, 28, 28)
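
# preprocess() is not shown in this fragment; a common implementation (a
# sketch under that assumption, not necessarily this author's) resizes the
# ROI along its larger dimension, pads it square, then resizes to w x h:
def preprocess(image, width, height):
	(h, w) = image.shape[:2]
	if w > h:
		image = imutils.resize(image, width=width)
	else:
		image = imutils.resize(image, height=height)
	padW = int((width - image.shape[1]) / 2.0)
	padH = int((height - image.shape[0]) / 2.0)
	image = cv2.copyMakeBorder(image, padH, padH, padW, padW,
		cv2.BORDER_REPLICATE)
	return cv2.resize(image, (width, height))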
Exemple #21
    # ref = cv2.imread(micrpath + micrfile, 0)
    ref = cv2.imread(
        "D:\\iC4_Pro_Project\\ic4_pro_ocr\\micrfolder\\micr_e13b_reference.png"
    )
    # ref = cv2.imread(micrfile)
    ref = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)
    ref = imutils.resize(ref, width=400)
    ref = cv2.threshold(ref, 0, 255,
                        cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

    # find contours in the MICR image (i.e., the outlines of the
    # characters) and sort them from left to right
    refCnts = cv2.findContours(ref.copy(), cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)
    refCnts = imutils.grab_contours(refCnts)
    refCnts = contours.sort_contours(refCnts, method="left-to-right")[0]

    # extract the digits and symbols from the list of contours, then
    # initialize a dictionary to map the character name to the ROI
    refROIs = extract_digits_and_symbols(ref, refCnts, minW=10, minH=20)[0]
    chars = {}

    # loop over the reference ROIs
    for (name, roi) in zip(charNames, refROIs):
        # resize the ROI to a fixed size, then update the characters
        # dictionary, mapping the character name to the ROI
        roi = cv2.resize(roi, (36, 36))
        chars[name] = roi
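
# extract_digits_and_symbols is assumed but not shown. A deliberately
# simplified stand-in (real MICR symbols span several contour parts, which
# the full routine merges; this sketch treats every contour as one
# character), matching the (rois, locs) return shape that [0] indexes:
def extract_rois(image, cnts, minW=10, minH=20):
    rois, locs = [], []
    for c in cnts:
        (x, y, w, h) = cv2.boundingRect(c)
        if w >= minW and h >= minH:
            rois.append(image[y:y + h, x:x + w])
            locs.append((x, y, w, h))
    return rois, locs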
Exemple #22
def get_plant_height(image, zoom_ratio):
    image_height = get_height(image)
    image_width = get_width(image)

    # define ranges of orange, purple and green in HSV

    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hsv2 = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hsv3 = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower_orange = np.array([11, 43, 46])
    upper_orange = np.array([26, 255, 255])
    lower_purple = np.array([100, 50, 50])
    upper_purple = np.array([200, 200, 200])
    lower_green = np.array([30, 200, 100])
    upper_green = np.array([100, 255, 190])
    mask = cv2.inRange(hsv, lower_orange, upper_orange)
    mask2 = cv2.inRange(hsv2, lower_green, upper_green)
    mask3 = cv2.inRange(hsv3, lower_purple, upper_purple)
    res = cv2.add(mask, mask3)

    cv2.imshow('res', res)
    a = np.zeros(get_width(image))
    stem_top = []
    stem_bottom = []
    cv2.imshow('image', image)

    # corp_threshold = int(560 * (4 / 5))
    # print(a)
    # mask[0:corp_threshold] = a
    # mask[550:560] = a

    if zoom_ratio < 0.25:
        mask = cv2.erode(mask, None, iterations=3)
        mask = cv2.dilate(mask, None, iterations=2)
        j = 0
        cv2.imshow('mask', mask)
        for i in range(image_height):
            for k in range(image_width):
                if mask[i][k] == 255:
                    stem_top = [k, i]

                    # print(stem_top)

                    j = 1
                    break
            if j == 1:
                break
        stem_bottom = [145, 535]
        orig = image.copy()
    else:

        kernel = np.ones((10, 1), np.uint8)  # 1, 13

        # cv2.imshow("mask2", mask)

        res = cv2.erode(res, None, iterations=1)
        res[0:100] = a
        res[493:image_height] = a

        # print(res.shape)

        cv2.imshow('res1', res)
        res = cv2.erode(res, kernel, iterations=1)
        cv2.imshow('res2', res)
        kernel2 = np.ones((10, 1), np.uint8)
        res = cv2.dilate(res, kernel2, iterations=2)
        cv2.imshow('res2', res)
        j = 0
        for i in range(image_height):
            for k in range(image_width):
                if res[i][k] == 255:
                    stem_top = [k, i]

                    # print(stem_top)

                    j = 1
                    break
            if j == 1:
                break

        mask2[0:445] = a
        kernel = np.ones((1, 10), np.uint8)
        mask2 = cv2.erode(mask2, kernel, iterations=1)

        # cv2.imshow("mask", mask2)

        mask2 = cv2.dilate(mask2, kernel, iterations=2)

        # cv2.imshow("mask1", mask2)

        cnts = cv2.findContours(mask2.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        (cnts, _) = contours.sort_contours(cnts)
        for c in cnts:
            if cv2.contourArea(c) < 10:
                continue
            orig = image.copy()
            box = cv2.minAreaRect(c)
            box = \
                (cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box))
            box = np.array(box, dtype='int')
            box = perspective.order_points(box)
            cv2.drawContours(orig, [box.astype('int')], -1, (0, 255, 0), 2)
            (tl, tr, br, bl) = box

            midX = (tl[0] + tr[0]) / 2
            midY = (tl[1] + bl[1]) / 2
            stem_bottom = [midX, midY]

    plant_height = dist.euclidean((stem_top[0], stem_top[1]),
                                  (stem_bottom[0], stem_bottom[1]))
    real_plant_height = plant_height * zoom_ratio
    real_plant_height = round(real_plant_height, 2)
    cv2.line(orig, (int(stem_top[0]), int(stem_top[1])),
             (int(stem_bottom[0]), int(stem_bottom[1])), (0, 0, 255), 2)
    cv2.putText(
        orig,
        '{:.2f}'.format(real_plant_height),
        (int(stem_top[0]), int(stem_top[1])),
        cv2.FONT_HERSHEY_SIMPLEX,
        0.65,
        (0, 0, 0),
        2,
    )
    cv2.imshow('orig', orig)
    cv2.waitKey(0)
    return real_plant_height
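
# The nested pixel scan in get_plant_height can be vectorized; a sketch that
# finds the same topmost white pixel (leftmost within the topmost row, since
# np.nonzero walks the mask in row-major order):
import numpy as np

def find_stem_top(mask):
    ys, xs = np.nonzero(mask)        # coordinates of all white pixels
    if len(ys) == 0:
        return None
    return [int(xs[ys.argmin()]), int(ys.min())]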
def detectHeight(image_path, height):
    # Load image
    image = cv2.imread(image_path)
    #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # Converts to grayscale
    #gray = cv2.GaussianBlur(image, (7, 7), 0) # Blur image slightly

    # Perform edge detection
    edged = cv2.Canny(image, 50, 100)

    # Perform dilation and erosion to close gaps between object edges
    edged = cv2.dilate(edged, None, iterations=1)
    edged = cv2.erode(edged, None, iterations=1)

    # Find contours in edge map
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    # Sort the contours from left-to-right
    # sort_contours raises on an empty contour list, so bail out early
    try:
        (cnts, _) = contours.sort_contours(cnts)
    except Exception:
        return 0
    pixelsPerMetric = None

    heights = []

    # Loop over each contour
    for c in cnts:
        # Ignore if the contour is not sufficiently large
        if cv2.contourArea(c) < 100:
            continue

        # Compute rotated bounding box of contour
        orig = image.copy()
        box = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
        box = np.array(box, dtype="int")

        # Order the points in the contour such that they appear
        # in top-left, top-right, bottom-right, and bottom-left order,
        box = perspective.order_points(box)

        # Unpack the ordered bounding box, then compute the midpoint
        # between the top-left and top-right coordinates and
        # the midpoint between bottom-left and bottom-right coordinates
        (tl, tr, br, bl) = box
        (tltrX, tltrY) = midpoint(tl, tr)
        (blbrX, blbrY) = midpoint(bl, br)

        # Compute the midpoint between the top-left and top-right points
        # and the midpoint between the top-right and bottom-right
        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)

        # Compute the Euclidean distance between the midpoints
        dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
        dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))

        # If the pixels per metric has not been initialized, then
        # compute it as the ratio of pixels to centimetres
        if pixelsPerMetric is None:
            pixelsPerMetric = dA / float(height)

        # Compute the size of the object
        dimA = dA / pixelsPerMetric  # height
        dimB = dB / pixelsPerMetric  # width

        heights.append(dimA)

        # Draw outline of the rotated bounding box
        # cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)

        # # Loop over the points and draw them
        # for (x, y) in box:
        #     cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)

        # # Draw the midpoints on the image
        # cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
        # cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
        # cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
        # cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)

        # # Draw lines between the midpoints
        # cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
        #     (255, 0, 255), 2)
        # cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
        #     (255, 0, 255), 2)

        # # Draw the object sizes on the image
        # cv2.putText(orig, "{:.1f}cm".format(dimA),
        #     (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
        #     0.65, (255, 255, 255), 2)
        # cv2.putText(orig, "{:.1f}cm".format(dimB),
        #     (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
        #     0.65, (255, 255, 255), 2)

        # # Show the output image
        # cv2.imshow("Image", orig)
        # cv2.waitKey(0)

    # Currently returns height of tallest object (excluding first item)
    if (len(heights) == 0):
        return 0
    elif (len(heights) == 1):
        return heights[0]
    else:
        return max(heights[1:])
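
# A quick usage sketch for detectHeight; the path and reference height are
# illustrative. If the reference object is 10.0 cm tall and spans 200 px,
# pixelsPerMetric becomes 20 px/cm, so a 340 px object reports as 17.0 cm.
tallest_cm = detectHeight("photo_with_reference.jpg", 10.0)
print(tallest_cm)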
Exemple #24
def draw_inflorescence(res, zoom_ratio):
    # gray scale

    gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    # Gaussian filter

    gray = cv2.GaussianBlur(gray, (7, 7), 0)
    # detect the edge

    edged = cv2.Canny(gray, 50, 100)
    # close the gap between edges

    edged = cv2.dilate(edged, None, iterations=1)
    edged = cv2.erode(edged, None, iterations=1)
    # find contour of the object

    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # sort the contour from left to right

    (cnts, _) = contours.sort_contours(cnts)

    # initialize 'pixels per metric'

    pixelsPerMetric = None

    # Loop through each contour

    for c in cnts:
        # If the area of the current contour is too small, consider it may be noise, and ignore it

        if cv2.contourArea(c) < 50:
            continue
        # Calculate the outcut rectangle according to the contour of the object

        orig = res.copy()  # work on a copy of the input image
        box = cv2.minAreaRect(c)
        box = \
            (cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box))
        box = np.array(box, dtype='int')

        # Sort the contour points in top-left, top-right, bottom-right,
        # bottom-left order, then draw the bounding box (red, 1 px)

        box = perspective.order_points(box)
        cv2.drawContours(orig, [box.astype('int')], -1, (0, 0, 255), 1)

        # Draw the four vertices of BB, represented by small red circles

        for (x, y) in box:
            cv2.circle(orig, (int(x), int(y)), 1, (0, 0, 255), -1)

        # Compute the midpoints of the top edge (tl-tr) and the bottom
        # edge (bl-br)

        (tl, tr, br, bl) = box
        (tltrX, tltrY) = midpoint(tl, tr)
        (blbrX, blbrY) = midpoint(bl, br)

        # Compute the midpoints of the left edge (tl-bl) and the right edge (tr-br)

        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)

        # Draw the center point of the four edges of BB, represented by a small blue circle
        # cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
        # cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
        # cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
        # cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)

        # Draw a line between the center points, indicated by a magenta line
        # cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
        #          (255, 0, 255), 2)
        # cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
        #          (255, 0, 255), 2)

        # Compute the Euclidean distance between midpoints, i.e. the object's height and width in pixels

        height = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
        width = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))

        # Initialize the measurement scale: the reference object's width in
        # pixels is known from the Euclidean distance, and its actual size is known

        # if pixelsPerMetric is None:
        #     pixelsPerMetric = dB / args["width"]

        # Calculate the actual size (width and height) of the target via the zoom ratio

        real_height = round(height * zoom_ratio, 2)
        real_width = round(width * zoom_ratio, 2)

        # Draw the result in the image

        cv2.putText(
            orig,
            '{:.1f}'.format(real_width),
            (int(tltrX - 15), int(tltrY - 10)),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.65,
            (0, 0, 0),
            2,
        )
        cv2.putText(
            orig,
            '{:.1f}'.format(real_height),
            (int(trbrX + 10), int(trbrY)),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.65,
            (0, 0, 0),
            2,
        )

        # show result
        cv2.imshow('Orig', orig)
        cv2.waitKey(0)
    return (real_height, real_width)
Exemple #25
def getSealInputArray():
    features_array = []
    featuresarray = []
    root = tk.Tk()
    file_path = ['E:\Level4_Project\WritingFeatures.csv']
    file_path1 = filedialog.askopenfilenames(parent=root, title='Choose a file')
    print(file_path1)
    file_path = root.tk.splitlist(file_path1)
    # print (file_path)
    for imagePath in file_path:
        print(imagePath)
        img = cv2.imread(imagePath)
        if img is None:
            print("Enter Image")
        def midpoint(ptA, ptB):
            return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)
        # construct the argument parse and parse the arguments
        ap = argparse.ArgumentParser()
        args = vars(ap.parse_args())
        # load the image, convert it to grayscale, and blur it slightly
        # ------------------------------------------------------------------------------------------image = cv2.imread(args["image"])
        image = cv2.imread(imagePath)
        # cv2.namedWindow("Input Image", cv2.WINDOW_NORMAL)
        # cv2.imshow("Input Image", image)
        # cv2.waitKey(0)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # cv2.namedWindow("Gray Image", cv2.WINDOW_NORMAL)
        # cv2.imshow("Gray Image", gray)
        # cv2.waitKey(0)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)
        # cv2.namedWindow("GaussianBlur Image", cv2.WINDOW_NORMAL)
        # cv2.imshow("GaussianBlur Image", gray)
        # cv2.waitKey(0)
        ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        th2 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \
                                    cv2.THRESH_BINARY, 11, 2)
        # cv2.namedWindow("Adaptive Threshold Image", cv2.WINDOW_NORMAL)
        # cv2.imshow("Adaptive Threshold Image", th2)
        # cv2.waitKey(0)
        th3 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
                                    cv2.THRESH_BINARY, 11, 2)
        # cv2.namedWindow("Adaptive Threshold GAUSSIAN Image", cv2.WINDOW_NORMAL)
        # cv2.imshow("Adaptive Threshold GAUSSIAN Image", th3)
        # cv2.waitKey(0)
        # perform edge detection, then perform a dilation + erosion to
        # close gaps in between object edges
        edged = cv2.Canny(gray, 50, 100)
        # cv2.namedWindow("Edge Detection Image", cv2.WINDOW_NORMAL)
        # cv2.imshow("Edge Detection Image", edged)
        # cv2.waitKey(0)
        edged = cv2.dilate(edged, None, iterations=1)
        # cv2.namedWindow("Dilate Image", cv2.WINDOW_NORMAL)
        # cv2.imshow("Dilate Image", edged)
        # cv2.waitKey(0)
        edged = cv2.erode(edged, None, iterations=1)
        # cv2.namedWindow("Erode Image", cv2.WINDOW_NORMAL)
        # cv2.imshow("Erode Image", edged)
        # cv2.waitKey(0)
        # find contours in the edge map
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        # sort the contours from left-to-right and initialize the
        # 'pixels per metric' calibration variable
        (cnts, _) = contours.sort_contours(cnts)
        pixelsPerMetric = None
        # loop over the contours individually
        # print(cnts)
        for c in cnts:
            # if the contour is not sufficiently large, ignore it
            if cv2.contourArea(c) < 1000:
                continue
            area = cv2.contourArea(c)
            Perimeter = cv2.arcLength(c, True)
            M = cv2.moments(c)
            centroid_x = int(M['m10'] / M['m00'])
            centroid_y = int(M['m01'] / M['m00'])
            x, y, w, h = cv2.boundingRect(c)
            aspect_ratio = float(w) / h
            # Extent is the ratio of contour area to bounding rectangle area.
            rect_area = w * h
            extent = float(area) / rect_area
            hull = cv2.convexHull(c)
            hull_area = cv2.contourArea(hull)
            solidity = float(area) / hull_area
            # Equivalent Diameter is the diameter of the circle whose area is same as the contour area.
            equi_diameter = np.sqrt(4 * area / np.pi)
            # the angle at which object is directed.
            (x, y), (MA, ma), angle = cv2.fitEllipse(c)
            mask = np.zeros(edged.shape, np.uint8)
            cv2.drawContours(mask, [c], 0, 255, -1)
            pixelpoints = np.transpose(np.nonzero(mask))
            approx = cv2.approxPolyDP(c, 0.1 * cv2.arcLength(c, True), True)
            # features_array = []
            print("Contour Area:", area)
            f1 = round(area, 8)
            features_array.append(f1)

            f2 = round(Perimeter, 8)
            print("Contour Perimeter:", Perimeter)
            features_array.append(f2)

            f3 = round(centroid_x, 8)
            print("Cenrtoid x:", f3)

            f4 = round(centroid_y, 8)
            # features_array.append(centroid_x)
            print("Cenrtoid y:", f4)

            f5 = round(aspect_ratio, 8)
            # features_array.append(centroid_y)
            print("Aspect Ratio(The ratio of width to height):", aspect_ratio)
            features_array.append(f5)

            f6 = round(extent, 8)
            print("Extent(The Ratio of contour area to bounding rectangle area):", extent)
            features_array.append(f6)

            f7 = round(hull_area, 8)
            print("Hull Area(The minimum set of points that define a polygon containing all the points):",
                  hull_area)
            features_array.append(f7)

            f8 = round(solidity, 8)
            print("Solidity Area(The ratio of contour area to its convex hull area):", solidity)
            features_array.append(f8)

            f9 = round(equi_diameter, 8)
            print("Equivalent Diameter(The diameter of the circle whose area is same as the contour area):",
                  equi_diameter)
            features_array.append(f9)

            f10 = round(angle, 8)
            print("Orientation(The angle at which object is directed):", angle)
            features_array.append(f10)
            # print("Pixel Points(All the points which comprises that object):", pixelpoints)
            # print("approximation Epsilon:", approx)
            f11 = round(rect_area, 8)
            features_array.append(f11)

            orig = image.copy()
            for i, _ in enumerate(c):
                rect = cv2.boundingRect(c)
                # box = cv2.minAreaRect(c)
                x, y, w, h = rect
                box = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
                cropped = image[y: y + h, x: x + w]
                # cv2.imshow("Show Boxes", cropped)
                # cv2.waitKey(0)
                # cv2.destroyAllWindows()
                cv2.imwrite("x" + str(i) + ".jpg", cropped)
                break  # only the first crop is saved
            # print the bounding-box properties
            print("Width of contour area:", str(w))
            features_array.append(w)

            print("Height of contour area:", str(h))
            features_array.append(h)

            print(features_array)
            featuresarray = features_array
            test_user = getCompanyOfSeal.getCompanySealOfTestInput(imagePath)
            print(test_user)
            return (features_array, test_user)

            #  with open(r'E:\Level4_Project\WritingFeatures.csv', mode='a+', encoding='UTF-8', errors='strict', buffering=1) as csvFile:
            # with open(r'E:\Level4_Project\WritingFeatures.csv', mode='a+', newline='') as csvFile:
            #     writer = csv.writer(csvFile)
            #     writer.writerow(features_array)
            #
            # csvFile.close()

            # print("Writing complete")  # compute the rotated bounding box of the contour
            box = cv2.minAreaRect(c)
            print(box)
            box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
            box = np.array(box, dtype="int")
            # order the points in the contour such that they appear
            # in top-left, top-right, bottom-right, and bottom-left
            # order, then draw the outline of the rotated bounding
            # box
            box = perspective.order_points(box)
            cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
            # loop over the original points and draw them
            for (x, y) in box:
                cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)
            # unpack the ordered bounding box, then compute the midpoint
            # between the top-left and top-right coordinates, followed by
            # the midpoint between bottom-left and bottom-right coordinates
            (tl, tr, br, bl) = box
            (tltrX, tltrY) = midpoint(tl, tr)
            (blbrX, blbrY) = midpoint(bl, br)
            # compute the midpoint between the top-left and top-right points,
            # followed by the midpoint between the top-right and bottom-right
            (tlblX, tlblY) = midpoint(tl, bl)
            (trbrX, trbrY) = midpoint(tr, br)
            (x1, y1) = tl
            (x2, y2) = tr
            (x3, y3) = br
            (x4, y4) = bl
            # draw the midpoints on the image
            cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
            cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
            cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
            cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
            # draw lines between the midpoints
            cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
                     (255, 0, 255), 2)
            cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
                     (255, 0, 255), 2)
            # compute the Euclidean distance between the midpoints
            dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
            dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
            # if the pixels per metric has not been initialized, then
            # compute it as the ratio of pixels to supplied metric
            # (in this case, inches)
            if pixelsPerMetric is None:
                pixelsPerMetric = dB
            # compute the size of the object
            dimA = dA / pixelsPerMetric
            dimB = dB / pixelsPerMetric
            # draw the object sizes on the image
            cv2.putText(orig, "{:.1f}in".format(dimA),
                        (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
                        0.65, (255, 255, 255), 2)
            cv2.putText(orig, "{:.1f}in".format(dimB),
                        (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
                        0.65, (255, 255, 255), 2)
            # show the output image
            # cv2.namedWindow("Object Detected Image", cv2.WINDOW_NORMAL)
            # cv2.imshow("Object Detected Image", orig)
            # cv2.waitKey(0)
            # cv2.namedWindow("Segmented Image", cv2.WINDOW_NORMAL)
            # cv2.imshow("Segmented Image", cropped)
            # cv2.waitKey(0)
            cv2.destroyAllWindows()
            break
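
# The CSV export that getSealInputArray leaves commented out, as a small
# standalone helper; the path comes from the original comments:
import csv

def append_features(features_array, path=r'E:\Level4_Project\WritingFeatures.csv'):
    # append one feature row; newline='' avoids blank rows on Windows
    with open(path, mode='a+', newline='') as csvFile:
        csv.writer(csvFile).writerow(features_array)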
    def process_rgb_image(self, rgb_image):
        central_list = []
        sumuv = []

        strresuv = {}
        uvuv = uv()
        strpoint_uv = structure_point()

        rgb = rgb_image
        #print "rgb_image\n",rgb
        t = 0
        if rgb_image is not None:
            # ap = argparse.ArgumentParser()
            # ap.add_argument("-i", "--image", required=True,
            #                 help="path to the image file")
            # args = vars(ap.parse_args())
            # load the image, convert it to grayscale, and blur it
            # image = cv2.imread(args["image"])

            # image = cv2.imread("1.jpg")
            image = rgb_image
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            blurred = cv2.GaussianBlur(gray, (11, 11), 0)
            # threshold the image to reveal light regions in the
            # blurred image
            thresh = cv2.threshold(blurred, 205, 255, cv2.THRESH_BINARY)[1]
            # cv2.imshow("Image_Gaussian", thresh)
            # perform a series of erosions and dilations to remove
            # any small blobs of noise from the thresholded image
            thresh = cv2.erode(thresh, None, iterations=2)
            thresh = cv2.dilate(thresh, None, iterations=4)
            # perform a connected component analysis on the thresholded
            # image, then initialize a mask to store only the "large"
            # components

            labels = measure.label(thresh, connectivity=2, background=0)  # 8-connectivity
            mask = np.zeros(thresh.shape, dtype="uint8")
            # print("labels",labels)
            # loop over the unique components
            for label in np.unique(labels):
                # if this is the background label, ignore it
                if label == 0:
                    continue

                # otherwise, construct the label mask and count the
                # number of pixels
                labelMask = np.zeros(thresh.shape, dtype="uint8")
                labelMask[labels == label] = 255
                numPixels = cv2.countNonZero(labelMask)
                print("numPixels", numPixels)
                # if the number of pixels in the component is sufficiently
                # large, then add it to our mask of "large blobs"
                if numPixels > 10:
                    mask = cv2.add(mask, labelMask)
            # find the contours in the mask, then sort them from left to
            # right
            cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            cnts = contours.sort_contours(cnts)[0]
            print("cnts", cnts)
            # loop over the contours
            for (i, c) in enumerate(cnts):
                # draw the bright spot on the image
                (x, y, w, h) = cv2.boundingRect(c)
                ((cX, cY), radius) = cv2.minEnclosingCircle(
                    c)  # find the minimum enclosing circle
                print("The minimum circle center", (cX, cY))

                cv2.circle(image, (int(cX), int(cY)), int(radius), (0, 0, 255),
                           3)
                # print("")
                cv2.putText(image, "#{}".format(i + 1), (x, y - 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
                strresuv.update({i + 1: [int(cX), int(cY)]})
                # strresuv.append({i+1:[cX,cY]})
            print "srereuv", strresuv
            if len(strresuv) == 4:
                strpoint_uv.tile_id = 0
                strpoint_uv.f1th_uv.uvinfo = strresuv[1]
                strpoint_uv.s2th_uv.uvinfo = strresuv[2]
                strpoint_uv.t3th_uv.uvinfo = strresuv[3]
                strpoint_uv.f4th_uv.uvinfo = strresuv[4]
                self.strpoint_pub.publish(strpoint_uv)
            else:
                print "please wait ----------"

            #     strresuv.append([i+1,(cX,cY)])
            # if len(strresuv) != 0:
            #     self.strpoint_pub.publish([self.resuv[-1][0], self.resuv[-1][1]])
            # else:
            #     print "wait detecting point-------"
            cv2.namedWindow('Structure_point_detecting_window_edges',
                            cv2.WINDOW_NORMAL)
            cv2.imshow('Structure_point_detecting_window_edges', thresh)

            cv2.namedWindow('Structure_point_window', cv2.WINDOW_NORMAL)
            cv2.imshow('Structure_point_window', image)
            cv2.waitKey(8)

            # convert the OpenCV image back into a ROS Image message and publish it
            try:
                self.image_pub.publish(
                    self.bridge.cv2_to_imgmsg(rgb_image, "bgr8"))
            except CvBridgeError as e:
                print(e)
        return central_list
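
# The skimage measure.label pass in process_rgb_image can be done in pure
# OpenCV as well; a sketch of an equivalent "keep only the large blobs" step:
import cv2
import numpy as np

def large_blob_mask(thresh, min_pixels=10):
    num, labels, stats, _ = cv2.connectedComponentsWithStats(thresh, connectivity=8)
    mask = np.zeros(thresh.shape, dtype="uint8")
    for lab in range(1, num):                  # label 0 is the background
        if stats[lab, cv2.CC_STAT_AREA] > min_pixels:
            mask[labels == lab] = 255
    return mask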
Exemple #27
# load the reference OCR-A image from disk, convert it to grayscale,
# and threshold it, such that the digits appear as *white* on a
# *black* background
ref = cv2.imread(args["reference"])
ref = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)
ref = cv2.threshold(ref, 10, 255, cv2.THRESH_BINARY_INV)[1]

# find contours in the OCR-A image (i.e., the outlines of the digits)
# sort them from left to right, and initialize a dictionary to map
# digit name to the ROI
refCnts = cv2.findContours(ref.copy(), cv2.RETR_EXTERNAL,
                           cv2.CHAIN_APPROX_SIMPLE)
refCnts = imutils.grab_contours(refCnts)
refCnts = contours.sort_contours(refCnts, method="left-to-right")[0]
digits = {}

# loop over the OCR-A reference contours
for (i, c) in enumerate(refCnts):
    # compute the bounding box for the digit, extract it, and resize
    # it to a fixed size
    (x, y, w, h) = cv2.boundingRect(c)
    roi = ref[y:y + h, x:x + w]
    roi = cv2.resize(roi, (57, 88))

    # update the digits dictionary, mapping the digit name to the ROI
    digits[i] = roi

# initialize a rectangular (wider than it is tall) and square
# structuring kernel
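
# The fragment stops before the matching step; in the canonical credit-card
# OCR flow each candidate digit ROI is resized to 57 x 88 and scored against
# the `digits` dict built above (a reconstruction, not this author's code):
def match_digit(roi, digits):
    roi = cv2.resize(roi, (57, 88))
    scores = []
    for (digit, digitROI) in digits.items():
        result = cv2.matchTemplate(roi, digitROI, cv2.TM_CCOEFF)
        (_, score, _, _) = cv2.minMaxLoc(result)
        scores.append(score)
    return str(np.argmax(scores))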
def horisontal_lines(v1):
    y_len, x_len = v1.shape
    v1mask = v1.copy()
    v1cleansheet = v1.copy()
    v1cleansheet[:, :] = 0

    __, ctsk, hierarchy = cv2.findContours(v1.copy(), cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    bigC = []
    for c in ctsk:
        x, y, w, h = cv2.boundingRect(c)
        #if h>15:
        if x > 10:
            bigC.append(c)

    (ctsk_index_sort, _) = contours_.sort_contours(bigC,
                                                   method="top-to-bottom")
    cX = []
    cY = []
    ct = []
    ht = []
    wt = []
    for c in ctsk_index_sort:
        x, y, w, h = cv2.boundingRect(c)
        M = cv2.moments(c)
        try:
            cX.append(int(M["m10"] / M["m00"]))
            cY.append(int(M["m01"] / M["m00"]))
            ct.append(c)
            ht.append(h)
            wt.append(w)
        except:
            pass

    labelcXcY = []
    m = 10000
    nblines = 0
    while len(cX) > 2 and nblines < len(ctsk):
        index = np.argsort(wt)[::-1]
        nblines = nblines + 1
        [vx, vy, x, y] = cv2.fitLine(ct[index[0]], cv2.DIST_L2, 0, 0.01, 0.01)
        #cv2.line(v1mask, (x-m*vx[0], y-m*vy[0]), (x+m*vx[0], y+m*vy[0]), (255,255,255),10)

        pt = []
        xyz = []
        for i in range(len(cX)):
            ss, ss1 = pnt2line((cX[i], cY[i], 0),
                               (x - round(m * vx[0]), y - round(m * vy[0]), 0),
                               (x + round(m * vx[0]), y + round(m * vy[0]), 0))
            pt.append(ss)
            xyz.append([(cX[i]), (cY[i]), (0)])

        izd = np.where(np.asarray(pt) < 5)
        izd = np.argsort(pt)[:np.min((2, len(izd[0])))]
        xyz_ = np.asarray(xyz)
        ddd = xyz_[:, :][izd, :2]

        [vx1, vy1, x1, y1] = cv2.fitLine(ddd, cv2.DIST_L2, 0, 0.01, 0.01)
        #cv2.line(v1mask, (x1-round(m*vx1[0]), y1-round(m*vy1[0])), (x1+round(m*vx1[0]), y1+round(m*vy1[0])), (255,255,255),5)
        #cv2.line(v1mask, (x1-round(m*vx1[0]), (y1-20)-round(m*vy1[0])), (x1+round(m*vx1[0]), (y1-20)+round(m*vy1[0])), (255,255,255),2)
        #cv2.line(v1mask, (x1-round(m*vx1[0]), (y1+20)-round(m*vy1[0])), (x1+round(m*vx1[0]), (y1+20)+round(m*vy1[0])), (255,255,255),2)
        #cv2.line(v1mask,(0,ybot),(x_len,ytop),(255,255,255),1)

        pt = []
        xyz = []
        for i in range(len(cX)):
            ss, ss1 = pnt2line(
                (cX[i], cY[i], 0),
                (x1 - round(m * vx1[0]), y1 - round(m * vy1[0]), 0),
                (x1 + round(m * vx1[0]), y1 + round(m * vy1[0]), 0))
            pt.append(ss)
            xyz.append([(cX[i]), (cY[i]), (0)])

            izd = np.where(np.asarray(pt) < 15)
            izd = np.argsort(pt)[:np.min((4, len(izd[0])))]
            xyz_ = np.asarray(xyz)
            ddd = xyz_[:, :][izd, :2]

        pt = []
        xyz = []
        for i in range(len(cX)):
            ss, ss1 = pnt2line(
                (cX[i], cY[i], 0),
                (x1 - round(m * vx1[0]), y1 - round(m * vy1[0]), 0),
                (x1 + round(m * vx1[0]), y1 + round(m * vy1[0]), 0))
            pt.append(ss)
            xyz.append([(cX[i]), (cY[i]), (0)])

        izd = np.where(np.asarray(pt) < 15)
        izd = np.argsort(pt)[:np.min((8, len(izd[0])))]
        xyz_ = np.asarray(xyz)
        ddd = xyz_[:, :][izd, :2]

        [vx1, vy1, x1, y1] = cv2.fitLine(ddd, cv2.DIST_L2, 0, 0.01, 0.01)

        pt = []
        xyz = []
        for i in range(len(cX)):
            ss, ss1 = pnt2line(
                (cX[i], cY[i], 0),
                (x1 - round(m * vx1[0]), y1 - round(m * vy1[0]), 0),
                (x1 + round(m * vx1[0]), y1 + round(m * vy1[0]), 0))
            pt.append(ss)
            xyz.append([(cX[i]), (cY[i]), (0)])

        izd = np.where(np.asarray(pt) < 25)
        izd = np.argsort(pt)[:np.min((25, len(izd[0])))]
        xyz_ = np.asarray(xyz)
        ddd = xyz_[:, :][izd, :2]

        [vx1, vy1, x1, y1] = cv2.fitLine(ddd, cv2.DIST_L2, 0, 0.01, 0.01)

        labelcXcY.append(
            ([cX[i] for i in izd], [cY[i]
                                    for i in izd], [nblines for i in izd]))

        izd = np.where(np.asarray(pt) < 25)
        cX = np.delete(cX, izd)
        cY = np.delete(cY, izd)
        ct = np.delete(ct, izd)
        ht = np.delete(ht, izd)
        wt = np.delete(wt, izd)

        for i in range(nblines):
            if len(labelcXcY[i][0]) > 2:
                z = np.polyfit(labelcXcY[i][0], labelcXcY[i][1], 1)
                f = np.poly1d(z)
                xs = np.linspace(0, m, 1000)
                ys = f(xs)
                ddd = np.vstack((xs, ys)).T
                cv2.polylines(v1cleansheet, np.int32([ddd]), 0,
                              (255, 255, 255), 2)
    return v1cleansheet
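
# horisontal_lines relies on a pnt2line helper that is not shown; a standard
# point-to-segment distance, matching the `ss, ss1 = pnt2line(...)` unpacking
# above (distance first, nearest point second):
import numpy as np

def pnt2line(pnt, start, end):
    pnt, start, end = (np.asarray(p, dtype=float) for p in (pnt, start, end))
    line = end - start
    length2 = float(np.dot(line, line))
    if length2 == 0.0:                       # degenerate segment
        nearest = start
    else:
        t = np.clip(np.dot(pnt - start, line) / length2, 0.0, 1.0)
        nearest = start + t * line
    return float(np.linalg.norm(pnt - nearest)), tuple(nearest)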
    def medir_imagen(self, medida_ref, cedula):
        print("llego la medida de referencia 3=", medida_ref)
        print("medir_imagen")
        print("llego la cedula para guardar la foto procesada=", cedula)
        img1 = cv2.imread("Temporal_hsv.png")

        kernel = np.ones((3, 3), np.uint8)
        opening = cv2.morphologyEx(
            img1, cv2.MORPH_OPEN, kernel
        )  # opening removes external noise; useful for deleting small objects
        closing = cv2.morphologyEx(
            opening, cv2.MORPH_CLOSE, kernel
        )  # closing removes internal noise (black specks); useful for filling small holes (dark regions)
        erosion = cv2.erode(
            closing, kernel, iterations=1
        )  # erosion eats away the foreground boundary, shrinking the white region and removing white noise
        dilation = cv2.dilate(
            erosion, kernel, iterations=1
        )  # dilation is the opposite of erosion: it grows the white region; useful for joining broken parts of an object
        img_limpia = dilation.copy()  # copy the cleaned result into a new variable
        """Proceso de reconocimiento de bordes """

        canny = cv2.Canny(
            img_limpia, 0, 255
        )  # Canny is a multi-stage edge detector (img, min threshold, max threshold)
        alto, ancho, dimensiones = img1.shape
        print(
            'height, width, channels in px =', alto, ancho, dimensiones
        )  # print the shape of the image matrix (rows, columns, channels)
        print(len(
            img1))  # print the number of pixel rows in the image
        contornos, _ = cv2.findContours(
            canny.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
        )  # findContours collects the contours already detected by Canny
        mask = np.zeros(
            img1.shape[:], dtype="uint8"
        )  # a zero matrix called mask, the same size as the image

        for c in contornos:  # iterate over every contour found by findContours
            area = cv2.contourArea(c)  # compute the contour area
            if area > 2500:  # only sufficiently large areas are drawn

                cv2.drawContours(
                    img_limpia, [c], -1, (255, 255, 255), 1
                )  # draw the inner contours in white (255, 255, 255), 1 px thick

            elif area < 2499:  # smaller areas are masked out
                img_limpia = mask

        edged = cv2.Canny(img_limpia, 50, 100)
        edged = cv2.dilate(edged, None, iterations=1)
        edged = cv2.erode(edged, None, iterations=1)
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(
            cnts
        )  # picks the right tuple element for OpenCV 2.4, 3 or 4

        # Sort contours left-to-right, since the leftmost contour is the reference object
        (cnts, _) = contours.sort_contours(cnts)

        # Remove contours that are not large enough
        cnts = [x for x in cnts if cv2.contourArea(x) > 100]

        # Dimensions of the reference object

        ref_object = cnts[0]
        box = cv2.minAreaRect(ref_object)
        box = cv2.boxPoints(box)
        print("box=", box)  #encuentra los puntos
        box = np.array(box, dtype="int")
        box = perspective.order_points(box)
        (
            tl, tr, br, bl
        ) = box  # ordered top-left, top-right, bottom-right, bottom-left
        print(tl, tr, br, bl)
        dist_in_pixel = euclidean(tl, tr)
        dist_in_cm = medida_ref
        pixel_per_cm = dist_in_pixel / dist_in_cm
        i = 0
        ancho = []
        alto = []
        esquinas = []
        contornos = []

        # Draw the remaining contours
        conta = 0
        for cnt in cnts:
            conta = conta + 1
            print("count =", conta)

        if conta <= 3:
            for cnt in cnts:
                i = 0
                box = cv2.minAreaRect(
                    cnt
                )  # rotated bounding box: (centre (x, y), (width, height), rotation angle)
                box = cv2.boxPoints(
                    box
                )  # the four corner points needed to draw the rectangle
                box = np.array(box, dtype="int")  # cast everything to int
                box = perspective.order_points(
                    box
                )  # order the points clockwise
                (tl, tr, br, bl) = box
                contornos.append(box)
                esquinas.append(box.astype("int"))
                print("tl, tr, br, bl =", tl, tr, br, bl)
                cv2.drawContours(img_limpia, [box.astype("int")], -1,
                                 (0, 0, 255), 1)
                #        mid_pt_horizontal = (tl[0] + int(abs(tr[0] - tl[0])/2), tl[1] + int(abs(tr[1] - tl[1])/2))
                #        mid_pt_verticle = (tr[0] + int(abs(tr[0] - br[0])/2), tr[1] + int(abs(tr[1] - br[1])/2))
                wid = euclidean(tl, tr) / pixel_per_cm
                ancho.append(wid)
                print("wid=", wid)
                ht = euclidean(tr, br) / pixel_per_cm
                alto.append(ht)
                print("altos=", ht)

            #    cv2.putText(img_limpia, "{:.1f}cm".format(wid), (int(mid_pt_horizontal[0] - 15), int(mid_pt_horizontal[1] - 10)),
            #    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 2)
            #    cv2.putText(img_limpia, "{:.1f}cm".format(ht), (int(mid_pt_verticle[0] + 10), int(mid_pt_verticle[1])),
            #    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 2)

            print("esquinas=", esquinas)

            print("estos son los contornos=", contornos)

            box0 = contornos[0]
            box1 = contornos[1]
            box2 = contornos[2]

            #        print('box0=', box0)
            #        print('box1=', box1)
            #        print('box2=', box2)
            #
            x1, y1 = box1[1]
            x2, y2 = box1[2]
            img_linea = np.array(
                cv2.line(img_limpia, (x1, y1), (x2, y2), (0, 255, 0), 1))

            xx1, yy1 = box2[0]
            xx2, yy2 = box2[3]
            img_linea = np.array(
                cv2.line(img_limpia, (xx2, yy2), (xx1, yy1), (0, 255, 255), 1))

            #cv2.imshow('imagen limpia', img_limpia)
            cv2.imwrite("rectangulos.png", img_limpia)
            cv2.imwrite(
                "../Imagenes_Huellas/Muestras_procesada/%d.png" % cedula,
                img_limpia)

        centroides = []
        if conta > 3:
            """ Dibuja los contornos que quedaron"""

            for cnt in cnts:
                print("contador  if conta>3: =", conta)
                i = 0
                box = cv2.minAreaRect(
                    cnt
                )  # 2D box of the contour: (center (x, y), (width, height), rotation angle)
                #print("datoss=",box)
                centroide, acho_alto, angulo = box
                x, y = centroide
                centroide = np.array(centroide, dtype="int")
                centroides.append(centroide)
                #print("Angulo=",angulo)
                box = cv2.boxPoints(
                    box
                )  # the 4 corner points needed to draw the rotated rectangle
                box = np.array(box, dtype="int")  # cast everything to int
                box = perspective.order_points(
                    box
                )  # order the points clockwise
                (tl, tr, br, bl) = box
                contornos.append(box)
                esquinas.append(box.astype("int"))
                #print("tl, tr, br, bl=",tl, tr, br, bl)
                wid = euclidean(tl, tr) / pixel_per_cm
                ancho.append(wid)
                #print("wid=",wid)
                ht = euclidean(tr, br) / pixel_per_cm
                alto.append(ht)
                #print("altos=",ht)

            print("cuanto quedo conta=", conta)
            """ Se dibuja  la linea entre centroides"""

            print("centroides=", centroides)
            cx1, cy1 = centroides[1]
            cx2, cy2 = centroides[2]
            img_linea = np.array(
                cv2.line(img_limpia, (cx1, cy1), (cx2, cy2), (100, 100, 0), 1))

            cx3, cy3 = centroides[3]
            cx4, cy4 = centroides[4]
            img_linea = np.array(
                cv2.line(img_limpia, (cx3, cy3), (cx4, cy4), (100, 100, 0),
                         1))  # end of the centroid-to-centroid lines

            #cv2.imshow('img2',img_limpia)
            cv2.imwrite(
                "rectangulos_linea_centroide.png",
                img_limpia)  # save the image with the line between centroids
            """Reload the saved image and draw the boxes on it"""

            img_linea_amarilla = cv2.imread("rectangulos_linea_centroide.png")
            #cv2.imshow('img3',img_linea_amarilla)

            edged2 = cv2.Canny(img_linea_amarilla, 50, 100)
            edged2 = cv2.dilate(edged2, None, iterations=1)
            edged2 = cv2.erode(edged2, None, iterations=1)
            cnts2 = cv2.findContours(edged2.copy(), cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)
            cnts2 = imutils.grab_contours(cnts2)
            (cnts2, _) = contours.sort_contours(cnts2)

            # Dibuja los contornos que quedaron
            contornos2 = []
            for cnt in cnts2:
                i = 0
                box = cv2.minAreaRect(
                    cnt
                )  # 2D box of the contour: (center (x, y), (width, height), rotation angle)
                print("box data =", box)

                box = cv2.boxPoints(
                    box
                )  # the 4 corner points needed to draw the rotated rectangle
                box = np.array(box, dtype="int")  # cast everything to int
                box = perspective.order_points(
                    box
                )  # order the points clockwise
                (tl, tr, br, bl) = box
                contornos2.append(box)
                esquinas.append(box.astype("int"))
                print("tl, tr, br, bl=", tl, tr, br, bl)
                cv2.drawContours(img_linea_amarilla, [box.astype("int")], -1,
                                 (0, 0, 255), 1)

            box0 = contornos2[0]
            box1 = contornos2[1]
            box2 = contornos2[2]
            print("-----box1=", box1)
            x0, y0 = box1[0]
            x1, y1 = box1[1]
            x2, y2 = box1[2]
            img_linea = np.array(
                cv2.line(img_linea_amarilla, (x1, y1), (x2, y2), (0, 255, 0),
                         1))
            xx1, yy1 = box2[0]
            xx2, yy2 = box2[3]
            img_linea = np.array(
                cv2.line(img_linea_amarilla, (xx2, yy2), (xx1, yy1),
                         (0, 255, 255), 1))
            #cv2.imshow('img4',img_linea_amarilla)
            cv2.imwrite("rectangulos.png", img_linea_amarilla)
        """"Empieza el calculo de las medidas fundamentales"""

        rectangulos = Image.open(
            'rectangulos.png')  # read the image back with Pillow
        datos = list(
            rectangulos.getdata()
        )  # per-pixel colors; note PIL returns RGB, the reverse of OpenCV's BGR
        pixels = rectangulos.load(
        )  # load() allocates storage for the image and exposes pixel access
        width, height = rectangulos.size  # size gives the image width and height in px
        ancho = width  # note: this shadows the earlier ancho/alto measurement lists
        alto = height
        img_limpia2 = img_limpia.copy()
        """pie izquierdo"""

        verde = (0, 255, 0)
        blanco = (255, 255, 255)
        i = 1
        for y in range(
                alto
        ):  # scan the drawn rectangles pixel by pixel to find the fundamental measurements
            for x in range(ancho):
                r, g, b = pixels[x, y]
                #- green line edge (0, 255, 0) -#
                if (r, g, b) == verde:
                    if i == 1:
                        # save these positions
                        R_xx2 = copy.copy(x)
                        R_yy2 = copy.copy(y)
                        R_xx3 = R_xx2 - 2
                        r2, g2, b2 = pixels[R_xx3, R_yy2]
                        #print(f'verde x2= {R_xx3} y2={R_yy2}')
                        if (r2, g2, b2) == blanco:
                            #pixels[x, y] = (255, 255,255)
                            print("medida fundamental =", x, y)
                            x1, y1 = box1[1]
                            print("x1,y1=", x1, y1)
                            mfxx1 = int(x - x1)
                            mfyy1 = int(y - y1)

                            print('mfxx1,mfyy1=', mfxx1, mfyy1)
                            x0, y0 = box1[0]
                            x0mf = int(x0 + mfxx1)
                            y0mf = int(y0 + mfyy1)

                            print('x0mf,y0mf=', x0mf, y0mf)
                            #                    blanco=(255,255,255)
                            cv2.line(img_limpia2, (x, y), (x0mf, y0mf),
                                     (255, 0, 0), 1)  # fundamental measurement, blue

                            mfxxx1 = x + mfxx1
                            mfyyy1 = y + mfyy1

                            print("medida fundamental 2 por derecha=", mfxxx1,
                                  mfyyy1)
                            x0mf2 = int(x0mf + mfxx1)
                            y0mf2 = int(y0mf + mfyy1)
                            print("medida fundamental 2 por izquierda=", x0mf2,
                                  y0mf2)
                            cv2.line(img_limpia2, (mfxxx1, mfyyy1),
                                     (x0mf2, y0mf2), (255, 255, 100), 1)  #mf
                            i = i + 1
        """pie derecho"""
        print("pied derecho")
        ii = 1
        #        amarillo = (255, 255, 0)  # yellow, as read back in RGB by PIL
        for y in range(
                alto
        ):  # scan the drawn rectangles pixel by pixel to find the fundamental measurements
            for x in range(ancho):
                r, g, b = pixels[x, y]
                #- yellow line: drawn as (0, 255, 255) BGR by cv2, read back as (255, 255, 0) RGB by PIL -#
                if (r, g, b) == (255, 255, 0):

                    if ii == 1:
                        # save these positions
                        R_xx2 = copy.copy(x)
                        R_yy2 = copy.copy(y)
                        R_xx3 = R_xx2 + 2
                        r2, g2, b2 = pixels[R_xx3, R_yy2]
                        #print(f'verde x2= {R_xx3} y2={R_yy2}')
                        if (r2, g2, b2) == blanco:
                            #pixels[x, y] = (255, 255,255)
                            print("medida fundamental  del cuadro 2=", x, y)
                            x1, y1 = box2[0]
                            print("x1,y1=", x1, y1)
                            mfxx1 = int(x - x1)
                            mfyy1 = int(y - y1)
                            print('distance mfxx1, mfyy1 =', mfxx1, mfyy1)
                            x0, y0 = box2[1]
                            x0mf = int(x0 + mfxx1)
                            y0mf = int(y0 + mfyy1)

                            print('x0mf,y0mf=', x0mf, y0mf)
                            #                    blanco=(255,255,255)
                            cv2.line(img_limpia2, (x, y), (x0mf, y0mf),
                                     (255, 0, 0), 1)  # fundamental measurement, blue

                            mfxxx1 = x + mfxx1
                            mfyyy1 = y + mfyy1

                            print("medida fundamental 2 por derecha=", mfxxx1,
                                  mfyyy1)
                            x0mf2 = int(x0mf + mfxx1)
                            y0mf2 = int(y0mf + mfyy1)
                            print("medida fundamental 2 por izquierda=", x0mf2,
                                  y0mf2)
                            cv2.line(img_limpia2, (mfxxx1, mfyyy1),
                                     (x0mf2, y0mf2), (255, 255, 120),
                                     1)  #mf fucsia
                            ii = ii + 1
                            """Medios pies"""
        print("imagen procesada con cedula= ", cedula)
        #
        cv2.imwrite("../Imagenes_Huellas/Muestras_procesada/%d.png" % cedula,
                    img_limpia2)
        cv2.imwrite("rectangulos.png", img_limpia2)
        rectangulos = Image.open(
            'rectangulos.png')  # read the image back with Pillow
        datos = list(
            rectangulos.getdata()
        )  # per-pixel colors; note PIL returns RGB, the reverse of OpenCV's BGR
        pixels = rectangulos.load(
        )  # load() allocates storage for the image and exposes pixel access
        width, height = rectangulos.size  # size gives the image width and height in px
        ancho = width
        alto = height
        blanco = (255, 255, 255)
        """medio pie Izq"""
        j = 1
        medio_pie_izq = []
        for y in range(
                alto
        ):  # scan the drawn rectangles pixel by pixel to find the fundamental measurements
            for x in range(ancho):
                r, g, b = pixels[x, y]

                #- midfoot line: drawn as (255, 255, 100) BGR, read back as (100, 255, 255) RGB -#
                if (r, g, b) == (100, 255, 255):
                    if j == 1:

                        # save these positions
                        R_xx2 = copy.copy(x)
                        R_yy2 = copy.copy(y)
                        R_yy3 = R_yy2 - 1
                        r2, g2, b2 = pixels[R_xx2, R_yy3]
                        #print("R_xx2,R_yy3",R_xx2,R_yy3)

                        if (r2, g2, b2) == (255, 255, 255):
                            #pixels[x, y] = (255, 255,255)
                            print("este es un punto=", x, y)
                            medio_pie = []
                            medio_pie.append(x)
                            medio_pie.append(y)
                            medio_pie_izq.append(medio_pie)
        print(" ")
        print("medio_pie=", medio_pie_izq)
        largo_pie_izquierdo = euclidean(box1[0], box1[3]) / pixel_per_cm
        largo_pie_izquierdo = round(largo_pie_izquierdo, 2)

        ancho_pie_izq = euclidean(box1[0], box1[1]) / pixel_per_cm
        ancho_pie_izq = round(ancho_pie_izq, 2)

        # midfoot width: distance between the first two detected points
        ancho_medio_pie_izq = euclidean(medio_pie_izq[0],
                                        medio_pie_izq[1]) / pixel_per_cm
        ancho_medio_pie_izq = round(ancho_medio_pie_izq, 2)

        HC1 = (ancho_pie_izq - ancho_medio_pie_izq) / ancho_pie_izq
        HC1 = HC1 * 100
        HC1 = round(HC1, 2)

        print("HC:", HC1)
        print("largo_pie_izquierdo=", largo_pie_izquierdo)
        print("ancho_pie_izq=", ancho_pie_izq)
        print("ancho_medio_pie_izq=", ancho_medio_pie_izq)
        print(" ")

        tipo_pie = "unclassified"  # fallback so tipo_pie is always defined
        if 0 <= HC1 < 35:
            tipo_pie = "flat foot"
        elif 35 <= HC1 < 40:
            tipo_pie = "flat/normal foot"
        elif 40 <= HC1 < 55:
            tipo_pie = "normal foot"
        elif 55 <= HC1 < 60:
            tipo_pie = "normal foot"
        elif 60 <= HC1 < 75:
            tipo_pie = "cavus foot"
        elif 75 <= HC1 < 85:
            tipo_pie = "strong cavus foot"
        elif 85 <= HC1 <= 100:
            tipo_pie = "extreme cavus foot"
        print(tipo_pie)

        largo_pie_izquierdo = str(largo_pie_izquierdo)
        ancho_pie_izq = str(ancho_pie_izq)
        ancho_medio_pie_izq = str(ancho_medio_pie_izq)
        HC1 = str(HC1)
        tipo_pie = str(tipo_pie)

        self.textEdit_izq1.setText(largo_pie_izquierdo)
        self.textEdit_izq2.setText(ancho_pie_izq)
        self.textEdit_izq3.setText(ancho_medio_pie_izq)
        self.textEdit_izq4.setText(HC1 + " " + tipo_pie)
        """medio pie Derecho"""
        jj = 1
        medio_pie_derecho = []
        for y in range(
                alto
        ):  # scan the drawn rectangles pixel by pixel to find the fundamental measurements
            for x in range(ancho):
                r, g, b = pixels[x, y]

                #- midfoot line: drawn as (255, 255, 120) BGR, read back as (120, 255, 255) RGB -#
                if (r, g, b) == (120, 255, 255):
                    if jj == 1:

                        # save these positions
                        R_xx2 = copy.copy(x)
                        R_yy2 = copy.copy(y)
                        R_yy3 = R_yy2 - 1
                        r2, g2, b2 = pixels[R_xx2, R_yy3]
                        #print("R_xx2,R_yy3",R_xx2,R_yy3)

                        if (r2, g2, b2) == (255, 255, 255):
                            #pixels[x, y] = (255, 255,255)
                            print("este es un punto=", x, y)
                            medio_pie = []
                            medio_pie.append(x)
                            medio_pie.append(y)
                            medio_pie_derecho.append(medio_pie)

        print("medio_pie_derecho=", medio_pie_derecho)
        largo_pie_derecho = euclidean(box2[0], box2[3]) / pixel_per_cm
        largo_pie_derecho = round(largo_pie_derecho, 2)

        ancho_pie_derecho = euclidean(box2[0], box2[1]) / pixel_per_cm
        ancho_pie_derecho = round(ancho_pie_derecho, 2)

        # midfoot width: distance between the first two detected points
        ancho_medio_pie_derecho = euclidean(
            medio_pie_derecho[0], medio_pie_derecho[1]) / pixel_per_cm
        ancho_medio_pie_derecho = round(ancho_medio_pie_derecho, 2)

        HC = (ancho_pie_derecho - ancho_medio_pie_derecho) / ancho_pie_derecho
        HC = HC * 100
        HC = round(HC, 2)

        print("HC=", HC)
        print("largo_pie_derecho=", largo_pie_derecho)
        print("ancho_pie_derecho=", ancho_pie_derecho)
        print("ancho_medio_pie_derecho=", ancho_medio_pie_derecho)

        tipo_pie2 = "unclassified"  # fallback so tipo_pie2 is always defined
        if 0 <= HC < 35:
            tipo_pie2 = "flat foot"
        elif 35 <= HC < 40:
            tipo_pie2 = "flat/normal foot"
        elif 40 <= HC < 55:
            tipo_pie2 = "normal foot"
        elif 55 <= HC < 60:
            tipo_pie2 = "normal foot"
        elif 60 <= HC < 75:
            tipo_pie2 = "cavus foot"
        elif 75 <= HC < 85:
            tipo_pie2 = "strong cavus foot"
        elif 85 <= HC <= 100:
            tipo_pie2 = "extreme cavus foot"
        print(tipo_pie2)

        largo_pie_derecho = str(largo_pie_derecho)
        ancho_pie_derecho = str(ancho_pie_derecho)
        ancho_medio_pie_derecho = str(ancho_medio_pie_derecho)
        HC = str(HC)
        tipo_pie2 = str(tipo_pie2)

        self.textEdit_dere1.setText(largo_pie_derecho)
        self.textEdit_dere2.setText(ancho_pie_derecho)
        self.textEdit_dere3.setText(ancho_medio_pie_derecho)
        self.textEdit_dere4.setText(HC + " " + tipo_pie2)

        #cv2.imshow("imagen limpia2", img_limpia2)

        cv2.waitKey(0)
        cv2.destroyAllWindows()
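
# The branch above computes the HC index: HC = (forefoot_width -
# midfoot_width) / forefoot_width * 100, then maps the percentage to a foot
# type (the thresholds match a Hernandez-Corvo style classification).  A
# minimal self-contained sketch of that computation; the function name is
# ours and the labels mirror the code above, so treat it as an illustration
# rather than part of the original program:
def hc_index(forefoot_width_cm, midfoot_width_cm):
    """Return (index, label) for a footprint, using the thresholds above."""
    hc = round((forefoot_width_cm - midfoot_width_cm) / forefoot_width_cm * 100, 2)
    if hc < 35:
        label = "flat foot"
    elif hc < 40:
        label = "flat/normal foot"
    elif hc < 60:
        label = "normal foot"
    elif hc < 75:
        label = "cavus foot"
    elif hc < 85:
        label = "strong cavus foot"
    else:
        label = "extreme cavus foot"
    return hc, label

# e.g. hc_index(9.0, 3.6) -> (60.0, "cavus foot")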
Exemple #30
def main():
    input_files = "C:\\Users\\user\\Desktop\\codetest\\size-of-objects\\images"
    width = 7

    DIMA = []
    DIMB = []
    for image_path in os.listdir(input_files):
        input_path = os.path.join(input_files, image_path)
        image = cv2.imread(input_path)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # perform edge detection, then perform a dilation + erosion to
        # close gaps in between object edges
        edged = cv2.Canny(gray, 50, 100)
        edged = cv2.dilate(edged, None, iterations=1)
        edged = cv2.erode(edged, None, iterations=1)
        # find contours in the edge map
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)

        # sort the contours from left-to-right and initialize the
        # 'pixels per metric' calibration variable
        (cnts, _) = contours.sort_contours(cnts)
        pixelsPerMetric = None
        # create workbook

        # loop over the contours individually
        for c in cnts:
            if cv2.contourArea(c) < 100:
                continue

            orig = image.copy()
            box = cv2.minAreaRect(c)
            box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(
                box)
            box = np.array(box, dtype="int")

            # order the points in the contour such that they appear
            # in top-left, top-right, bottom-right, and bottom-left
            # order, then draw the outline of the rotated bounding
            # box
            box = perspective.order_points(box)
            cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
            # loop over the original points and draw them
            for (x, y) in box:
                cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)
            # unpack the ordered bounding box, then compute the midpoint
            # between the top-left and top-right coordinates, followed by
            # the midpoint between bottom-left and bottom-right coordinates
            (tl, tr, br, bl) = box
            (tltrX, tltrY) = midpoint(tl, tr)
            (blbrX, blbrY) = midpoint(bl, br)
            # compute the midpoint between the top-left and top-right points,
            # followed by the midpoint between the top-right and bottom-right
            (tlblX, tlblY) = midpoint(tl, bl)
            (trbrX, trbrY) = midpoint(tr, br)
            # draw the midpoints on the image
            cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
            cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
            cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
            cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
            # draw lines between the midpoints
            cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
                     (255, 0, 255), 2)
            cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
                     (255, 0, 255), 2)

            # compute the Euclidean distance between the midpoints
            dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
            dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
            # if the pixels per metric has not been initialized, then
            # compute it as the ratio of pixels to supplied metric
            # (in this case, inches)
            if pixelsPerMetric is None:
                pixelsPerMetric = dB / width

            # compute the size of the object and convert to cm
            dimA = dA / pixelsPerMetric
            DIMA.append(dimA * 2.54)

            dimB = dB / pixelsPerMetric
            DIMB.append(dimB * 2.54)

            # draw the object sizes on the image


#            cv2.putText(orig, "{:.1f}in".format(dimA),
#               (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
#              0.65, (255, 255, 255), 2)
#         cv2.putText(orig, "{:.1f}in".format(dimB),
#            (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
#           0.65, (255, 255, 255), 2)

#      # show the output image
#     cv2.imshow("Image", orig)
#    cv2.waitKey(0)

#remove reference values and add dimensions of the object of interest
# to the excel sheet in cms
    # even-indexed entries are the reference object; keep only the rest
    DIMA = DIMA[1::2]

    DIMB = DIMB[1::2]

    df = pd.DataFrame.from_dict({
        'Extracted_Diameter': DIMB,
        'Extracted_Overcross': DIMA
    })
    df.to_excel(
        'C:\\Users\\user\\Desktop\\codetest\\size-of-objects\\output.xlsx',
        header=True,
        index=False)
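
# The measurement loop above calls a midpoint helper that none of these
# snippets define; a minimal sketch of the helper they assume:
def midpoint(ptA, ptB):
    # average the two points component-wise
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)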
    def camera_callback(self, data):
        # Return if node is not enabled
        if (not self.is_node_enabled):
            return


        # Node is enabled, process the camera data
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        self.tool_ROI = RegionOfInterest()
        self.tool_pos = ObjectPose()

        self.tool_pos.header.stamp.secs = self.camera_secs
        self.tool_pos.header.stamp.nsecs = self.camera_nsecs

        WW=self.image_width
        HH=self.image_height

        fx=self.camera_K[0]
        fy=self.camera_K[4]
        u0=self.camera_K[5]
        v0=self.camera_K[2]

        K=np.matrix([[fx, 0, u0, 0], [0, fy, v0, 0], [0, 0, 1, 0]])
        K_INV=pinv(K)

        img = cv_image.copy()

        output = img.copy()

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (11, 11), 0)
        #thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY_INV)[1]
        thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,11,2)

        # perform edge detection, then perform a dilation + erosion to close gaps in between object edges
        edged = cv2.Canny(blurred, 20, 150)
        edged = cv2.dilate(edged, None, iterations=1)
        edged = cv2.erode(edged, None, iterations=1)
        edged = cv2.erode(edged, None, iterations=1)

        edged2 = auto_canny(blurred)
        edged3 = cv2.dilate(edged2.copy(), None, iterations=1)
        edged4 = cv2.erode(edged3.copy(), None, iterations=1)

        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(12,24))
        filled = cv2.morphologyEx(edged4, cv2.MORPH_CLOSE, kernel)


        # find contours in the thresholded image and initialize the shape detector
        #cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
        #cnts = cv2.findContours(edged4.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
        cnts = cv2.findContours(filled.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]

        # sort the contours from left-to-right and initialize the
        (cnts, _) = contours.sort_contours(cnts)

        # loop over the contours
        simil = []
        cX_v= []
        cY_v= []
        wr_cent_v= []
        wr_contours = []
        t_h_v = []
        wr_tc_v = []
        wrenches = []
        wr_count = 0
        toolIdentified = False
        for c in cnts:
            # compute the center of the contour, then detect the name of the
            # shape using only the contour
            M = cv2.moments(c)
            hu = cv2.HuMoments(M)

            retSim1 = cv2.matchShapes(cnts_s[0],c,1,0.0)  # cnts_s: reference-shape contours loaded outside this snippet
            retSim2 = cv2.matchShapes(cnts_s[0],c,2,0.0)
            retSim3 = cv2.matchShapes(cnts_s[0],c,3,0.0)

            # multiply the contour (x, y)-coordinates by the resize ratio,
            # then draw the contours and the name of the shape on the image
            c = c.astype("float")
            c *= 1
            c = c.astype("int")
            text = "{}".format(shape)

            area = cv2.contourArea(c)

            # approximate the contour
            #peri = cv2.arcLength(c, True)
            #approx = cv2.approxPolyDP(c, 0.01 * peri, True)
            (x, y, w, h) = cv2.boundingRect(c)

            #print(img.shape[0])
            # if the contour is too large or too small, ignore it
            if h < 0.3*img.shape[0] or x<5 or y<5 or x+w > WW-5 or y+h > HH-5:
                continue

            aspectRatio = w / float(h)

            (xc,yc),radius = cv2.minEnclosingCircle(c)
            minEncCirArea = math.pi*(radius**2)

            minEncircleA_ratio = minEncCirArea/(area+1)

            # compute the rotated bounding box of the contour
            box = cv2.minAreaRect(c)
            box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
            box = np.array(box, dtype="int")

            # order the points in the contour such that they appear
            # in top-left, top-right, bottom-right, and bottom-left
            # order, then draw the outline of the rotated bounding
            # box
            box = perspective.order_points(box)
            (tl, tr, br, bl) = box

            #out3=img.copy()
            #print(aspectRatio,retSim1,retSim2,retSim3) 
            #cv2.drawContours(out3, [c], -1, (0, 0, 255), 3)
            #cv2.imshow("out3", out3)
            #cv2.waitKey(3)

            dA = distance.euclidean(tl, bl)
            dB = distance.euclidean(tl, tr)

            aspectRatio2 = dB/dA

            minRectArea_ratio = (dA*dB)/(area+1)
            
            hull = cv2.convexHull(c)
            hull_area = cv2.contourArea(hull)
            solidity = float(area)/(hull_area+1)

            keepRatio = aspectRatio > 0.9 and aspectRatio < 1.15
            #keepSimilarity = retSim1 < 2.9 and retSim2 < 2 and retSim3 < 1.5  # tune for outdoor specially retSim2 
            keepSimilarity = retSim2 < 2 and retSim3 < 1.0 
            #keepSolidity = solidity > 0.4 and solidity < 0.7
            #keepAreaRatio = minRectArea_ratio > 2 and minRectArea_ratio < 3

            #Circle = len(approx)>8 and aspectRatio > 0.8 and aspectRatio < 1.3 and minEncircleA_ratio > 1 and minEncircleA_ratio < 1.3

            if keepRatio and keepSimilarity:

                wr_count = wr_count + 1

                #cX = int((M["m10"] / M["m00"]))
                #cY = int((M["m01"] / M["m00"]))

                #cX_v = np.hstack((cX_v,cX))
                #cY_v = np.hstack((cY_v,cY))

                #wr_cent = (cX,cY)

                wrs_contour = c

                cv2.rectangle(output, (x,y), (x+w,y+h), (255,0,0), 2)

                cv2.drawContours(output, [box.astype("int")], -1, (0, 0, 255), 2)
                cv2.drawContours(output, [c], -1, (0, 255, 0), 2)

                subimg = img[y + h // 2:y + h, x:x + w]
                subfilled = filled[y + h // 2:y + h, x:x + w]
                subout = subimg.copy()

                #cv2.imshow("subimg", subimg)
                #cv2.waitKey(3)

                subcnts = cv2.findContours(subfilled.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
                subcnts = subcnts[0] if imutils.is_cv2() else subcnts[1]

                detected_sizes = []
                wr_subcnts = ()
                if len(subcnts) > 0: 

                    # sort the contours from left-to-right and initialize the
                    (subcnts, _) = contours.sort_contours(subcnts)

                    subcnt_idx = 0
                    for subc in subcnts:
                        
                        cv2.drawContours(subout, [subc], -1, (0, 255, 0), 2)

                        (x_sub, y_sub, w_sub, h_sub) = cv2.boundingRect(subc)

                        # compute the rotated bounding box of the contour
                        sbox = cv2.minAreaRect(subc)
                        sbox = cv2.cv.BoxPoints(sbox) if imutils.is_cv2() else cv2.boxPoints(sbox) 
                        sbox = np.array(sbox, dtype="int")

                        # order the points in the contour such that they appear
                        # in top-left, top-right, bottom-right, and bottom-left
                        # order, then draw the outline of the rotated bounding
                        # box
                        sbox = perspective.order_points(sbox)
                        (tl_sub, tr_sub, br_sub, bl_sub) = sbox

                        dH_sub = distance.euclidean(tl_sub, bl_sub)
                        dW_sub = distance.euclidean(tl_sub, tr_sub)

                        #if dH_sub*dW_sub*1.0 < 0.02*h*w:
                        if h_sub < 0.2*h:
                            continue

                        subcnt_idx = subcnt_idx + 1

                        wrs_Z = fx*(act_wrs_w/w)

                        wr_h=wrs_Z*dH_sub/fx
                        wr_w=wrs_Z*dW_sub/fx

                        #wr_h=wrs_Z*h_sub/fx
                        #wr_w=wrs_Z*w_sub/fx

                        h_offset = wrs_Z*(h/2)/fx - 0.01

                        tool_wm = wr_w
                        tool_hm= wr_h + h_offset

                        #pnl_cX = pnl_x + pnl_w/2

                        #pnl_cY = pnl_y + pnl_h/2

                        #p_pxl_hom=np.matrix([[pnl_cY],[pnl_cX],[1]])
                        #P_mtr_hom=np.dot(K_INV,p_pxl_hom)
                        #P_mtr=P_mtr_hom*(pnl_Z/P_mtr_hom[2][0])

                        cv2.drawContours(subout, [sbox.astype("int")], -1, (0, 0, 255), 2)

                        #cv2.rectangle(output, (x_sub+x,y_sub+y), (x_sub+x+w_sub,y_sub+y+h_sub+h/2), (255,0,0), 2)

                        #print(tl_sub)
                        #print('Hello')
                        cv2.putText(subout, "W={:.2f}".format(wr_w*1000), (x_sub,y_sub+h_sub/2-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
                        cv2.putText(subout, "H={:.2f}".format((wr_h + h_offset)*1000), (x_sub,y_sub+h_sub/2+10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
            
                        #cv2.circle(output, (pnl_cX, pnl_cY), 3, (0, 0, 255), -1)

                        size = self.label(np.array([tool_wm*1000,tool_hm*1000]))
                        #size = self.label(np.array([tool_wm*1000]))

                        detected_sizes=np.append(detected_sizes,size) 

                        #print(type(wr_subcnts),type(subc))
                        wr_subcnts = wr_subcnts + (subc,)

                        if size == self.tool_size:
                            #tool_contour = wr
                            toolIdentified = True

                            tool_indx = subcnt_idx    
                      
                            #tool_x = x_sub+x 
                            #tool_y = y_sub+y
                            #tool_w = w_sub
                            #tool_h = h_sub+h/2
                            
                            #print(tool_x,tool_y,tool_w,tool_h)
                            #print(x,y,w,h,x_sub,y_sub,w_sub,h_sub)

                cv2.imshow("subout", subout)
                cv2.waitKey(3)

                #print(detected_sizes)


                if len(detected_sizes) > 4 and len(detected_sizes) < 7 :
                    if toolIdentified:

                        self.tool_indx_vec = np.append(self.tool_indx_vec,tool_indx)  # is np.append memory an issue?
                        self.tool_indx_vec = self.tool_indx_vec[-2*self.win_size:]

                        tool_indx_win = self.tool_indx_vec[-self.win_size:] 
                        hist, bin_edges = np.histogram(tool_indx_win, np.arange(1, len(detected_sizes) + 1), density=True)

                        self.right_tool_idx = np.argmax(hist)
                        self.confidence = hist[self.right_tool_idx]
                        #print(array(range(1,len(detected_sizes)+1)))

                    if self.right_tool_idx > 0 and len(self.tool_indx_vec) > self.win_size:      
                        (x_sub, y_sub, w_sub, h_sub) = cv2.boundingRect(wr_subcnts[self.right_tool_idx])
                        tool_x = x_sub+x 
                        tool_y = y_sub+y
                        tool_w = w_sub
                        tool_h = h_sub+h/2

                        #subout2 = subimg.copy()
                        #cv2.rectangle(subout2, (int(x_sub), int(y_sub)), (int(x_sub) + int(w_sub), int(y_sub) + int(h_sub)), (255, 0, 255), 2)
                        #cv2.imshow("subout2", subout2)
                        #cv2.waitKey(10)
                        
                        cv2.rectangle(output, (int(tool_x), int(tool_y)), (int(tool_x) + int(tool_w), int(tool_y) + int(tool_h)), (255, 0, 255), 2)

                        self.tool_ROI.x_offset = tool_x
                        self.tool_ROI.y_offset = tool_y
                        self.tool_ROI.width = tool_w
                        self.tool_ROI.height = tool_h

                        #self.tool_ROI_pub.publish(self.tool_ROI)

                        tool_cX = tool_x + tool_w/2
                        tool_cY = tool_y + tool_h/2

                        tool_pxl_hom=np.matrix([[tool_cY],[tool_cX],[1]])
                        tool_mtr_hom=np.dot(K_INV,tool_pxl_hom)
                        tool_mtr=tool_mtr_hom*(wrs_Z/tool_mtr_hom[2][0])

                        cv2.putText(output, "X={}".format(-tool_mtr[0][0]), (30, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
                        cv2.putText(output, "Y={}".format(-tool_mtr[1][0]), (30, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
                        cv2.putText(output, "Z={}".format(tool_mtr[2][0]), (30, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)

                        cv2.putText(output, "Confidence={:.2f}".format(self.confidence), (30, 180), cv2.FONT_HERSHEY_SIMPLEX, 0.85, (255, 0, 255), 2)

                        cv2.circle(output, (int(tool_cX), int(tool_cY)), 3, (0, 0, 255), -1)

                        self.tool_pos.pose.position.x = -tool_mtr[0][0]
                        self.tool_pos.pose.position.y = -tool_mtr[1][0]
                        self.tool_pos.pose.position.z = tool_mtr[2][0]

                        #self.tool_pos.header.stamp.secs = int(str(self.camera_secs)[-3:]) 
                        self.tool_pos.header.stamp.secs = self.camera_secs
                        self.tool_pos.header.stamp.nsecs = self.camera_nsecs 

                        self.tool_pos.confidence = self.confidence

                        #self.tool_pos_pub.publish(self.tool_pos)
                            

        self.tool_pos_pub.publish(self.tool_pos)
        self.tool_ROI_pub.publish(self.tool_ROI)

        # show the output image
        cv2.imshow("out2", output)
        cv2.waitKey(3)
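
# The similarity gates above rely on cv2.matchShapes, which compares two
# contours through their Hu moment invariants; method codes 1, 2 and 3
# (CONTOURS_MATCH_I1/I2/I3) differ only in how the per-moment differences are
# combined, and 0.0 means a perfect match.  A tiny standalone sketch with
# synthetic contours (values are illustrative):
import numpy as np
import cv2

square = np.array([[0, 0], [0, 10], [10, 10], [10, 0]], dtype=np.int32).reshape(-1, 1, 2)
long_rect = np.array([[0, 0], [0, 10], [40, 10], [40, 0]], dtype=np.int32).reshape(-1, 1, 2)
print(cv2.matchShapes(square, square, 1, 0.0))     # 0.0: a shape matches itself
print(cv2.matchShapes(square, long_rect, 1, 0.0))  # > 0: the elongated box differs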
Exemple #32
def main():
    # essential variables
    distance_list = []

    # manage runtime arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--image', required = True,
        help = 'path to input image')
    arguments = vars(parser.parse_args())

     # load, preprocess image
    image = cv2.imread(arguments['image'])
    pre_image, ratio = preprocess_image(image)

    # find edges
    edges = auto_canny(pre_image)

    cv2.imwrite('edges.jpg', edges) # debug

    # find contours of the page
    screen_contours = process_contours(edges)

    # transform the image
    warped_image = four_point_transform(image,
        screen_contours.reshape(4, 2) * ratio)

    # preprocess image for landmark detection
    landmark_image = cv2.cvtColor(warped_image, cv2.COLOR_BGR2GRAY)
    landmark_image = cv2.GaussianBlur(landmark_image, (7, 7), 0)

    # detect edges, remove gaps between edges
    landmark_edges = auto_canny(landmark_image)
    landmark_edges = cv2.dilate(landmark_edges, None, iterations = 5)
    landmark_edges = cv2.erode(landmark_edges, None, iterations = 5)

    cv2.imwrite('output.jpg', landmark_edges) # debug

    # find, sort contours
    mark_contours = cv2.findContours(landmark_edges.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    
    # pick the contour list regardless of OpenCV version: findContours returns
    # (contours, hierarchy) on OpenCV 2/4 and (image, contours, hierarchy) on 3
    mark_contours = mark_contours[0] if len(mark_contours) == 2 else mark_contours[1]

    (mark_contours, _) = contours.sort_contours(mark_contours)

    # loop over contours 
    for contour in mark_contours:
        if cv2.contourArea(contour) < 100: # tweak this value
            continue

        # finds rectangle covering the landmark
        bounding_box = cv2.minAreaRect(contour)

        # remove artifact straight lines
        (points, dimension, angle) = bounding_box
        if dimension[0] < 7 or dimension[1] < 7: # tweak this value
            if points[0] < 3 or points[1] < 3: # tweak this value
                if abs(angle) < 5: # tweak this value
                    continue

        # points for bounding box from rectangle
        bounding_box = cv2.boxPoints(bounding_box)
        bounding_box = np.array(bounding_box, dtype='int')
        bounding_box = perspective.order_points(bounding_box)
        tl = bounding_box[0]

        # distance between top-left corner and landmark
        distance_list.append(int(dist.euclidean((0, 0), (tl[0], tl[1]))))

    print('points   :', distance_list) # debug
    print('count    :', len(distance_list)) # debug
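
# Several snippets in this collection call auto_canny without defining it;
# imutils ships an equivalent helper that derives both Canny thresholds from
# the image's median intensity.  A sketch of that helper:
import numpy as np
import cv2

def auto_canny(image, sigma=0.33):
    # median of the single-channel pixel intensities
    v = np.median(image)
    # lower/upper thresholds a fixed fraction below/above the median
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(image, lower, upper)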
Exemple #33
orig = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edged = imutils.auto_canny(gray)

# find contours in the edge map
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)

# loop over the (unsorted) contours and label them
for (i, c) in enumerate(cnts):
    orig = contours.label_contour(orig, c, i, color=(240, 0, 159))

# show the original image
cv2.imshow("Original", orig)

# loop over the sorting methods
for method in ("left-to-right", "right-to-left", "top-to-bottom", "bottom-to-top"):
    # sort the contours
    (cnts, boundingBoxes) = contours.sort_contours(cnts, method=method)
    clone = image.copy()

    # loop over the sorted contours and label them
    for (i, c) in enumerate(cnts):
        sortedImage = contours.label_contour(clone, c, i, color=(240, 0, 159))

    # show the sorted contour image
    cv2.imshow(method, sortedImage)

# wait for a keypress
cv2.waitKey(0)
def circleInflamArea(input_image_path,
                     input_temp_path,
                     output_image_path,
                     inflam_thresh_temp,
                     grey_threshold=50,
                     area_threshold=500):
    """
    input_image_path is the path of the segment lung thermal image
    Example: "output/demo_thermal_lung.jpg"

    input_temp_path is the path of text file that contains pixel-wise
    temperature information
    Example: "input/demo_temp.txt"

    output_image_path is the path of the lung thermal image with
    inflammation area circled
    Example: "output/"

    inflam_thresh_temp is the temperature value represents the minimum
    temperature that is considered as inflammation
    Example: 34.2

    grey_threshold is the threshold of grey channel value used to filter
    potential noise of the inflammation area selection
    Example: 50

    area_threshold is the threshold of pixel number required for an area
    to be considered as inflammation area
    Example: 300
    """
    # Import thermal image and pixel-wise temperature information file
    inflam_lung = cv2.imread(input_image_path)
    temp = np.loadtxt(input_temp_path)

    # Filter pixels that have higher than threshold temperature
    mask = temp > inflam_thresh_temp
    gray = cv2.cvtColor(inflam_lung, cv2.COLOR_BGR2GRAY)
    cropped = gray * mask

    # Process image to create a mask of inflammation area
    blurred = cv2.GaussianBlur(cropped, (11, 11), 0)
    thresh = cv2.threshold(blurred, grey_threshold, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.erode(thresh, None, iterations=2)
    thresh = cv2.dilate(thresh, None, iterations=4)

    # Process image to reduce noise of the mask
    labels = measure.label(thresh, connectivity=2, background=0)
    mask = np.zeros(thresh.shape, dtype="uint8")
    for label in np.unique(labels):
        if label == 0:
            continue

        labelMask = np.zeros(thresh.shape, dtype="uint8")
        labelMask[labels == label] = 255
        numPixels = cv2.countNonZero(labelMask)

        if numPixels > area_threshold:
            mask = cv2.add(mask, labelMask)

    # Find the contour of each inflammation area and draw circles if found any contours
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    if len(cnts) > 0:
        print("circleInflamArea Message: Found %s Inflammable Area(s)" %
              len(cnts))
        cnts = contours.sort_contours(cnts)[0]

        for c in cnts:
            ((cX, cY), radius) = cv2.minEnclosingCircle(c)
            cv2.circle(inflam_lung, (int(cX), int(cY)),
                       int(radius) + 10, (0, 0, 255), 2)
    else:
        print("circleInflamArea Message: No Inflammation Area Found")

    # Output the processed image
    out_name = input_image_path.split('/')[-1].split('.')[0] + '_circled.png'
    cv2.imwrite(output_image_path + out_name, inflam_lung)
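
# Example invocation, reusing the sample paths and values from the docstring
# above (paths are illustrative; adjust them to the actual project layout):
# circleInflamArea("output/demo_thermal_lung.jpg", "input/demo_temp.txt",
#                  "output/", 34.2, grey_threshold=50, area_threshold=300)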
edged = cv2.erode(edged, None, iterations=1)
# show the image
# cv2.imshow("Image", edged) # 415 x 600 grayscale image (0 or 255)
# cv2.waitKey(0)

# find contours in the edge map
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)  # python list of 6 x [variable] (ragged)

# cnts dimensions: 6 x [174, 54, 66, 62, 221, 384]
# cnts[0][0] = [[474 241]]

# sort the contours from left-to-right and initialize the bounding box
# point colors
(cnts, _) = contours.sort_contours(
    cnts)  # new dimensions: 6 x [66, 221, 62, 174, 384, 54]
colors = ((0, 0, 255), (240, 0, 159), (255, 0, 0), (255, 255, 0))

# loop over the contours individually
for (i, c) in enumerate(cnts):

    # if the contour is not sufficiently large, ignore it
    if cv2.contourArea(c) < 100:
        continue

    # compute the rotated bounding box of the contour, then
    # draw the contours
    box = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(
        box)  # 4x2 array (floats)
    box = np.array(box, dtype="int")  # 4x2 np.ndarray (ints)
def brightspot_detect(image_file, width):
    pattern = cv2.imread(image_file)
    gray = cv2.cvtColor(pattern, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (7, 7), 0)
    thresh = cv2.threshold(blurred, 65, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.erode(thresh, None, iterations=2)
    thresh = cv2.dilate(thresh, None, iterations=4)

    edged = cv2.Canny(blurred, 100, 255)
    edged = cv2.dilate(edged, None, iterations=1)
    edged = cv2.erode(edged, None, iterations=1)
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    (cnts, _) = contours.sort_contours(cnts)
    pixelsPerMetric = None
    for c in cnts:
        # if the contour is not sufficiently large, ignore it
        if cv2.contourArea(c) < 100:
            continue
        # compute the rotated bounding box of the contour
        orig = pattern.copy()
        box = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
        box = np.array(box, dtype="int")
        # order the points in the contour such that they appear
        # in top-left, top-right, bottom-right, and bottom-left
        # order, then draw the outline of the rotated bounding
        # box
        box = perspective.order_points(box)
        cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
        # loop over the original points and draw them
        for (x, y) in box:
            cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)
    # note: this block sits outside the contour loop, so it only measures the
    # rotated box of the last sufficiently large contour
    (tl, tr, br, bl) = box
    (tltrX, tltrY) = midpoint(tl, tr)
    (blbrX, blbrY) = midpoint(bl, br)
    # compute the midpoint between the top-left and top-right points,
    # followed by the midpoint between the top-right and bottom-right
    (tlblX, tlblY) = midpoint(tl, bl)
    (trbrX, trbrY) = midpoint(tr, br)
    # draw the midpoints on the image
    cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
    # draw lines between the midpoints
    cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
             (255, 0, 255), 2)
    cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
             (255, 0, 255), 2)
    # compute the Euclidean distance between the midpoints
    dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
    dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
    # if the pixels per metric has not been initialized, then
    # compute it as the ratio of pixels to supplied metric
    # (in this case, inches)
    if pixelsPerMetric is None:
        pixelsPerMetric = dB / width

    fig = plt.figure("Images")
    images = ("Pass", thresh)
    name = images[0]
    image = images[1]
    # show the image
    ax = fig.add_subplot(1, 1, 1)
    ax.set_title(name)
    plt.imshow(image, cmap=plt.cm.gray)
    plt.axis("on")

    plt.show()
    return fig
    def process_image(self, frame):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.medianBlur(gray, 5)

        # Get Parameters from ROS interface
        MinThreshold = rospy.get_param('~MinThreshold')
        MaxThreshold = rospy.get_param('~MaxThreshold')
        MinAreaThreshold = rospy.get_param('~MinAreaThreshold')
        ReferenceMeasure = rospy.get_param('~ReferenceMeasure')

        # perform edge detection, then perform a dilation + erosion to
        # close gaps in between object edges
        edged = cv2.Canny(gray, MinThreshold, MaxThreshold)

        kernel = np.ones((3, 3), np.uint8)
        edged = cv2.dilate(edged, kernel, iterations=1)

        # find contours in the edge map
        cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]

        # sort the contours from left-to-right and initialize the
        # 'pixels per metric' calibration variable
        (cnts, _) = contours.sort_contours(cnts)

        # compute the rotated bounding box of the contour
        processed_frame = frame.copy()

        # loop over the contours individually
        for c in cnts:

            # if the contour is not sufficiently large, ignore it
            if cv2.contourArea(c) < MinAreaThreshold:
                continue

            hull = cv2.convexHull(c, returnPoints=True)

            # compute the rotated bounding box of the contour
            box = cv2.minAreaRect(hull)
            box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
            box = np.array(box, dtype="int")

            # order the points in the contour such that they appear
            # in top-left, top-right, bottom-right, and bottom-left
            # order, then draw the outline of the rotated bounding

            # box
            box = perspective.order_points(box)
            cv2.drawContours(processed_frame, [
                box.astype("int")], -1, (0, 0, 255), 2)

            # loop over the processed_frame points and draw them
            for (x, y) in box:
                cv2.circle(processed_frame, (int(x), int(y)),
                           5, (0, 0, 255), -1)

            # unpack the ordered bounding box, then compute the midpoint
            # between the top-left and top-right coordinates, followed by
            # the midpoint between bottom-left and bottom-right coordinates
            (tl, tr, br, bl) = box
            (tltrX, tltrY) = self.midpoint(tl, tr)
            (blbrX, blbrY) = self.midpoint(bl, br)

            # compute the midpoint between the top-left and top-right points,
            # followed by the midpoint between the top-right and bottom-right
            (tlblX, tlblY) = self.midpoint(tl, bl)
            (trbrX, trbrY) = self.midpoint(tr, br)

            # if the contour is not sufficiently large, ignore it
            if cv2.contourArea(c) < 10000:
                # draw the midpoints on the frame
                cv2.circle(processed_frame, (int(tltrX),
                                             int(tltrY)), 5, (255, 0, 0), -1)
                cv2.circle(processed_frame, (int(blbrX),
                                             int(blbrY)), 5, (255, 0, 0), -1)
                cv2.circle(processed_frame, (int(tlblX),
                                             int(tlblY)), 5, (255, 0, 0), -1)
                cv2.circle(processed_frame, (int(trbrX),
                                             int(trbrY)), 5, (255, 0, 0), -1)

                # draw lines between the midpoints
                cv2.line(processed_frame, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
                         (255, 0, 255), 2)
                cv2.line(processed_frame, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
                         (255, 0, 255), 2)

            # compute the Euclidean distance between the midpoints
            dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
            dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))

            # if the pixels per metric has not been initialized, then
            # compute it as the ratio of pixels to supplied metric
            # (in this case, inches)
            if self.pixelsPerMetric is None:
                self.pixelsPerMetric = dB / ReferenceMeasure

            # compute the size of the object
            dimA = dA / self.pixelsPerMetric
            dimB = dB / self.pixelsPerMetric

            if cv2.contourArea(c) < 10000:
                # draw the object sizes on the frame
                cv2.putText(processed_frame, "{:.1f}mm".format(dimA),
                            (int(tltrX - 15), int(tltrY - 10)
                             ), cv2.FONT_HERSHEY_SIMPLEX,
                            0.70, (0, 0, 255), 2)
                cv2.putText(processed_frame, "{:.1f}mm".format(dimB),
                            (int(trbrX + 10), int(trbrY)
                             ), cv2.FONT_HERSHEY_SIMPLEX,
                            0.70, (0, 0, 255), 2)

        return processed_frame
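
    # Note on the parameters read at the top of process_image: rospy.get_param
    # raises KeyError when a parameter is unset; passing a default avoids that,
    # e.g. (the default value here is hypothetical):
    #     MinThreshold = rospy.get_param('~MinThreshold', 50)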
	def camera_callback(self, data):
		# Return if node is not enabled
		if (not self.is_node_enabled):
		    return


		# Node is enabled, process the camera data
		try:
			cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
		except CvBridgeError as e:
			print(e)

		img = cv_image.copy()
		#img_org = cv2.imread('/home/mbzirc-01/Pictures/wrenches_blk_2.jpg')
		#img_org = cv2.imread('/home/mbzirc-01/Pictures/panel_query.JPG')
		#img = imutils.resize(img_org, width=640)

		output = img.copy()

		gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
		blurred = cv2.GaussianBlur(gray, (5, 5), 0)
		#thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY_INV)[1]
		thresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,3,2)

		# perform edge detection, then perform a dilation + erosion to close gaps in between object edges
		edged = cv2.Canny(blurred, 20, 150)
		edged = cv2.dilate(edged, None, iterations=1)
		edged = cv2.erode(edged, None, iterations=1)
		edged = cv2.erode(edged, None, iterations=1)

		edged2 = auto_canny(blurred)
		edged3 = cv2.dilate(edged2.copy(), None, iterations=1)
		edged4 = cv2.erode(edged3.copy(), None, iterations=1)

		kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(4,50))
		filled = cv2.morphologyEx(edged4, cv2.MORPH_CLOSE, kernel)

		"""
		cv2.imshow("thresh", thresh)
		cv2.waitKey(10)

		cv2.imshow("edged4", edged4)
		cv2.waitKey(10)

		cv2.imshow("filled", filled)
		cv2.waitKey(10)
		"""

		# find contours in the thresholded image and initialize the shape detector
		#cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
		cnts = cv2.findContours(filled.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
		cnts = cnts[0] if imutils.is_cv2() else cnts[1]

		# sort the contours from left-to-right and initialize the
		(cnts, _) = contours.sort_contours(cnts)

		#zlab = ToolLabeler()

		# loop over the contours
		simil = []
		wr_contours = []
		cr_contours = []
		pnl_contour = []
		dim_v = []
		wr_count = 0
		cr_count = 0
		circleDetected = False
		toolIdentified = False
		for c in cnts:
			# compute the center of the contour, then detect the name of the
			# shape using only the contour
			M = cv2.moments(c)
			#cX = int((M["m10"] / M["m00"]))
			#cY = int((M["m01"] / M["m00"]))

			retSim = cv2.matchShapes(cnts_s[0],c,3,0.0)
			simil = np.hstack((simil,retSim))

		 
			# multiply the contour (x, y)-coordinates by the resize ratio,
			# then draw the contours and the name of the shape on the image
			c = c.astype("float")
			c *= 1
			c = c.astype("int")
			text = "{}".format(shape)

			# if the contour is too large or too small, ignore it
			if cv2.contourArea(c) < 80 or cv2.contourArea(c) > 0.1*img.shape[0]*img.shape[1]:
				continue

			# approximate the contour
			peri = cv2.arcLength(c, True)
			approx = cv2.approxPolyDP(c, 0.01 * peri, True)
			(x, y, w, h) = cv2.boundingRect(approx)
			aspectRatio = w / float(h)

			#print(len(approx),aspectRatio)
		 
			# compute the rotated bounding box of the contour
			#orig = image.copy()
			box = cv2.minAreaRect(c)
			box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
			box = np.array(box, dtype="int")
		 
			# order the points in the contour such that they appear
			# in top-left, top-right, bottom-right, and bottom-left
			# order, then draw the outline of the rotated bounding
			# box
			box = perspective.order_points(box)
			#cv2.drawContours(output, [box.astype("int")], -1, (0, 255, 0), 2)
			#print('hello',[box.astype("int")])
			(tl, tr, br, bl) = box

			dA = distance.euclidean(tl, bl)
			dB = distance.euclidean(tl, tr)

			keepRatio = aspectRatio > 0.1 and aspectRatio < 0.3
			keepSimilarity = retSim > 0.8

			Circle = len(approx)>8 and aspectRatio > 0.8 and aspectRatio < 1.2 

			if keepRatio and keepSimilarity:
				wr_count = wr_count + 1
				#wr_contours.append(c)
				# np.append flattens the contour points; they are reshaped back
				# to (1, N, 2) after the loop
				wr_contours = np.append(wr_contours, c)
				#wr_contours = np.concatenate((wr_contours,c))
				cv2.drawContours(output, [c], -1, (0, 255, 0), 2)

				(t_x, t_y, t_w, t_h) = cv2.boundingRect(c)
				dim = (t_w, t_h)
				dim_v = np.append(dim_v,dim)

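				# self.label is assumed to map the bounding-box (width, height)
				# to a discrete size class compared against self.tool_size; its
				# definition lies outside this snippet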
				size = self.label(np.array([t_w,t_h]))

				if size == self.tool_size:
					tool_contour = c
					toolIdentified = True

			if isCircle:
				cr_count = cr_count + 1
				cr_contours = np.append(cr_contours, c)
				#cv2.drawContours(output, [c], -1, (255, 0, 0), 2)

			#size = zlab.label(np.array([dA,dB]))

			#print(int(bl[0]))

			#cv2.putText(output, "({:d},{:d})".format(int(dA),int(dB)), (int(bl[0])-15,int(bl[1])+25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 0, 2)
			#cv2.putText(output, size, (int(bl[0])-15,int(bl[1])+55), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 2)
		 

			# show the intermediate output after each contour (debug view)
			cv2.imshow("out1", output)
			cv2.waitKey(10)

		# fit one upright rectangle around all wrench contours and publish it;
		# skip when no wrench candidate was found, since cv2.boundingRect
		# cannot handle an empty point set
		if wr_count > 0:
			wr_contours = np.reshape(wr_contours, (1, -1, 2))
			wr_contours = wr_contours.astype(int)
			(wrs_x, wrs_y, wrs_w, wrs_h) = cv2.boundingRect(wr_contours)
			cv2.rectangle(output, (wrs_x, wrs_y), (wrs_x + wrs_w, wrs_y + wrs_h), (255, 0, 0), 2)
			self.wrenches_ROI.x_offset = wrs_x
			self.wrenches_ROI.y_offset = wrs_y
			self.wrenches_ROI.width = wrs_w
			self.wrenches_ROI.height = wrs_h
			self.wrenches_ROI_pub.publish(self.wrenches_ROI)
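		# downstream consumers can crop the wrench region from the frame with
		# plain numpy slicing, e.g. img[wrs_y:wrs_y + wrs_h, wrs_x:wrs_x + wrs_w]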

		"""	
		wrs_Rect = cv2.minAreaRect(wr_contours)
		wrs_Rect = cv2.cv.BoxPoints(wrs_Rect) if imutils.is_cv2() else cv2.boxPoints(wrs_Rect)
		wrs_Rect = np.array(wrs_Rect, dtype="int")
		wrs_Rect = perspective.order_points(wrs_Rect)
		(wrs_tl, wrs_tr, wrs_br, wrs_bl) = wrs_Rect
		wrs_dA = distance.euclidean(wrs_tl, wrs_bl)
		wrs_dB = distance.euclidean(wrs_tl, wrs_tr)
		cv2.drawContours(output, [wrs_Rect.astype("int")], -1, (255, 0, 0), 2)
		"""

		# exactly one circular contour is taken to be the valve
		if cr_count == 1:

			cr_contours = np.reshape(cr_contours,(1,-1,2))
			cr_contours = cr_contours.astype(int)
			(vlv_x, vlv_y, vlv_w, vlv_h) = cv2.boundingRect(cr_contours)
			cv2.rectangle(output, (vlv_x, vlv_y), (vlv_x + vlv_w, vlv_y + vlv_h), (255, 0, 0), 2)
			self.valve_ROI.x_offset = vlv_x
			self.valve_ROI.y_offset = vlv_y
			self.valve_ROI.width = vlv_w
			self.valve_ROI.height = vlv_h

			self.valve_ROI_pub.publish(self.valve_ROI)

			# the panel ROI is the bounding box of the wrench and valve
			# contours combined
			pnl_contour = np.append(wr_contours, cr_contours)
			pnl_contour = np.reshape(pnl_contour, (1, -1, 2))
			pnl_contour = pnl_contour.astype(int)
			(pnl_x, pnl_y, pnl_w, pnl_h) = cv2.boundingRect(pnl_contour)
			cv2.rectangle(output, (pnl_x, pnl_y), (pnl_x + pnl_w, pnl_y + pnl_h), (0, 0, 255), 2)

			#print(dim_v)

			rospy.loginfo("Found Panel")

			self.panel_ROI.x_offset = pnl_x
			self.panel_ROI.y_offset = pnl_y
			self.panel_ROI.width = pnl_w
			self.panel_ROI.height = pnl_h

			self.panel_ROI_pub.publish(self.panel_ROI)

			#result = WrenchDetectionResult()

			#result.ROI = [pnl_x, pnl_y, pnl_w, pnl_h]
			#self.server.set_succeeded(result)

			# Disable the node since it found its target
			#self.is_node_enabled = False

		# a wrench matching the requested tool size was found; publish its ROI
		if toolIdentified:
			(tool_x, tool_y, tool_w, tool_h) = cv2.boundingRect(tool_contour)
			cv2.rectangle(output, (tool_x, tool_y), (tool_x + tool_w, tool_y + tool_h), (255, 0, 255), 2)

			self.tool_ROI.x_offset = tool_x
			self.tool_ROI.y_offset = tool_y
			self.tool_ROI.width = tool_w
			self.tool_ROI.height = tool_h

			self.tool_ROI_pub.publish(self.tool_ROI)

		# show the final annotated output image
		cv2.imshow("out2", output)
		cv2.waitKey(10)
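
# A minimal sketch (an assumption, not from the original source) of the state
# this callback relies on: sensor_msgs/RegionOfInterest messages plus one ROS
# publisher per region. The topic names below are illustrative guesses.
#
#   import rospy
#   from sensor_msgs.msg import RegionOfInterest
#
#   self.wrenches_ROI = RegionOfInterest()
#   self.valve_ROI = RegionOfInterest()
#   self.panel_ROI = RegionOfInterest()
#   self.tool_ROI = RegionOfInterest()
#   self.wrenches_ROI_pub = rospy.Publisher('wrenches_roi', RegionOfInterest, queue_size=1)
#   self.valve_ROI_pub = rospy.Publisher('valve_roi', RegionOfInterest, queue_size=1)
#   self.panel_ROI_pub = rospy.Publisher('panel_roi', RegionOfInterest, queue_size=1)
#   self.tool_ROI_pub = rospy.Publisher('tool_roi', RegionOfInterest, queue_size=1)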