Example #1
def adapRotate(option, *args):

    path = 'rotate/real' if option.upper() == 'T' else 'rotate/fake'

    os.makedirs(path, exist_ok=True)
    gray = args[0].copy()

    thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY_INV, 25, 15)
    conts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                             cv2.CHAIN_APPROX_SIMPLE)
    conts = grab_contours(conts)

    clone = args[1].copy()
    for cont in conts:
        box = cv2.minAreaRect(cont)
        # minAreaRect returns ((center_x, center_y), (width, height), angle)
        (center, size, theta) = box
        box = np.int0(cv2.cv.BoxPoints(box) if is_cv2() else cv2.boxPoints(box))
        cv2.drawContours(clone, [box], -1, (0, 255, 0), 1)

        print(
            f'center : ({center[0]:.2f}, {center[1]:.2f})\n'
            f'size : {size[0]:.2f} x {size[1]:.2f}\n'
            f'theta : {theta:.2f}\n'
        )

    cv2.imwrite(f'{path}/{fileName}.jpg', clone)
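The adapRotate snippet relies on imports and a fileName variable that the excerpt does not show; a minimal sketch of that setup, with hypothetical names and paths, might look like this:

# Assumed setup for adapRotate (hypothetical; the original defines these elsewhere)
import os
import cv2
import numpy as np
from imutils import grab_contours, is_cv2

fileName = 'sample'  # placeholder; defined elsewhere in the original script
gray = cv2.imread('input.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical input image
color = cv2.imread('input.jpg')
adapRotate('T', gray, color)  # args[0]: grayscale image, args[1]: image to draw on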
    def RotatedRatio(self):
        # compute the rotated bounding box of the contour
        box_rotated = cv2.minAreaRect(self.region)
        box_rotated = cv2.cv.BoxPoints(
            box_rotated) if imutils.is_cv2() else cv2.boxPoints(box_rotated)
        box_rotated = np.array(box_rotated, dtype="int")
        b = box_rotated
        # order the points in the contour such that they appear
        # in top-left, top-right, bottom-right, and bottom-left
        # order, then draw the outline of the rotated bounding
        # box
        box_rotated = perspective.order_points(box_rotated)
        # unpack the ordered bounding box, then compute the midpoint
        # between the top-left and top-right coordinates, followed by
        # the midpoint between the bottom-left and bottom-right coordinates
        (tl, tr, br, bl) = box_rotated
        (tltrX, tltrY) = propriété_forme.midpoint(tl, tr)
        (blbrX, blbrY) = propriété_forme.midpoint(bl, br)
        # compute the midpoint between the top-left and bottom-left points,
        # followed by the midpoint between the top-right and bottom-right
        (tlblX, tlblY) = propriété_forme.midpoint(tl, bl)
        (trbrX, trbrY) = propriété_forme.midpoint(tr, br)
        points = ((tltrX, tltrY), (blbrX, blbrY), (tlblX, tlblY), (trbrX, trbrY))
        # compute the Euclidean distance between the midpoints
        dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
        dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
        # the small epsilon guards against division by zero
        ratio_rotated = round(dA / (dB + 1e-13), 4)
        return dA, dB, ratio_rotated, points
    def RotatedBoundingBox(self):
        # compute the rotated bounding box of the contour
        cnt = self.region
        box_rotated = cv2.minAreaRect(cnt)
        box_rotated = cv2.cv.BoxPoints(
            box_rotated) if imutils.is_cv2() else cv2.boxPoints(box_rotated)
        # boxPoints returns floats; cast the corner data to integers
        box = np.array(box_rotated, dtype="int")

        return box, box
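Several of the examples that follow call a midpoint helper (here via propriété_forme.midpoint, later as a bare midpoint) without defining it; a minimal sketch of what such a helper presumably looks like:

# Assumed helper (not shown in the source): point halfway between two (x, y) coordinates
def midpoint(ptA, ptB):
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)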
def take_photo():
    camera = PiCamera()  # Open port to camera.
    camera.resolution = (300, 300)  # Take small photo.
    camera.zoom = (0.25, 0.25, 0.5, 0.5
                   )  # Zoom in to avoid photographing platform edges.
    camera.start_preview()
    sleep(2)  # 2 seconds is minimum delay for camera to prepare.
    camera.capture('/home/pi/Desktop/img/image3.jpg')  # Take photo.
    camera.stop_preview()
    camera.close()  # Close open connection to camera port.
    image = cv2.imread(
        '/home/pi/Desktop/img/image3.jpg')  # Load image for analysis.
    gray = cv2.cvtColor(image,
                        cv2.COLOR_BGR2GRAY)  # Convert image to grayscale.
    edged = cv2.Canny(
        gray, 25, 100)  # Apply edge detector with threshold (converts to B&W).
    edged = cv2.dilate(edged, np.ones((5, 5), np.uint8), iterations=5)  # Pixel expansion with a 5x5 kernel.
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    (cnts, _) = contours.sort_contours(cnts)
    colors = ((0, 0, 255), (240, 0, 159), (255, 0, 0), (255, 255, 0)
              )  # Bounding box line colors.

    global area  # Running total used to determine if object is large or small.
    for (i, c) in enumerate(cnts):
        if cv2.contourArea(c) < 100:
            continue
        area += cv2.contourArea(c)
        print('contour area: ', cv2.contourArea(c))
        print('area (inside): ', area)
        box = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
        box = np.array(box, dtype="int")
        cv2.drawContours(image, [box], -1, (0, 255, 0), 2)
        rect = order_points(box)
        for ((x, y), color) in zip(rect, colors):
            cv2.circle(image, (int(x), int(y)), 5, color, -1)
        cv2.putText(image, "Object #{}".format(i + 1),
                    (int(rect[0][0] - 15), int(rect[0][1] - 15)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.55, (255, 255, 255), 2)
        cv2.imshow("window", image)
        cv2.waitKey(3000)
        cv2.destroyAllWindows()
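take_photo() also depends on imports and a global area counter that the excerpt omits; a minimal sketch of that setup, assuming order_points comes from imutils.perspective:

# Assumed setup for take_photo() (hypothetical; the original defines these elsewhere)
import cv2
import imutils
import numpy as np
from time import sleep
from picamera import PiCamera
from imutils import contours
from imutils.perspective import order_points

area = 0  # running total of contour areas, updated inside take_photo()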
Example #5
def main():
	# construct the argument parse and parse the arguments
	ap = argparse.ArgumentParser()
	ap.add_argument("-v", "--video",
		help = "path to the (optional) video file")
	args = vars(ap.parse_args())

	# grab the reference to the current frame, list of ROI
	# points and whether or not it is ROI selection mode
	global frame, roiPts, inputMode

	# if the video path was not supplied, grab the reference to the
	# camera
	if not args.get("video", False):
		camera = cv2.VideoCapture(0)

	# otherwise, load the video
	else:
		camera = cv2.VideoCapture(args["video"])

	# setup the mouse callback
	cv2.namedWindow("frame")
	cv2.setMouseCallback("frame", selectROI)

	# initialize the termination criteria for cam shift, indicating
	# a maximum of ten iterations or movement by at least one pixel
	# along with the bounding box of the ROI
	termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
	roiBox = None

	# keep looping over the frames
	while True:
		# grab the current frame
		(grabbed, frame) = camera.read()

		# check to see if we have reached the end of the
		# video
		if not grabbed:
			break

		# see if the ROI has been computed
		if roiBox is not None:
			# convert the current frame to the HSV color space
			# and perform mean shift
			hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
			backProj = cv2.calcBackProject([hsv], [0], roiHist, [0, 180], 1)

			# apply cam shift to the back projection, convert the
			# points to a bounding box, and then draw them
			(r, roiBox) = cv2.CamShift(backProj, roiBox, termination)
			pts = np.int0(cv2.boxPoints(r))  # use cv2.cv.BoxPoints(r) on OpenCV 2
			cv2.polylines(frame, [pts], True, (0, 255, 0), 2)

		# show the frame and record if the user presses a key
		cv2.imshow("frame", frame)
		key = cv2.waitKey(1) & 0xFF

		# handle if the 'i' key is pressed, then go into ROI
		# selection mode
		if key == ord("i") and len(roiPts) < 4:
			# indicate that we are in input mode and clone the
			# frame
			inputMode = True
			orig = frame.copy()

			# keep looping until 4 reference ROI points have
			# been selected; press any key to exit ROI selection
			# mode once 4 points have been selected
			while len(roiPts) < 4:
				cv2.imshow("frame", frame)
				cv2.waitKey(0)

			# determine the top-left and bottom-right points
			roiPts = np.array(roiPts)
			s = roiPts.sum(axis = 1)
			tl = roiPts[np.argmin(s)]
			br = roiPts[np.argmax(s)]

			# grab the ROI for the bounding box and convert it
			# to the HSV color space
			roi = orig[tl[1]:br[1], tl[0]:br[0]]
			roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
			#roi = cv2.cvtColor(roi, cv2.COLOR_BGR2LAB)

			# compute a HSV histogram for the ROI and store the
			# bounding box
			roiHist = cv2.calcHist([roi], [0], None, [16], [0, 180])
			roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
			roiBox = (tl[0], tl[1], br[0], br[1])

		# if the 'q' key is pressed, stop the loop
		elif key == ord("q"):
			break

	# cleanup the camera and close any open windows
	camera.release()
	cv2.destroyAllWindows()
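Both CamShift examples register a selectROI mouse callback and share the frame, roiPts and inputMode globals without showing them; a minimal sketch of that assumed setup and callback:

# Assumed globals and mouse callback for the CamShift examples (not shown in the excerpt)
frame = None
roiPts = []
inputMode = False

def selectROI(event, x, y, flags, param):
	# while in input mode, record up to four clicked ROI corners and mark them
	global frame, roiPts, inputMode
	if inputMode and event == cv2.EVENT_LBUTTONDOWN and len(roiPts) < 4:
		roiPts.append((x, y))
		cv2.circle(frame, (x, y), 4, (0, 255, 0), 2)
		cv2.imshow("frame", frame)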
Example #6
# find contours in the edge map
cnts = cv2.findContours(image_res_thre.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# sort the contours from left-to-right and initialize the bounding box point colors
(cnts, _) = contours.sort_contours(cnts)
colors = ((0, 0, 255), (240, 0, 159), (255, 0, 0), (255, 255, 0))
coords = []
testing = []
testing2 = []
for (i, c) in enumerate(cnts):
    if cv2.contourArea(c) < 100:
        continue

    box = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
    print("type", type(box))
    one = box[1]
    print("one", one)
    print("box", box)
    box = np.array(box, dtype="int")
    cv2.drawContours(image, [box], -1, (0,255,0), 2)
    print("Object #{}:".format(i+1))
    print(box)

    rect = order_points_old(box)

    print("2113213122", rect)

    # cv2.circle(image, (267, 388), 8, (10, 150, 150), -1)
    # cv2.circle(image, (390, 383), 10, (110, 15, 50), -1)
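order_points_old is called above but not defined in the excerpt; a common sketch of such a corner-ordering helper (assumed, not taken from this source) arranges the box points as top-left, top-right, bottom-right, bottom-left:

# Assumed helper: order four box points as (tl, tr, br, bl)
def order_points_old(pts):
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]     # top-left has the smallest x + y sum
    rect[2] = pts[np.argmax(s)]     # bottom-right has the largest sum
    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]  # top-right has the smallest y - x difference
    rect[3] = pts[np.argmax(diff)]  # bottom-left has the largest difference
    return rect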
def image_callback(ros_image):
    print('got an image')
    global bridge, midpoint, boundaries

    #convert ros_image into an opencv image

    frame0 = bridge.imgmsg_to_cv2(ros_image, "bgr8")
    frame1 = bridge.imgmsg_to_cv2(ros_image, "bgr8")
    frame2 = bridge.imgmsg_to_cv2(ros_image, "bgr8")
    frame3 = bridge.imgmsg_to_cv2(ros_image, "bgr8")

    (h, w, d) = frame1.shape
    # print(frame1.shape)
    pts = deque(maxlen=args["buffer"])
    counter = 0
    (dX, dY) = (0, 0)
    direction = ""

    time.sleep(1 / 42)
    cv2.rectangle(frame0, (0, 0), (25, 25), (0, 0, 255), 2)

    diff_size = cv2.absdiff(frame0, frame3)

    diff_coordinates = cv2.absdiff(frame1, frame2)

    ######## color filtering for red
    for (lower, upper) in boundaries:

        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")

        mask = cv2.inRange(frame1, lower, upper)

    cv2.imshow("mask", mask)

    ####### color filtering for red

    #image processing for coordinates
    gray = cv2.cvtColor(diff_coordinates, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=1)

    # cv2.imshow("imaged dilated", dilated.copy())
    # cv2.imshow("frame1 copy", frame1.copy())

    # image processing for SIZE
    gray_size = cv2.cvtColor(diff_size, cv2.COLOR_BGR2GRAY)
    blur_size = cv2.GaussianBlur(gray_size, (5, 5), 0)
    _, thresh_size = cv2.threshold(blur_size, 20, 255, cv2.THRESH_BINARY)
    dilated_size = cv2.dilate(thresh_size, None, iterations=1)

    # cv2.imshow("dilated size", dilated_size)

    edged = cv2.Canny(diff_size, 50, 100)
    edged = cv2.dilate(edged, None, iterations=1)
    edged = cv2.erode(edged, None, iterations=1)

    # cv2.imshow("edged copy", edged.copy())

    ## shapes
    gray_shapes = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    blurred_shapes = cv2.GaussianBlur(gray_shapes, (5, 5), 0)
    thresh_shapes = cv2.threshold(blurred_shapes, 60, 255,
                                  cv2.THRESH_BINARY)[1]

    ## by this point we're ready to find and draw shape contours

    # first extract controus from the image
    cnts_shapes = cv2.findContours(thresh_shapes.copy(), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    cnts_shapes = imutils.grab_contours(cnts_shapes)
    for c in cnts_shapes:
        cv2.drawContours(frame1, [c], -1, (0, 255, 0), 2)

    # display the total number of shapes on the image
    text = "I found {} total shapes".format(len(cnts_shapes))
    cv2.putText(frame1, text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (0, 0, 255), 2)

    # shapes

    # HERE THERE ARE TWO CONTOUR CAPTURING METHODS. CON FOR THE COORDINATES AND CNTS FOR THE SIZE

    con = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    con = imutils.grab_contours(con)  # works across OpenCV 2/3/4 return signatures

    # this handles the contours for the size. note the contours need to be sorted so that we can use our square as ref

    # compared edged to dilated_size and dilated_size performs better
    cnts = cv2.findContours(dilated_size, cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    (cnts, _) = contours.sort_contours(cnts)

    # cv2.imshow("image edged,", edged)

    pixelsPerMetric = None

    for (i, c) in enumerate(cnts):

        if cv2.contourArea(c) < 250:
            continue

        box = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)

        for (x, y) in box:
            cv2.circle(frame1, (int(x), int(y)), 2, (0, 0, 255), -1)

        (tl, tr, br, bl) = box
        (tltrX, tltrY) = midpoint(tl, tr)
        (blbrX, blbrY) = midpoint(bl, br)

        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)

        cv2.circle(frame1, (int(tltrX), int(tltrY)), 1, (255, 0, 0), -1)
        cv2.circle(frame1, (int(blbrX), int(blbrY)), 1, (255, 0, 0), -1)
        cv2.circle(frame1, (int(tlblX), int(tlblY)), 1, (255, 0, 255), -1)
        cv2.circle(frame1, (int(trbrX), int(trbrY)), 1, (255, 0, 255), -1)

        dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))  # height in pixels
        dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))  # width in pixels

        if pixelsPerMetric is None:
            #     # pixelsPerMetric = dB / args["width"]
            pixelsPerMetric = 0.22  # top-right contour is about a quarter inch by a quarter inch; this was found by measuring that 106 px span an inch
        #250
        dimA = dA / pixelsPerMetric  # pixels divided by the approximate pixel size in inches of the first contour
        dimB = dB / pixelsPerMetric

        appx_area = dA * dB

        cv2.putText(frame1, '{:.1f}" px in x'.format(dimB),
                    (int(tltrX - 15), int(tltrY - 10)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)
        cv2.putText(frame1, '{:.1f}" px in y'.format(dimA),
                    (int(trbrX - 120), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
                    0.4, (0, 0, 255), 1)
        cv2.putText(frame1, "{:.1f} pxsq".format(appx_area),
                    (int(trbrX - 140), int(trbrY + 20)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)

        # below is for coordinates
    if len(con) > 0:

        c = max(con, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        if radius > 0:

            cv2.circle(frame1, center, 1, (0, 0, 255), -1)
            pts.appendleft(center)

    for c in con:
        M = cv2.moments(c)

        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])

        ex = (Decimal(cX) - Decimal(frame_x)) / Decimal(frame_x)
        why = -1 * (Decimal(cY) - Decimal(frame_y)) / Decimal(frame_y)

        (x, y, w, h) = cv2.boundingRect(c)

        if cv2.contourArea(c) < 250:
            continue
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame1, 'UTEP: {}'.format('DETECTED'), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 69, 255), 3)
        cv2.putText(frame1, "dx: {},        dy: {}".format(ex, why),
                    (10, frame1.shape[0] - 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 255, 0), 3)
        cv2.putText(frame1, "dx: {},        dy: {}".format(cX, cY),
                    (10, frame1.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 255, 0), 3)

        for i in np.arange(1, len(pts)):
            if pts[i - 1] is None or pts[i] is None:
                continue

            if counter >= 10 and i == 1 and pts[-10] is not None:
                dX = pts[-10][0] - pts[i][0]
                dY = pts[-10][1] - pts[i][1]
                (dirX, dirY) = ("", "")

                if np.abs(dX) > 20:
                    dirX = "east" if np.sign(dX) == 1 else "west"

                if np.abs(dY) > 20:
                    dirY = "Nort" if np.sign(dY) == 1 else "south"

                if dirX != "" and dirY != "":
                    direction = "{}-{}.format" (dirY, dirX)

                else:
                    direction = dirX if dirX != "" else dirY
            thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 1.5)
            cv2.line(frame1, pts[i - 1], pts[i], (255, 0, 0), thickness)
    # out.write(frame1)
    cv2.imshow('feed', frame1)

    frame1 = frame2
    frame0 = frame3
    frame2 = bridge.imgmsg_to_cv2(ros_image, "bgr8")
    frame3 = bridge.imgmsg_to_cv2(ros_image, "bgr8")
    #
    # cv2.imshow("image window", frame1)
    cv2.waitKey(3)
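The image_callback above uses several names the excerpt never defines (bridge, boundaries, args, frame_x, frame_y, Decimal, midpoint) and assumes a ROS subscription; a minimal setup sketch, with the red-color bounds, topic name and frame-center values as placeholder assumptions (midpoint as sketched after Example #1):

# Assumed setup for image_callback (hypothetical values, not from the source)
import time
import rospy
import cv2
import imutils
import numpy as np
from collections import deque
from decimal import Decimal
from cv_bridge import CvBridge
from imutils import contours
from scipy.spatial import distance as dist
from sensor_msgs.msg import Image

bridge = CvBridge()
args = {"buffer": 32}                         # trail length for the tracked centroid
boundaries = [((0, 0, 100), (80, 80, 255))]   # rough BGR bounds for "red" (assumed)
frame_x, frame_y = 150, 150                   # assumed frame-center reference values

rospy.init_node('object_tracker')             # node and topic names are assumptions
rospy.Subscriber('/camera/image_raw', Image, image_callback)
rospy.spin()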
def grab_contour(threshold_image):
    # find contours in the thresholded image
    cnts = cv2.findContours(threshold_image.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    # sort the contours from left-to-right and initialize the bounding box point colors
    (cnts, _) = contours.sort_contours(cnts)
    new_swap_list = []
    leftmost_contour = None
    center_points, areas, distances, corners, three_areas, coords, testing = [], [], [], [], [], [], []

    known_width = 7.6
    focal_length = 300

    for (i, c) in enumerate(cnts):
        area = cv2.contourArea(c)
        three_areas.append(area)
        sorteddata = sorted(zip(three_areas, cnts),
                            key=lambda x: x[0],
                            reverse=True)
        if cv2.contourArea(c) <= 30:
            continue

        box = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
        box = np.array(box, dtype="int")
        cv2.drawContours(threshold_image, [box], -1, (0, 255, 0), 2)
        rect = order_points(box)

        testing.append(rect)
        coords.append(rect[0])

    index_ordered_list = []  # whole point values in ordered index values

    # Four largest contours' coordinates
    compare_list = [
        sorteddata[0][1][0][0], sorteddata[1][1][0][0], sorteddata[2][1][0][0],
        sorteddata[3][1][0][0]
    ]
    first, second, third, fourth = compare_lists([compare_list])

    index_ordered_list.extend((first, second, third, fourth))

    for i in np.argsort(index_ordered_list):
        new_swap_list.append(sorteddata[i][1])

    for c in new_swap_list:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        area = cv2.contourArea(c)

        if cv2.contourArea(c) <= 30:
            continue

        box = approx
        box = np.squeeze(box)

        # order the points in the contour and draw outlines of the rotated bounding box
        box = order_points(box)
        box = perspective.order_points(box)
        testing.append(box)

        (x, y, w, h) = cv2.boundingRect(c)

        # compute area
        area = cv2.contourArea(c)
        areas.append(area)

        # compute center points
        M = cv2.moments(c)
        if M["m00"] != 0:
            cx = int(M["m10"] / M["m00"])
            cy = int(M["m01"] / M["m00"])
        else:
            cx, cy = 0, 0

        center = (cx, cy)
        center_points.append(center)

        c_x = np.average(box[:, 0])
        c_y = np.average(box[:, 1])

        # compute corners from contour image
        # four_corners = corners_from_contour(threshold_image, c)
        corners.append(box)

        # compute and return the distance from the marker to the camera
        distances.append(distance_to_camera(known_width, focal_length, w))

        if leftmost_contour is None:
            (tl, tr, br, bl) = box
            (tlblX, tlblY) = midpoint(tl, bl)
            (trbrX, trbrY) = midpoint(tr, br)

            # compute the Euclidean distance between the midpoints, then construct the reference object
            d = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
            leftmost_contour = (box, (c_x, c_y), d / 7.5)
            # first_box = box
            continue

    # swap to order center_points
    if center_points[1][0] <= center_points[2][0]:
        tmp = center_points[2]
        center_points[2] = center_points[1]
        center_points[1] = tmp

    if corners[1][0][0] <= corners[2][0][0]:
        tmp = corners[2]
        corners[2] = corners[1]
        corners[1] = tmp

    return leftmost_contour, center_points, areas, distances, corners
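grab_contour calls distance_to_camera(known_width, focal_length, w) without defining it; the usual triangle-similarity form of such a helper, sketched here as an assumption:

# Assumed helper: triangle-similarity distance estimate.
# known_width: real width of the marker, focal_length: calibrated focal length (px),
# per_width: apparent width of the marker in the image (px).
def distance_to_camera(known_width, focal_length, per_width):
    return (known_width * focal_length) / per_width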
Example #9
gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)

# subtract the y-gradient from the x-gradient
gradient = cv2.subtract(gradX, gradY)
gradient = cv2.convertScaleAbs(gradient)

# blur and threshold the image
blurred = cv2.blur(gradient, (9, 9))
(_, thresh) = cv2.threshold(blurred, 90, 255, cv2.THRESH_BINARY)

kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 25))
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)

# perform a series of erosions and dilations
closed = cv2.erode(closed, None, iterations=4)
closed = cv2.dilate(closed, None, iterations=4)

(cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
                             cv2.CHAIN_APPROX_SIMPLE)
c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]

# compute the rotated bounding box of the largest contour
rect = cv2.minAreaRect(c)
box = np.int0(cv2.boxPoints(rect))  # use cv2.cv.BoxPoints(rect) on OpenCV 2

# draw a bounding box around the detected barcode and display the image
cv2.drawContours(image, [box], -1, (0, 255, 0), 3)
cv2.imshow("Image", image)
cv2.imwrite("contoursImage2.jpg", image)
cv2.waitKey(0)
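The gradient example above starts from an already-loaded image and its grayscale version; a minimal assumed preamble (the file name is a placeholder):

# Assumed preamble for the gradient / rotated-box example above
import cv2
import numpy as np

image = cv2.imread("barcode.jpg")               # placeholder path
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)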
    def find_min_area_rectangle(self, contour):
        # minAreaRect gives ((cx, cy), (w, h), angle); boxPoints gives the four corners
        rectangle = cv2.minAreaRect(contour)
        box = cv2.boxPoints(rectangle)  # cv2.cv.BoxPoints(rectangle) on OpenCV 2
        return rectangle, box
    def image_callback(self, data):
        # Store the image header in a global variable
        self.image_header = data.header

        # Time this loop to get cycles per second
        start = time.time()

        # Convert the ROS image to OpenCV format using a cv_bridge helper function
        frame = self.convert_image(data)

        # Some webcams invert the image
        if self.flip_image:
            frame = cv2.flip(frame, 0)

        # Store the frame width and height in a pair of global variables
        if self.frame_width is None:
            self.frame_size = (frame.shape[1], frame.shape[0])
            self.frame_width, self.frame_height = self.frame_size

        # Create the marker image we will use for display purposes
        if self.marker_image is None:
            self.marker_image = np.zeros_like(frame)

        # Copy the current frame to the global image in case we need it elsewhere
        self.frame = frame.copy()

        # Reset the marker image if we're not displaying the history
        if not self.keep_marker_history:
            self.marker_image = np.zeros_like(self.marker_image)

        # Process the image to detect and track objects or features
        processed_image = self.process_image(frame)

        # If the result is a greyscale image, convert to 3-channel for display purposes """
        #if processed_image.channels == 1:
        #cv.CvtColor(processed_image, self.processed_image, cv.CV_GRAY2BGR)
        #else:

        # Make a global copy
        self.processed_image = processed_image.copy()

        # Display the user-selection rectangle or point
        self.display_selection()

        # Night mode: only display the markers
        if self.night_mode:
            self.processed_image = np.zeros_like(self.processed_image)

        # Merge the processed image and the marker image
        self.display_image = cv2.bitwise_or(self.processed_image,
                                            self.marker_image)

        # If we have a track box, then display it.  The track box can be either a regular
        # cvRect (x,y,w,h) or a rotated Rect (center, size, angle).
        if self.show_boxes:
            if self.track_box is not None and self.is_rect_nonzero(
                    self.track_box):
                if len(self.track_box) == 4:
                    x, y, w, h = self.track_box
                    size = (w, h)
                    center = (x + w / 2, y + h / 2)
                    angle = 0
                    self.track_box = (center, size, angle)
                else:
                    (center, size, angle) = self.track_box

                # For face tracking, an upright rectangle looks best
                if self.face_tracking:
                    pt1 = (int(center[0] - size[0] / 2),
                           int(center[1] - size[1] / 2))
                    pt2 = (int(center[0] + size[0] / 2),
                           int(center[1] + size[1] / 2))
                    cv2.rectangle(self.display_image, pt1, pt2, (50, 255, 50),
                                  self.feature_size, 8, 0)
                else:
                    # Otherwise, display a rotated rectangle
                    vertices = np.int0(cv2.boxPoints(self.track_box))  # cv2.cv.BoxPoints on OpenCV 2
                    cv2.drawContours(self.display_image, [vertices], 0,
                                     (50, 255, 50), self.feature_size)

            # If we don't yet have a track box, display the detect box if present
            elif self.detect_box is not None and self.is_rect_nonzero(
                    self.detect_box):
                (pt1_x, pt1_y, w, h) = self.detect_box
                if self.show_boxes:
                    cv2.rectangle(self.display_image, (pt1_x, pt1_y),
                                  (pt1_x + w, pt1_y + h), (50, 255, 50),
                                  self.feature_size, 8, 0)

        # Publish the ROI
        self.publish_roi()

        # Compute the time for this loop and estimate CPS as a running average
        end = time.time()
        duration = end - start
        fps = int(1.0 / duration)
        self.cps_values.append(fps)
        if len(self.cps_values) > self.cps_n_values:
            self.cps_values.pop(0)
        self.cps = int(sum(self.cps_values) / len(self.cps_values))

        # Display CPS and image resolution if asked to
        if self.show_text:
            font_face = cv2.FONT_HERSHEY_SIMPLEX
            font_scale = 0.5
            """ Print cycles per second (CPS) and resolution (RES) at top of the image """
            if self.frame_size[0] >= 640:
                vstart = 25
                voffset = int(50 + self.frame_size[1] / 120.)
            elif self.frame_size[0] == 320:
                vstart = 15
                voffset = int(35 + self.frame_size[1] / 120.)
            else:
                vstart = 10
                voffset = int(20 + self.frame_size[1] / 120.)
            cv2.putText(self.display_image, "CPS: " + str(self.cps),
                        (10, vstart), font_face, font_scale, (255, 255, 0))
            cv2.putText(
                self.display_image, "RES: " + str(self.frame_size[0]) + "X" +
                str(self.frame_size[1]), (10, voffset), font_face, font_scale,
                (255, 255, 0))

        # Update the image display
        cv2.imshow(self.node_name, self.display_image)

        # Process any keyboard commands
        self.keystroke = cv2.waitKey(5)
        if self.keystroke is not None and self.keystroke != -1:
            try:
                cc = chr(self.keystroke & 255).lower()
                if cc == 'n':
                    self.night_mode = not self.night_mode
                elif cc == 'f':
                    self.show_features = not self.show_features
                elif cc == 'b':
                    self.show_boxes = not self.show_boxes
                elif cc == 't':
                    self.show_text = not self.show_text
                elif cc == 'q':
                    # The user has pressed the q key, so exit
                    rospy.signal_shutdown("User hit q key to quit.")
            except:
                pass
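The callback above guards its box drawing with self.is_rect_nonzero(), which the excerpt does not include; a sketch of what such a check plausibly does for both plain (x, y, w, h) rects and rotated ((cx, cy), (w, h), angle) rects:

    # Assumed helper: report whether a plain or rotated rectangle has non-zero size
    def is_rect_nonzero(self, rect):
        try:
            (_, _, w, h) = rect             # plain (x, y, w, h) rectangle
            return (w > 0) and (h > 0)
        except (TypeError, ValueError):
            ((_, _), (w, h), _) = rect      # rotated ((cx, cy), (w, h), angle) rectangle
            return (w > 0) and (h > 0)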
Example #12
def main():
    #construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video", help = "path to the optional video file")
    args = vars(ap.parse_args())

    global frame, roiPts, inputMode

    if not args.get("video",False):
        camera = cv2.VideoCapture(0)
    else:
        camera = cv2.VideoCapture(args["video"])

    cv2.namedWindow("frame")
    cv2.setMouseCallback("frame", selectROI)

    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    roiBox = None

    while True:
        (grabbed, frame) = camera.read()
        if not grabbed:
            break
        if roiBox is not None:
            # convert current frame to the HSV color space and perform mean shift
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            backProj = cv2.calcBackProject([hsv], [0], roiHist, [0, 180], 1)
            # apply cam shift to the back projection, convert the points to a bounding box and then draw them
            (r, roiBox) = cv2.CamShift(backProj, roiBox, termination)
            pts = np.int0(cv2.boxPoints(r))  # cv2.cv.BoxPoints(r) on OpenCV 2
            cv2.polylines(frame, [pts], True, (0, 255, 0), 2)

        # show the frame and record if the user presses a key
        cv2.imshow("frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # handle if the i key is pressed, then go into ROI selection mode
        if key == ord("i") and len(roiPts) < 4:
            # indicate that we are in input mode and clone the frame
            inputMode = True
            orig = frame.copy()

            while len(roiPts) < 4:
                cv2.imshow("frame", frame)
                cv2.waitKey(0)

            roiPts = np.array(roiPts)
            s = roiPts.sum(axis=1)
            tl = roiPts[np.argmin(s)]
            br = roiPts[np.argmax(s)]

            roi = orig[tl[1]:br[1], tl[0]:br[0]]
            roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
            # roi = cv2.cvtColor(roi, cv2.COLOR_BGR2LAB)  # alternative color space (disabled: roi is already HSV here)

            roiHist = cv2.calcHist([roi], [0], None, [16], [0, 180])
            roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
            roiBox = (tl[0], tl[1], br[0], br[1])

        elif key == ord("q"):
            break

    camera.release()
    cv2.destroyAllWindows()