def change(img, wid):
	image = cv2.imread(img)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	gray = cv2.GaussianBlur(gray, (7, 7), 0)

	# perform edge detection, then perform a dilation + erosion to
	# close gaps in between object edges
	edged = cv2.Canny(gray, 50, 100)
	edged = cv2.dilate(edged, None, iterations=1)
	edged = cv2.erode(edged, None, iterations=1)

	# find contours in the edge map
	cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
	cv2.CHAIN_APPROX_SIMPLE)
	cnts = imutils.grab_contours(cnts)

	# sort the contours from left-to-right and initialize the
	# 'pixels per metric' calibration variable
	(cnts, _) = contours.sort_contours(cnts)
	pixelsPerMetric = None

	# loop over the contours individually
	for c in cnts:
		# if the contour is not sufficiently large, ignore it
		if cv2.contourArea(c) < 100:
			continue

		# compute the rotated bounding box of the contour
		orig = image.copy()
		box = cv2.minAreaRect(c)
		box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
		box = np.array(box, dtype="int")

		# order the points in the contour such that they appear
		# in top-left, top-right, bottom-right, and bottom-left
		# order, then draw the outline of the rotated bounding
		# box
		box = perspective.order_points(box)
		cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)

		# loop over the original points and draw them
		for (x, y) in box:
			cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)

		# unpack the ordered bounding box, then compute the midpoint
		# between the top-left and top-right coordinates, followed by
		# the midpoint between bottom-left and bottom-right coordinates
		(tl, tr, br, bl) = box
		(tltrX, tltrY) = midpoint(tl, tr)
		(blbrX, blbrY) = midpoint(bl, br)

		# compute the midpoint between the top-left and bottom-left points,
		# followed by the midpoint between the top-right and bottom-right
		(tlblX, tlblY) = midpoint(tl, bl)
		(trbrX, trbrY) = midpoint(tr, br)

		# draw the midpoints on the image
		cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
		cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
		cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
		cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)

		# draw lines between the midpoints
		cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
			(255, 0, 255), 2)
		cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
			(255, 0, 255), 2)

		# compute the Euclidean distance between the midpoints
		dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
		dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))

		# if the pixels per metric has not been initialized, then
		# compute it as the ratio of pixels to supplied metric
		# (in this case, inches)
		if pixelsPerMetric is None:
			pixelsPerMetric = dB / wid

		# compute the size of the object
		dimA = dA / pixelsPerMetric
		dimB = dB / pixelsPerMetric

		# draw the object sizes on the image
		cv2.putText(orig, "{:.1f}in".format(dimA),
			(int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
			0.65, (255, 255, 255), 2)
		cv2.putText(orig, "{:.1f}in".format(dimB),
			(int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
			0.65, (255, 255, 255), 2)

		# show the output image
		cv2.imshow("Image", orig)
		cv2.waitKey(0)
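The snippet above leans on the usual pyimagesearch measurement setup but never shows its imports or the midpoint helper it calls. A minimal sketch of that assumed scaffolding (the names match what the snippet uses; the helper body is the conventional one, not confirmed by the source):

# Assumed imports and helper for change(); everything here is inferred
# from the names the snippet references.
import cv2
import numpy as np
import imutils
from imutils import contours, perspective
from scipy.spatial import distance as dist

def midpoint(ptA, ptB):
    # average the two coordinates to get the midpoint between two points
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)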
Example #2
            #Resize frame to reduce noise and improve bounding-box creation
            #Apply gaussian MOG background subtraction
            resized = imutils.resize(frame, width=600)
            frame = cv2.GaussianBlur(resized, (11, 11), 0)
            fgmask = fgbg.apply(frame)

            #Erode and dilate the image to reduce specks of noise and small particles
            fgmask = cv2.erode(fgmask, None, iterations=1)
            fgmask = cv2.dilate(fgmask, None, iterations=7)

            #Extract white areas (areas where MOG2 sees an object)
            fgmask = cv2.inRange(fgmask, 255, 255)
            cnts = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)

            #Extract contours of blobs in filtered image, draw bounding boxes around large
            #blobs that represent people
            for c in cnts:
                if cv2.contourArea(c) < 5000:
                    continue
                (x, y, w, h) = cv2.boundingRect(c)

                cv2.rectangle(resized, (x, y), (x + w, y + h), (0, 255, 255),
                              2)

            cv2.imshow('Frame', resized)
            cv2.imshow('Sub', fgmask)
            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break
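This fragment runs inside a capture loop and assumes `frame` and a `fgbg` background subtractor already exist. A minimal sketch of the assumed surrounding loop (the camera index and subtractor construction are guesses; the snippet's own comments mention MOG/MOG2):

# Hypothetical driver loop for the MOG fragment above; the names fgbg and
# frame match the snippet, everything else is illustrative.
import cv2
import imutils

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if not ret:
        break
    resized = imutils.resize(frame, width=600)
    fgmask = fgbg.apply(cv2.GaussianBlur(resized, (11, 11), 0))
    cv2.imshow('Sub', fgmask)
    if cv2.waitKey(30) & 0xff == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()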
def Range(img, parameters_dict, finalimage):
    Range = np.array([])
    ZDistance = np.array([])
    Bearing = np.array([])
    Center = np.array([])
    #LWidth=np.array([])
    #LHeight=np.array([])
    #GrayFiltimg=cv2.cvtColor(img,cv2.COLOR_HSV2BGR)
    #GrayFiltimg=cv2.cvtColor(GrayFiltimg,cv2.COLOR_RGB2GRAY)
    # findContours returns a tuple, so unwrap it first and then test for
    # an empty contour list
    Contour = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    Contour = imutils.grab_contours(Contour)
    if not Contour:
        print("there is nothing here")
    else:
        for a in Contour:
            #find the center of the contour
            Moment = cv2.moments(a)
            Area = cv2.contourArea(a)
            if parameters_dict["Circle"]:
                Lx1, Ly1, LWidth, LHeight = cv2.boundingRect(a)
                if Area > 30:
                    if LWidth / LHeight < 1.1 and LHeight / LWidth < 1.1:
                        (x, y), radius = cv2.minEnclosingCircle(a)
                        cv2.rectangle(finalimage,
                                      (int(x - radius), int(y + radius)),
                                      (int(x + radius), int(y - radius)),
                                      parameters_dict["BBoxColour"], 2)
                        Distance = (parameters_dict["Height"] *
                                    (f / (2 * radius)) / 8) * math.cos(0.2967)
                        Distance = (-0.0005 *
                                    Distance**2) + (1.4897 * Distance) - 66.919
                        Distance = Distance / 1000
                        ZDistance = np.append(ZDistance, Distance)
                        Bearing = np.append(
                            Bearing, math.radians((x - 160) * (31.1 / 160)))
                        Range = np.vstack(
                            (ZDistance, -Bearing)
                        ).T  #Put Bearing and ZDistance into one array and arrange
                        #columnwise
                        Range = Range[Range[:, 0].argsort()]
                    else:
                        continue
                else:
                    continue
            elif parameters_dict["type"] == 3:
                Lx1, Ly1, LWidth, LHeight = cv2.boundingRect(a)
                if Area > 150 and Area < 5000:
                    if LWidth / LHeight < 1.2 and LHeight / LWidth < 1.2:
                        Lx = int(Moment["m10"] / Moment["m00"])
                        Ly = int(Moment["m01"] / Moment["m00"])
                        Centroid = np.array([Lx, Ly])
                        Center = np.append(Center, Centroid)
                        cv2.rectangle(
                            finalimage,
                            (Lx - int(LWidth / 2), Ly + int(LHeight / 2)),
                            (Lx + int(LWidth / 2), Ly - int(LHeight / 2)),
                            parameters_dict["BBoxColour"], 2)
                        Distance = (parameters_dict["Height"] *
                                    (f / LHeight) / 8) * math.cos(0.2967)
                        Distance = ((-0.0002 * Distance**2) +
                                    (0.8492 * Distance) + 51) / 1000
                        ZDistance = np.append(ZDistance, Distance)
                        MaxMinLocations(a, finalimage)
                        Bearing = np.append(
                            Bearing, math.radians((Lx - 160) * (31.1 / 160)))
                        Range = np.vstack(
                            (ZDistance, -Bearing)
                        ).T  #Put Bearing and ZDistance into one array and arrange
                        #columnwise
                        Range = Range[Range[:, 0].argsort()]
                        #if positive then it's to the right if negative then to left of center
                    else:
                        continue
                else:
                    continue
            elif parameters_dict["type"] == 2:
                Lx1, Ly1, LWidth, LHeight = cv2.boundingRect(a)
                if Area > 150:
                    if LWidth / LHeight < 1.2 and LHeight / LWidth < 1.2:
                        Lx = int(
                            Moment["m10"] /
                            Moment["m00"])  #centroids of shapes identified
                        Ly = int(Moment["m01"] / Moment["m00"])
                        Centroid = np.array([Lx, Ly])
                        Center = np.append(Center, Centroid)
                        cv2.rectangle(
                            finalimage,
                            (Lx - int(LWidth / 2), Ly + int(LHeight / 2)),
                            (Lx + int(LWidth / 2), Ly - int(LHeight / 2)),
                            parameters_dict["BBoxColour"], 2)
                        Distance = (parameters_dict["Height"] *
                                    (f / LHeight) / 8) * math.cos(0.2967)
                        Distance = (262.22 * np.log(Distance) - 1222.1) / 1000
                        ZDistance = np.append(ZDistance, Distance)
                        MaxMinLocations(a, finalimage)
                        Bearing = np.append(
                            Bearing, math.radians((Lx - 160) * (31.1 / 160)))
                        Range = np.vstack(
                            (ZDistance, -Bearing)
                        ).T  #Put Bearing and ZDistance into one array and arrange
                        #columnwise
                        Range = Range[Range[:, 0].argsort()]
                        #if positive then it's to the right if negative then to left of center
                    else:
                        continue
                else:
                    continue
            elif parameters_dict["type"] == 1:
                Lx1, Ly1, LWidth, LHeight = cv2.boundingRect(a)
                if Area > 3000:
                    Lx = int(Moment["m10"] / Moment["m00"])
                    Ly = int(Moment["m01"] / Moment["m00"])
                    Centroid = np.array([Lx, Ly])
                    Center = np.append(Center, Centroid)
                    cv2.rectangle(
                        finalimage,
                        (Lx - int(LWidth / 2), Ly + int(LHeight / 2)),
                        (Lx + int(LWidth / 2), Ly - int(LHeight / 2)),
                        parameters_dict["BBoxColour"], 2)
                    Distance = (parameters_dict["Height"] *
                                (f / LHeight) / 8) * math.cos(0.2967)
                    Distance = 0.8667 * Distance - 3
                    ZDistance = np.append(ZDistance, Distance)
                    #self.MaxMinLocations(a,finalimage)
                    Bearing = np.append(
                        Bearing, math.radians((Lx - 160) * (31.1 / 160)))
                    Range = np.vstack(
                        (ZDistance, -Bearing)
                    ).T  #Put Bearing and ZDistance into one array and arrange
                    #columnwise
                    Range = Range[Range[:, 0].argsort()]
                else:
                    continue
            elif parameters_dict["type"] == 4:
                Lx1, Ly1, LWidth, LHeight = cv2.boundingRect(a)
                if Area > 30 and Area < 3000:
                    if LWidth / LHeight < 1.1 and LHeight / LWidth < 1.1:
                        (x, y), radius = cv2.minEnclosingCircle(a)
                        cv2.rectangle(finalimage,
                                      (int(x - radius), int(y + radius)),
                                      (int(x + radius), int(y - radius)),
                                      parameters_dict["BBoxColour"], 2)
                        Distance = (parameters_dict["Height"] *
                                    (f / (2 * radius)) / 8) * math.cos(0.2967)
                        Distance = (-0.0005 *
                                    Distance**2) + (1.4897 * Distance) - 66.919
                        Distance = Distance / 1000
                        ZDistance = np.append(ZDistance, Distance)
                        Bearing = np.append(
                            Bearing, math.radians((x - 160) * (31.1 / 160)))
                        Range = np.vstack(
                            (ZDistance, -Bearing)
                        ).T  #Put Bearing and ZDistance into one array and arrange
                        #columnwise
                        Range = Range[Range[:, 0].argsort()]
                    else:
                        continue
                else:
                    continue

    return Range
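Every branch of Range() rests on the same pinhole relation before its per-type polynomial correction: distance scales as real size times focal length over apparent size in pixels, with a cos() term for the fixed camera tilt. A standalone sketch of that core relation (`f` is a module-level constant the snippet never defines; the value below is purely illustrative):

import math

f = 1000.0  # hypothetical focal length in pixels; never shown in the source

def pinhole_range(real_height, apparent_size_px, tilt_rad=0.2967):
    # pinhole model: range grows with real size over apparent size in pixels;
    # cos() corrects for the camera's fixed downward tilt, and /8 mirrors the
    # snippet's unexplained scale factor
    return (real_height * (f / apparent_size_px) / 8) * math.cos(tilt_rad)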
def openDoor():

  vs = cv2.VideoCapture("white_to_black.avi")
  #vs = cv2.VideoCapture(0)

  fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()

  i = 0
  firstFrame = None

  vals = []

  while vs.isOpened():

    frame = vs.read()
    frame = frame[1]

    if frame is None:

      break

    text = "No issues"

    frame = imutils.resize(frame, width=600)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    if firstFrame is None:

      firstFrame = gray

    frameDelta = cv2.absdiff(firstFrame, gray)

    ret, thresh1 = cv2.threshold(frameDelta, 50, 255, cv2.THRESH_BINARY)

    thresh = cv2.dilate(thresh1, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    # Adjust for spacing of bounding rectangle
    a = 1
    b = 0

    for c in cnts:

      for d in cnts:

        (x1, y1, w1, h1) = cv2.boundingRect(c)
        (x2, y2, w2, h2) = cv2.boundingRect(d)
        (x1, y1, w1, h1) = (x1 - (w1 * b), y1 - (h1 * b),
                            w1 * (a + b), h1 * (a + b))
        (x2, y2, w2, h2) = (x2 - (w2 * b), y2 - (h2 * b),
                            w2 * (a + b), h2 * (a + b))

        for point in [(x1, y1), (x1 + w1, y1), (x1, y1 + h1),
                      (x1 + w1, y1 + h1)]:

          if (point[0] >= x2) and (point[0] <= x2 + w2) and (
              point[1] >= y2) and (point[1] <= y2 + h2):
            x_min = int(min(x1, x2))
            x_max = int(max(x1, x2))
            y_min = int(min(y1, y2))
            y_max = int(max(y1, y2))

            if x_min < 0:
              x_min = 0

            if y_min < 0:
              y_min = 0

            if y_max > frame.shape[0]:
              y_max = frame.shape[0]

            if y_max < 0:
              y_max = 0

            if x_max < 0:
              x_max = 0

            if x_max > frame.shape[1]:
              x_max = frame.shape[1]

            cv2.rectangle(frame,
                          (min(x_min, x_max),
                           min(y_min, y_max)),
                          (max(x_min, x_max),
                           max(y_min, y_max)), (0, 255, 0), 2)

            #cv2.imwrite("cropped" + str(x_max) + "_" + str(x_min) + ".jpg",
            #            frame[min(y_min, y_max):max(y_min, y_max),
            #                  min(x_min, x_max):max(x_min, x_max)].copy())

            print(min(x_min, x_max),max(x_min, x_max),min(y_min, y_max),max(y_min, y_max))

      (x, y, w, h) = cv2.boundingRect(c)
      # (x, y, w, h) = (int(x-(w*a)), int(y-(h*b)), int(w*a), int(h*a))
      cv2.rectangle(frame, (int(x - (w * b)), int(y - (h * b))),
                    (x + int(w * (a + b)), y + int(h * (a + b))), (0, 255, 0),
                    2)
      text = "Issue"
      cv2.putText(frame, "Car Status: {}".format(text), (10, 20),
                  cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
      cv2.putText(frame, "Issue", (x, int(y * 0.98)),
                  cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
      cv2.putText(frame,
                  datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                  (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                  (0, 0, 255), 1)

    numpy_horizontal = np.concatenate((frame, cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2RGB), cv2.cvtColor(thresh1, cv2.COLOR_GRAY2RGB)), axis=1)
    vals.append(np.mean(frame))
    #plt.imshow(frame)
    #plt.show()
#    cv2.imshow("thre", numpy_horizontal)

    #cv2.imshow("Frame Delta", frameDelta)
#    key = cv2.waitKey(1) & 0xFF

    i += 1

  plt.plot(vals)
  plt.show()
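The nested loop above merges overlapping detections by drawing min/max corner rectangles inline. The same idea reads more clearly as an axis-aligned union helper; a sketch (the helper name is mine, not from the source):

def union_box(box_a, box_b):
    # axis-aligned union of two (x, y, w, h) rectangles
    xa, ya, wa, ha = box_a
    xb, yb, wb, hb = box_b
    x = min(xa, xb)
    y = min(ya, yb)
    w = max(xa + wa, xb + wb) - x
    h = max(ya + ha, yb + hb) - y
    return (x, y, w, h)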
def processing(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #cv2.imshow("Image", gray)
    #cv2.waitKey(0)

    blurred = cv2.GaussianBlur(gray, (5, 5), 0)  #blur to reduce noise
    #cv2.imshow("Image", blurred)
    #cv2.waitKey(0)

    # perform edge detection, find contours in the edge map, and sort the
    # resulting contours from left-to-right
    edged = cv2.Canny(blurred, 30, 150)  #30, 15
    #cv2.imshow("Image", edged)
    #cv2.waitKey(0)

    #find contours of characters(objects) in images
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    if cnts:
        cnts = sort_contours(cnts, method="left-to-right")[0]
    # cv2.drawContours(image, cnts, -1, (0, 0, 255), 2)
    # cv2.imwrite("all_contours.jpg", image)

    # initialize the list of contour bounding boxes and associated
    # characters that we'll be OCR'ing
    chars = []
    # loop over the contours
    for c in cnts:
        # compute the bounding box of the contour
        (x, y, w, h) = cv2.boundingRect(c)

        # filter out bounding boxes, ensuring they are neither too small
        # nor too large
        if (w >= 5 and w <= 180) and (h >= 15 and h <= 150):  #180 #150
            # extract the region of interest(roi) of character
            roi = gray[y:y + h, x:x + w]
            # cv2.imshow("Image", roi)
            # cv2.waitKey(0)

            # using a thresholding algorithm to make the character
            # appear as white (foreground) on a black background
            thresh = cv2.threshold(roi, 0, 255,
                                   cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
            #cv2.imshow("Image", thresh)
            #cv2.waitKey(0)

            # then grab the width and height of the thresholded image
            (tH, tW) = thresh.shape

            # if the width is greater than the height, resize along the
            # width dimension
            if tW > tH:
                thresh = imutils.resize(thresh, width=32)
            # otherwise, resize along the height
            else:
                thresh = imutils.resize(thresh, height=32)
            #cv2.imshow("Image", thresh)
            #cv2.waitKey(0)

            # re-grab the image dimensions (now that its been resized)
            # and then determine how much we need to pad the width and
            # height such that our image will be 32x32
            (tH, tW) = thresh.shape
            dX = int(max(0, 32 - tW) / 2.0)
            dY = int(max(0, 32 - tH) / 2.0)

            # pad the image and force 32x32 dimensions
            padded = cv2.copyMakeBorder(thresh,
                                        top=dY,
                                        bottom=dY,
                                        left=dX,
                                        right=dX,
                                        borderType=cv2.BORDER_CONSTANT,
                                        value=(0, 0, 0))
            #cv2.imshow("Image", padded)
            #cv2.waitKey(0)

            padded = cv2.resize(padded, (32, 32))
            #cv2.imshow("Image", padded)
            #cv2.waitKey(0)

            # prepare the padded image for classification via our
            # handwriting OCR model
            padded = padded.astype("float32") / 255.0
            padded = np.expand_dims(padded, axis=-1)
            #cv2.imshow("Image", padded)
            #cv2.waitKey(0)

            # update our list of characters(as padded images) that will be OCR'd
            chars.append((padded, (x, y, w, h)))

    # extract the bounding box locations and padded characters
    boxes = [b[1] for b in chars]
    chars = np.array([c[0] for c in chars], dtype="float32")

    #writing characters in csv
    with open("myhandwriting.csv", "a+", newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter=',')

        char_list = chars.tolist()  # avoid shadowing the built-in list
        for listElement in char_list:
            row = [None] * 1025
            row[0] = 22  # hard-coded class label for this character
            array = np.array(listElement, dtype="float32") * 255.0
            array = array.astype(dtype='uint8')
            array = np.squeeze(array, axis=2)
            #because our characters size is 32x32
            flat_array = array.reshape(1024, )
            flat_list = flat_array.tolist()
            row[1:] = flat_list
            writer.writerow(row)
        print("Character added into Dataset...")
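Each CSV row written above is one class label followed by 1024 grayscale pixel values, i.e. a flattened 32x32 character. A sketch of reading a row back into an image (assumes the same file name and row layout):

import csv
import numpy as np

with open("myhandwriting.csv") as csv_file:
    for row in csv.reader(csv_file):
        label = int(row[0])
        # the remaining 1024 values reshape back into the 32x32 character
        pixels = [int(v) for v in row[1:]]
        char_img = np.array(pixels, dtype="uint8").reshape(32, 32)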
Example #6
def main():
    '''
    main function
    '''
    # basic image pre-processing: gray scale/canny
    answer_boxes_image, original_image, gray_image, edged_image = preprocess_image(
    )

    # all contours inside original image
    original_image_contours = cv2.findContours(edged_image.copy(), cv2.RETR_TREE, \
                                              cv2.CHAIN_APPROX_SIMPLE)
    original_image_contours = imutils.grab_contours(original_image_contours)
    contours_original_image = original_image.copy()
    cv2.drawContours(contours_original_image, original_image_contours, -1, \
                     (0, 255, 255), thickness=2)

    # original image with answer boxes marked and boxed coordinates
    answer_boxes_image, answer_boxes = detect_answer_boxes(answer_boxes_image, \
                                                           original_image_contours)

    # one image for every answer box (math/info)
    answer_box_images = []
    answer_box_gray_images = []
    # homomorphic transformation (for rotation/perspective part)
    for answer_box in answer_boxes:
        answer_box_images.append(four_point_transform(original_image.copy(), \
                                                      answer_box.reshape(4, 2)))
        answer_box_gray_images.append(four_point_transform(gray_image.copy(), \
                                                           answer_box.reshape(4, 2)))

    # answer boxes heights - one answer box == 8x17 option squares
    answer_boxes_heights = [round(len(answer_box_images[0]) / 17), \
                        round(len(answer_box_images[1]) / 17)]
    answer_boxes_widths = [round(len(answer_box_images[0][0]) / 8), \
                        round(len(answer_box_images[1][0]) / 8)]

    # get Info/Fiz portion of image
    optional_test_answer_box = answer_boxes[1]
    optional_box_height = len(answer_box_images[1])
    scale = 2 * 4 * answer_boxes_heights[1] / optional_box_height + 1
    width_scale_adaos = int(len(answer_box_images[1][0]) * (scale - 1) / 2)
    optional_test_answer_box = scale_contour(optional_test_answer_box, scale)
    test_chosen_option_image = four_point_transform(original_image.copy(), \
                                                    optional_test_answer_box.reshape(4, 2))
    # get upper part where Info/Fizica text is located
    test_chosen_option_image = test_chosen_option_image[:4 *
                                                        answer_boxes_heights[
                                                            1], :]
    test_chosen_option_image = test_chosen_option_image[:, width_scale_adaos:
                                                        -width_scale_adaos]
    test_chosen_option_image = test_chosen_option_image[:, -2 *
                                                        answer_boxes_widths[1]:]
    original_options_image = test_chosen_option_image.copy()
    test_chosen_option_image = cv2.cvtColor(test_chosen_option_image,
                                            cv2.COLOR_BGR2GRAY)
    test_chosen_option_image = cv2.threshold(test_chosen_option_image, 0, 255, \
                                             cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

    # horizontal lines removal
    lines = cv2.HoughLines(test_chosen_option_image, 1, np.pi/180, \
                           int(1.1 * answer_boxes_heights[1]))
    for rho, theta in (lines.reshape(-1, 2) if lines is not None else []):
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))
        angle = np.arctan2(y2 - y1, x2 - x1) * 180.0 / np.pi
        if abs(angle) < 2 or (88 < abs(angle) < 92):
            cv2.line(test_chosen_option_image, (x1, y1), (x2, y2), (0, 0, 0),
                     2)

    test_chosen_contours = cv2.findContours(test_chosen_option_image, cv2.RETR_TREE, \
        cv2.CHAIN_APPROX_SIMPLE)
    test_chosen_contours = imutils.grab_contours(test_chosen_contours)

    # option detection - low precision
    selected_option = 3
    test_option_contours = np.array([])
    for contour in test_chosen_contours:
        (_, _, width, height) = cv2.boundingRect(contour)
        aspect_ratio = height / float(width)
        if height >= 0.2 * answer_boxes_heights[1]:
            test_option_contours = contour
            if aspect_ratio > 0.9 and aspect_ratio < 1.1:
                selected_option = 2
            if aspect_ratio < 0.5:
                selected_option = 1
            if aspect_ratio > 0.5 and aspect_ratio < 0.9:
                selected_option = 4
            break

    chosen_option_letter = 'I'
    if test_option_contours.size:
        original_options_image = cv2.drawContours(original_options_image, \
                test_option_contours, -1, (255, 255, 0), 2)
        option_selected_bottom_coords = \
                tuple(test_option_contours[test_option_contours[:, :, 1].argmax()][0])
        if option_selected_bottom_coords[1] > len(
                original_options_image[0]) / 2:
            chosen_option_letter = 'F'

    #chosen_option_letter = 'I'
    #selected_option = 1

    # crop useless part of the answer box
    cropped_answer_box_images = []
    cropped_answer_box_gray_images = []
    for index, image in enumerate(answer_box_images):
        cropped_answer_box_images.append(crop_image(image, 2 * answer_boxes_heights[index], \
                                                    int(3.6 * answer_boxes_widths[index])))
    for index, image in enumerate(answer_box_gray_images):
        cropped_answer_box_gray_images.append(crop_image(image, 2 * answer_boxes_heights[index], \
                                                         int(3.6 * answer_boxes_widths[index])))

    # binary treshold on cropped_answer_box_gray_images
    answer_box_thresh_gray_images = []
    for image in cropped_answer_box_gray_images:
        answer_box_thresh_gray_images.append(cv2.threshold(image.copy(), 0, 255, \
            cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1])

    # remove box lines in order to expose X marks
    for index, image in enumerate(answer_box_thresh_gray_images):
        lines = cv2.HoughLines(image, 1, np.pi / 180,
                               int(answer_boxes_widths[index] * 3))
        for rho, theta in (lines.reshape(-1, 2) if lines is not None else []):
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            x1 = int(x0 + 1000 * (-b))
            y1 = int(y0 + 1000 * (a))
            x2 = int(x0 - 1000 * (-b))
            y2 = int(y0 - 1000 * (a))
            angle = np.arctan2(y2 - y1, x2 - x1) * 180.0 / np.pi
            if abs(angle) < 2 or (88 < abs(angle) < 92):
                cv2.line(answer_box_thresh_gray_images[index], (x1, y1),
                         (x2, y2), (0, 0, 0), 3)

    # detect contours on answer_box_thresh_gray_images
    thresh_box_images_contours = []
    for thresh_image in answer_box_thresh_gray_images:
        contour = cv2.findContours(thresh_image, cv2.RETR_TREE, \
            cv2.CHAIN_APPROX_SIMPLE)
        contour = imutils.grab_contours(contour)
        thresh_box_images_contours.append(contour)

    # detect X marks
    marked_answers_contours = [[], []]
    for index, thresh_contour in enumerate(thresh_box_images_contours):
        for contour in thresh_contour:
            (_, _, width, height) = cv2.boundingRect(contour)
            aspect_ratio = height / float(width)
            if width <= answer_boxes_widths[index] and height <= answer_boxes_heights[index] and \
            width >= 0.2*answer_boxes_widths[index] and height >= 0.2*answer_boxes_heights[index]:
                marked_answers_contours[index].append(contour)

    # highlight x marks inside answer boxes
    answer_box_x_marks_images = []
    for index, contour in enumerate(marked_answers_contours):
        answer_box_x_marks_images.append(cv2.drawContours(cropped_answer_box_images[index].copy(), \
            contour, -1, (255, 255, 0), 2))

    # parse contours and determine answers
    test_answers = [{}, {}]
    for box_index, box_answers_contours in enumerate(marked_answers_contours):
        box_answers_contours = contours.sort_contours(box_answers_contours,
                                                      'top-to-bottom')[0]
        for contour in box_answers_contours:
            left_up_coords = contour.min(axis=0)
            right_down_coords = contour.max(axis=0)
            center_coords = (left_up_coords + right_down_coords) / 2
            question_number = center_coords / answer_boxes_heights[box_index]
            question_number = question_number.reshape(2, ).astype(int)[1]
            question_option = center_coords / answer_boxes_widths[box_index]
            question_option = question_option.reshape(2, ).astype(int)[0]
            question_option = chr(ord('A') + question_option)
            if question_number in test_answers[box_index].keys():
                test_answers[box_index][question_number] = 'Z'
            else:
                test_answers[box_index][question_number] = question_option

    # write answers inside file
    output_file = open('output_file.txt', 'w')
    output_file.write('{} {}\n'.format(chosen_option_letter, selected_option))
    parsed_dict = {}
    for box_index, box_test_answers in enumerate(test_answers):
        question_number = 1
        if box_index == 1:
            question_number = 16
        for key in box_test_answers.keys():
            letter_option = box_test_answers[key]
            if letter_option == 'Z':
                continue
            output_file.write('{} {}\n'.format(question_number + key,
                                               letter_option))
            parsed_dict[question_number + key] = letter_option

    # verify answers
    barem_dict = {}
    with open(
            'dataset/barem/{}{}.txt'.format(chosen_option_letter,
                                            selected_option), 'r') as f:
        for line in f:
            row = line.split()
            if len(row) == 2:
                if row[0].isnumeric():
                    barem_dict[int(row[0])] = row[1]

    shared_items = {k: parsed_dict[k] \
                    for k in parsed_dict if k in barem_dict and parsed_dict[k] == barem_dict[k]}
    output_file.write('R {}\n'.format(len(shared_items)))
    output_file.close()

    # plot entire flow
    cv2.cvtColor(contours_original_image, cv2.COLOR_BGR2RGB)
    answer_box_x_marks_images[0] = cv2.cvtColor(answer_box_x_marks_images[0],
                                                cv2.COLOR_BGR2RGB)
    answer_box_x_marks_images[1] = cv2.cvtColor(answer_box_x_marks_images[1],
                                                cv2.COLOR_BGR2RGB)
    answer_box_thresh_gray_images[0] = \
        cv2.cvtColor(answer_box_thresh_gray_images[0], cv2.COLOR_GRAY2RGB)
    answer_box_thresh_gray_images[1] = \
        cv2.cvtColor(answer_box_thresh_gray_images[1], cv2.COLOR_GRAY2RGB)
    original_options_image = cv2.cvtColor(original_options_image,
                                          cv2.COLOR_BGR2RGB)
    _ = plt.subplot(261), plt.imshow(original_image)
    _ = plt.title('Original Image'), plt.xticks([]), plt.yticks([])
    _ = plt.subplot(262), plt.imshow(edged_image, cmap='gray')
    _ = plt.title('Edged Image'), plt.xticks([]), plt.yticks([])
    _ = plt.subplot(263), plt.imshow(contours_original_image)
    _ = plt.title('Contours'), plt.xticks([]), plt.yticks([])
    _ = plt.subplot(264), plt.imshow(answer_boxes_image)
    _ = plt.title('Two squares'), plt.xticks([]), plt.yticks([])
    _ = plt.subplot(265), plt.imshow(cropped_answer_box_images[0])
    _ = plt.title('First box'), plt.xticks([]), plt.yticks([])
    _ = plt.subplot(266), plt.imshow(cropped_answer_box_images[1])
    _ = plt.title('Second box'), plt.xticks([]), plt.yticks([])
    _ = plt.subplot(267), plt.imshow(answer_box_thresh_gray_images[0])
    _ = plt.title('Thresh 1st box'), plt.xticks([]), plt.yticks([])
    _ = plt.subplot(268), plt.imshow(answer_box_thresh_gray_images[1])
    _ = plt.title('Thresh 2nd box'), plt.xticks([]), plt.yticks([])
    _ = plt.subplot(269), plt.imshow(answer_box_x_marks_images[0])
    _ = plt.title('X Marks 1st box'), plt.xticks([]), plt.yticks([])
    _ = plt.subplot(2, 6, 10), plt.imshow(answer_box_x_marks_images[1])
    _ = plt.title('X Marks 2nd box'), plt.xticks([]), plt.yticks([])
    _ = plt.subplot(2, 6, 11), plt.imshow(test_chosen_option_image,
                                          cmap='gray')
    _ = plt.title('Test option thresh'), plt.xticks([]), plt.yticks([])
    _ = plt.subplot(2, 6, 12), plt.imshow(original_options_image)
    _ = plt.title('Test option contour'), plt.xticks([]), plt.yticks([])
    plt.show()
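The Hough-based line removal appears twice in main() with identical bodies, so it factors naturally into a helper. A sketch under that assumption (the function name is mine; note the None guard, since cv2.HoughLines returns None when no lines are found):

import numpy as np
import cv2

def remove_straight_lines(binary_image, hough_threshold, thickness=2):
    # paint over near-horizontal and near-vertical Hough lines so only
    # the pen marks survive in the binary image
    lines = cv2.HoughLines(binary_image, 1, np.pi / 180, hough_threshold)
    if lines is None:
        return
    for rho, theta in lines.reshape(-1, 2):
        a, b = np.cos(theta), np.sin(theta)
        x0, y0 = a * rho, b * rho
        p1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * a))
        p2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * a))
        angle = np.arctan2(p2[1] - p1[1], p2[0] - p1[0]) * 180.0 / np.pi
        if abs(angle) < 2 or (88 < abs(angle) < 92):
            cv2.line(binary_image, p1, p2, (0, 0, 0), thickness)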
def ammo_count(ref, frame, meta, debug):
    """ Reads ammo counter from video frame by template matching against a reference image """
    # ammo_count = ""

    # removes any small noise from the ref image, probably not necessary
    ref = cv2.threshold(ref, 10, 255, cv2.THRESH_BINARY)[1]

    # finds all contours in the ref image and sorts them into a list, with the left most contour in position 0
    ref_cnts = cv2.findContours(ref.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    ref_cnts = imutils.grab_contours(ref_cnts)
    ref_cnts = contours.sort_contours(ref_cnts, method="left-to-right")[0]

    # assigns each contour a number based on its position in the list, we will use this later to match digits to
    digits = {}
    for (i, c) in enumerate(ref_cnts):
        # probably don't need to identify their bounding rectangles
        (x, y, w, h) = cv2.boundingRect(c)
        roi = ref[y:y + h, x:x + w]
        roi = cv2.resize(roi, (112, 92))
        digits[i] = roi

    # cuts the frame down to the ammo display location; the percentages are
    # accurate enough that the explicit 1080p/720p branches could probably go
    # 1080p
    # 963 is 89.17%  1000 is 92.595%
    # 1732 is 90.21% 1778 is 92.6%
    if int(meta[1]) == 1080:
        frame = frame[963:1000, 1730:1780]

    # 720p
    # 642.024 is 89.17% 666.684 is 92.595%
    # 1154.688 is 90.21% 1185.28 is 92.6%
    elif int(meta[1]) == 720:
        frame = frame[642:667, 1154:1186]
    else:
        frame = frame[int(int(meta[1]) * 0.8917):int(int(meta[1]) * 0.92595),
                      int(int(meta[0]) * 0.9021):int(int(meta[0]) * 0.926)]

    # converts ammo display to gray scale and thresholds out unwanted noise
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame = cv2.threshold(frame, 200, 255, cv2.THRESH_BINARY)[1]

    # finds contours of ammo display and returns them sorted in a list, left to right
    ammo_cnts = cv2.findContours(frame, cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
    ammo_cnts = imutils.grab_contours(ammo_cnts)
    if len(ammo_cnts) == 0:
        ammo_count = 0
        return ammo_count
    ammo_cnts = contours.sort_contours(ammo_cnts, method="left-to-right")[0]

    # initialise ammo list
    ammo = []

    # compares results from ammo display and reference image and returns most confident answer
    for c in ammo_cnts:
        (x, y, w, h) = cv2.boundingRect(c)
        roi = frame[y:y + h, x:x + w]
        roi = cv2.resize(roi, (112, 92))

        scores = []

        for (digit, digitROI) in digits.items():
            result = cv2.matchTemplate(roi, digitROI, cv2.TM_CCOEFF_NORMED)
            (_, score, _, _) = cv2.minMaxLoc(result)
            scores.append(score)

        if debug is True:
            print(np.argmax(scores))
            print(np.amax(scores))

        if np.amax(scores) >= 0.5:
            ammo.append(str(np.argmax(scores)))
        else:
            ammo_count = 0
            return ammo_count

    # joins answers into a string to return a final value
    ammo_count = "".join(ammo)

    if debug is True:
        cv2.imshow('ref_ammo', ref)
        cv2.imshow('frame_ammo', frame)

    return ammo_count
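A sketch of how ammo_count() might be driven; the reference-image path, the video file, and the (width, height) layout of `meta` are assumptions inferred from how the function indexes `meta`:

import cv2

# Hypothetical usage: ref is a grayscale strip of the digits 0-9,
# meta is (width, height) of the video.
ref = cv2.imread("digits_reference.png", cv2.IMREAD_GRAYSCALE)
cap = cv2.VideoCapture("gameplay.mp4")
meta = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
ok, frame = cap.read()
if ok:
    print(ammo_count(ref, frame, meta, debug=False))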
def con(image):
    global i
    global j
    global b_c
    global im
    
    i = i + 1
    
    image = cv2.GaussianBlur(image, (5, 5), 0)
    
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    
    lower_skin = np.array([0, 58, 30], dtype = np.uint8)
    upper_skin = np.array([33, 255, 255], dtype = np.uint8)
    
    mask = cv2.inRange(hsv, lower_skin, upper_skin)
    
    mask = cv2.erode(mask, None, iterations = 1)
    mask = cv2.dilate(mask, None, iterations = 9)
    
    cnts = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    
    cnts = imutils.grab_contours(cnts)
    
    if cnts:
        c = max(cnts, key=cv2.contourArea)
    else:
        return 0, 0, 0
    
    extLeft = tuple(c[c[:, :, 0].argmin()][0])
    extRight = tuple(c[c[:, :, 0].argmax()][0])
    extTop = tuple(c[c[:, :, 1].argmin()][0])
    extBot = tuple(c[c[:, :, 1].argmax()][0])
    
    image = im
    
    et = extTop[1] - 10
    el = extLeft[0] - 10
    er = extRight[0] + 10
    eb = extBot[1] + 10
    
    if el < 0:
        el = 0
    if et < 0:
        et = 0
    if er > image.shape[1]:
        er = image.shape[1]
    if eb > image.shape[0]:
        eb = image.shape[0]
    
    
    if i % 2 == 0:
        j = j + 1
        if j % 3 == 0:
            b_c = (0, 0, 255)
        if j % 3 == 1:
            b_c = (0, 255, 0)
        if j % 3 == 2:
            b_c = (255, 0, 0)
    
    image = cv2.rectangle(image, (el, et), (er, eb), b_c, 3)
    pre_image = image[et:eb, el:er]
    return image, mask, pre_image
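con() depends on module-level state that the snippet never shows: frame counters `i` and `j`, the rotating box colour `b_c`, and `im`, an unblurred copy of the current frame. A sketch of that assumed initialisation:

# Assumed module-level state for con(); im would normally be set to the
# current raw frame before each call. All values are illustrative.
i = 0               # frame counter
j = 0               # colour-change counter
b_c = (0, 0, 255)   # current bounding-box colour
im = None           # unblurred copy of the current frame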
Example #9
    # draw BOUND
    # cv2.rectangle(imgOutput, (65, 25), (142 + 65, 25 + 435), (0, 0, 255), 1)


    # Track Object 1 (PUCK)-------------------------------------------------------------------

    lowerBound1 = (h_min[0], s_min[0], v_min[0])
    upperBound1 = (h_max[0], s_max[0], v_max[0])

    mask11 = cv2.inRange(hsv, lowerBound1, upperBound1)
    mask11 = cv2.erode(mask11, None, iterations=1)
    mask11 = cv2.dilate(mask11, None, iterations=1)

    cnts1 = cv2.findContours(mask11.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts1 = imutils.grab_contours(cnts1)
    center1 = None

    if len(cnts1) > 0:
        # find the largest contour in the mask, then use it to compute the minimum enclosing circle and centroid
        c1 = max(cnts1, key=cv2.contourArea)
        ((x1, y1), radius1) = cv2.minEnclosingCircle(c1)
        M1 = cv2.moments(c1)
        center1 = (int(M1["m10"] / M1["m00"]), int(M1["m01"] / M1["m00"]))

        msg1 = str(x1) + "," + str(y1)
        #print("puck= ", msg1)
        socketio.emit('updatePuck', msg1)

        # only proceed if the radius meets a minimum size
        if radius1 > 10:
            # draw the enclosing circle and centroid; the source fragment is
            # cut off here, so this continuation is assumed
            cv2.circle(imgOutput, (int(x1), int(y1)), int(radius1),
                       (0, 255, 255), 2)
            cv2.circle(imgOutput, center1, 5, (0, 0, 255), -1)
Example #10
    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    leftmask = cv.inRange(lefthsv, greenLower, greenUpper)
    leftmask = cv.erode(leftmask, None, iterations=2)
    leftmask = cv.dilate(leftmask, None, iterations=2)

    rightmask = cv.inRange(righthsv, greenLower, greenUpper)
    rightmask = cv.erode(rightmask, None, iterations=2)
    rightmask = cv.dilate(rightmask, None, iterations=2)

    # find contours in the mask and initialize the current (x, y) center of the ball
    leftcnts = cv.findContours(leftmask.copy(), cv.RETR_EXTERNAL,
                               cv.CHAIN_APPROX_SIMPLE)
    leftcnts = imutils.grab_contours(leftcnts)
    leftcenter = None

    rightcnts = cv.findContours(rightmask.copy(), cv.RETR_EXTERNAL,
                                cv.CHAIN_APPROX_SIMPLE)
    rightcnts = imutils.grab_contours(rightcnts)
    rightcenter = None

    # Call function for left frame
    (ldX, ldY, lpts, ldirection, lx,
     ly) = trackedObjectXYcoord(leftFrame, leftcnts, ldX, ldY, lpts,
                                ldirection)
    # show the movement deltas and the direction of movement on the frame
    cv.putText(leftFrame, ldirection, (10, 30), cv.FONT_HERSHEY_SIMPLEX, 0.65,
               (0, 0, 255), 3)
    cv.putText(leftFrame, "x: {}, y: {}".format(lx, ly),
               (10, leftFrame.shape[0] - 10), cv.FONT_HERSHEY_SIMPLEX,
               0.35, (0, 0, 255), 1)  # trailing arguments assumed; the fragment ends here
Example #11
    def compare_screen_without_areas(
            self, path1, *args, save_folder=save_folder_path, ssim=starts_ssim, image_format=starts_format_image
    ):
        """
        Compares two pictures, which have parts to be ignored
        x1 and y1 = x and y coordinates for the upper left corner of the ignored area square
        x2 and y2 = x and y coordinates for the lower right corner of the square of the ignored part

        Attention! It is always necessary to enter in order x1 y1 x2 y2 x1 y1 x2 y2 etc ...

        Compare screen without areas ../Image1.png 0 0 30 40 50 50 100 100
        Creates 2 ignored parts at 0,0, 30,40 and 50, 50, 100, 100
        """
        self._check_dir(save_folder)
        self._check_ssim(ssim)
        self._check_image_format(image_format)
        save_folder = self.save_folder

        self.seleniumlib.capture_page_screenshot(save_folder + "/test1.png")
        path2 = save_folder + "/test1.png"
        if os.path.exists(path1) and os.path.exists(path2):
            lt = len(args)
            img1 = cv.imread(path1, 1)
            img2 = cv.imread(path2, 1)
            if lt % 4 == 0:
                x = lt // 4
                self.robotlib.log_to_console(x)
                i = 0
                a = 0
                while i < x:
                    color = (0, 0, 0)
                    x1 = int(args[0 + a])
                    y1 = int(args[1 + a])
                    x2 = int(args[2 + a])
                    y2 = int(args[3 + a])

                    cv.rectangle(img1, (x1, y1), (x2, y2), color, -1)
                    cv.rectangle(img2, (x1, y1), (x2, y2), color, -1)
                    a += 4
                    i += 1
                cv.namedWindow("image", cv.WINDOW_NORMAL)

                # convert to grey
                gray_img1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)
                gray_img2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)

                # SSIM diff Img
                (self.score, diff) = structural_similarity(
                    gray_img1, gray_img2, full=True
                )
                diff = (diff * 255).astype("uint8")

                # Threshold diff Img
                thresh = cv.threshold(
                    diff, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU
                )[1]
                cnts = cv.findContours(
                    thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE
                )
                cnts = imutils.grab_contours(cnts)

                # Create frame in diff area
                for c in cnts:
                    (x, y, w, h) = cv.boundingRect(c)
                    cv.rectangle(img1, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    cv.rectangle(img2, (x, y), (x + w, y + h), (0, 0, 255), 2)

                # Show image
                if float(self.score) < self.ssim:
                    img_diff = cv.hconcat([img1, img2])
                    time_ = str(time.time())
                    self.seleniumlib.capture_page_screenshot(
                        save_folder + "/Img" + time_ + self.format
                    )
                    cv.imwrite(save_folder + "/Img" + time_ + self.format, img_diff)
                    self.robotlib.fail("Image has diff: {} ".format(self.score))
                else:
                    img_diff = cv.hconcat([img1, img2])
                    time_ = str(time.time())
                    self.seleniumlib.capture_page_screenshot(
                        save_folder + "/Img" + time_ + self.format
                    )
                    cv.imwrite(save_folder + "/Img" + time_ + self.format, img_diff)
                    self.robotlib.log_to_console(
                        "Image has diff: {} ".format(self.score)
                    )
        else:
            raise AssertionError("The path to the image does not exist")
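Both comparison keywords share the same core: score the pair with scikit-image's structural_similarity, threshold the diff, and box the changed regions. A minimal standalone sketch of that pipeline:

import cv2 as cv
import imutils
from skimage.metrics import structural_similarity

def diff_boxes(gray_a, gray_b):
    # SSIM diff, thresholded with inverted Otsu, then contoured into
    # bounding rectangles around every changed region
    score, diff = structural_similarity(gray_a, gray_b, full=True)
    diff = (diff * 255).astype("uint8")
    thresh = cv.threshold(diff, 0, 255,
                          cv.THRESH_BINARY_INV | cv.THRESH_OTSU)[1]
    cnts = cv.findContours(thresh, cv.RETR_EXTERNAL,
                           cv.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    return score, [cv.boundingRect(c) for c in cnts]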
Example #12
    def compare_screen_areas(
            self, x1, y1, x2, y2, path1, save_folder=save_folder_path, ssim=starts_ssim,
            image_format=starts_format_image
    ):
        """Creates a cut-out from the screen

        Creates a cut-out from the screen that is on the screen and compares it to a previously created

        x1 and y1 = x and y coordinates for the upper left corner of the square
        x2 and y2 = x and y coordinates for the bottom right corner of the square
        path1 = Path to an already created viewport with which we want to compare the viewport created by us

        Example: Compare screen area 0 0 25 25 ../Crop_Image1.png Creates Crop_Image1.png from 0, 0, 25, 25
        """
        self._check_dir(save_folder)
        self._check_ssim(ssim)
        self._check_image_format(image_format)
        save_folder = self.save_folder
        self.seleniumlib.capture_page_screenshot(save_folder + '/test1.png')
        path2 = save_folder + '/test1.png'

        if os.path.exists(path1):
            if os.path.exists(path2):
                # load img
                img1 = cv.imread(path1, 1)  # img from docu
                img2 = cv.imread(path2, 1)  # img from screenshot

                # convert to grey
                gray_img1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)
                gray_img2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)

                # spliting area
                crop_img = gray_img2[
                           int(y1): int(y2), int(x1): int(x2)
                           ]  # numpy slices rows first: [y1:y2, x1:x2]

                # SSIM diff img
                (self.score, diff) = structural_similarity(
                    gray_img1, crop_img, full=True
                )
                diff = (diff * 255).astype('uint8')

                # Threshold diff img
                thresh = cv.threshold(
                    diff, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU
                )[1]
                cnts = cv.findContours(
                    thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE
                )
                cnts = imutils.grab_contours(cnts)

                crop_img_color = img2[int(y1): int(y2), int(x1): int(x2)]
                # Create frame in diff area
                for c in cnts:
                    (x, y, w, h) = cv.boundingRect(c)
                    cv.rectangle(img1, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    cv.rectangle(crop_img_color, (x, y), (x + w, y + h), (0, 0, 255), 2)

                # Show image
                if float(self.score) < self.ssim:
                    self.robotlib = BuiltIn().get_library_instance('BuiltIn')
                    img_diff = cv.hconcat([img1, crop_img_color])
                    time_ = str(time.time())
                    self.seleniumlib.capture_page_screenshot(
                        save_folder + '/img' + time_ + '.png'
                    )
                    cv.imwrite(save_folder + '/img' + time_ + self.format, img_diff)
                    score_percent = float(self.score) * 100
                    self.robotlib.fail('Image has diff: {} %'.format(score_percent))
                else:
                    img_diff = cv.hconcat([img1, crop_img_color])
                    time_ = str(time.time())
                    self.seleniumlib.capture_page_screenshot(
                        save_folder + "/Img" + time_ + self.format
                    )
                    cv.imwrite(save_folder + "/Img" + time_ + self.format, img_diff)
                    self.robotlib.log_to_console(
                        "Image has diff: {} ".format(self.score)
                    )
            else:
                raise AssertionError("New screenshot does not exist anymore")
        else:
            raise AssertionError("The path1 to the image does not exist. Try another path than: " + path1)
        if os.path.exists(save_folder + '/test1.png'):
            os.remove(save_folder + '/test1.png')
    def cbTracking(self):
        if self.image_received:
            # resize the frame, blur it, and convert it to the HSV
            # color space
            frame = imutils.resize(self.cv_image, width=self.imgWidth)
            blurred = cv2.GaussianBlur(self.cv_image, (11, 11), 0)
            hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

            # construct a mask for the color "green", then perform
            # a series of dilations and erosions to remove any small
            # blobs left in the mask
            mask = cv2.inRange(hsv, self.greenLower, self.greenUpper)
            mask = cv2.erode(mask, None, iterations=2)
            mask = cv2.dilate(mask, None, iterations=2)

            # find contours in the mask and initialize the current
            # (x, y) center of the ball
            cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            center = None

            # only proceed if at least one contour was found
            if len(cnts) > 0:
                # find the largest contour in the mask, then use
                # it to compute the minimum enclosing circle and
                # centroid
                c = max(cnts, key=cv2.contourArea)
                ((x, y), radius) = cv2.minEnclosingCircle(c)
                M = cv2.moments(c)
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

                # only proceed if the radius meets a minimum size
                if radius > 10:
                    # draw the circle and centroid on the frame,
                    # then update the list of tracked points
                    cv2.circle(self.cv_image, (int(x), int(y)), int(radius),
                               (0, 255, 255), 2)
                    cv2.circle(self.cv_image, center, 5, (0, 0, 255), -1)
            # update the points queue
            self.pts.appendleft(center)

            # loop over the set of tracked points
            for i in range(1, len(self.pts)):
                # if either of the tracked points are None, ignore
                # them
                if self.pts[i - 1] is None or self.pts[i] is None:
                    continue

                # otherwise, compute the thickness of the line and
                # draw the connecting lines
                thickness = int(np.sqrt(self.buffer / float(i + 1)) * 2.5)
                cv2.line(self.cv_image, self.pts[i - 1], self.pts[i],
                         (0, 0, 255), thickness)

            self.cbInfo()
            self.cbShowImage()

            # sleep briefly so the loop does not hog the CPU
            rospy.sleep(0.01)
        else:
            rospy.logerr("No images received")
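cbTracking() assumes the node initialised a fixed-length trail of recent ball centers plus HSV bounds. A sketch of that assumed setup (the attribute names match the method; every value is illustrative):

from collections import deque

class TrackerNode:
    def __init__(self):
        # hypothetical initialisation matching the names cbTracking uses
        self.buffer = 64                      # trail length, drives line thickness
        self.pts = deque(maxlen=self.buffer)  # most recent ball centers
        self.greenLower = (29, 86, 6)         # illustrative HSV bounds for "green"
        self.greenUpper = (64, 255, 255)
        self.imgWidth = 640
        self.image_received = False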
Example #14
def check_bubbles(thresh):
    # cv2.imshow("quiz", thresh)
    # cv2.waitKey()
    # find contours in the thresholded image, then initialize
    # the list of contours that correspond to questions
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    questionCnts = []

    # loop over the contours
    for c in cnts:
        # compute the bounding box of the contour, then use the
        # bounding box to derive the aspect ratio
        (x, y, w, h) = cv2.boundingRect(c)
        ar = w / float(h)

        # in order to label the contour as a question, region
        # should be sufficiently wide, sufficiently tall, and
        # have an aspect ratio approximately equal to 1
        if w >= 20 and h >= 20 and 0.9 <= ar <= 1.1:
            questionCnts.append(c)

    # sort the question contours top-to-bottom, then initialize
    # the total number of correct answers
    questionCnts = contours.sort_contours(questionCnts,
                                          method="top-to-bottom")[0]

    Keys = {}
    ans = []
    # each question has 5 possible answers, to loop over the
    # question in batches of 5
    for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
        ans = []
        # sort the contours for the current question from
        # left to right, then initialize the index of the
        # bubbled answer
        cnts = contours.sort_contours(questionCnts[i:i + 5])[0]
        bubbled = None
        # loop over the sorted contours
        for (j, c) in enumerate(cnts):
            # construct a mask that reveals only the current
            # "bubble" for the question
            mask = np.zeros(thresh.shape, dtype="uint8")
            cv2.drawContours(mask, [c], -1, 255, -1)

            # apply the mask to the thresholded image, then
            # count the number of non-zero pixels in the
            # bubble area
            mask = cv2.bitwise_and(thresh, thresh, mask=mask)
            # cv2.imshow("maska", mask)
            # cv2.waitKey()
            total = cv2.countNonZero(mask)
            # print(total)

            # if the current total has a larger number of total
            # non-zero pixels, then we are examining the currently
            # bubbled-in answer
            if total > 4200:
                ans.append(j)
        Keys[q] = ''.join(str(choice) for choice in ans)
    return Keys
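A sketch of feeding check_bubbles() a sheet; the inverted Otsu threshold is the usual prelude for this kind of bubble counting, and the file name is an assumption:

import cv2

sheet = cv2.imread("answer_sheet.png")
gray = cv2.cvtColor(sheet, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255,
                       cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
print(check_bubbles(thresh))  # e.g. {0: '2', 1: '03', ...}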
Example #15
    def callback(self, ros_data):
        '''Callback function of subscribed topic.
        Here images get converted and features detected'''
        if VERBOSE:
            print('received image of type: "%s"' % ros_data.format)

        #### direct conversion to CV2 ####
        np_arr = np.frombuffer(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)

        # Defining RED range (in HSV)
        redLower = (160, 100, 100)
        redUpper = (179, 255, 255)

        blurred = cv2.GaussianBlur(image_np, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, redLower, redUpper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        center = None

        # Only proceed if at least one contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use
            # it to compute the minimum enclosing circle and
            # centroid
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))  # centroid coordinates

            # Only proceed if the radius meets a minimum size
            if radius > 10:
                # draw the circle and centroid on the frame,
                # then update the list of tracked points
                cv2.circle(image_np, (int(x), int(y)), int(radius),
                           (0, 255, 255), 2)
                cv2.circle(image_np, center, 5, (0, 0, 255), -1)

                # Image point coordinates
                float_center = np.float32(center)
                float_center1 = [float_center[0] + radius, float_center[1]]
                float_center2 = [float_center[0] - radius, float_center[1]]
                float_center3 = [float_center[0], float_center[1] - radius]
                float_center4 = [float_center[0], float_center[1] + radius]
                # Corresponding coordinates of the object points in the image frame
                image_point = np.array([float_center1, float_center2,
                                        float_center3, float_center4])

                # Calling solvePnP to compute the transform between the object frame and the camera
                (_, rotation_vector, translation_vector) = cv2.solvePnP(
                    self.object_point, image_point, self.camera_matrix,
                    self.dist_coefs)

                # Printing the translation vector
                print('Object position: ')
                print(translation_vector)

                # Publishing the translation vector
                self.translation_pub.publish(float(translation_vector[0]),
                                             float(translation_vector[1]),
                                             float(translation_vector[2]))

        # show the annotated image
        cv2.imshow('window', image_np)
        cv2.waitKey(2)
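
# Added sketch (all numbers hypothetical): the callback above assumes
# self.object_point, self.camera_matrix and self.dist_coefs were set up
# elsewhere; a minimal consistent setup could look like this.
import numpy as np
import cv2

R = 0.05  # ball radius in metres (assumption)
# Four coplanar silhouette points in the object frame, ordered
# right / left / top / bottom to match image_point in the callback
object_point = np.array([[R, 0, 0], [-R, 0, 0],
                         [0, -R, 0], [0, R, 0]], dtype=np.float32)
camera_matrix = np.array([[600., 0., 320.],
                          [0., 600., 240.],
                          [0., 0., 1.]])
dist_coefs = np.zeros(5)
image_point = np.array([[420., 240.], [380., 240.],
                        [400., 220.], [400., 260.]])
ok, rvec, tvec = cv2.solvePnP(object_point, image_point, camera_matrix, dist_coefs)
print(tvec)  # translation of the object frame in camera coordinates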
Example #16
# it helps in finding/detecting contours in the gray image
grayImageBlur = cv2.blur(grayImage, (3, 3))
# then perform Canny edge detection
edgedImage = cv2.Canny(grayImageBlur, 100, 300, apertureSize=3)
# show the gray, blurred, and edge-detected images
cv2.imshow("gray", grayImage)
cv2.imshow("grayBlur", grayImageBlur)
cv2.imshow("Edge Detected Image", edgedImage)
cv2.waitKey(0)  # press any key to close all cv2 windows
cv2.destroyAllWindows()

# find the contours in the edged image, sort area wise
# keeping only the largest ones
allContours = cv2.findContours(edgedImage.copy(), cv2.RETR_LIST,
                               cv2.CHAIN_APPROX_SIMPLE)
allContours = imutils.grab_contours(allContours)
# sort contours by area in descending order and keep only the largest one
allContours = sorted(allContours, key=cv2.contourArea, reverse=True)[:1]
# approximate the contour
perimeter = cv2.arcLength(allContours[0], True)
ROIdimensions = cv2.approxPolyDP(allContours[0], 0.02 * perimeter, True)
# show the contour on image
cv2.drawContours(image, [ROIdimensions], -1, (0, 255, 0), 2)
cv2.imshow("Contour Outline", image)
cv2.waitKey(0)
cv2.destroyAllWindows()

# reshape coordinates array
ROIdimensions = ROIdimensions.reshape(4, 2)
# list to hold ROI coordinates
rect = np.zeros((4, 2), dtype="float32")
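
# Added sketch: one common way (not necessarily this author's next step) to
# fill `rect` with the ROI corners ordered top-left, top-right, bottom-right,
# bottom-left, ready for a four-point perspective warp.
import numpy as np

def order_points(pts):
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]   # top-left has the smallest x + y
    rect[2] = pts[np.argmax(s)]   # bottom-right has the largest x + y
    d = np.diff(pts, axis=1)      # y - x for each corner
    rect[1] = pts[np.argmin(d)]   # top-right has the smallest y - x
    rect[3] = pts[np.argmax(d)]   # bottom-left has the largest y - x
    return rect

# rect = order_points(ROIdimensions)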
Example #17
    maskg = cv2.erode(maskg, None, iterations=2)
    maskg = cv2.dilate(maskg, None, iterations=2)

    maskr = cv2.inRange(hsv, redLower, redUpper)
    maskr = cv2.erode(maskr, None, iterations=2)
    maskr = cv2.dilate(maskr, None, iterations=2)

    maskb = cv2.inRange(hsv, blueLower, blueUpper)
    maskb = cv2.erode(maskb, None, iterations=2)
    maskb = cv2.dilate(maskb, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cntsg = cv2.findContours(maskg.copy(), cv2.RETR_EXTERNAL,
                             cv2.CHAIN_APPROX_SIMPLE)
    cntsg = imutils.grab_contours(cntsg)
    centerg = None

    cntsr = cv2.findContours(maskr.copy(), cv2.RETR_EXTERNAL,
                             cv2.CHAIN_APPROX_SIMPLE)
    cntsr = imutils.grab_contours(cntsr)
    centerr = None

    cntsb = cv2.findContours(maskb.copy(), cv2.RETR_EXTERNAL,
                             cv2.CHAIN_APPROX_SIMPLE)
    cntsb = imutils.grab_contours(cntsb)
    centerb = None

    # only proceed if at least one contour was found
    if len(cntsg) > 0:
        # find the largest contour in the mask, then use
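
# (Snippet truncated above.) Added sketch: the three per-colour mask/contour
# blocks could be collapsed into one helper instead of repeating the
# erode/dilate boilerplate per colour.
import cv2
import imutils

def color_contours(hsv, lower, upper):
    mask = cv2.inRange(hsv, lower, upper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    return imutils.grab_contours(cnts)

# e.g. cntsg = color_contours(hsv, greenLower, greenUpper), likewise for red and blue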
Example #18
def main():
    """

    :return:
    """

    # Display best params or run the whole sim
    if DISPLAY_BEST_PARAMS:
        try:
            with open(PATH_BEST_PARAMS, 'rb') as f:
                best_params, best_recall, best_precision = pickle.load(f)
        except FileNotFoundError:
            print("[ERROR] Best param file not found.")
            raise
        iteration_dict = [best_params, best_recall,
                          best_precision]  # Matches the dump order
    else:
        params = {
            'gaussWindow': range(1, 8, 2),
            'residualConnections': range(1, 8, 2),
            'sigma': np.linspace(0.1, 0.9, 5),
            'dilationIterations': range(1, 8, 2),
            'mgp': range(25, 26, 25),
            'winSize': range(3, 4, 2),
            'maxLevel': range(5, 6, 3),
            'threshold_low': range(65, 66, 10),
            'threshold_gain': np.linspace(1.25, 1.26, 1),
            'diffMethod': range(0, 1, 1),
            'skipFrame': range(0, 1, 1)
        }

        header = create_log(PATH_ALL_RESULTS, params)
        iteration_dict = ParameterGrid(params)

    video_stream, nb_frames, frame_width, frame_height = vt.init.import_stream(
        VIDEO_STREAM_PATH)
    bbox_heli_ground_truth = vt.bbox.import_bbox_heli(
        PATH_BBOX)  # Creates a dict

    # Min/Max area for the helicopter detection.
    # Min is difficult: it could be as small as a speck in the distance
    # Max is easier: you know how close it can possibly get (the helipad)
    min_area = 1
    if ((frame_width == 1920 and frame_height == 1080)
            or (frame_width == 3280 and frame_height == 2464)):
        binning = 1
    else:
        binning = 2 * 2
        print(
            "[WARNING] Input resolution unusual. Camera sensor understood to be working with a 2x2 binning."
        )
    max_area = 200 * 200 / binning

    print("[INFO] Starting {} iterations".format(len(iteration_dict)))
    first_bbox = min(bbox_heli_ground_truth.keys())
    last_bbox = max(bbox_heli_ground_truth.keys())
    print("[INFO] Using bbox frames {} to {}".format(first_bbox, last_bbox))

    # Save the best results in memory
    if DISPLAY_BEST_PARAMS:
        counter_best_params = 0  # Used when displaying the 3 best runs
    highest_f1_score = 0
    highest_recall = 0
    highest_precision = 0
    vs2 = vt.init.cache_video(video_stream, 'list', gray_scale=FLAG_GRAY_SCALE)
    for sd in tqdm.tqdm(iteration_dict):
        # -------------------------------------
        # 1. RESET THE SIM DEPENDENT VARIABLES
        # -------------------------------------

        timing = {
            'Read frame': 0,
            'Convert to grayscale': 0,
            'Stabilize': 0,
            'Double Gauss': 0,
            'Abs diff': 0,
            'Thresholding': 0,
            'Dilation': 0,
            'Count boxes': 0,
            'Finalize': 0
        }
        nb_bbox = []  # Stores bbox data for a sim

        # Get ready to store residualConnections frames over and over
        previous_gray_frame = collections.deque(
            maxlen=sd['residualConnections'])
        # previous_gauss_frame = collections.deque(maxlen=sd['residualConnections'])

        # img_stab = imageStabilizer.imageStabilizer(frame_width, frame_height, maxGoodPoints=sd['mgp'],
        #                                           maxLevel=sd['maxLevel'], winSize=sd['winSize'])

        counter_skip_frame = sd[
            'skipFrame']  # Go through the if statement the first time

        fps = FPS().start()
        # ----------------------------
        # 2. FRAME PROCESSING - GO THROUGH ALL FRAMES WITH A BBOX
        # -----------------------------

        for frame_number in range(nb_frames):

            t0 = time.perf_counter()
            # frame = vs.read()[1] # No cache
            current_frame = vs2[frame_number].copy(
            )  # Prevents editing the original frames!
            t1 = time.perf_counter()
            # Skip all the frames that do not have a Bbox
            if frame_number < first_bbox:
                continue
            if frame_number > min(nb_frames - 2, last_bbox):
                break

            # 0. Skip frames - subsampling of FPS
            if counter_skip_frame < sd['skipFrame']:
                counter_skip_frame += 1
                continue
            else:
                counter_skip_frame = 0

            # Create a 0 based index that tracks how many bboxes we have gone through
            bbox_frame_number = frame_number - first_bbox  # Starts at 0, automatically incremented
            # Populate the deque with sd['residualConnections'] gray frames
            if bbox_frame_number < sd['residualConnections']:
                # current_frame = frame
                current_gray_frame = current_frame if FLAG_GRAY_SCALE else cv2.cvtColor(
                    current_frame, cv2.COLOR_BGR2GRAY)
                previous_gray_frame.append(current_gray_frame)
                continue

            # I. Grab the current in color space
            # t0=time.perf_counter()
            # current_frame = frame

            # II. Convert to gray scale
            t2 = time.perf_counter()
            current_gray_frame = current_frame if FLAG_GRAY_SCALE else cv2.cvtColor(
                current_frame, cv2.COLOR_BGR2GRAY)

            # III. Stabilize the image in the gray space with latest gray frame, fwd to color space
            # Two methods (don't chain them): phase correlation & optical flow
            t3 = time.perf_counter()
            if FLAG_PHASE_CORRELATION:
                """[TBR/Learning XP] Phase correlation gets linearly faster as the area
                to process is reduced, which is nice. However, if the expected translation is small
                (~1 px), the predictions can vary widely as the crop size is reduced.
                If the motion gets larger (even just 10 px), the small- and large-crop results match very accurately!
                plt.figure()
                plt.imshow(crop)
                plt.show()
                
                lCrop = 1000 # Large crop
                motion = 10 # controlled displacement
                for sCrop in range(100, 1001, 100):
                    #sCrop = 200
                    
                    t31 = time.perf_counter()
                    retvalSmall, response = cv2.phaseCorrelate(np.float32(current_gray_frame[:sCrop, :sCrop])/255.0, 
                    np.float32(current_gray_frame[motion:sCrop+motion, motion:sCrop+motion])/255.0)
                    t32 = time.perf_counter()
                    retvalLarge, response = cv2.phaseCorrelate(np.float32(current_gray_frame[:lCrop, :lCrop])/255.0, 
                    np.float32(current_gray_frame[motion:lCrop+motion, motion:lCrop+motion])/255.0)
                    t33 = time.perf_counter()
                    print("Full image is {} bigger and takes {} more time"
                    .format((lCrop/sCrop)**2, (t33-t32)/(t32-t31)))
                    print("xs {:.3f} xl {:.3f} Rx={:.3f} ys {:.3f} yl {:.3f} Ry={:.3f}".format(
                    retvalSmall[0], retvalLarge[0], retvalSmall[0]/retvalLarge[0], 
                    retvalSmall[1], retvalLarge[1], retvalSmall[1]/retvalLarge[1]))
            assert 1==0
            """
                pass
            if FLAG_OPTICAL_FLOW:
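                # NOTE: this branch needs the imageStabilizer instance (img_stab)
                # whose construction is commented out further up.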
                m, current_gray_frame = img_stab.stabilizeFrame(
                    previous_gray_frame[-1], current_gray_frame)
                current_frame = cv2.warpAffine(current_frame, m,
                                               (frame_width, frame_height))
            t4 = time.perf_counter()
            # current_frame = current_frame[int(cropPerc*frame_height):int((1-cropPerc)*frame_height),
            # int(cropPerc*frame_width):int((1-cropPerc)*frame_width)]
            # modif[bbox_frame_number-1] = img_stab.extractMatrix(m)

            # IV. Gaussian Blur
            # Done between current_frame and the grayFrame from residualConnections ago (first element in the deque)
            current_gauss_frame = cv2.GaussianBlur(
                current_gray_frame, (sd['gaussWindow'], sd['gaussWindow']), 0)
            previous_gauss_frame = cv2.GaussianBlur(
                previous_gray_frame[0], (sd['gaussWindow'], sd['gaussWindow']),
                0)

            t5 = time.perf_counter()

            # V. Differentiation in the Gaussian space
            diff_frame = cv2.absdiff(current_gauss_frame, previous_gauss_frame)
            """[TBR/XP] absdiff strategies in the gaussian space"""
            """#Average of the absdiff with the current_frame for all residual connections (1toN strategy)
            # Basically, you do (1/m)*sum(|current_frame-previousGauss[i]|, i=0..N), 
            # N being dictated by residualConnections
            diff_frame = np.zeros(current_gauss_frame.shape)
            for gaussFrame in previous_gauss_frame:
                diff_frame += cv2.absdiff(current_gauss_frame, gaussFrame)
            diff_frame /= len(previous_gauss_frame)
            diff_frame = diff_frame.astype(np.uint8)  # float -> uint8
            # Best f1_score was about 0.32 (0.34 for simple absdiff(N, N-k))
            """
            """#Average of the absdiff between n and n-1 frame (NtoN-1 strategy)
            # Basically, you do (1/m)*sum(|previousGauss[i]-previousGauss[i+1]|, i=0..N-1), 
            # N being dictated by residualConnections
            # In that case, an array of the differences in the gaussian space could be cached to just pick 
            # what you want, but there is not enough RAM.
            diff_frame = np.zeros(current_gauss_frame.shape)
            for index in range(len(previous_gauss_frame)-1):
                diff_frame += cv2.absdiff(previous_gauss_frame[index], previous_gauss_frame[index+1])
            # Finish with current_gauss_frame and the latest previous_gauss_frame
            diff_frame += cv2.absdiff(current_gauss_frame, previous_gauss_frame[-1])
            diff_frame /= len(previous_gauss_frame)
            diff_frame = diff_frame.astype(np.uint8)  # float -> uint8
            # Best f1_score was about 0.29 (0.34 for simple absdiff(N, N-k))
            """
            t6 = time.perf_counter()
            if DISPLAY_FEED != '000':
                delta_frame = diff_frame.copy()

            # VI. BW space manipulations
            # diff_frame = cv2.threshold(diff_frame, sd['threshold'], 255, cv2.THRESH_BINARY)[1]

            # v = np.median(diff_frame)
            v = 127
            lower = int(max(0, (1.0 - sd['sigma']) * v))
            upper = int(min(255, (1.0 + sd['sigma']) * v))
            # diff_frame = cv2.Canny(diff_frame, sd['threshold_low'], sd['threshold_low']*sd['threshold_gain'])
            diff_frame = cv2.Canny(diff_frame, lower, upper)

            t7 = time.perf_counter()
            # dilate the thresholded image to fill in holes, then find contours
            if sd['diffMethod'] == 0:
                diff_frame = cv2.dilate(diff_frame,
                                        None,
                                        iterations=sd['dilationIterations'])
                diff_frame = cv2.erode(diff_frame,
                                       None,
                                       iterations=sd['dilationIterations'])
            elif sd['diffMethod'] == 1:
                diff_frame = cv2.morphologyEx(diff_frame, cv2.MORPH_OPEN, None)

            if DISPLAY_FEED != '000':
                thresh_feed = diff_frame.copy()
            cnts = cv2.findContours(diff_frame, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            t8 = time.perf_counter()

            # Circle around the actual corner of the helicoBBox
            # Obtained via manual CSRT TRACKER
            # cv2.circle(current_frame, bbox_heli_ground_truth[bbox_frame_number], BBOX_ERROR, (0,0,255), -1)

            large_box = 0
            counter_bbox_heli = 0

            # VII. Process the BB and classify them
            x_gt, y_gt, w_gt, h_gt = bbox_heli_ground_truth[
                frame_number]  # Ground Truth data
            for c in cnts:
                # A. Filter out useless BBs
                # 1. if the contour is too small or too large, ignore it
                if cv2.contourArea(c) < min_area:
                    continue
                if cv2.contourArea(c) > max_area:
                    continue
                # compute the bounding box for the contour, draw it on the current_frame,
                # and update the text
                (x, y, w, h) = cv2.boundingRect(c)

                # 2. Box partially out of the frame
                # if x < 0 or x+s > frame_width or y < 0 or y+s > frame_height: # Square box
                if x < 0 or x + w > frame_width or y < 0 or y + h > frame_height:
                    continue
                # 3. Box center in the PADDING area
                if not (PADDING < x + w // 2 < frame_width - PADDING
                        and PADDING < y + h // 2 < frame_height - PADDING):
                    continue

                # B. Classify BBs - a large_box is a potential bbox_heli_ground_truth
                large_box += 1
                # Check if the corner is within range of the actual corner
                # That data was obtained by running a CSRT TRACKER on the helico

                # Classify bboxes based on their IOU with ground truth
                converted_current_bbox = vt.bbox.xywh_to_x1y1x2y2((x, y, w, h))
                converted_ground_truth_bbox = vt.bbox.xywh_to_x1y1x2y2(
                    (x_gt, y_gt, w_gt, h_gt))
                if vt.bbox.intersection_over_union(
                        converted_current_bbox,
                        converted_ground_truth_bbox) >= IOU:
                    counter_bbox_heli += 1
                    if DISPLAY_FEED == '001':  # Display positive bbox found in COLOR['GREEN']
                        cv2.putText(current_frame, "heli", (x, y - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    COLOR['GREEN'], 2)
                        cv2.rectangle(current_frame, (x, y), (x + w, y + h),
                                      COLOR['GREEN'], 2)
                else:
                    if DISPLAY_FEED == '001':  # Display negative bbox found in COLOR['BLUE']
                        cv2.putText(current_frame, "not heli", (x, y + h + 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    COLOR['BLUE'], 2)
                        cv2.rectangle(current_frame, (x, y), (x + w, y + h),
                                      COLOR['BLUE'], 2)
                    pass

            # C. Generate a square BB
            # cv2.rectangle(current_frame, (x, y), (x + s, y + s), COLOR['GREEN'], 2)
            # cv2.rectangle(current_frame, (x, y), (x + w, y + h), COLOR['GREEN'], 2)
            if DISPLAY_FEED == '001':
                cv2.rectangle(current_frame, (x_gt, y_gt),
                              (x_gt + w_gt, y_gt + h_gt), COLOR['RED'], 2)
            t9 = time.perf_counter()

            # VIII. draw the text and timestamp on the current_frame
            if DISPLAY_FEED != '000':
                if DISPLAY_BEST_PARAMS:
                    if counter_best_params == 0:
                        run = "best_f1_score"
                    elif counter_best_params == 1:
                        run = "best_recall"
                    elif counter_best_params == 2:
                        run = "best_precision"
                    else:
                        raise ValueError(
                            'There should only be 3 best results in the best_param log file'
                        )
                    cv2.putText(
                        current_frame,
                        "Current run: {} - f1_score: {:.3f} - recall: {:.3f} - precision: {:.3f}"
                        .format(run, sd['f1_score'], sd['recall'],
                                sd["precision"]), (10, 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLOR['RED'], 2)

                cv2.putText(
                    current_frame, "BBoxes: {} found, {} heliBox".format(
                        len(cnts), counter_bbox_heli), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLOR['RED'], 2)
                # cv2.putText(current_frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"), (10, 30),
                # cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLOR['RED'], 1) # Shows current date/time

                # IX. show the current_frame and record if the user presses a key
                show_feed(DISPLAY_FEED, thresh_feed, delta_frame,
                          current_frame)
                key = cv2.waitKey(1) & 0xFF
                # if the `q` key is pressed, break from the loop
                if key == ord("q"):
                    break

            # X. Save frames & track KPI
            # The deque has a maxlen of residualConnections so the first-in will pop
            gray_frame_to_append = vs2[
                frame_number] if FLAG_GRAY_SCALE else cv2.cvtColor(
                    vs2[frame_number], cv2.COLOR_BGR2GRAY)
            previous_gray_frame.append(gray_frame_to_append)
            nb_bbox.append([
                len(cnts), large_box, counter_bbox_heli,
                1 if counter_bbox_heli else 0
            ])

            fps.update()
            t10 = time.perf_counter()
            if FLAG_DISPLAY_TIMING:
                new_timing = {
                    'Read frame': t1 - t0,
                    'Convert to grayscale': t3 - t2,
                    'Stabilize': t4 - t3,
                    'Double Gauss': t5 - t4,
                    'Abs diff': t6 - t5,
                    'Thresholding': t7 - t6,
                    'Dilation': t8 - t7,
                    'Count boxes': t9 - t8,
                    'Finalize': t10 - t9
                }
                for key in timing.keys():
                    timing[key] += new_timing[key]

        # XI. Display results
        fps.stop()
        # vs.release()  # Done with going through this simulation, get ready for another pass
        if FLAG_DISPLAY_TIMING:
            print("Code profiling for various operations (in s):\n", timing)
        cv2.destroyAllWindows()

        average_fps = fps.fps()
        print("[INFO] FPS: {:.2f}".format(average_fps))
        # print(img_stab.detailedTiming())

        # Impact of stabilization on number of boxes
        bb = np.array(nb_bbox)
        bb = bb[1:]  # Delete first frame which is not motion controlled

        # KPI
        # per simulation
        # print(bb)
        avg_nb_boxes = np.mean(bb[:, 0])
        avg_nb_filtered_boxes = np.mean(bb[:, 1])
        avg_nb_heli_bbox = np.mean(bb[:, 2])
        # Precision: how good is the algo at ruling out irrelevant boxes?
        precision = avg_nb_heli_bbox / avg_nb_filtered_boxes  # Ratio of heli boxes to filtered boxes
        # Recall: how many frames had a positive heliBox? There should be one in each.
        recall = np.sum(
            bb[:, 3]) / nb_frames  # Proportion of frames with helicopter

        # -----------------
        # SANITY CHECKS & f1_score
        # -----------------
        try:
            assert 0 < recall <= 1
            assert 0 < precision <= 1
            assert 0 <= avg_nb_heli_bbox <= avg_nb_filtered_boxes
            assert 0 <= avg_nb_filtered_boxes <= avg_nb_boxes
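            # f1 is the harmonic mean of precision and recall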
            f1_score = 2 / (1 / precision + 1 / recall)
        except AssertionError:
            print('[WARNING] KPIs out of bounds - set to 0')
            print("[WARNING] KPI: ", recall, precision, avg_nb_heli_bbox,
                  avg_nb_filtered_boxes)
            recall, precision, avg_nb_heli_bbox, avg_nb_filtered_boxes = (0, 0,
                                                                          0, 0)
            f1_score = 0
        """kpis
        plt.figure()
        plt.plot(bb[:, 0])
        plt.plot(bb[:, 1])
        plt.plot(bb[:, 2])
        plt.legend(("Number of boxes", "Boxes large enough", "Heli box"))
        titl = \
        "Boxes detected - av: {:.2f} - std: {:.2f} at {:.2f} FPS\n\
        Av Helibb per frame: {:.3f} - Ratio of helibb: {:.3f}\tFrame with heli: {:.3f} "\
        .format(\
        avg_nb_filtered_boxes, np.std(bb[:, 1]), real_fps, \
        avg_nb_heli_bbox, precision, recall\
        )
        plt.title(titl)
        plt.show()
        """
        # Display best params or append best results to log
        if DISPLAY_BEST_PARAMS:
            counter_best_params += 1
            # print(sd)  # Possible to limit digits?
            print(
                'gaussWindow: {}, residualConnections: {}, sigma: {:.1f}, dilationIterations: {}, precision: {:.3f}, recall: {:.3f}, f1_Score: {:.3f}'
                .format(sd['gaussWindow'], sd['residualConnections'],
                        sd['sigma'], sd['dilationIterations'], sd['precision'],
                        sd['recall'],
                        sd['f1_score']))  # Possible to limit digits?
        else:
            # Output results - parameters+kpis
            kpis = [
                IOU, average_fps, avg_nb_boxes, avg_nb_filtered_boxes,
                avg_nb_heli_bbox, precision, recall, f1_score
            ]
            # sim_output concatenates the parameter values (in params key order) with the KPIs
            sim_output = [sd[k] for k in params.keys()] + list(kpis)

            # Log the best f1_score, recall and precision
            if f1_score > highest_f1_score:
                highest_f1_score = f1_score
                best_params = sim_output
            if recall > highest_recall:
                highest_recall = recall
                best_recall = sim_output
            if precision > highest_precision:
                highest_precision = precision
                best_precision = sim_output

            with open(PATH_ALL_RESULTS, 'a') as f:
                w = csv.writer(f)
                w.writerow(sim_output)

    # XII. Wrap-up the search & output some logs for quick review
    # XII.1. Save the best param after inputting the header
    if not DISPLAY_BEST_PARAMS:
        create_log(PATH_BEST_PARAMS, params)
        with open(PATH_BEST_PARAMS, 'a') as f:
            out = csv.writer(f)
            #out.writeheader()
            out.writerow(best_params)
            out.writerow(best_precision)
            out.writerow(best_recall)
        with open(PATH_PARAM_SPACE, 'w') as f:
            out = csv.DictWriter(f, fieldnames=header)
            out.writerow(params)
        """[TBR] No more dict pickling, use DictWriter instead so they are human readable
        with open(PATH_BEST_PARAMS, 'wb') as f:
            best_params = dict(zip(header, best_params))
            best_precision = dict(zip(header, best_precision))
            best_recall = dict(zip(header, best_recall))
            pickle.dump([best_params, best_recall, best_precision], f, protocol=pickle.HIGHEST_PROTOCOL)
        
        # XII.2. Pickle the params dict
        with open(PATH_PARAM_SPACE, 'wb') as f:
            pickle.dump(params, f, protocol=pickle.HIGHEST_PROTOCOL)
        """
        # XII.3. Final message!!
        print("Done. Highest f1_score: ", highest_f1_score)
Example #19
def Platoon():
  lower = np.array([0, 164, 169], dtype = "uint8")
  upper = np.array([138, 255, 255], dtype = "uint8")
  sd = ShapeDetector()
  os.system("pkill uv4l")
  cap = cv2.VideoCapture(1)
  f_count = 0
  
  Forward(50) #Starting car here
  
  while cap.isOpened():
    if not getattr(thread, "platooning", True):
      break
    # Capture frame-by-frame
    ret, frame = cap.read()
    #frame=cv2.flip(frame, -1)
    f_count += 1
    if ret and f_count == 1: #adjust here for skipping frames
      f_count = 0  
      
      #resize frame    
      resized = imutils.resize(frame, width=640, height=480)
      
      #color mask
      mask = cv2.inRange(resized, lower, upper)
  
      cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
          cv2.CHAIN_APPROX_SIMPLE)
      cnts = imutils.grab_contours(cnts)
      
      #variable to store biggest contour
      biggest_c = None
      # loop over the contours
      for c in cnts:
          
          #if it fits criteria, update biggest_c variable with current contour
          if sd.detect(c, biggest_c):
              biggest_c = c
           
      if biggest_c is not None:#if contour found
          M = cv2.moments(biggest_c)
          cX = int(M["m10"] / M["m00"])
          cX_track = cX-320 #find difference in pixels from center
          peri = int(cv2.arcLength(biggest_c, True))
          if peri > 50:
            angle = int(cX_track / 320 * 50) #-50 to 50 degrees steering. Might need to flip sign
            #print("Steer("+str(angle)+")") #command to arduino
            if -50 < angle < 50:
              Steer(int(angle))
              #print(int(angle))
              #STEERING
            
            speed = 70 * 2 * (90/peri) + 30
            if 0 < speed < 255:
              if speed < 65:
                  Stop()
              else:
                  Forward(int(speed))
                  print(int(speed))
          
    # Break the loop if no frame was read
    elif not ret:
        break
    
    
  # When everything done, release the video capture object
  cap.release()
  Stop()
Example #20
    #inspired by https://pythonprogramming.net/morphological-transformation-python-opencv-tutorial/
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

    # find contours in the mask and initialize the current
    # (x, y) center of the blue object
    # divide the frame into separate halves so that one half controls the turning/steering
    # and the other half controls forward and reverse
    up_mask = mask[0:height // 2, 0:width, ]
    down_mask = mask[height // 2:height, width // 4:3 * width // 4, ]

    #find the contours (blue object's boundary) in the upper and lower halves to find the center of the object
    #syntax: (img, mode, method)
    cnts_up = cv2.findContours(up_mask.copy(), cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)
    cnts_up = imutils.grab_contours(cnts_up)
    center_up = None

    cnts_down = cv2.findContours(down_mask.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
    cnts_down = imutils.grab_contours(cnts_down)
    center_down = None

    # only proceed if at least one contour was found
    if len(cnts_up) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and centroid
        c = max(cnts_up, key=cv2.contourArea)
        #find circle of minimum area eclosing a 2D point set
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        #The function cv2.moments() gives a dictionary of all moment values calculated.
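        # (Snippet truncated here.) A typical continuation, for reference:
        # M = cv2.moments(c)
        # if M["m00"] > 0:
        #     center_up = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))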
Example #21
def findsize(imgOrig, imgOrigGray):
    # Define the midpoint of two points
    def midpoint(ptA, ptB):
        return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)

    # Initialize the known width of the reference object, in this case 12.5 cm
    width = 12.5

    # Find the blur of the imgOrigGray image
    imgOrigBlur = cv2.GaussianBlur(imgOrigGray, (7, 7), 0)

    # Perform edge detection, then perform a dilation plus erosion to
    # close the gaps in between object edges
    edged = cv2.Canny(imgOrigBlur, 50, 100)
    edged = cv2.dilate(edged, None, iterations=1)
    edged = cv2.erode(edged, None, iterations=1)

    # Find the contours in the edge map
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    # Sort the contours from left-to-right and initialize the
    # pixels per metric calibration variable
    (cnts, _) = contours.sort_contours(cnts)
    pixelsPerMetric = None

    # Loop over the contours individually
    for c in cnts:
        # If the contour is not sufficiently large, ignore it
        if cv2.contourArea(c) < 500:
            continue

        # Compute the rotated bounding box of the contour
        orig = imgOrig.copy()
        box = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
        box = np.array(box, dtype="int")

        # Order the points in the contour such that they appear
        # in top-left, top-right, bottom-right, and bottom-left
        # Order, then draw the outline of the rotated bounding box
        box = perspective.order_points(box)
        cv2.drawContours(orig, [box.astype("int")], -1, (255, 0, 0), 2)

        # Loop over the original points and draw them
        for (x, y) in box:
            cv2.circle(orig, (int(x), int(y)), 5, (255, 0, 0), -1)

        # Unpack the ordered bounding box, then compute the midpoints
        # between the top-left and top-right coordinates, followed by
        # the midpoint between bottom-left and bottom-right coordinates
        (tl, tr, br, bl) = box
        (tltrX, tltrY) = midpoint(tl, tr)
        (blbrX, blbrY) = midpoint(bl, br)

        # Compute the midpoint between the top-left and top-right points
        # followed by the midpoint between the top-right and bottom-right
        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)

        # Draw the midpoints on the image
        cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
        cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
        cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
        cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)

        # Draw lines between the midpoints
        cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
                 (255, 0, 255), 2)
        cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
                 (255, 0, 255), 2)

        # Compute the Euclidean distance between the midpoints
        dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
        dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))

        # If the pixels per metric has not been initialized, then
        # compute it as the ratio of pixels to supplied metric
        if pixelsPerMetric is None:
            pixelsPerMetric = dB / width
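            # e.g. a 250 px reference edge at width = 12.5 cm gives
            # 20 px/cm, so an object edge of 300 px measures 15 cm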

        # Compute the size of the object
        dimA = dA / pixelsPerMetric
        dimB = dB / pixelsPerMetric

        # Draw the object sizes on the image
        cv2.putText(orig, "{:.1f}cm".format(dimA),
                    (int(tltrX + 20), int(tltrY + 10)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 1)
        cv2.putText(orig, "{:.1f}cm".format(dimB),
                    (int(trbrX - 30), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
                    0.65, (255, 255, 255), 1)

        #  Display the object sizes on the image
        cv2.imshow("Detected Object - Sizes", orig)

        # If any key other than (esc, s, d, n) is pressed, the next image is shown
        # If the esc key is pressed, the program shuts down
        key = cv2.waitKey(0) & 0xFF

        if key == ord('s'):
            continue
Example #22
def crop_brain_contour(image, plot=False):

    #import imutils
    #import cv2
    #from matplotlib import pyplot as plt

    # Convert the image to grayscale, and blur it slightly
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)

    # Threshold the image, then perform a series of erosions +
    # dilations to remove any small regions of noise
    thresh = cv2.threshold(gray, 45, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.erode(thresh, None, iterations=2)
    thresh = cv2.dilate(thresh, None, iterations=2)

    # Find contours in thresholded image, then grab the largest one
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)

    # Find the extreme points
    extLeft = tuple(c[c[:, :, 0].argmin()][0])
    extRight = tuple(c[c[:, :, 0].argmax()][0])
    extTop = tuple(c[c[:, :, 1].argmin()][0])
    extBot = tuple(c[c[:, :, 1].argmax()][0])

    # crop new image out of the original image using the four extreme points (left, right, top, bottom)
    new_image = image[extTop[1]:extBot[1], extLeft[0]:extRight[0]]

    if plot:
        plt.figure()

        plt.subplot(1, 2, 1)
        plt.imshow(image)

        plt.tick_params(axis='both',
                        which='both',
                        top=False,
                        bottom=False,
                        left=False,
                        right=False,
                        labelbottom=False,
                        labeltop=False,
                        labelleft=False,
                        labelright=False)

        plt.title('Original Image')

        plt.subplot(1, 2, 2)
        plt.imshow(new_image)

        plt.tick_params(axis='both',
                        which='both',
                        top=False,
                        bottom=False,
                        left=False,
                        right=False,
                        labelbottom=False,
                        labeltop=False,
                        labelleft=False,
                        labelright=False)

        plt.title('Cropped Image')

        plt.show()

    return new_image
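
# Added usage sketch ('brain.jpg' is a placeholder path):
# img = cv2.imread('brain.jpg')
# cropped = crop_brain_contour(img, plot=True)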
Example #23
def markBall(ballLowerLimit,ballUpperLimit,maskball,img,previmg,prevthresh,prevx,prevy,prevw,prevh):
    #Defining a kernel to do morphological operation in threshold image to get better output
    kernel = np.ones((10,10),np.uint8)
    contoursImage = img.copy()

    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    gray = cv.GaussianBlur(gray, (21, 21), 0)
    oldgray = cv.cvtColor(previmg, cv.COLOR_BGR2GRAY)
    oldgray = cv.GaussianBlur(oldgray, (21, 21), 0)
    delta = cv.absdiff(oldgray, gray)
    #maskball = cv.dilate(maskball, kernel, iterations=5)
    #maskball = cv.erode(maskball, kernel, iterations=5)
    #Do masking
    '''
    resball = cv.bitwise_and(img, img, mask=maskball)
    #convert to hsv to gray
    resball_bgr = cv.cvtColor(resball,cv.COLOR_HSV2BGR)
    resball_gray = cv.cvtColor(resball,cv.COLOR_BGR2GRAY)'''


    #res =  cv.multiply(maskball,delta,10)
    #res = curres + prevres
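    # Adaptively pick a binary threshold: raise it in steps of 5 until the
    # motion mask keeps fewer than 12000 white pixels (caps noise blow-ups)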
    for threshold in range(5,200,5):
        thresh = cv.threshold(delta,threshold,255,cv.THRESH_BINARY)[1]
        thresh = cv.morphologyEx(thresh, cv.MORPH_CLOSE, kernel)
        thresh = cv.dilate(thresh, kernel, iterations=1)
        thresh = cv.erode(thresh, kernel, iterations=1)
        whiteness = cv.countNonZero(thresh)
        if whiteness < 12000:
            break

    #curthresh = thresh.copy()
    #thresh += prevthresh
    font = cv.FONT_HERSHEY_SIMPLEX

    #find contours in threshold image of movement
    contoursss,hierarchy = cv.findContours(thresh,cv.RETR_TREE,cv.CHAIN_APPROX_SIMPLE)

    resball = cv.bitwise_and(img, img, mask=maskball)
    resball = cv.cvtColor(resball,cv.COLOR_HSV2BGR)
    resball_gray = cv.cvtColor(resball, cv.COLOR_BGR2GRAY)
    colthresh = cv.threshold(resball_gray,10,255,cv.THRESH_BINARY)[1]
    colthresh = cv.dilate(colthresh, kernel, iterations=1)
    colthresh = cv.erode(colthresh, kernel, iterations=1)

    colthresh = cv.multiply(colthresh,thresh,5) + colthresh
    colthresh = cv.dilate(colthresh, kernel, iterations=2)
    colthresh = cv.erode(colthresh, kernel, iterations=1)
    curthresh = colthresh.copy()
    #prevthresh = cv.dilate(prevthresh, kernel, iterations=2)
    colthresh += prevthresh
    #find contours in threshold image of colors
    contourss,hierarchy = cv.findContours(colthresh,cv.RETR_TREE,cv.CHAIN_APPROX_SIMPLE)
    #contourss+=contoursss
    nzCountball = []
    for c in contourss:
            x,y,w,h = cv.boundingRect(c)
            if cv.contourArea(c) > 1000:
                nzCountball.append(0)
                continue
            #Check for ball
            if(150>w>5 and 150>h>5):
                ball_img = img[y:y+h,x:x+w]
                ball_hsv = cv.cvtColor(ball_img,cv.COLOR_BGR2HSV)
                maskball = cv.inRange(ball_hsv, np.array(ballLowerLimit), np.array(ballUpperLimit))
                res3 = cv.bitwise_and(ball_img, ball_img, mask=maskball)
                res3 = cv.cvtColor(res3,cv.COLOR_HSV2BGR)
                res3 = cv.cvtColor(res3,cv.COLOR_BGR2GRAY)

                dist = math.sqrt((prevx-x-w/2)**2 + (prevy-y-h/2)**2)
                #Prioritising contours with a higher fill count, penalised by distance from the previous position
                nzCountball.append(cv.countNonZero(res3)*50 - dist*5)
            else:
                nzCountball.append(0)
    if len(nzCountball)!=0:
        if max(nzCountball)>10:
            x,y,w,h = cv.boundingRect(contourss[nzCountball.index(max(nzCountball))])
            ballimgfinal = img[y:y+h,x:x+w]
            ballhsvfinal = cv.cvtColor(ballimgfinal,cv.COLOR_BGR2HSV)
            maskballfinal = cv.inRange(ballhsvfinal, np.array(ballLowerLimit), np.array(ballUpperLimit))
            maskballfinal = cv.erode(maskballfinal,None, iterations = 1)
            maskballfinal = cv.dilate(maskballfinal,None, iterations = 1)
            cntrs = cv.findContours(maskballfinal, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
            cntrs = imutils.grab_contours(cntrs)
            if len(cntrs) > 0:
                cball = max(cntrs, key = cv.contourArea)
                x1,y1,w1,h1 = cv.boundingRect(cball)

                # show ball ('Pilka' is Polish for 'ball')
                cv.putText(contoursImage, 'Pilka', (x-2, y-2), font, 0.8, (0,255,0), 2, cv.LINE_AA)
                cv.rectangle(contoursImage,(x+x1,y+y1),(x+x1+w1,y+y1+h1),(0,255,0),3)
                prevx = x+x1
                prevy = y+y1
                prevw = w1
                prevh = h1
            else:
                cv.putText(contoursImage, 'Pilka', (x-2, y-2), font, 0.8, (255,0,0), 2, cv.LINE_AA)
                cv.rectangle(contoursImage,(x,y),(x+w,y+h),(255,0,0),3)
                prevx = x
                prevy = y
                prevw = w
                prevh = h
        else:
            cv.putText(contoursImage, 'Pilka', (prevx-2, prevy-2), font, 0.8, (0,0,255), 2, cv.LINE_AA)
            cv.rectangle(contoursImage,(prevx,prevy),(prevx+prevw,prevy+prevh),(0,0,255),3)
    else:
        cv.putText(contoursImage, 'Pilka', (prevx-2, prevy-2), font, 0.8, (0,0,255), 2, cv.LINE_AA)
        cv.rectangle(contoursImage,(prevx,prevy),(prevx+prevw,prevy+prevh),(0,0,255),3)
    return contoursImage, colthresh,curthresh, prevx, prevy, prevw, prevh
Example #24
# Open and resize image
img = cv2.imread('images/frontcar.png', cv2.IMREAD_COLOR)
img = cv2.resize(img, (600, 400))

# GRAYSCALE
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 50, 15, 15)

# Canny edge detection (the classic Canny pipeline starts with Gaussian smoothing)
edged = cv2.Canny(gray, 30, 200)

# TODO try different algorithms
contours = cv2.findContours(edged.copy(), cv2.RETR_TREE,
                            cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
screenCnt = None

for c in contours:

    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.018 * peri, True)

    if len(approx) == 4:
        screenCnt = approx
        break

if screenCnt is None:
    detected = 0
    print("No contour detected")
Example #25
def main():
    #### controls window ####
    cv2.namedWindow(controls_title_window)

    cv2.createTrackbar("trackbar_lR", controls_title_window, ball[LOW_RED],
                       alpha_slider_max, on_trackbar_lR)
    cv2.createTrackbar("trackbar_lG", controls_title_window, ball[LOW_GREEN],
                       alpha_slider_max, on_trackbar_lG)
    cv2.createTrackbar("trackbar_lB", controls_title_window, ball[LOW_BLUE],
                       alpha_slider_max, on_trackbar_lB)

    cv2.createTrackbar("trackbar_hR", controls_title_window, ball[HIGH_RED],
                       alpha_slider_max, on_trackbar_hR)
    cv2.createTrackbar("trackbar_hG", controls_title_window, ball[HIGH_GREEN],
                       alpha_slider_max, on_trackbar_hG)
    cv2.createTrackbar("trackbar_hB", controls_title_window, ball[HIGH_BLUE],
                       alpha_slider_max, on_trackbar_hB)

    cv2.createTrackbar("trackbar_rad_min", controls_title_window, radius_min,
                       80, on_trackbar_radmin)
    cv2.createTrackbar("trackbar_rad_max", controls_title_window, radius_max,
                       80, on_trackbar_radmax)

    global coordinates

    #### acquire image
    cap = cv2.VideoCapture(0)

    while cap.isOpened():

        #### capture frame
        ret, frame = cap.read()
        if not ret:
            break
        #### resize
        resized = imutils.resize(frame, width=600)

        #### BGR --> RGB
        rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
        hsv = cv2.cvtColor(resized, cv2.COLOR_BGR2HSV)

        # convert the resized image to grayscale, blur it slightly,
        # and threshold it
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        thresh = cv2.threshold(blurred, 200, 250, cv2.THRESH_BINARY_INV)[1]

        # find contours in the thresholded image and initialize the
        # shape detector
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)

        # guard against frames where no contour was found
        if len(cnts) > 0:
            cnt = cnts[0]

            rect = cv2.minAreaRect(cnt)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            cv2.drawContours(resized, [box], 0, (0, 0, 255), 2)

        cv2.imshow('frame', resized)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Example #26
    def on_any_event(event):
        if event.is_directory:
            return None
        elif event.event_type == 'created':

            shutil.rmtree('/home/diego/Desktop/Output')
            os.mkdir('/home/diego/Desktop/Output')

            shutil.rmtree('/home/diego/MPFI/gold_particles/PIX2PIX/datasets/Output_Appended/test')
            os.mkdir('/home/diego/MPFI/gold_particles/PIX2PIX/datasets/Output_Appended/test')

            shutil.rmtree('/home/diego/MPFI/gold_particles/PIX2PIX/results/Oct30pix2pix/test_latest/images')
            os.mkdir('/home/diego/MPFI/gold_particles/PIX2PIX/results/Oct30pix2pix/test_latest/images')

            shutil.rmtree('/home/diego/Desktop/Output_ToStich')
            os.mkdir('/home/diego/Desktop/Output_ToStich')

            ## look in INPUT folder, crop photo and save crop to OUTPUT folder
            def load_data_make_jpeg(folder):
                list = glob.glob(folder)
                for entry in list:
                    img_size = (256, 256, 3)
                    img_new = io.imread(entry)
                    img_new = (img_new / 256).astype('uint8')
                    shape = img_new.shape
                    height = shape[0] // 256
                    height256 = height * 256
                    width = shape[1] // 256
                    width256 = width * 256

                    img_new = img_new[:height256, :width256, :3]
                    img_new_w = view_as_blocks(img_new, img_size)
                    img_new_w = np.uint8(img_new_w)
                    imageio.imwrite('/home/diego/Desktop/Output_Final/' + 'CroppedVersion' + '.png', img_new)
                    r = 0
                    for i in range(img_new_w.shape[0]):
                        for j in range(img_new_w.shape[1]):
                            A = np.zeros((img_size[0], img_size[1], 3))
                            A[:, :, :] = img_new_w[i, j, :, :]
                            A = np.uint8(A)
                            imageio.imwrite('/home/diego/Desktop/Output/' + str(r) + '.png', A)
                            r += 1
                return width, height
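
            # Note: skimage.util.view_as_blocks above tiles the cropped image
            # into non-overlapping (256, 256, 3) blocks, yielding an array of
            # shape (height, width, 1, 256, 256, 3); each tile is then written
            # out as <r>.png in grid order.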

            ## Cut up in order, append white images
            width, height = load_data_make_jpeg('/home/diego/Desktop/Input/*.*')

            def combine_white(folderA):
                os.chdir(folderA)
                for file in os.listdir(folderA):
                    imA = io.imread(file)
                    newimage = np.concatenate((imA, white), axis=1)
                    imageio.imwrite('/home/diego/MPFI/gold_particles/PIX2PIX/datasets/Output_Appended/test/' + file,
                                    newimage)

            white = io.imread('/home/diego/Desktop/White/white.png')

            combine_white('/home/diego/Desktop/Output/')

            ## Save that dataset to PIX2PIX/datasets/___

            ## Run PIX2PIX network
            os.system(
                'python3 /home/diego/MPFI/gold_particles/PIX2PIX/test.py --dataroot /home/diego/MPFI/gold_particles/PIX2PIX/datasets/Output_Appended/ --name Oct30pix2pix --model pix2pix --direction AtoB --num_test 1000000 --checkpoints_dir /home/diego/MPFI/gold_particles/PIX2PIX/checkpoints/ --results_dir /home/diego/MPFI/gold_particles/PIX2PIX/results/')
            ## Take only the fake_B photos and stitch them together
            list = glob.glob(
                '/home/diego/MPFI/gold_particles/PIX2PIX/results/Oct30pix2pix/test_latest/images/*_fake_B.png')
            ## Save to OUTPUT folder
            for entry in list:
                split_name = entry.split('/')
                dirA = '/home/diego/MPFI/gold_particles/PIX2PIX/results/Oct30pix2pix/test_latest/images/'
                pathA = os.path.join(dirA, split_name[10])
                dirB = '/home/diego/Desktop/Output_ToStich/'
                pathB = os.path.join(dirB, split_name[10])
                shutil.move(pathA, pathB)

            ## STITCH TOGETHER

            widthdiv256 = width
            heighttimeswidth = width * height

            folderstart = '/home/diego/Desktop/Output_ToStich/'

            def stitch_row(n):
                file1 = np.array(Image.open(folderstart + master[n]))
                file2 = np.array(Image.open(folderstart + master[n + 1]))
                full_row = np.concatenate((file1, file2), axis=1)
                for i in range(n + 2, n + widthdiv256):
                    file_next = np.array(Image.open(folderstart + master[i]))
                    full_row = np.concatenate((full_row, file_next), axis=1)
                return full_row

            files = os.listdir(folderstart)
            list = []
            for file in files:
                split_name = re.split(r'\D', file)

                list.append(split_name[0])

            list.sort(key=float)
            master = []
            for file in list:
                name = file + '_fake_B.png'
                master.append(name)

            picture = stitch_row(0)
            for n in range(widthdiv256, heighttimeswidth, widthdiv256):
                next_row = stitch_row(n)
                picture = np.concatenate((picture, next_row), axis=0)

            imageio.imwrite('/home/diego/Desktop/Output_Final/OutputStitched.png', picture)

            ## Count All Green Dots
            img = cv2.imread('/home/diego/Desktop/Output_Final/OutputStitched.png')

            lower_green = np.array([0, 245, 0])
            upper_green = np.array([40, 255, 40])

            mask = cv2.inRange(img, lower_green, upper_green)
            kernel = np.ones((5, 5), np.uint8)
            e = cv2.erode(mask, kernel, iterations=1)
            d = cv2.dilate(e, kernel, iterations=1)

            cnts = cv2.findContours(d, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            results = pd.DataFrame(columns=['X', 'Y'])

            for c in cnts:
                M = cv2.moments(c)
                if M["m00"] != 0:
                    cX = int(M["m10"] / M["m00"])
                    cY = int(M["m01"] / M["m00"])
                else:
                    M["m00"] = 1
                    cX = int(M["m10"] / M["m00"])
                    cY = int(M["m01"] / M["m00"])

                if (cX != 0 or cY != 0):
                    results = results.append({'X': cX, 'Y': cY}, ignore_index=True)
                    # cv2.circle(newlabeled, (cX, cY), 2,(255,255,255), -1)
                cv2.putText(d, "center", (cX - 4, cY - 4), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

            results.to_csv(r'/home/diego/Desktop/Output_Final/Results.csv', index=None, header=True)

            shutil.rmtree('/home/diego/Desktop/Input')
            os.mkdir('/home/diego/Desktop/Input')


        elif event.event_type == 'modified':
Example #27
def extract(img):

    mask = np.zeros((img.shape[0], img.shape[1]), np.uint8)
    cnts = cv2.findContours(img.copy(), cv2.RETR_CCOMP,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts,
                  key=lambda c:
                  (cv2.boundingRect(c)[2] * cv2.boundingRect(c)[3]),
                  reverse=True)
    for (i, c) in enumerate(cnts[0:1]):
        print(cv2.contourArea(c))
        cv2.drawContours(mask, [c], 0, (255), 1)
    #print(get_central_square(img))
    #cv2.drawContours(mask, [get_central_square(img)], 0, (255), 1)
    box = get_central_square(img)
    a = get_angle(box)
    img_crop, img_rot = crop_rect(img, a)
    box = get_central_square(img_rot)
    bo = points(box)
    ret = []
    b = box
    box = w_h(box)
    d = 1
    print('min', min(bo[:, 1]))
    ret.append(img_rot[int(1.05 * (min(bo[:, 1]))):int(0.95 * (max(bo[:, 1]))),
                       int(1.05 * (min(bo[:, 0]))):int(0.95 *
                                                       (max(bo[:, 0])))])

    ret.append(img_rot[0:int(max(bo[:, 1]) - 1 * int(box[1][1][0])),
                       int(min(bo[:, 0])):int(max(bo[:, 0]))])
    ret.append(img_rot[min(bo[:, 1]) + d * int(box[1][1][0]):img_rot.shape[0],
                       min(bo[:, 0]):max(bo[:, 0])])
    ret.append(img_rot[min(bo[:, 1]):max(bo[:, 1]),
                       0:min(img_rot.shape[0],
                             max(bo[:, 0]) - d * int(box[1][1][1]))])
    ret.append(img_rot[0:max(bo[:, 1]) - d * int(box[1][1][0]),
                       0:min(img_rot.shape[0],
                             max(bo[:, 0]) - d * int(box[1][1][1]))])
    ret.append(img_rot[min(bo[:, 1]) + d * int(box[1][1][0]):img_rot.shape[0],
                       0:min(img_rot.shape[0],
                             max(bo[:, 0]) - d * int(box[1][1][1]))])
    ret.append(img_rot[min(bo[:, 1]):max(bo[:, 1]),
                       max(0,
                           min(bo[:, 0]) +
                           d * int(box[1][1][1])):img_rot.shape[1]])
    ret.append(img_rot[0:max(bo[:, 1]) - d * int(box[1][1][0]),
                       max(0,
                           min(bo[:, 0]) +
                           d * int(box[1][1][1])):img_rot.shape[1]])
    ret.append(img_rot[min(bo[:, 1]) + d * int(box[1][1][0]):img_rot.shape[0],
                       max(0,
                           min(bo[:, 0]) +
                           d * int(box[1][1][1])):img_rot.shape[1]])

    print(len(box))
    cv2.drawContours(mask, [b], 0, (255), 1)
    r = []

    for i in range(0, len(ret)):
        ret[i] = recognize(ret[i])
        r.append(ret[i])

    return ret
Example #28
    frame = imutils.resize(frame, width=320)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    # construct a mask for the ball colour (red here), then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    ball_mask = cv2.inRange(hsv, redLower, redUpper)
    ball_mask = cv2.erode(ball_mask, None, iterations=2)
    ball_mask = cv2.dilate(ball_mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    ball_cnts = cv2.findContours(ball_mask.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
    ball_cnts = imutils.grab_contours(ball_cnts)
    center = None

    # only proceed if at least one contour was found
    if len(ball_cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        ball_c = max(ball_cnts, key=cv2.contourArea)
        ((ball_x, ball_y), radius) = cv2.minEnclosingCircle(ball_c)
        M = cv2.moments(ball_c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 5:
            # draw the circle and centroid on the frame,
Example #29
def detect_face(user_id, image):

    try:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    except cv2.error:
        a, g = cv2.imencode('.jpeg', image)
        a = g.tobytes()
        a = base64.b64encode(a)
        a = 'data:image/jpeg;base64,' + a.decode()
        return a, inactive_dict

    # user = UserModel.query.filter_by(user_id=user_id).first()
    # if user is None:
    #     add_user(user_id)

    # else:
    #     user.count = (user.count + 1) % 180
    chang_frame = imutils.resize(gray, width=100)
    response_dict = inactive_dict
    if not frame:
        frame.append(chang_frame)
    else:

        delta_frame = cv2.absdiff(frame[0], chang_frame)
        frame[0] = chang_frame

        _, thresh = cv2.threshold(delta_frame, 25, 255, cv2.THRESH_BINARY)
        thresh = cv2.dilate(thresh, None, iterations=2)

        contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
        contours = imutils.grab_contours(contours)

        for contour in contours:
            if cv2.contourArea(contour) > 30:
                response_dict["back_video"] = 1
                break  # one moving region is enough to flag background motion

    rects = detector(gray, 0)
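    # rects holds detections from what is assumed to be a dlib frontal
    # face detector; the second argument (0) is the number of
    # image-pyramid upsampling passes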

    if len(rects) > 1:
        response_dict["back_video"] = 2

    for rect in rects:
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        (x, y, w, h) = face_utils.rect_to_bb(rect)
        show_edges(image, x, y, w, (20, 255, 57))
        if len(shape) == 68:
            for (x, y) in shape:
                cv2.circle(image, (x, y), 1, (20, 255, 57), -1)
            #print_schema(shape, image)
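            # l_start/l_end and r_start/r_end are assumed to be landmark
            # index ranges from face_utils.FACIAL_LANDMARKS_IDXS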
            left_eye = shape[l_start:l_end]
            right_eye = shape[r_start:r_end]
            eyes = [left_eye, right_eye]
            response_dict["track"] = track_eye(eyes, image)
            val_1, val_2, d1, a1, a2 = calculating_data(shape)
            response_dict["eyebrow"] = val_1
            response_dict["high-wid"] = val_2
            response_dict["d1"] = d1
            response_dict["a1"] = a1
            response_dict["a2"] = a2

            # return the annotated frame as a base64 data URI together
            # with the populated response
            ok, buf = cv2.imencode('.jpeg', image)
            data = base64.b64encode(buf.tobytes())
            return 'data:image/jpeg;base64,' + data.decode(), response_dict

    # no 68-point landmark set was found; return the frame with the
    # inactive response
    ok, buf = cv2.imencode('.jpeg', image)
    data = base64.b64encode(buf.tobytes())
    return 'data:image/jpeg;base64,' + data.decode(), inactive_dict
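
# The JPEG-to-base64 data-URI encoding above appears three times; a minimal
# helper sketch (hypothetical name, using the same cv2 and base64 calls)
# would remove the duplication:
def encode_data_uri(image):
    # encode the frame as JPEG and wrap the bytes in a base64 data URI
    ok, buf = cv2.imencode('.jpeg', image)
    data = base64.b64encode(buf.tobytes())
    return 'data:image/jpeg;base64,' + data.decode()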
Example #30
    #frame = cv2.flip(frame, 1)
    #frame = cv2.resize(frame, (854, 480))
    # -------------------------------------------
    # process only every fourth frame to reduce the processing load
    if frame_count % 4:
        continue

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (15, 15), 0)
    thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
    chan = cv2.medianBlur(blurred, 5)
    edged = cv2.Canny(chan, 10, 100)
    # find contours in the thresholded image
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    if len(cnts):
        # loop over the contours
        for c in cnts:
            # compute the center of the contour
            M = cv2.moments(c)
            if(M["m10"]==0):
                continue
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            # draw the contour and center of the shape on the image
            cv2.drawContours(edged, [c], -1, (0, 255, 0), 2)
            cv2.circle(edged, (cX, cY), 7, (255, 255, 255), -1)
            cv2.putText(edged, "*", (cX - 20, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            cv2.imshow('frame', edged)
            #edged = cv2.resize(edged, (854, 480))
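
# Note: the loop above draws BGR color tuples onto the single-channel edge
# map, so only the first channel value takes effect; converting first would
# preserve the colors, e.g. edged = cv2.cvtColor(edged, cv2.COLOR_GRAY2BGR)
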
def findContours(red_mask_pp):
    # threshold the preprocessed red mask into a binary image
    #im_gauss = cv2.GaussianBlur(imgray, (5, 5), 0)
    ret, thresh = cv2.threshold(red_mask_pp, 1, 255, 0)
    #contours,hierarchy  = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # New lines added from pyimage
    contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    # c is only used by the commented-out extrema code below
    c = max(contours, key=cv2.contourArea)

    # This is not used since we only want to find the center and radius after filtering on area and circularity
    #cnt = contours[0]
    #M = cv2.moments(cnt)
    #cx = int(M['m10']/M['m00'])
    #cy = int(M['m01']/M['m00'])
    #        
    #(x,y),radius = cv2.minEnclosingCircle(cnt)
    #center_new = (int(x),int(y))
    #radius_new  = int(radius)
    
    
    #Next part is discarding based on area and circularity
    contours_area = []
    # calculate area and filter into new array
    for con in contours:
        area = cv2.contourArea(con)
        #print('area=',area)
        if 200 < area < 40000:
            contours_area.append(con)

    contours_circles = []

    # check if contour is of circular shape
    for con in contours_area:
        perimeter = cv2.arcLength(con, True)
        area = cv2.contourArea(con)
        # skip degenerate contours with no perimeter
        if perimeter == 0:
            continue
        circularity = 4*math.pi*(area/(perimeter*perimeter))
        #print ('circularity=',circularity)
        if 0.85 < circularity < 1.2:
            contours_circles.append(con)
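
    # Sanity check: a perfect circle of radius r has area pi*r**2 and
    # perimeter 2*pi*r, so circularity = 4*pi*area/perimeter**2 = 1.
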
    # Next part: finding the minimum enclosing circle of the first candidate
    # (assumes at least one circular contour survived the filtering above)
    cnt = contours_circles[0]
    (x, y), radius = cv2.minEnclosingCircle(cnt)
    center_new = (int(x), int(y))
    radius_new = int(radius)

    # pack (x, y, radius) into the (1, 1, 3) layout used downstream
    pos = np.hstack((x, y, radius))
    pos = np.reshape(pos, (1, 1, 3))


#    import ThorFunctions2 as TH
#    # The next part is only to find the extrema in N, E, S, W and thereafter drawing these and the perimeter itself
    # determine the most extreme points along the contour
#    extLeft = tuple(c[c[:, :, 0].argmin()][0])
#    extRight = tuple(c[c[:, :, 0].argmax()][0])
#    extTop = tuple(c[c[:, :, 1].argmin()][0])
#    extBot = tuple(c[c[:, :, 1].argmax()][0])
#    
#    diameter1=TH.calculateDistance(extLeft,extRight)
#    diameter2=TH.calculateDistance(extTop,extBot)
#    
#    if diameter1 >= diameter2:
#        radius=np.round(diameter1/2)
#    else:
#        radius=np.round(diameter2/2)
#    
#    #Only if you want to draw the extrema
#    cv2.drawContours(img, [c], -1, (0, 255, 0), 3)
#    cv2.circle(img, extLeft, 8, (0, 0, 255), -1)
#    cv2.circle(img, extRight, 8, (0, 255, 0), -1)
#    cv2.circle(img, extTop, 8, (255, 0, 0), -1)
#    cv2.circle(img, extBot, 8, (255, 255, 0), -1)
#    
    return pos
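
# A minimal usage sketch, assuming red_mask_pp is a binary mask produced by
# an upstream red-color segmentation step:
#     pos = findContours(red_mask_pp)
#     x, y, radius = pos[0, 0]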