Example #1
def __init__(self):
    self.cd = ColorSpecDet()
    self.Navigation = ard_i2c()
    self.sd = ShapeDetector()
    self.pose = calculateDist()
    self.hord = hullOrderer()
    self.searchState = "left"
Example #2
def get_center_point_list_image(contours):
    sd = ShapeDetector()

    preCX = 0
    preCY = 0

    area_list = []
    center_list = []

    for c in contours:
        shape = sd.detect(c)
        if shape == 'square':
            M = cv2.moments(c)

            area = cv2.contourArea(c)

            if M["m00"] > 0:
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])

                diffX = preCX - cX
                diffY = preCY - cY

                if diffX > 0 or diffY > 0:
                    if area > 10:
                        area_list.append(area)
                        center_list.append([cX, cY])

                        #print('cX=' + str(cX) + ', cY=' + str(cY))
                        #print('diffcX=' + str(diffX) + ', diffcY=' + str(diffY))

                preCX = cX
                preCY = cY

    histo = np.histogram(area_list, bins=7)

    max_value_histogram = max(histo[0])
    idx = [i for i, j in enumerate(histo[0]) if j == max_value_histogram]

    min_border = histo[1][idx[0]]

    try:
        max_border = histo[1][idx[0] + 2]
    except IndexError:
        max_border = 100000000

    cpoint_list = []
    for idx, area in enumerate(area_list):
        if area < max_border and area > min_border:
            cpoint_list.append(center_list[idx])

    return cpoint_list
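A minimal usage sketch for get_center_point_list_image; the input file name and the preprocessing steps below are assumptions (they do not appear in this snippet), and ShapeDetector must already be importable in the surrounding module.

# Usage sketch -- the file name and thresholding are assumptions, not part of the original example
import cv2
import imutils

img = cv2.imread('squares.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)           # works across OpenCV 2/3/4 return formats
centers = get_center_point_list_image(cnts)  # centers of squares whose area falls in the dominant histogram bin
print(centers)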
Example #3
def count_shapes(image, median_size=5):
    """
    :param image: The original image, in which you wanna reduce the noise.
    :param median_size: the matrix dimensions of the median filter
    :return: numpy array includes the contours in this image
    :draw: the original image with the contours detected and drawn on it and the names of each object in the picture.
    :print: the number of squares, circles and triangles in the image.
    """
    shapes = {
        "square": 0,
        "circle": 0,
        "triangle": 0,
        "rectangle": 0
    }

    shape_detector = ShapeDetector()

    contours = get_contours(image, median_size)

    # delete the border if it was added to the contours
    if contours[-1][0][0][0] == 0 and contours[-1][0][0][1] == 0 and contours[-1][-1][0][1] == 0:
        contours = contours[0:-1]

    for contour in contours:
        # compute the center of the contour, then detect the name of the shape using only the contour

        moments = cv2.moments(contour)
        # If the shape is very small (just noise), skip it.
        if moments["m00"] <= 0.0:
            continue

        contourX = int((moments["m10"] / moments["m00"]))
        contourY = int((moments["m01"] / moments["m00"]))

        shape = shape_detector.detect(contour)
        shapes[shape] += 1

        cv2.drawContours(image, [contour],
                         -1, (0, 255, 0), 2)
        cv2.putText(image, shape, (contourX, contourY),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, (255, 0, 0), 2)

    print("%d,%d,%d" % (shapes["square"], shapes["circle"], shapes["triangle"]))
    cv2.imshow('img', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
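A minimal call sketch for count_shapes; the file name is hypothetical, and get_contours plus ShapeDetector must already be defined in the surrounding module (they are not shown here).

# Usage sketch -- 'shapes.png' is a hypothetical file name
import cv2

img = cv2.imread('shapes.png')
count_shapes(img, median_size=5)  # prints "squares,circles,triangles" and shows the annotated image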
Example #4
    def TunnelNothingFound(self, img):
        fd_obj, objects_, x_color, y_color, w_color, h_color = cd.ColorDet([], img.copy(), "red")
        if fd_obj:
            ctr = np.array(objects_).reshape((-1, 1, 2)).astype(np.int32)
            rgb_image = img
            cv2.drawContours(rgb_image.copy(), [ctr], -1, (0, 255, 0), 2)
            # ctr = np.array(objects_).reshape((-1, 1, 2)).astype(np.int32)
            sd = ShapeDetector()
            shape_, w, h = sd.detect(ctr)
            if shape_ == "tunnel" or shape_ == "rectangle":
                if (w / h < 2.1):  # distinguishes a fully visible object from one seen only partially
                    nfd = 1  # object found

                else:
                    nfd = 0
            else:
                nfd = 0
        else:
            nfd = 0
        return nfd
Example #5
    def findBiggestObject(self, image):
        #Consider resizing image
        #Resizing image to a smaller factor can help better approximate shapes

        #Process Image. Setting the second arg to True will display dilation
        processedImage = self.preProcessImage(image, True)

        #Find the biggest contour in the processed image
        #Returns an array of points that make up the biggest contour
        contour = self.findBiggestContour(processedImage)

        #Detect the shape of the biggest contour
        sd = ShapeDetector()  #create instance of the ShapeDetector class
        shape, approx = sd.detect(
            contour
        )  #returns the determined shape and an array with the contour vertices

        #Compute the center of the contour
        M = cv2.moments(contour)
        cX = int((M["m10"] / M["m00"]))
        cY = int((M["m01"] / M["m00"]))

        #Add contour and text to the image
        contour = contour.astype("int")
        cv2.drawContours(image, [contour], -1, (0, 255, 0), 2)
        cv2.putText(image, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (255, 255, 255), 2)

        #Add contour vertices to the image
        red = [0, 0, 255]
        for pix in approx:
            pix1 = pix[0][0]
            pix2 = pix[0][1]
            cv2.circle(image, (pix1, pix2), 3, red, 2)

        #return the image with shape information, the shape name, and the center of the contour
        return (image, shape, cX, cY)
Example #6
class Bar2Wall:
    def __init__(self):
        pass

    while (b2w):
        filename = '/home/bahart/PycharmProjects/FreakImage/Data/Wall/' \
                   '200.jpg'
        PrimaryStates = ["NothingFound", "GreenWallDetected"]
        SecondaryStates = ["WallFarAway", "WallNearby"]
        MiddleState = ["Searching"]
        # the image will be read continuously
        bgr_image = cv2.imread(filename)
        image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        fd_obj, objects_, size_cnt, mask = cd.ColorDet([], image, "green")
        plt.imshow(mask)
        if fd_obj:
            ctr = np.array(objects_).reshape((-1, 1, 2)).astype(np.int32)
            rgb_image = image
            cv2.drawContours(rgb_image, [ctr], -1, (0, 255, 0), 2)
            plt.imshow(image)
            # ctr = np.array(objects_).reshape((-1, 1, 2)).astype(np.int32)
            sd = ShapeDetector()
            shape_, w, h = sd.detect(ctr)
            if shape_ == "rectangle":
                if (w / h < 1.3):  # distinguishes a fully visible object from one seen only partially
                    initial_state = PrimaryStates[1]
                else:
                    initial_state = PrimaryStates[0]
            else:
                initial_state = PrimaryStates[0]
        else:
            initial_state = PrimaryStates[0]

        if initial_state == PrimaryStates[0]:
            FlagArd = 1
            TurnLeft = 1
            middlestate_ = MiddleState[0]

    M = cv2.moments(ctr)
    cX = int((M["m10"] / M["m00"]))
    cY = int((M["m01"] / M["m00"]))
    c = ctr.astype("float")
    c = ctr.astype("int")
    cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
    cv2.putText(image, shape_, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (255, 255, 255), 2)
    plt.imshow(image)
Example #7
class Wall2Bar:
    def __init__(self):
        self.cd = ColorSpecDet()
        self.Navigation = ard_i2c()
        self.sd = ShapeDetector()
        self.pose = calculateDist()
        self.hord = hullOrderer()
        self.searchState = "left"
    def Wall2BarMain(self, start_flag, bgr_image):

        if start_flag:
            PrimaryStates = ["NothingFound", "BarDetected"]
            image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
            fd_obj, objects_, resImg, imagePoints, w_color, h_color = self.cd.ColorDet(image, "blue")
            # taking the upper left and right corners of the object
            stop_flag = 0
            if len(imagePoints) == 4:
                imagePoints = self.hord.organizer(imagePoints)
                x_color, y_color = imagePoints[1][0], imagePoints[1][1]
                x_color1, y_color1 = imagePoints[2][0], imagePoints[2][1]

    ##            cv2.namedWindow("Result", 1)
    ##            cv2.imshow("Result", resImg)
    ##            cv2.waitKey(0)
    ##            cv2.destroyAllWindows()
                if fd_obj:
                    ctr = np.array(objects_).reshape((-1, 1, 2)).astype(np.int32)
                    rgb_image = image
                    cv2.drawContours(rgb_image, [ctr], -1, (0, 255, 0), 2)
                    shape_, w, h = self.sd.detect(ctr)
                    if shape_ == "rectangle":
                        if (w / h < 5):  # distinguishes a fully visible object from one seen only partially
                            initial_state = PrimaryStates[1]
                            print("We found something..")
                        else:
                            initial_state = PrimaryStates[0]
                            print("We cannot find Bar..")
                    else:
                        initial_state = PrimaryStates[0]
                        print("We cannot find Bar..")
                else:
                    initial_state = PrimaryStates[0]
                    print("We cannot find Bar..")

                if initial_state == PrimaryStates[0]:
                    if (self.searchState == "left"):
                        self.Navigation.writeArduino("s1*")
                        print("Searching..")
                    else:
                        self.Navigation.writeArduino("s0*")
                        print("Searching..")
                    return stop_flag, resImg
                else:
                    bar_distL = x_color
                    dist_, angle = self.pose.distNAngle("bar", imagePoints)
                    print("The distance is %f and the angle is %f" % (dist_, angle))
                    if (x_color < 10) and (y_color < 10) and (abs(x_color1 - 640) < 10) and (abs(y_color1 - 0) < 10):
                        print("Bar will be passed, state is changed")
                        stop_flag = 1
                        return stop_flag, resImg

                    self.Navigation.writeArduino("b"+ np.str(int(bar_distL)) + np.str(int(dist_)) + "*")
                    return stop_flag, resImg
            else:
                return 1, resImg
        else:
            return 1,bgr_image
Example #8
original_image = cv2.imread(args["original"])
resized = imutils.resize(image, width=300)
ratio = image.shape[0] / float(resized.shape[0])

# convert the resized image to grayscale, blur it slightly,
# and threshold it
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.threshold(blurred, 150, 255, cv2.THRESH_BINARY)[1]

# find contours in the thresholded image and initialize the
# shape detector
cnts = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

cnts = cnts[0] if imutils.is_cv2() else cnts[1]
sd = ShapeDetector()
# loop over the contours
ci = 0
b = 0
radios = []
for c in cnts[:len(cnts) - 1]:
    # compute the center of the contour, then detect the name of the
    # shape using only the contour
    M = cv2.moments(c)
    if M["m00"] == 0:
        M["m00"] = 0.000001
    cX = int((M["m10"] / M["m00"]) * ratio)
    cY = int((M["m01"] / M["m00"]) * ratio)
    shape, radio = sd.detect(c)
    if shape != "pentagon":
        if shape == 'Circulo':
Example #9
#print(np.array_equal(cnts[76], cnts_fail[58]))

print(cnts[76][1])
print(cnts_fail[58][1])

print(has_same_contour(cnts[76], cnts_fail))

num_fails = 0
for c in cnts:
    if not has_same_contour(c, cnts_fail):
        cv2.drawContours(image_fail,[c], 0, (0,0,255), 1)
        num_fails+=1

for c in cnts:
    detector = ShapeDetector()
    area = cv2.contourArea(c)
    #cv2.drawContours(image,[c], 0, (0,0,255), 1)

    M = cv2.moments(c)

    shape = detector.detect(c)
    
    if shape == "circle":
        circles+=1
    elif shape == "square":
        squares+=1
    elif shape == "pentagon":
        pentagons+=1
    elif shape == "rectangle":
        rectangles+=1
Example #10
import cv2
import imutils
import numpy as np
from shape_detector import ShapeDetector

image = cv2.imread('shapes_and_colors.jpg')
resized = imutils.resize(image, width=300)
ratio = image.shape[0] / float(resized.shape[0])
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.threshold(blur, 60, 255, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
sd = ShapeDetector()
for i in cnts:
    M = cv2.moments(i)
    cX = int(M["m10"] / M["m00"] * ratio)
    cY = int(M["m01"] / M["m00"] * ratio)
    shape = sd.detect_shape(i)
    i = i.astype("float")
    i *= ratio
    i = i.astype("int")
    cv2.drawContours(image, [i], -1, (255, 0, 0), 3)
    cv2.putText(image, shape, (cX - 20, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (255, 255, 255), 2)

cv2.imshow('image', image)
cv2.waitKey(0)
Example #11
image = cv2.imread(args["image"])
resized = ht.resize(image, 400)
# resized= imutils.resize(image, width=300)
image = resized
ratio = image.shape[0] / float(resized.shape[0])

# gray, blur, threshold
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.bitwise_not(thresh)

# find contours and init shape detector
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if imutils.is_cv2() else contours[1]
sd = ShapeDetector()

#loop over contours
for c in contours:
    # get center of c, get shape name
    M = cv2.moments(c)
    area=cv2.contourArea(c)
    print("area of contour: {}\n".format(area))
    if M["m00"] > 0 and area >300 and area <800:
        cX = int((M["m10"] / M["m00"]) * ratio)
        cY = int((M["m01"] / M["m00"]) * ratio)
        shape = sd.detect(c)

        #mutiply contour (x,y) to resize ratio, then draw shape and name
        c = c.astype("float")
        c *= ratio
Example #12
def goruntu_isleme(grt):
    """
    Kameradan sağlanan görüntüyü (grt) verilen sınırlara göre inceleyerek matris renklerini belirler. Renklerin baş harflerini dizi olarak gönderir
    """

    hsv = cv2.cvtColor(grt, cv2.COLOR_BGR2HSV)
    rect_count = 0
    merkezler = []

    # Color bounds: (color code, (HSV lower bound, HSV upper bound))
    renk_sinir = [
    ('k', (np.array([0, 50, 50], dtype=np.uint8), np.array([12, 255, 255], dtype=np.uint8))),    # Red (kırmızı)
    ('m', (np.array([97, 75, 50], dtype=np.uint8), np.array([120, 255, 255], dtype=np.uint8))),  # Blue (mavi)
    ('s', (np.array([15, 180, 50], dtype=np.uint8), np.array([30, 255, 255], dtype=np.uint8))),  # Yellow (sarı)
    ]

    for (renk, (alt, ust)) in renk_sinir:

        mask = cv2.inRange(hsv, alt, ust)
        if renk = "k":
            k_ust = ( , )
            mask2 = cv2.inRange(hsv, np.array([170, 50, 50], dtype = np.uint8), np.array([179, 255, 255], dtype = np.uint8))
            mask = mask + mask2
        # Verilen rengi algıla ve şekilleri birleştir

        blurred = cv2.GaussianBlur(mask, (15, 15), 0)
        thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

        erndi = cv2.erode(thresh, None, iterations=2)
        erndi = cv2.dilate(erndi, None, iterations=2)

        # Identify the shapes in the image
        cnts = cv2.findContours(erndi.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        sd = ShapeDetector()

        # Examine the shapes one by one
        if len(cnts) > 0:
            for c in cnts:
                shape = sd.detect(c)
                if (cv2.contourArea(c) > 1000 and (shape == 'sq' or shape == 'rect')):  # focus on squares and rectangles above a certain size

                    # Determine the center of the shape
                    M = cv2.moments(c)
                    if M["m00"] != 0:
                        cX = int(M["m10"] / M["m00"])
                        cY = int(M["m01"] / M["m00"])
                    else:
                        cX, cY = 0, 0

                    if renk == 'k':
                        color = (0, 0, 255)
                    elif renk == 'm':
                        color = (255, 0, 0)
                    elif renk == 's':
                        color = (0, 255, 255)

                    cv2.drawContours(grt, [c], -1, color, 2)

                    # merkezler: list holding the coordinates and color of each shape
                    merkezler.append((cX, cY, renk))
                    rect_count += 1

        else:
            print('No shapes were detected')
            return 'Hata'  # 'Hata' (error) sentinel
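A minimal usage sketch for goruntu_isleme, assuming a single frame grabbed from the default camera; the capture index and the import are assumptions not shown in the snippet.

# Usage sketch -- camera index 0 is an assumption
import cv2

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
if ret:
    result = goruntu_isleme(frame)  # returns 'Hata' when no shapes are detected
cap.release()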
Example #13
from actuator import Actuator

import cv2

# System configuration
image_width = 640
image_height = 480
framerate = 32
minimum_area = 250
maximum_area = 100000
# [rows, columns]
grid_size = [3,3]

# Objects initialization
cam = CameraGrabber(image_width, image_height, framerate)
detector = ShapeDetector()
grid = GridSplitterAlgorithm(image_width, image_height, grid_size, 0.1)
actuators = Actuator(grid_size, [1,1])
# Initializing with negative numbers will always produce an update
old_grid_pos = [-1,-1]

cv2.namedWindow("Frames", cv2.WINDOW_NORMAL)

while True:
    image = cam.get_image()
    if image is not None:
        contours = detector.detect_contours(image)

        ball_location = detector.detect_circle()
        if ball_location:
            if (ball_location[0] > minimum_area) and (ball_location[0] < maximum_area):
Example #14
def main():
    cap = cv2.VideoCapture(0)

    while True:
        ret, frame = cap.read()
        kernel = np.ones((3, 3), np.float32) / 25

        # processing input frame
        frame = imutils.resize(frame, width=900)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        median = cv2.filter2D(frame, -1, kernel)
        blurred = cv2.GaussianBlur(frame, (5, 5), 0)  # 5,5

        hsv = cv2.cvtColor(median, cv2.COLOR_BGR2HSV)
        ratio = frame.shape[0] / float(frame.shape[0])

        red, green, blue, yellow = color_detector.color(hsv)

        color_ = {'red': red, 'green': green, 'blue': blue, 'yellow': yellow}

        for key, value in colors.items():
            # kernel = np.ones((9, 9), np.uint8)
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))

            mask = cv2.morphologyEx(color_[key], cv2.MORPH_OPEN, kernel)
            mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
            mask = cv2.bilateralFilter(mask, 11, 17, 17)

            cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            center = None
            sd = ShapeDetector()
            print("Length: {0}".format(len(cnts)))

            cnts = cnts[0] if len(cnts) == 2 else cnts[1]
            cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:1]
            # area = cv2.contourArea(cnts)
            # if area > 2000:

            contour_sizes = [(cv2.contourArea(contour), contour) for contour in cnts]
            area_thresh = 0

            area_thresh = 20000
            for c in cnts:
                area = cv2.contourArea(c)
                if area > area_thresh:
                    area = area_thresh
                    big_contour = c

                    M = cv2.moments(big_contour)
                    if M["m00"] == 0:  # this is a line
                        shape = "line"

                    else:
                        cX = int((M["m10"] / M["m00"]) * ratio)
                        cY = int((M["m01"] / M["m00"]) * ratio)
                        shape = sd.detect(big_contour)

                        rect = cv2.minAreaRect(big_contour)
                        frame = distance_detector.get_dist(rect, frame)

                        big_contour = big_contour.astype("float")
                        big_contour *= ratio
                        big_contour = big_contour.astype("int")
                        cv2.drawContours(frame, [big_contour], -1, colors[key], 2)
                        cv2.putText(frame, shape + " " + key, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX,
                                    0.8, colors[key], 2)
                        cv2.imshow('mask', mask)

            '''if len(cnts) > 0:

                c = max(contour_sizes, key=lambda x: x[0])[1]

                # c = max(cnts, key=cv2.contourArea)

                M = cv2.moments(c)
                if M["m00"] == 0:  # this is a line
                    shape = "line"

                else:
                    cX = int((M["m10"] / M["m00"]) * ratio)
                    cY = int((M["m01"] / M["m00"]) * ratio)
                    shape = sd.detect(c)

                    rect = cv2.minAreaRect(c)
                    frame = distance_detector.get_dist(rect, frame)

                    c = c.astype("float")
                    c *= ratio
                    c = c.astype("int")
                    cv2.drawContours(frame, [c], -1, colors[key], 2)
                    cv2.putText(frame, shape + " " + key, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX,
                                0.8, colors[key], 2)
                    cv2.imshow('mask', mask)'''

        cv2.imshow("Frame", frame)

        key = cv2.waitKey(1) & 0xFF
        # if the 'q' key is pressed, stop the loop
        if key == ord("q"):
            break

    cap.release()
    cv2.destroyAllWindows()
Example #15
            i = 1
        elif i == 1:

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.bilateralFilter(gray, 1, 10, 120)

            edges = cv2.Canny(gray, 10, 250)

            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))

            closed = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)

            cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            sd = ShapeDetector()

            c = max(cnts, key=cv2.contourArea)
            mask = np.zeros_like(frame)
            cv2.drawContours(mask, [c], -1, (255, 255, 255), -1)
            out = np.zeros_like(frame)
            out[mask == 255] = frame[mask == 255]

            (x, y, z) = np.where(mask == 255)
            (topx, topy) = (np.min(x), np.min(y))
            (bottomx, bottomy) = (np.max(x), np.max(y))
            total = bottomx - topx
            partition = int(total / 2)
            # print(topx, topy, bottomx, bottomy)
            one = out[topx:topx + partition + 25, topy:bottomy, ]
            two = out[topx + (1 * partition + 10):topx + (2 * partition),
Example #16
class Bar2Wall:
    def __init__(self):
        self.cd = ColorSpecDet()
        self.GWD = GreenWallDetected()
        self.Navigation = ard_i2c()
        self.sd = ShapeDetector()
        self.pose = calculateDist()
        self.hord = hullOrderer()
        self.searchState = "left"

    def Bar2WallMain(self, start_flag, bgr_image):
        if start_flag:
            PrimaryStates = ["NothingFound", "GreenWallDetected"]
            image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
            fd_obj, objects_, resImg, imagePoints, w_color, h_color = self.cd.ColorDet(
                image, "green")
            stop_flag = 0
            if len(imagePoints) == 4:
                imagePoints = self.hord.organizer(imagePoints)
                x_color, y_color = imagePoints[1][0], imagePoints[1][1]
                x_color1, y_color1 = imagePoints[2][0], imagePoints[2][1]
                if fd_obj:
                    ctr = np.array(objects_).reshape(
                        (-1, 1, 2)).astype(np.int32)
                    rgb_image = image
                    # cv2.drawContours(rgb_image, [ctr], -1, (0, 255, 0), 2)
                    shape_, w, h = self.sd.detect(ctr)
                    if shape_ == "rectangle":
                        if (w / h < 5):
                            initial_state = PrimaryStates[1]
                            print("We found something..")
                        else:
                            initial_state = PrimaryStates[0]
                            print("We cannot find Tunnel..")
                    else:
                        initial_state = PrimaryStates[0]
                        print("We cannot find Tunnel..")
                else:
                    initial_state = PrimaryStates[0]
                    print("We cannot find Tunnel..")

                # end of first if block, rectangle decision has been made

                if initial_state == PrimaryStates[0]:
                    if (self.searchState == "left"):
                        self.Navigation.writeArduino("s1*")
                        print("Searching..")
                    else:
                        self.Navigation.writeArduino("s0*")
                        print("Searching..")
                    return stop_flag, resImg

                else:
                    half_ = self.GWD.GreenWallDetected(x_color, x_color1)
                    if half_ == 1:
                        if (x_color < 10):
                            # right is not seen
                            i1 = "x"
                            i2 = "y"
                            self.searchState = "left"
                            print(
                                "Half of the wall is detected, Turning Right is needed"
                            )
                        else:
                            i1 = "y"
                            i2 = "x"
                            self.searchState = "right"
                            print(
                                "Half of the wall is detected, Turning Left is needed"
                            )
                        self.Navigation.writeArduino("w+9999" + i1 + i2 + "*")
                        return 0, resImg

                    elif half_ == 0:
                        print("Wall is detected, " "Navigation is Complete")
                        #print("The points are (%f,%f) and (%f,%f)"%(x_color, y_color, x_color1, y_color1))
                        # calculating the pose of the object
                        dist_, angle = self.pose.distNAngle(
                            "wall", imagePoints)
                        print("The corners are", imagePoints)
                        print("The distance is %f and the angle is %f" %
                              (dist_, angle))
                        ##                          cv2.namedWindow("Result", 1)
                        ##                          cv2.imshow("Result", resImg)
                        ##                          cv2.waitKey(0)
                        ##                          cv2.destroyAllWindows()
                        if angle < 0:
                            s = "-"
                        elif angle > 0:
                            s = "+"
                        else:
                            s = "+"

                        if abs(angle) < 10:
                            ang_ = "0" + np.str(int(abs(angle)))
                        else:
                            ang_ = np.str(int(abs(angle)))
                        if abs(dist_) < 10:
                            dist_str = "0" + np.str(abs(int(dist_)))
                        else:
                            dist_str = np.str(abs(int(dist_)))

                        self.Navigation.writeArduino("w" + s + ang_ +
                                                     dist_str + "yy*")
                        if (abs(angle) <= 8) and (abs(dist_) <= 10):
                            print("Tunnel will be passed, state is changed")
                            stop_flag = 1
                            print("State Change")
                            return 1, resImg
                        return 0, resImg
            elif (len(imagePoints) == 3):
                imagePoints = self.hord.organizer(imagePoints)
                x_color, y_color = imagePoints[0][0], imagePoints[0][1]
                x_color1, y_color1 = imagePoints[1][0], imagePoints[1][1]
                print("Three point half")

                if (x_color < 10):
                    # right is not seen
                    i1 = "x"
                    i2 = "y"
                    self.searchState = "right"
                    self.Navigation.writeArduino("w+9999" + i1 + i2 + "*")
                    print(
                        "Half of the wall is detected, Turning Right is needed"
                    )

                elif (abs(x_color1 - 640) < 10):
                    i1 = "y"
                    i2 = "x"
                    self.searchState = "left"
                    self.Navigation.writeArduino("w+9999" + i1 + i2 + "*")
                    print(
                        "Half of the wall is detected, Turning Left is needed")

                return 0, resImg
            else:
                return 0, resImg

        else:
            return 0, bgr_image
Example #17
class Bar2Tunnel:
    def __init__(self):
        self.cd = ColorSpecDet()
        self.RTD = RedTunnelDetected()
        self.sd = ShapeDetector()
        self.pose = calculateDist()
        self.Navigation = ard_i2c()
        self.hord = hullOrderer()
        self.searchState = "left"

    def Bar2TunnelMain(self, start_flag, bgr_image):
        if start_flag:
            PrimaryStates = ["NothingFound", "TunnelDetected"]
            image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
            fd_obj, objects_, resImg, imagePoints, w_color, h_color = self.cd.ColorDet(
                image.copy(), "red")
            # taking the upper left and right corners of the object
            stop_flag = 0
            if (len(imagePoints) == 4):
                imagePoints = self.hord.organizer(imagePoints)
                x_color, y_color = imagePoints[1][0], imagePoints[1][1]
                x_color1, y_color1 = imagePoints[2][0], imagePoints[2][1]
                if fd_obj:
                    ctr = np.array(objects_).reshape(
                        (-1, 1, 2)).astype(np.int32)
                    shape_, w, h = self.sd.detect(ctr)
                    if shape_ == "rectangle" or shape_ == "tunnel":
                        if w_color / h_color < 5:
                            initial_state = PrimaryStates[1]
                            print("We found something..")
                        else:
                            initial_state = PrimaryStates[0]
                            print("We cannot find Tunnel..")
                    else:
                        initial_state = PrimaryStates[0]
                        print("We cannot find Tunnel..")
                else:
                    initial_state = PrimaryStates[0]
                    print("We cannot find Tunnel..")

                if initial_state == PrimaryStates[0]:
                    if (self.searchState == "left"):
                        self.Navigation.writeArduino("s1*")
                        print("Searching..")
                    else:
                        self.Navigation.writeArduino("s0*")
                        print("Searching..")
                    return 0, resImg

                else:
                    half_ = self.RTD.RedTunnelDetected(x_color, x_color1)
                    if half_ == 1:
                        if (x_color < 10):
                            # right is not seen
                            i1 = "x"
                            i2 = "y"
                            self.searchState = "left"
                            print(
                                "Half of the tunnel is detected, Turning Right is needed"
                            )
                        else:
                            i1 = "y"
                            i2 = "x"
                            self.searchState = "right"
                            print(
                                "Half of the tunnel is detected, Turning Left is needed"
                            )
                        self.Navigation.writeArduino("t+9999" + i1 + i2 + "*")

                        return 0, resImg

                    elif half_ == 0:
                        print("Tunnel is detected, " "Navigation is Complete")
                        #print("The points are (%f,%f) and (%f,%f)"%(x_color, y_color, x_color1, y_color1))
                        dist_, angle = self.pose.distNAngle(
                            "tunnel", imagePoints)
                        print("The distance is %f and the angle is %f" %
                              (dist_, angle))
                        ##                    cv2.namedWindow("Result", 1)
                        ##                    cv2.imshow("Result", resImg)
                        ##                    cv2.waitKey(0)
                        ##                    cv2.destroyAllWindows()
                        if angle < 0:
                            s = "-"
                        elif angle > 0:
                            s = "+"
                        else:
                            s = "+"
                        if abs(angle) < 10:
                            ang_ = "0" + np.str(int(abs(angle)))
                        else:
                            ang_ = np.str(int(abs(angle)))
                        if abs(dist_) < 10:
                            dist_str = "0" + np.str(abs(int(dist_)))
                        else:
                            dist_str = np.str(abs(int(dist_)))
                        self.Navigation.writeArduino("t" + s + ang_ +
                                                     dist_str + "yy*")

                        if (abs(angle) <= 8) and (abs(dist_) <= 10):
                            print("Tunnel will be passed, state is changed")
                            stop_flag = 1
                            print("State Change")
                            return 1, resImg
                        return 0, resImg
            elif (len(imagePoints) == 3):
                imagePoints = self.hord.organizer(imagePoints)
                x_color, y_color = imagePoints[0][0], imagePoints[0][1]
                x_color1, y_color1 = imagePoints[1][0], imagePoints[1][1]
                print("Three point half")
                if ((y_color < 36) and (y_color1 < 36)):
                    self.Navigation.writeArduino("t+3005yy*")
                    print("Going Backwards, Freak can't see")
                elif (x_color < 10):
                    # right is not seen
                    i1 = "x"
                    i2 = "y"
                    self.searchState = "right"
                    self.Navigation.writeArduino("t+9999" + i1 + i2 + "*")
                    print(
                        "Half of the tunnel is detected, Turning Right is needed"
                    )

                elif (abs(x_color1 - 640) < 10):
                    i1 = "y"
                    i2 = "x"
                    self.searchState = "left"
                    self.Navigation.writeArduino("t+9999" + i1 + i2 + "*")
                    print(
                        "Half of the tunnel is detected, Turning Left is needed"
                    )

                else:
                    print("3 Points - IDLE")
                    self.Navigation.writeArduino("i*")
                return 0, resImg
            else:
                print("Object is not find..")
                return 0, resImg
        else:
            return 1, bgr_image
Example #18
avg_blue = np.average(b)

thresh_red = cv2.threshold(r, avg_red + 10, 255, cv2.THRESH_BINARY)[1]
thresh_green = cv2.threshold(g, avg_green + 10, 255, cv2.THRESH_BINARY)[1]
thresh_blue = cv2.threshold(b, avg_blue + 10, 255, cv2.THRESH_BINARY)[1]

show_image("blue", thresh_blue)
show_image("green", thresh_green)
show_image("red", thresh_red)

for item in [thresh_green, thresh_blue, thresh_red]:
    contours = cv2.findContours(item.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)

    contours = contours[0] if imutils.is_cv2() else contours[1]
    sd = ShapeDetector()

    # loop over the contours
    for contour in contours:
        print("contour")
        # compute the center of the contour, then detect the name of the
        # shape using only the contour
        M = cv2.moments(contour)
        if M["m00"] == 0:
            continue
        cX = int((M["m10"] / M["m00"]) * ratio)
        cY = int((M["m01"] / M["m00"]) * ratio)
        shape = sd.detect(contour)

        # multiply the contour (x, y)-coordinates by the resize ratio,
        # then draw the contours and the name of the shape on the image