def red_filtering(self, frame):
     # Adapted from
     # https://stackoverflow.com/questions/42840526/opencv-python-red-ball-detection-and-tracking
     #
     # convert the input stream into HSV color space
     hsv_conv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
     # then the image is blurred
     hsv_blurred_img = cv2.medianBlur(hsv_conv_img, 9)
     # because hue wraps up and to extract as many "red objects" as possible,
     # we define lower and upper boundaries for brighter and for darker red shades
     bright_red_mask = cv2.inRange(hsv_blurred_img,
                                   self.bright_red_lower_bounds,
                                   self.bright_red_upper_bounds)
     dark_red_mask = cv2.inRange(hsv_blurred_img,
                                 self.dark_red_lower_bounds,
                                 self.dark_red_upper_bounds)
     # after masking the red shades out, I add the two images
     weighted_mask = cv2.addWeighted(bright_red_mask, 1.0, dark_red_mask,
                                     1.0, 0.0)
     # then the result is blurred
     blurred_mask = cv2.GaussianBlur(weighted_mask, (9, 9), 3, 3)
     # some morphological operations (erosion followed by dilation) to remove small blobs
     erode_element = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
     dilate_element = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 8))
     eroded_mask = cv2.erode(blurred_mask, erode_element)
     dilated_mask = cv2.dilate(eroded_mask, dilate_element)
     return dilated_mask
Example #2
    def get_letter_type(self, img_hsv=None, bgColor=None, img=None):
        if img_hsv is None:
            img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        if bgColor is None:
            for key, colors in self.BG_COLOR.items():
                temp = cv2.inRange(img_hsv, *colors)

                if np.sum(temp == 255) / img_hsv.size > 0.05:
                    bgColor = key
                    break

        if bgColor == 'yellow':
            temp = cv2.inRange(img_hsv, *self.LETTER_COLOR['black'])

            if np.sum(temp == 255) / img_hsv.size > 0.005:
                return self.LETTER_TYPE_PENDING
            else:
                return self.LETTER_TYPE_ERROR
        elif bgColor == 'gray':
            temp = cv2.inRange(img_hsv, *self.LETTER_COLOR['black'])

            if np.sum(temp == 255) / img_hsv.size > 0.005:
                return self.LETTER_TYPE_UNCHECK
            else:
                return self.LETTER_TYPE_EMPTY
        elif bgColor == 'white':
            return self.LETTER_TYPE_ALREADY
        elif bgColor == 'green':
            return self.LETTER_TYPE_FINISHED

        return self.LETTER_TYPE_UNEXCEPTED
Example #3
def get_contours(img, range_list):
    """
    根据颜色提取图像
    """
    # 从BGR转换到HSV
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = None
    for hsv_range in range_list:
        if mask is not None:
            mask = cv2.bitwise_or(mask, cv2.inRange(hsv, hsv_range[0], hsv_range[1]))
        else:
            mask = cv2.inRange(hsv, hsv_range[0], hsv_range[1])

    _, binary = cv2.threshold(mask, 0, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # define the structuring element
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
    # opening then closing: opening removes background noise, closing fills holes inside the targets
    opened = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)
    closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel)

    # find the external contours in the processed mask
    contours, _ = cv2.findContours(closed, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    return contours
Example #4
def boxDetection2411(rgbaImg, width, height):
	"""
	Takes RGBA image along with height and width and returns positions of cubes in reference frame
	"""
	rgba = bytes(rgbaImg)
	# Make a new image object from the bytes
	img = Image.frombytes('RGBA', (width, height), rgba)
	opencv_img = np.array(img)

	# Converting RGBA image to RGB(dropping alpha channel)
	rgbImage = cv2.cvtColor(opencv_img, cv2.COLOR_RGBA2RGB)
	# Converting RGB image to HSV(For Color detection)
	hsvFrame = cv2.cvtColor(rgbImage, cv2.COLOR_RGB2HSV)
	# Converting RGB image to BGR
	imageFrame = cv2.cvtColor(rgbImage, cv2.COLOR_RGB2BGR)

	# Set range for red color and define mask
	red_lower = np.array([0, 70, 50], np.uint8)
	red_upper = np.array([10, 255, 255], np.uint8)
	red_mask = cv2.inRange(hsvFrame, red_lower, red_upper)

	# Set range for green color and define mask
	green_lower = np.array([40, 52, 72], np.uint8)
	green_upper = np.array([70, 255, 255], np.uint8)
	green_mask = cv2.inRange(hsvFrame, green_lower, green_upper)
Example #5
def skin_extract(image):

    img_HSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    #skin color range for hsv color space
    HSV_mask = cv2.inRange(img_HSV, (0, 15, 0), (17, 170, 255))
    #cv2.imshow('HSV mask_before',HSV_mask)
    #cv2.imwrite('HSV mask_before.jpg',HSV_mask)
    HSV_mask = cv2.morphologyEx(HSV_mask, cv2.MORPH_OPEN,
                                np.ones((3, 3), np.uint8))
    #cv2.imshow('HSV mask',HSV_mask)
    #cv2.imwrite('HSV mask.jpg',HSV_mask)
    # converting from BGR to YCbCr color space
    img_YCrCb = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
    # skin color range for YCrCb color space
    YCrCb_mask = cv2.inRange(img_YCrCb, (0, 135, 85), (255, 180, 135))
    #cv2.imshow('YCrCb_mask_before',YCrCb_mask)
    #cv2.imwrite('YCrCb_mask_before.jpg',YCrCb_mask)
    YCrCb_mask = cv2.morphologyEx(YCrCb_mask, cv2.MORPH_OPEN,
                                  np.ones((3, 3), np.uint8))
    #cv2.imshow('YCrCb_mask',YCrCb_mask)
    #cv2.imwrite('YCrCb_mask.jpg',YCrCb_mask)

    # merge the two skin detections (YCrCb and HSV)
    global_mask = cv2.bitwise_and(YCrCb_mask, HSV_mask)
    #cv2.imshow('global_mask_before', global_mask)
    cv2.imwrite('global_mask_before.jpg', global_mask)
    global_mask = cv2.medianBlur(global_mask, 3)
    global_mask = cv2.morphologyEx(global_mask, cv2.MORPH_OPEN,
                                   np.ones((4, 4), np.uint8))
    #cv2.imshow('global_mask', global_mask)
    cv2.imwrite('global_mask.jpg', global_mask)

    return global_mask
Example #6
def base():
    if request.method == 'GET':
        return "<h1>Crop AI</h1>"
    if request.method == 'POST':
        if 'InputImg' not in request.files:
            print("No file part")
            return redirect(request.url)
        file = request.files['InputImg']
        if file.filename == '':
            print('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filestr = request.files['InputImg'].read()
            img = cv2.imdecode(np.frombuffer(filestr, np.uint8),
                               cv2.IMREAD_COLOR)

            img = cv2.resize(img, (96, 96), interpolation=cv2.INTER_AREA)

            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

            # find the green color
            mask_green = cv2.inRange(hsv, (36, 0, 0), (86, 255, 255))
            # find the brown color
            mask_brown = cv2.inRange(hsv, (8, 60, 20), (145, 255, 255))
            # find the yellow color in the leaf
            mask_yellow = cv2.inRange(hsv, (5, 42, 143), (145, 255, 255))
            # find the black color in the leaf
            mask_black = cv2.inRange(hsv, (100, 100, 100), (127, 127, 127))

            # find any of the four colors(green or brown or yellow or black) in the image
            mask = cv2.bitwise_or(mask_green, mask_brown)
            mask = cv2.bitwise_or(mask, mask_yellow)
            mask = cv2.bitwise_or(mask, mask_black)

            # Bitwise-AND mask and original image
            res = cv2.bitwise_and(img, img, mask=mask)

            # Gaussian blur with 3x3 kernel
            blur_img = cv2.GaussianBlur(res, (3, 3), 0)

            # Histogram equalization
            B, G, R = cv2.split(blur_img)
            output_R = cv2.equalizeHist(R)
            output_G = cv2.equalizeHist(G)
            output_B = cv2.equalizeHist(B)
            img = cv2.merge((output_R, output_G, output_B))

            img = img / 255

            img_array = np.expand_dims(img, axis=0)

            output = label_dictionary[model.predict(img_array)[0].argmax()]

        return output
Example #7
def getArmorColor(raw_frame, rects):
    '''
    Classify the armour colour of each detected robot target.
    :param rects: rects[0]=x_center, rects[1]=y_center, rects[2]=width, rects[3]=height
           raw_frame: raw frame captured by the camera
    :return: armor_color: one entry per rect (Red / Blue / Others)
    '''
    Red = 1
    Blue = 2
    Others = 3

    hsv = cv.cvtColor(raw_frame, cv.COLOR_BGR2HSV)
    # threshold the original image, keep only blue
    frame_blue = cv.inRange(hsv, (100, 43, 46), (124, 255, 255))
    # threshold the original image, keep only red
    frame_red = cv.inRange(hsv, (156, 43, 46), (180, 255, 255))
    # # threshold the original image, keep only blue
    # frame_blue = cv.inRange(hsv,(130, 100, 0),(255, 255, 65))
    # # threshold the original image, keep only red
    # frame_red = cv.inRange(hsv,(0,0,140),(70, 70, 255))

    # cv.imshow("frame_blue",frame_blue)
    # cv.imshow("frame_red",frame_red)

    armor_color = []

    threshold = 100

    for rect in rects:
        start_x, start_y, end_x, end_y = [int(i) for i in rect]

        blue_count = 0
        red_count = 0
        # start_x=rect[0]-rects[2]/2
        # end_x=rect[0]+rects[2]/2
        #
        # start_y=rect[1]-rects[3]/2
        # end_y=rect[1]+rects[3]/2

        for x in range(start_x, end_x):
            for y in range(start_y, end_y):
                if frame_blue[y][x] != 0:
                    blue_count = blue_count + 1
                if frame_red[y][x] != 0:
                    red_count = red_count + 1

        if (blue_count > threshold) and (blue_count > red_count):
            armor_color.append(Blue)
        elif (red_count > threshold) and (red_count > blue_count):
            armor_color.append(Red)
        else:
            armor_color.append(Others)

    return armor_color
Example #8
def process():
    """
	turn the image into hsv and make red mask to do CHT
	"""

    image = cv2.imread("camImage.png")
    #blur to improve circle detection through noise
    #get HSV mapping for red values
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    #create mask for lower red HSV threshold
    mask_lower = cv2.inRange(hsv, RED_MIN_LOWER, RED_MAX_LOWER)

    #create mask for upper red HSV threshold
    mask_upper = cv2.inRange(hsv, RED_MIN_UPPER, RED_MAX_UPPER)

    #combine both masks
    mask = cv2.addWeighted(mask_lower, 1.0, mask_upper, 1.0, 0.0)

    kernel = np.ones((5, 5), np.uint8)

    #erode and dilate then dilate and erode
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

    cv2.imwrite("hsvMask.png", mask)

    #apply Gaussian Blur to improve detection and remove some noise
    blur = cv2.GaussianBlur(mask, (11, 11), 0)
    cv2.imwrite("blurredImg.png", blur)

    #perform CHT
    circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, 1, 40, param1=800,
                               param2=150, minRadius=20, maxRadius=0)

    try:
        circles = np.uint16(np.around(circles))
    except AttributeError:
        print("No Circles Found! Adjust parameters of CHT.")

    try:
        for i in circles[0, :]:
            # draw the outer circle
            cv2.circle(image, (i[0], i[1]), i[2], (0, 255, 0), 2)
            # draw the center of the circle
            cv2.circle(image, (i[0], i[1]), 2, (0, 0, 255), 3)
    except TypeError:
        print("No Circles Found! Adjust parameters of CHT")

    #write out image with drawn circles
    cv2.imwrite("detectedCircles.png", image)
Example #9
    def _filter_red(self, frame):
        hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        frame_threshed1 = cv2.inRange(hsv_img, self._LOWER_RED_MIN,
                                      self._LOWER_RED_MAX)
        frame_threshed2 = cv2.inRange(hsv_img, self._UPPER_RED_MIN,
                                      self._UPPER_RED_MAX)
        frame_threshed_red = cv2.bitwise_or(frame_threshed1, frame_threshed2)

        # close gaps in red objects
        kernel = np.ones((5, 5), np.uint8)
        frame_threshed_red = cv2.morphologyEx(frame_threshed_red,
                                              cv2.MORPH_CLOSE, kernel)
        frame_threshed_red = cv2.bitwise_not(frame_threshed_red)
        return frame_threshed_red
Example #10
def identify(image):

    mask_y = cv2.inRange(image, y_lower, y_upper)
    y = cv2.countNonZero(mask_y)

    mask_r = cv2.inRange(image, r_lower, r_upper)
    r = cv2.countNonZero(mask_r)

    print(y, r)

    if y > r:
        return "Banana"
    else:
        return "Apple"
Example #11
def color_segmenter(frame):

    frame = np.array(frame)
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # low_colors = [[90, 100,50], [40, 100, 50], [0,100,50],[0,0,0],[0,0,230],[220,25,153]]
    # high_colors = [[120, 255,255], [60,255,255], [20, 255, 255],[180,255,20],[255,5,255],[240,50,178]]

    low_colors = [[90, 100, 50], [30, 85, 85], [0, 100, 50], [0, 0, 0],
                  [0, 0, 200], [0, 0, 66]]
    high_colors = [[120, 255, 255], [102, 255, 255], [20, 255, 255],
                   [180, 255, 20], [145, 60, 255], [255, 33, 200]]

    color_names = ["blue", "green", "red", "black", "white", "gray"]

    # don't change these values without consulting the group
    # white: lower V was 180

    ans = ("UNK", 0)

    path = "color-cars/"

    # red_low=[170,100,50]
    # red_high=[180,255,255]

    red_low = [160, 150, 90]
    red_high = [180, 255, 255]
    for x in range(len(color_names)):
        if (x == 2):
            mask1 = cv2.inRange(hsv_frame, np.array(low_colors[x]),
                                np.array(high_colors[x]))
            mask2 = cv2.inRange(hsv_frame, np.array(red_low),
                                np.array(red_high))
            color_mask = mask1 + mask2
        else:
            color_mask = cv2.inRange(hsv_frame, np.array(low_colors[x]),
                                     np.array(high_colors[x]))
        #color_region=cv2.bitwise_and(frame,frame, mask=color_mask)
        color_mask = np.array(color_mask)
        flatten_array = color_mask.flatten()
        ad = np.unique(flatten_array, return_counts=True)
        if len(ad[1]) == 1:
            continue
        elif (ad[1][1] > ans[1]):
            ans = (color_names[x], ad[1][1])
        #cv2.imwrite(path + str(img_name) + "_" + str(color_names[x]) + str(".jpg"), color_mask)

    return (ans[0])
Example #12
	def _get_balls(self, image, color, bounds, cam_id = 1):

		# converting the input stream into HSV color space
		hsv_conv_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

		bright_mask = cv2.inRange(hsv_conv_img, bounds[0], bounds[1])

		blurred_mask = cv2.GaussianBlur(bright_mask, (9, 9), 3, 3)

		# some morphological operations (erosion followed by dilation) to remove small blobs
		erode_element = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
		dilate_element = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 8))
		eroded_mask = cv2.erode(blurred_mask, erode_element)
		dilated_mask = cv2.dilate(eroded_mask, dilate_element)

		detected_circles = cv2.HoughCircles(dilated_mask, cv2.HOUGH_GRADIENT, **HOUGH_PARAMS)
		balls = []

		if detected_circles is not None:
			for circle in detected_circles[0, :]:
				x = circle[0]
				y = circle[1]
				r = circle[2]
				circled_orig = cv2.circle(image, (x, y), r, (0, 255, 0), thickness=2)
				
				# locate detected balls
				# https://www.pyimagesearch.com/2015/01/19/find-distance-camera-objectmarker-using-python-opencv/
				distance = self.find_position(x, y, r, cam_id)
				balls.append((x, y, r, color))
			cv2.imshow('circled_orig', circled_orig)
			cv2.waitKey(0)

		return balls
Example #13
def get_red_armour(raw_frame, rects):
    '''
    Select all red-side robot targets from the detected robot rectangles.
    :param rects: rects[0]=x_center, rects[1]=y_center, rects[2]=width, rects[3]=height
           raw_frame: raw frame captured by the camera
    :return: red_armour: the rects that contain a red robot target
    '''
    # threshold the original image (BGR), keep only red
    frame = cv.inRange(raw_frame, (0, 0, 140), (70, 70, 255))
    red_armour = []
    threshold = 10

    for rect in rects:
        account = 0
        start_x = int(rect[0] - rect[2] / 2)
        end_x = int(rect[0] + rect[2] / 2)

        start_y = int(rect[1] - rect[3] / 2)
        end_y = int(rect[1] + rect[3] / 2)

        for x in range(start_x, end_x):
            for y in range(start_y, end_y):
                if frame[y][x] != 0:
                    account = account + 1
        # keep the rect once enough red pixels have been found
        if account >= threshold:
            red_armour.append(rect)
    return red_armour
Example #14
    def _get_balls(self, filename, color, cam_id):
        # print('looking for ' + color)
        image = cv2.imread(filename)

        bounds = COLOR_BOUNDS[color]

        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        bright = cv2.inRange(hsv, bounds[0], bounds[1])
        blur = cv2.GaussianBlur(bright, (9, 9), 3, 3)

        # cv2.imshow('circled_orig', blur)
        # cv2.waitKey(0)

        detected_circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT,
                                            **HOUGH_PARAMS)
        balls = []

        if detected_circles is not None:
            for circle in detected_circles[0, :]:
                x = circle[0]
                y = circle[1]
                r = circle[2]
                # circled_orig = cv2.circle(image, (x, y), r, (0, 255, 0), thickness = 2)

                # locate detected balls
                distance = self._find_distance(x, y, r, cam_id)
                balls.append(distance)
                # balls.append((x, y, r, color))
                # balls.append()
            # cv2.imshow('circled_orig', circled_orig)
            # cv2.waitKey(0)
        # print('found')
        # print(balls)
        return balls
Example #15
    def get_text(self, img, color=None, lowerb=None, upperb=None, reg=None):
        if color is None:
            color = 'black'

        if lowerb is None:
            lowerb = self.hsv_color[color][0]

        if upperb is None:
            upperb = self.hsv_color[color][1]

        temp = Image.fromarray(img)
        text = pytesseract.image_to_string(temp, config=self.TESSERACT_CONFIG)

        if len(img.shape) == 3 and img.shape[2] == 3:
            dilate_kernel = np.ones((2, 2), np.uint8)
            hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            bi_img = cv2.inRange(hsv_img, lowerb, upperb)
            bi_img = cv2.dilate(bi_img, dilate_kernel, iterations=1)
            bi_img = Image.fromarray(255 - bi_img)
            text += pytesseract.image_to_string(bi_img, config=self.TESSERACT_CONFIG)

        if reg is None:
            return text
        else:
            match = re.search(reg, text)
            if match is None:
                raise NotFound()
            else:
                return match
Example #16
def line_slice(roi_kreuzung, dieser_frame, offset):
    """
    Zeichnet auf auf einem Schnit am gegebenen Y-Offset im Kamerabild die gefundenen
    Linienstückchen als grüne Rechtecke ein.
    Wird die eine Linie zu breit, wird das Rechteck rot eingefärbt.
    Das Zentrum aller Linienstückchen in einem Schnitt wird mit einem Punkt markiert.
    """
    yoffset = PIXEL_HOEHE - (PIXEL_HOEHE - 200) - 20 - offset
    # Horizontalen Streifen an y-Position "offset" herausschneiden
    roi_line = dieser_frame[PIXEL_HOEHE - 20 - offset:PIXEL_HOEHE - 1 - offset,
                            0:PIXEL_BREITE - 1]
    # in HSV-Farbschema umwandeln
    hsv = cv2.cvtColor(roi_line, cv2.COLOR_BGR2HSV)
    # Maske erstellen (Monochromes Bild: Linie weiß, alles andere schwarz)
    black_mask = cv2.inRange(hsv, black_lower_limit, black_upper_limit)
    # Konturen extrahieren
    _, konturen, _ = cv2.findContours(black_mask, cv2.RETR_TREE,
                                      cv2.CHAIN_APPROX_SIMPLE)
    cx = 0
    cy = yoffset + 5
    cnt = 0
    farbe = GRUEN
    # Liste der X-Positionen der Zentren der gefundenen Linienstücke
    ret = []
    is_kreuzung = False
    for kontur in konturen:
        # Rechteck um die Kontur erhalten
        x, y, w, h = cv2.boundingRect(kontur)
        # zu kleine Konturen wegfiltern
        if w > 10:
            # zu große Konturen rot einfärben
            if w > 150:
                farbe = ROT
                is_kreuzung = True
            # sonst grün einfärben
            else:
                farbe = GRUEN
            # Rechteck um die Kontur zeichnen
            cv2.rectangle(roi_kreuzung, (x, y + yoffset),
                          (x + w, y + yoffset + h), farbe, 2)
            # Summe aller x-Positionen der Zentren der gefundenen Rechtecke bilden
            cx = cx + int(x + w / 2)
            # Anzahl der gefundenen Rechecke mitzählen
            cnt = cnt + 1
            # Rote Rechtecke: X-Position ist 0 (Mitte des Kamerabildes)
            if is_kreuzung:
                ret.append(0)
            # Grüne Rechtecke: Abweichung von Bildmitte an Liste anfügen
            else:
                ret.append(cx - PIXEL_BREITE / 2)
    # keine Linienstücke gefunden: Durchnitt aller X-Positionen ist Bildmitte
    if cx is 0:
        cx = (PIXEL_BREITE - 1) / 2
    # Linienstückchen gefunden: Durchschnitt aller X-Positionen errechnen
    else:
        cx = cx / cnt
    # Kreis zeichnen an durchschnittlicher X-Position aller gefundenen Linienstückchen
    cv2.circle(roi_kreuzung, (int(cx), int(cy)), 5, farbe, -1)
    # Ergebnisliste zurückgeben: Liste der Abweichung der Linie von Mitte in Pixel
    return ret
Example #17
def getCropMask(color, depth, hue):
    ''' Build the crop mask '''
    ### H-[65 98] S-[33 255] V-[117 255] ###
    ## previously [30, 100, 40]
    ##            [100, 255, 255]
    hsv = cv2.cvtColor(color, cv2.COLOR_BGR2HSV)
    lower_g = np.array([hue - 20, 33, 30])
    upper_g = np.array([hue + 20, 255, 255])
    mask = cv2.inRange(hsv, lower_g, upper_g)
    mask = cv2.medianBlur(mask, 5)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

    ''' Remove small connected components from the mask '''
    if(cv2.__version__[0] == '4'):
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    boundaries = []
    for con in contours:
        if(cv2.contourArea(con) > 1000):
            boundaries.append(con)
            cv2.drawContours(mask, [con], 0, 255, -1)
        else:
            cv2.drawContours(mask, [con], 0, 0, -1)
    
    ''' AND the mask with the depth map '''
    depth_bin = np.uint8(depth > 0) * 255
    mask = cv2.bitwise_and(mask, depth_bin)
    return mask
Example #18
def red_detect(frame):  # detect the orange-red colour and annotate the frame
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower = (0, 230, 150)
    upper = (30, 255, 255)
    red = cv2.inRange(hsv, lower, upper)
    kernel = np.ones((5, 5), "uint8")
    red = cv2.dilate(red, kernel)
    res = cv2.bitwise_and(frame, frame, mask=red)
    (ret, contours, hierarchy) = cv2.findContours(
        red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    x = 0
    y = 0
    w = 0
    h = 0
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area > 100):
            x, y, w, h = cv2.boundingRect(contour)
            frame = cv2.rectangle(
                frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
            cv2.putText(frame, "RED color", (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255))
            cv2.drawMarker(frame, (480, 350), (255, 255, 0),
                           markerType=cv2.MARKER_SQUARE, markerSize=5, thickness=10)
            cv2.drawMarker(frame, ((x + w//2), (y + h//2)), (255, 255, 0),
                           markerType=cv2.MARKER_SQUARE, markerSize=5, thickness=10)
            cv2.arrowedLine(frame, (480, 350),
                            ((x + w//2), (y + h//2)), (255, 0, 0), 5)
            cv2.rectangle(frame, (330, 200), (630, 500), (0, 255, 0), 1)
    return frame, x, y, w, h  # return the frame and the pixel values (x, y, w, h)
Example #19
def img_color_filter(img):
    hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
    # color orange
    low_hsv = np.array([11, 43, 46])
    up_hsv = np.array([25, 255, 255])
    mask = cv.inRange(hsv, lowerb=low_hsv, upperb=up_hsv)
    cv.imshow("filter", mask)
Example #20
def get_balls(image, color, bounds, camera):

    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    bright = cv2.inRange(hsv, bounds[0], bounds[1])
    blur = cv2.GaussianBlur(bright, **GAUSS_PARAMS)

    # detected_circles = cv2.HoughCircles(blurred_mask, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=0, maxRadius=0)
    detected_circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT,
                                        **HOUGH_PARAMS)
    balls = []

    if detected_circles is not None:
        for circle in detected_circles[0, :]:
            x = circle[0]
            y = circle[1]
            r = circle[2]

            # locate detected balls
            position, distance = find_position(x, y, r, camera)
            circled_orig = cv2.circle(image, (x, y),
                                      r, (0, 255, 0),
                                      thickness=1)

            font = cv2.FONT_HERSHEY_SIMPLEX
            # x = x - 10
            print(x, y)
            cv2.putText(circled_orig, str(format(distance, '.2f')), (x, y),
                        font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)

            balls.append((x, y, r, color))

        display_image(circled_orig)

    return balls
Example #21
def detectColor():
    def empty(a):
        pass

    cv2.namedWindow("TrackBars")
    cv2.resizeWindow("TrackBars", 640, 240)
    cv2.createTrackbar("Hue Min", "TrackBars", 0, 179, empty)
    cv2.createTrackbar("Sat Min", "TrackBars", 0, 255, empty)
    cv2.createTrackbar("Val Min", "TrackBars", 0, 255, empty)
    cv2.createTrackbar("Hue Max", "TrackBars", 179, 179, empty)
    cv2.createTrackbar("Sat Max", "TrackBars", 255, 255, empty)
    cv2.createTrackbar("Val Max", "TrackBars", 255, 255, empty)

    while True:
        img = cv2.imread("./lambo.jpeg")
        imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        h_min = cv2.getTrackbarPos("Hue Min", "TrackBars")
        h_max = cv2.getTrackbarPos("Hue Max", "TrackBars")
        s_min = cv2.getTrackbarPos("Sat  Min", "TrackBars")
        s_max = cv2.getTrackbarPos("Sat Max", "TrackBars")
        v_min = cv2.getTrackbarPos("Val Min", "TrackBars")
        v_max = cv2.getTrackbarPos("Val Max", "TrackBars")
        #print(v_min)
        # lower1 = np.array([h_min, s_min, v_min])
        # upper1 = np.array([h_max, s_max, v_max])
        lower = np.array([000, 000, 186])
        upper = np.array([179, 255, 255])
        mask = cv2.inRange(imgHSV, lower, upper)
        imgResult = cv2.bitwise_and(img, img, mask=mask)

        cv2.imshow("Original", img)
        cv2.imshow("HSV", imgHSV)
        cv2.imshow("Mask", mask)
        cv2.imshow("Result", imgResult)
        cv2.waitKey(0)
Example #22
 def detect_red(self, img):
     thresh = cv2.inRange(img, (0, 0, 100), (10, 10, 255))
     if (sum(sum(thresh)) == 0):  #If it is obscured
         return None  #Return none
     kernel = np.ones((5, 5), np.uint8)
     result = cv2.dilate(thresh, kernel, iterations=3)
     return self.getCoM(result)  #Positions returned
Example #23
 def get_white(src):
     # white
     lower_white = np.array([0, 0, 221])
     upper_white = np.array([180, 45, 255])
     hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
     mask = cv2.inRange(hsv, lower_white, upper_white)
     return mask
Example #24
def select_yellow(image):
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    lower = np.array([20, 60, 60])
    upper = np.array([38, 174, 250])
    mask = cv2.inRange(hsv, lower, upper)

    return mask
Example #25
def dupImageByColor(in_frame, hue, sat, val, torg, t):
    out_img = cv2.cvtColor(in_frame, cv2.COLOR_BGR2HSV)
    #
    out_img = cv2.inRange(out_img, (hue[0], sat[0], val[0]),
                          (hue[1], sat[1], val[1]))
    out_img = cv2.blur(out_img, (3, 3))
    #
    # masking out the area that we wanted by just taking an entire slice of the area and leave it in the 'out' image
    #out_img = out_img[_y1:_y2, _x1:_x2]
    # also draw this slice on the original image in Red (BGR)
    #cv2.rectangle(out_img, _mask_p1, _mask_p2, (0,0,255), 1)

    # Now find the contours - only the contours are kept and used this time
    contours, _ = cv2.findContours(out_img,
                                   mode=cv2.RETR_EXTERNAL,
                                   method=cv2.CHAIN_APPROX_SIMPLE)

    maxcontourarea = 0.0
    if len(contours) > 0:
        c = max(contours, key=cv2.contourArea)
        maxcontourarea = cv2.contourArea(c)
    #for c in contours:
    #    cv2.drawContours(out_img, c, 0, (0,150,150), 1)
    # out_img = cv2.cvtColor(out_img, cv2.COLOR_HSV2BGR)
    # cv2.putText(in_frame, str(t) + ": " + str(maxcontourarea), torg, cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0))

    return (out_img, maxcontourarea)
Example #26
def AnalisFrame(color, rango_bajos, rango_altos):

    Pcolor = 0
    PorcentajeBlack = 0

    # detect the pixels that fall within the range we have established:
    mask = cv2.inRange(hsv, rango_bajos, rango_altos)
    #print(len(mask))

    # show the original image and the mask:
    #cv2.imshow("Original", img)

    # show the original image and the mask:
    #cv2.imshow(color, mask)

    cv2.imwrite('messigray.png', mask)

    foto = Image.open('messigray.png')
    datos = list(foto.getdata())
    for dato in datos:
        if (int(dato) > 0):
            #print(dato)
            Pcolor += 1
        else:
            PorcentajeBlack += 1
    foto.close()

    Porcentajecolor = Pcolor * 100 / (Pcolor + PorcentajeBlack)
    AnalisisColor = [color, Porcentajecolor]

    AnalisListColor.append(AnalisisColor)
    PorcentajePixel.append(Porcentajecolor)

    # Results per scale
    """
Example #27
def find_game_frame(screen):
    global game_frame

    img = np.array(screen)
    thresh = cv2.inRange(img, WINDOW_BORDER_COLOR, WINDOW_BORDER_COLOR)
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        return

    window_border = max(contours, key=cv2.contourArea)
    x0, y0, w, h = cv2.boundingRect(window_border)
    window_crop = thresh[y0:y0 + h, x0:x0 + w]
    window_thresh = cv2.bitwise_not(window_crop)
    contours, _ = cv2.findContours(window_thresh, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        return

    game_border = max(contours, key=cv2.contourArea)
    x1, y1, w, h = cv2.boundingRect(game_border)
    if w * h < 540 * 405:
        return

    game_frame = (x0 + x1, y0 + y1, w, h)
Example #28
def findend():
    global endpoint

    hsv_image = cv2.cvtColor(img_maze, cv2.COLOR_BGR2HSV)
  
    # define range of red color in HSV
    lower_red = np.array([0, 100, 100])
    upper_red = np.array([135, 255, 255])

    # Threshold the HSV image to get only red colors
    mask = cv2.inRange(hsv_image, lower_red, upper_red)

    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(hsv_image, hsv_image, mask=mask)
    cimg = res
    res = cv2.cvtColor(res, cv2.COLOR_RGB2GRAY)

    circles = cv2.HoughCircles(res, cv2.HOUGH_GRADIENT, 1, 30, param1=50,
                               param2=10, minRadius=0, maxRadius=0)
    #circles = np.uint16(np.around(circles))

    for i in circles[0, :]:
        # draw the outer circle
        cv2.circle(cimg, (i[0], i[1]), i[2], (255, 0, 0), 2)
        # draw the center of the circle
        cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
        endpoint = (i[0], i[1])
Example #29
 def maskDisplay(self, hsvarr, img):
     imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
     lower = np.array([hsvarr[0], hsvarr[2], hsvarr[4]])
     upper = np.array([hsvarr[1], hsvarr[3], hsvarr[5]])
     mask = cv2.inRange(imgHSV, lower, upper)
     imgresult = cv2.bitwise_and(img, img, mask=mask)
     cv2.imshow("Masked Feed", imgresult)
Example #30
    def traitement_image(img_bw):
        global Photo_traitee

        global canvas_photoTraitee
        global hsv
        couleur = "#3F875C"

        hsv = [75, 130, 90]
        H = hsv[0]
        S = hsv[1]
        V = hsv[2]

        HMin = H - 15
        HMax = H + 15
        SMin = S - 70
        SMax = S + 70
        VMin = V - 90
        VMax = V + 90
        minHSV = np.array([HMin, SMin, VMin])
        maxHSV = np.array([HMax, SMax, VMax])
        #img = cv2.imread(img_bw)
        imageHSV = cv2.cvtColor(img_bw, cv2.COLOR_BGR2HSV)
        maskHSV = cv2.inRange(imageHSV, minHSV, maxHSV)
        resultHSV = cv2.bitwise_and(img_bw, img_bw, mask=maskHSV)
        img_gray = cv2.cvtColor(resultHSV, cv2.COLOR_RGB2GRAY)
        (thresh, img_bw) = cv2.threshold(img_gray, 128, 255,
                                         cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        #path_file=('static/%s.jpg' %uuid.uuid4().hex),
        cv2.imwrite('static/photo.jpg', img_bw)
        return json.dumps('static/photo.jpg')