Example No. 1
def get_cityscapes_rm(img, semantics, size, C):
    """Applies one adaptive filter to find road markings"""
    semantics_road = (128, 64, 128)
    mask = cv2.inRange(semantics, semantics_road, semantics_road)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.bitwise_and(img, img, mask=mask)
    # thr_otsu = masked_otsu_threshold(img, mask)
    # _, thr_global = cv2.threshold(img, thr_otsu, 255, cv2.THRESH_BINARY)
    thr_adaptive = masked_adaptive_threshold(img,
                                             mask,
                                             max_value=255,
                                             size=size,
                                             C=C)
    # thr_adaptive = cv2.convertScaleAbs(thr_adaptive)
    # thr_combined = cv2.bitwise_and( thr_adaptive, thr_adaptive, mask=thr_global)
    thr_adaptive = cv2.bitwise_and(thr_adaptive, thr_adaptive, mask=mask)

    # plt.figure()
    # plt.subplot(3,1,1)
    # plt.imshow(thr_global)
    # plt.subplot(3,1,2)
    # plt.imshow(thr_adaptive)
    # plt.subplot(3,1,3)
    # plt.imshow(thr_combined)

    return thr_adaptive
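Note: masked_adaptive_threshold (used here and in Example No. 6) is a project helper that is not shown on this page. A minimal sketch of what it might look like, assuming it mimics cv2.adaptiveThreshold's mean method while ignoring pixels outside the mask (name, signature, and behavior are inferred from the call sites, not taken from the original project):

import cv2
import numpy as np

def masked_adaptive_threshold(img, mask, max_value=255, size=101, C=-15):
    """Hypothetical helper: adaptive mean threshold restricted to pixels inside `mask`."""
    valid = (mask > 0).astype(np.float32)
    img_f = img.astype(np.float32)
    # Sum of masked pixel values and count of masked pixels in each size x size window.
    local_sum = cv2.boxFilter(img_f * valid, -1, (size, size), normalize=False)
    local_cnt = cv2.boxFilter(valid, -1, (size, size), normalize=False)
    local_mean = local_sum / np.maximum(local_cnt, 1.0)
    # Same convention as cv2.adaptiveThreshold: a pixel is "on" if it exceeds (local mean - C).
    return np.where(img_f > local_mean - C, max_value, 0).astype(np.uint8)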
Example No. 2
def get_rm_shadow(img, roi):
    # convert to hsv
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Gaussian blur
    hsv = cv2.GaussianBlur(hsv, (5, 5), 0)
    # get saturation
    _, S, _ = cv2.split(hsv)
    # threshold saturation, shadows have higher sat than road
    _, shadow_mask = cv2.threshold(S, 70, 255, cv2.THRESH_BINARY)
    # get rid of noise
    kernel = np.ones((7, 7), np.uint8)
    shadow_mask = cv2.morphologyEx(shadow_mask,
                                   cv2.MORPH_CLOSE,
                                   kernel,
                                   iterations=1)
    # mask roi
    shadow_mask = cv2.bitwise_and(shadow_mask, roi)
    # get shadows from img
    shadows = cv2.bitwise_and(img, img, mask=shadow_mask)
    # make shadows grayscale
    shadows_gray = cv2.cvtColor(shadows, cv2.COLOR_BGR2GRAY)
    # get road markings in shadow
    _, rm_mask = cv2.threshold(shadows_gray, 100, 255, cv2.THRESH_BINARY)
    # clean up noise
    kernel = np.ones((2, 2), np.uint8)
    rm_mask = cv2.morphologyEx(rm_mask, cv2.MORPH_OPEN, kernel, iterations=1)
    # increase mask size a bit
    kernel = np.ones((3, 3), np.uint8)
    rm_mask = cv2.dilate(rm_mask, kernel)

    return rm_mask
Example No. 3
def rm_otsu_sunshade(img, roi, config):
    shadow = get_shadow(img, roi)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # get threshold for shadow region
    # print('thr_in_shadow')
    thr_in_shadow = masked_otsu_threshold(img_gray, shadow)
    # get road markings in shadow
    _, rm_in_shadow = cv2.threshold(img_gray, thr_in_shadow, 255,
                                    cv2.THRESH_BINARY)
    # kernel_erode = np.ones((7,7), np.uint8)
    # shadow_eroded = cv2.erode(roi_shadow, kernel_erode)
    rm_in_shadow = cv2.bitwise_and(rm_in_shadow, rm_in_shadow, mask=shadow)
    # get threshold for sunlight region
    shadow_inv = cv2.bitwise_not(shadow)
    shadow_inv = cv2.bitwise_and(shadow_inv, shadow_inv, mask=roi)
    thr_out_shadow = masked_otsu_threshold(img_gray, shadow_inv)
    # get road markings not in shadow
    _, rm_out_shadow = cv2.threshold(img_gray, int(
        (thr_out_shadow * 1.5) % 255), 255, cv2.THRESH_BINARY)
    # rm_out_shadow = cv2.bitwise_and(rm_out_shadow, rm_out_shadow, mask=shadow_inv)
    # combine markings in shadow and not in shadow
    rm = cv2.bitwise_or(rm_in_shadow, rm_out_shadow)
    rm = cv2.bitwise_and(rm, rm, mask=roi)

    return rm
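masked_otsu_threshold and get_shadow are likewise helpers defined elsewhere in the project. A plausible sketch of masked_otsu_threshold, assuming it simply runs Otsu's method over the masked pixels and returns the threshold value (get_shadow presumably resembles the saturation test in Example No. 2):

import cv2
import numpy as np

def masked_otsu_threshold(img_gray, mask):
    """Hypothetical helper: Otsu threshold computed only over pixels where mask > 0."""
    pixels = img_gray[mask > 0]
    if pixels.size == 0:
        return 0
    # cv2.threshold wants an 8-bit array, so feed it the selected pixels as one row.
    thr, _ = cv2.threshold(pixels.reshape(1, -1), 0, 255,
                           cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return thr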
Example No. 4
def overlay_images(img1, img2, mask):
    # img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
    # ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    img1_bg = cv2.bitwise_and(img1, img1, mask=mask_inv)
    img2_fg = cv2.bitwise_and(img2, img2, mask=mask)
    dst = cv2.add(img1_bg, img2_fg)

    return dst
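A typical call, mirroring the commented-out lines above, derives the mask from the foreground image itself by thresholding its grayscale version. The names frame and logo below are illustrative placeholders for two same-sized BGR images:

# Hypothetical usage: paste `logo` onto `frame` wherever the logo is brighter than near-black.
logo_gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
_, logo_mask = cv2.threshold(logo_gray, 10, 255, cv2.THRESH_BINARY)
combined = overlay_images(frame, logo, logo_mask)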
Example No. 5
def quitarFondo():
    cap = cv2.imread(
        r'/home/juan-rios/Documentos/python/trackMove/original/foto.png', 1)
    newImg = cv2.resize(cap, (550, 350))
    print(cap.shape)
    panel = np.zeros([650, 1120], np.uint8)
    cv2.namedWindow('panel')

    def nothing(x):
        pass

    cv2.createTrackbar('L - h', 'panel', 0, 179, nothing)
    cv2.createTrackbar('U - h', 'panel', 179, 179, nothing)
    cv2.createTrackbar('L - s', 'panel', 0, 255, nothing)
    cv2.createTrackbar('U - s', 'panel', 255, 255, nothing)
    cv2.createTrackbar('L - v', 'panel', 0, 255, nothing)
    cv2.createTrackbar('U - v', 'panel', 255, 255, nothing)
    cv2.createTrackbar('S ROWS', 'panel', 0, 480, nothing)
    cv2.createTrackbar('E ROWS', 'panel', 480, 480, nothing)
    cv2.createTrackbar('S COL', 'panel', 0, 640, nothing)
    cv2.createTrackbar('E COL', 'panel', 640, 640, nothing)
    while True:

        frame = newImg[0:650, 0:1120]
        s_r = cv2.getTrackbarPos('S ROWS', 'panel')
        e_r = cv2.getTrackbarPos('E ROWS', 'panel')
        s_c = cv2.getTrackbarPos('S COL', 'panel')
        e_c = cv2.getTrackbarPos('E COL', 'panel')
        roi = frame[s_r:e_r, s_c:e_c]
        hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        l_h = cv2.getTrackbarPos('L - h', 'panel')
        u_h = cv2.getTrackbarPos('U - h', 'panel')
        l_s = cv2.getTrackbarPos('L - s', 'panel')
        u_s = cv2.getTrackbarPos('U - s', 'panel')
        l_v = cv2.getTrackbarPos('L - v', 'panel')
        u_v = cv2.getTrackbarPos('U - v', 'panel')
        lower_green = np.array([l_h, l_s, l_v])
        upper_green = np.array([u_h, u_s, u_v])
        mask = cv2.inRange(hsv, lower_green, upper_green)
        mask_inv = cv2.bitwise_not(mask)
        bg = cv2.bitwise_and(roi, roi, mask=mask)
        fg = cv2.bitwise_and(roi, roi, mask=mask_inv)
        cv2.imshow('filtro', bg)
        cv2.imshow('camara', fg)
        #cv2.imshow('panel', panel)
        cv2.imwrite(
            r'/home/juan-rios/Documentos/python/trackMove/sin_fondo/foto_sin_fondo.png',
            fg)
        k = cv2.waitKey(30) & 0xFF
        if k == 27:
            break
    cv2.destroyAllWindows()
Example No. 6
def get_cityscapes_rm_da(img, semantics):
    """Applies two adaptive filters to find road markings"""

    semantics_road = (128, 64, 128)
    mask_road = cv2.inRange(semantics, semantics_road, semantics_road)

    # cv2.imwrite('temp/cityscapes_road_mask.png', mask_road)

    height = img.shape[0]
    width = img.shape[1]
    vertices = np.array([[(0, height), (0, int(height * 3 / 4)),
                          (width, int(height * 3 / 4)), (width, height)]])
    mask_bottom = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)
    cv2.fillPoly(mask_bottom, vertices, 255)
    mask_top = cv2.bitwise_not(mask_bottom)

    mask_road_top = cv2.bitwise_and(mask_road, mask_road, mask=mask_top)
    mask_road_bottom = cv2.bitwise_and(mask_road, mask_road, mask=mask_bottom)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # img = cv2.bitwise_and(img, img, mask=mask)
    thr_adaptive_top = masked_adaptive_threshold(img,
                                                 mask_road_top,
                                                 max_value=255,
                                                 size=101,
                                                 C=-15)
    thr_adaptive_top = cv2.convertScaleAbs(thr_adaptive_top)
    thr_adaptive_top = cv2.bitwise_and(thr_adaptive_top,
                                       thr_adaptive_top,
                                       mask=mask_road_top)
    thr_adaptive_bottom = masked_adaptive_threshold(img,
                                                    mask_road_bottom,
                                                    max_value=255,
                                                    size=251,
                                                    C=-15)
    thr_adaptive_bottom = cv2.convertScaleAbs(thr_adaptive_bottom)
    thr_adaptive_bottom = cv2.bitwise_and(thr_adaptive_bottom,
                                          thr_adaptive_bottom,
                                          mask=mask_road_bottom)
    thr_adaptive_combined = cv2.bitwise_or(thr_adaptive_top,
                                           thr_adaptive_bottom)

    plt.figure()
    plt.subplot(3, 1, 1)
    plt.imshow(img, cmap='gray')
    plt.subplot(3, 1, 2)
    plt.imshow(thr_adaptive_top, cmap='gray')
    plt.subplot(3, 1, 3)
    plt.imshow(thr_adaptive_bottom, cmap='gray')

    return thr_adaptive_combined
Example No. 7
def createMask2(image):
    pic = image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edged = cv2.Canny(gray, 50, 100)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
    closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)
    (_, cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    mask = np.ones(image.shape[:2], dtype="uint8") * 255
    for c in cnts:
        cv2.drawContours(mask, [c], -1, 0, -1)
    mask_inv = cv2.bitwise_not(mask)
    image = cv2.bitwise_and(image, image, mask=mask)
    image2 = cv2.bitwise_and(pic, pic, mask=mask_inv)
    return whiteBackground(image2)
Example No. 8
def produce_frames(frames):
    bg = np.array(Image.open('bg.jpg'))
    bg = cv2.cvtColor(bg, cv2.COLOR_BGR2RGB)
    bg_s = np.array(Image.open('bg_s.jpg'))
    bg_s = cv2.cvtColor(bg_s, cv2.COLOR_BGR2RGB)

    frames_generated = []
    for frame in frames:
        res1 = cv2.bitwise_and(frame, bg)
        res2 = cv2.bitwise_and(cv2.bitwise_not(frame), bg_s)
        res = cv2.bitwise_xor(res1, res2)

        frames_generated.append(res)

    return frames_generated
Example No. 9
def red_detect(frame):  # Detect orange regions and annotate the frame.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower = (0, 230, 150)
    upper = (30, 255, 255)
    red = cv2.inRange(hsv, lower, upper)
    kernel = np.ones((5, 5), "uint8")
    red = cv2.dilate(red, kernel)
    res = cv2.bitwise_and(frame, frame, mask=red)
    (ret, contours, hierarchy) = cv2.findContours(
        red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    x = 0
    y = 0
    w = 0
    h = 0
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area > 100):
            x, y, w, h = cv2.boundingRect(contour)
            frame = cv2.rectangle(
                frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
            cv2.putText(frame, "RED color", (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255))
            cv2.drawMarker(frame, (480, 350), (255, 255, 0),
                           markerType=cv2.MARKER_SQUARE, markerSize=5, thickness=10)
            cv2.drawMarker(frame, ((x + w//2), (y + h//2)), (255, 255, 0),
                           markerType=cv2.MARKER_SQUARE, markerSize=5, thickness=10)
            cv2.arrowedLine(frame, (480, 350),
                            ((x + w//2), (y + h//2)), (255, 0, 0), 5)
            cv2.rectangle(frame, (330, 200), (630, 500), (0, 255, 0), 1)
    return frame, x, y, w, h  # Return the annotated frame and the bounding box (x, y, w, h).
Example No. 10
def detectColor():
    def empty(a):
        pass

    cv2.namedWindow("TrackBars")
    cv2.resizeWindow("TrackBars", 640, 240)
    cv2.createTrackbar("Hue Min", "TrackBars", 0, 179, empty)
    cv2.createTrackbar("Sat Min", "TrackBars", 0, 255, empty)
    cv2.createTrackbar("Val Min", "TrackBars", 0, 255, empty)
    cv2.createTrackbar("Hue Max", "TrackBars", 179, 179, empty)
    cv2.createTrackbar("Sat Max", "TrackBars", 255, 255, empty)
    cv2.createTrackbar("Val Max", "TrackBars", 255, 255, empty)

    while True:
        img = cv2.imread("./lambo.jpeg")
        imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        h_min = cv2.getTrackbarPos("Hue Min", "TrackBars")
        h_max = cv2.getTrackbarPos("Hue Max", "TrackBars")
        s_min = cv2.getTrackbarPos("Sat  Min", "TrackBars")
        s_max = cv2.getTrackbarPos("Sat Max", "TrackBars")
        v_min = cv2.getTrackbarPos("Val Min", "TrackBars")
        v_max = cv2.getTrackbarPos("Val Max", "TrackBars")
        #print(v_min)
        # lower1 = np.array([h_min, s_min, v_min])
        # upper1 = np.array([h_max, s_max, v_max])
        lower = np.array([000, 000, 186])
        upper = np.array([179, 255, 255])
        mask = cv2.inRange(imgHSV, lower, upper)
        imgResult = cv2.bitwise_and(img, img, mask=mask)

        cv2.imshow("Original", img)
        cv2.imshow("HSV", imgHSV)
        cv2.imshow("Mask", mask)
        cv2.imshow("Result", imgResult)
        cv2.waitKey(0)
Example No. 11
def roi(img, vertices):
    mask = np.zeros_like(img)
    #channel_count = img.shape[2]
    match_mask_color = 255
    cv2.fillPoly(mask, vertices, match_mask_color)
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
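Because this roi() fills the polygon with the scalar 255 (the multi-channel branch is commented out), it is intended for single-channel inputs. A small usage sketch under that assumption, with illustrative vertex values:

# Hypothetical usage: keep only a triangular region of a Canny edge map.
edges = cv2.Canny(gray, 50, 150)  # `gray` is a single-channel frame
h, w = edges.shape[:2]
vertices = np.array([[(0, h), (w // 2, h // 2), (w, h)]], dtype=np.int32)
cropped_edges = roi(edges, vertices)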
Example No. 12
 def addTwoImgs(self, img1_RGBA, face_param):
     # Paste the sticker onto the image.
     self.img = cv2.imread(self.path)
     self.img = cv2.resize(self.img,
                           (int(face_param[2] / 1), int(face_param[2] / 1)),
                           interpolation=cv2.INTER_CUBIC)
     try:
         self.rows, self.cols = self.img.shape[:2]
     except:
         NoteLabel.config(text='Fail in loading sticker!')
     self.getStickerPosition(face_param)
     if self.x1 >= 0 and self.x2 <= img1_RGBA.shape[
             1] and self.y1 >= 0 and self.y2 <= img1_RGBA.shape[0]:
         # Build the mask for blending.
         roi = img1_RGBA[self.y1:self.y2, self.x1:self.x2]
         sticker_gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
         ret, mask = cv2.threshold(sticker_gray, 10, 255, cv2.THRESH_BINARY)
         del ret  # No real purpose; just avoids the unused-variable warning.
         mask_inv = cv2.bitwise_not(mask)
         img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
         self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGBA)
         dst = cv2.add(img1_bg, self.img)
         img1_RGBA[self.y1:self.y2, self.x1:self.x2] = dst
         return True, img1_RGBA
     else:
         NoteLabel.config(text="No enough space for stickers!")
         return False, None
Example No. 13
def getCropMask(color, depth, hue):
    ''' Build the crop mask. '''
    ### H-[65 98] S-[33 255] V-[117 255] ###
    ## original range: [30, 100, 40]
    ##                 [100, 255, 255]
    hsv = cv2.cvtColor(color, cv2.COLOR_BGR2HSV)
    lower_g = np.array([hue-20,33,30])
    upper_g = np.array([hue+20,255,255])
    mask = cv2.inRange(hsv, lower_g, upper_g)
    mask = cv2.medianBlur(mask, 5)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

    ''' Remove small connected components from the mask. '''
    if(cv2.__version__[0] == '4'):
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    boundaries = []
    for con in contours:
        if(cv2.contourArea(con) > 1000):
            boundaries.append(con)
            cv2.drawContours(mask, [con], 0, 255, -1)
        else:
            cv2.drawContours(mask, [con], 0, 0, -1)
    
    ''' AND the mask with the valid-depth region. '''
    depth_bin = np.uint8(depth > 0) * 255
    mask = cv2.bitwise_and(mask, depth_bin)
    return mask
Example No. 14
    def fit(self, image, label):
        cv2.namedWindow('img')
        cv2.setMouseCallback('img', self.draw_mask)
        color_img = cv2.cvtColor(self.cv_img, cv2.COLOR_GRAY2BGR)
        font = cv2.FONT_HERSHEY_SIMPLEX

        while True:
            k = cv2.waitKey(1) & 0xFF
            if k == 27:
                break
            elif k == ord('d') or k == ord('D'):
                self.redraw_grab_cut(color_img)

            mask2 = np.where((self.mask == 1) + (self.mask == 3), 255,
                             0).astype('uint8')
            out = self.cv_img2.copy()
            cv2.putText(out, f'DSC: {self.score}', (15, 50), font, 1,
                        (255, 0, 0), 1, cv2.LINE_AA)
            output = cv2.bitwise_and(self.cv_img, self.cv_img, mask=mask2)
            cv_label = (self.label * 255).astype(np.uint8)

            cv2.imshow('img', out)
            cv2.imshow('label', cv_label)
            cv2.imshow('output', output)

        cv2.destroyAllWindows()
Example No. 15
def masked_to_shape(image: np.array, shape: [Point]) -> np.array:
    """Masks the image to the given shape."""
    mask = np.zeros(image.shape, dtype=np.uint8)
    channel_count = image.shape[2] if is_colored(image) else 1
    mask_color = (255, ) * channel_count
    cv2.fillPoly(mask, Point.to_numpy(shape), mask_color)
    return cv2.bitwise_and(image, mask)
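masked_to_shape depends on two helpers not included in this listing: is_colored and Point. Minimal sketches, assuming is_colored just checks for a channel dimension and Point.to_numpy packs the points into the polygon list cv2.fillPoly expects:

import numpy as np

def is_colored(image) -> bool:
    # Assumed behavior: True for H x W x C images, False for single-channel H x W.
    return image.ndim == 3

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    @staticmethod
    def to_numpy(shape):
        # Assumed behavior: one int32 (N, 2) polygon in the list format cv2.fillPoly accepts.
        return [np.array([(p.x, p.y) for p in shape], dtype=np.int32)]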
Example No. 16
    def _rank_lzs(self,
                  lzsProposals,
                  riskMap,
                  obstacles,
                  weightDist=5,
                  weightRisk=15,
                  weightOb=5):
        for lz in lzsProposals:
            riskFactor, distanceFactor, obFactor = 0, 0, 0
            lzRad = lz.get("radius")
            lzPos = lz.get("position")
            mask = np.zeros_like(riskMap)
            mask = cv.circle(mask, (lzPos[0], lzPos[1]), lzRad,
                             (255, 255, 255), -1)
            areaLz = math.pi * lzRad * lzRad
            crop = cv.bitwise_and(riskMap, mask)

            if weightRisk != 0:
                riskFactor = self._risk_map_eval_basic(crop, areaLz)
            if weightDist != 0:
                distanceFactor = self.getDistanceCenter(
                    riskMap, (lzPos[0], lzPos[1]))
            if weightOb != 0:
                obFactor = self._dist_to_obs(lz, obstacles, riskMap)

            if lz["confidence"] is math.nan:
                lz["confidence"] = abs(
                    (weightRisk * riskFactor + weightDist * distanceFactor +
                     weightOb * obFactor) /
                    (weightRisk + weightDist + weightOb))
                xxx = 0

        lzsSorted = sorted(lzsProposals, key=lambda k: k["confidence"])
        return lzsSorted
Example No. 17
def setStandardValues(base64Image, values):

	d = []
	for i in values:
		d.append(int(i['value']))
	(h_min, h_max, s_min, s_max, v_min, v_max, threshold1, threshold2, area_min) = tuple(d)

	img_str = base64.b64decode(base64Image)

	nparr = np.frombuffer(img_str, np.uint8)
	image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
	imgHSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
	lower = np.array([h_min, s_min, v_min])
	upper = np.array([h_max, s_max, v_max])
	mask = cv2.inRange(imgHSV, lower, upper)
	result = cv2.bitwise_and(image, image, mask = mask)
	mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
	imgCanny = cv2.Canny(mask, threshold1, threshold2)
	kernel = np.ones((5,5))
	imgDil = cv2.dilate(imgCanny, kernel, iterations=1)
	found, standardHeight = getContour(imgDil, image)

	img_str = cv2.imencode('.png', imgHSV)[1].tobytes()
	base64ImageReturn = base64.b64encode(img_str) 
	
	imgD_str = cv2.imencode('.png', result)[1].tobytes()
	base64ImgDilReturn = base64.b64encode(imgD_str)

	return base64ImageReturn, base64ImgDilReturn
Example No. 18
def extract(image):
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    low = np.array([0, 100, 250])
    high = np.array([179, 255, 255])
    mask_fore = cv2.inRange(hsv, low, high)
    mask_back = cv2.bitwise_not(mask_fore)

    kernel = np.ones((11, 11), np.float32) / 121
    fltr1_f_dil = cv2.dilate(mask_fore, kernel, iterations=1)
    fltr1_f_bor = cv2.bitwise_and(mask_back, mask_back, mask=fltr1_f_dil)

    contours, hierarchy = cv2.findContours(fltr1_f_bor, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)
    contours = sorted(contours, key=lambda ctr: cv2.boundingRect(ctr)[0])
    i = 0

    save_path = os.path.join(os.getcwd(), IMG_DES)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        if w > 50 and h > 50:
            p = os.path.join(save_path, "{}.png".format(str(i)))
            cv2.imwrite(p, pad_image(fltr1_f_bor[y:y + h, x:x + w]))
            i = i + 1
Example No. 19
def skin_extract(image):

    img_HSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    #skin color range for hsv color space
    HSV_mask = cv2.inRange(img_HSV, (0, 15, 0), (17, 170, 255))
    #cv2.imshow('HSV mask_before',HSV_mask)
    #cv2.imwrite('HSV mask_before.jpg',HSV_mask)
    HSV_mask = cv2.morphologyEx(HSV_mask, cv2.MORPH_OPEN,
                                np.ones((3, 3), np.uint8))
    #cv2.imshow('HSV mask',HSV_mask)
    #cv2.imwrite('HSV mask.jpg',HSV_mask)
    #converting from BGR to YCrCb color space
    img_YCrCb = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
    #skin color range for YCrCb color space
    YCrCb_mask = cv2.inRange(img_YCrCb, (0, 135, 85), (255, 180, 135))
    #cv2.imshow('YCrCb_mask_before',YCrCb_mask)
    #cv2.imwrite('YCrCb_mask_before.jpg',YCrCb_mask)
    YCrCb_mask = cv2.morphologyEx(YCrCb_mask, cv2.MORPH_OPEN,
                                  np.ones((3, 3), np.uint8))
    #cv2.imshow('YCrCb_mask',YCrCb_mask)
    #cv2.imwrite('YCrCb_mask.jpg',YCrCb_mask)

    #merge skin detection (YCrCb and HSV)
    global_mask = cv2.bitwise_and(YCrCb_mask, HSV_mask)
    #cv2.imshow('global_mask_before', global_mask)
    cv2.imwrite('global_mask_before.jpg', global_mask)
    global_mask = cv2.medianBlur(global_mask, 3)
    global_mask = cv2.morphologyEx(global_mask, cv2.MORPH_OPEN,
                                   np.ones((4, 4), np.uint8))
    #cv2.imshow('global_mask', global_mask)
    cv2.imwrite('global_mask.jpg', global_mask)

    return global_mask
Example No. 20
def zone_mask(img, vertices):
    # blank mask:
    mask = np.zeros_like(img)
    # fill the mask
    cv2.fillPoly(mask, vertices, 255)
    # now only show the area that is the mask
    return cv2.bitwise_and(img, mask)
Example No. 21
def region_of_interest(image):
    height = image.shape[0]
    polygons = np.array([(200, height), (1100, height), (550, 250)])
    mask = np.zeros_like(image)
    cv2.fillPoly(mask, np.array([polygons], dtype=np.int32), 255)
    masked_image = cv2.bitwise_and(image, mask)
    return masked_image
Example No. 22
    def cartoon(self, image):
        '''
        Cartoonise Image!

        Datatypes: image:nparray format BGR
        
        '''
        numdown, numbilateral = 2, 7
        color = image
        for _ in range(numdown):
            color = cv2.pyrDown(color)
        for _ in range(numbilateral):
            color = cv2.bilateralFilter(color, d=9, sigmaColor=9, sigmaSpace=7)
        for _ in range(numdown):
            color = cv2.pyrUp(color)
        cartoon = cv2.bitwise_and(
            color,
            cv2.cvtColor(
                cv2.adaptiveThreshold(cv2.medianBlur(
                    cv2.cvtColor(image, cv2.COLOR_RGB2GRAY), 7),
                                      255,
                                      cv2.ADAPTIVE_THRESH_MEAN_C,
                                      cv2.THRESH_BINARY,
                                      blockSize=9,
                                      C=2), cv2.COLOR_GRAY2RGB))
        cartoon = cv2.cvtColor(cartoon, cv2.COLOR_BGR2RGB)
        return cartoon
Example No. 23
def logic_demo(m1, m2):
    dst1 = cv.bitwise_and(m1, m2)
    dst2 = cv.bitwise_or(m1, m2)
    dst3 = cv.bitwise_not(m1)
    cv.imshow("and", dst1)
    cv.imshow("or", dst2)
    cv.imshow("not", dst3)
Example No. 24
    def __findNumberPlate__(self, image):
        ''' Handles all the pre-processing of image before passing it to Tesseract OCR '''
        resizedImage = imutils.resize(image, width=1000)
        grayImage = cv2.cvtColor(resizedImage, cv2.COLOR_BGR2GRAY)
        filteredImage = cv2.bilateralFilter(grayImage, 11, 17, 17)

        cannyEdges = cv2.Canny(filteredImage, 170, 200)
        contours, _ = cv2.findContours(cannyEdges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

        sortedContours = sorted(contours, key=cv2.contourArea, reverse=True)[:30]
        #cv2.drawContours(cannyEdges, sortedContours, 0, 255, -1)
        #cv2.imshow("Top 30 Contours", cannyEdges)          #Show the top 30 contours.
        #cv2.waitKey(0)

        NumberPlateCount = 0
        for contour in sortedContours:
            perimeter = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.02*perimeter, True)
            if len(approx) == 4:
                NumberPlateCount = approx
                break

        imageMask = np.zeros(grayImage.shape, np.uint8)
        x = cv2.drawContours(imageMask, [NumberPlateCount], 0, 255, -1)
        finalImage = cv2.bitwise_and(resizedImage, resizedImage, mask=imageMask)
        cv2.imwrite('numberPlateImage.jpg', finalImage)
        #finalGrayImage = cv2.cvtColor(finalImage, cv2.COLOR_BGR2GRAY)
        _, thresh2 = cv2.threshold(finalImage, 127, 255, cv2.THRESH_BINARY)
        cv2.imshow("Detected Number Plate", imutils.resize(finalImage, width=200))
        cv2.waitKey(0)
        return thresh2
Example No. 25
def focus_zone(img, focus_zone):
    mask = np.zeros_like(img)
    channel_count = img.shape[2]
    match_mask_color = (255, ) * channel_count
    cv2.fillPoly(mask, focus_zone, match_mask_color)
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
Example No. 26
def roi(img):
    """
    Lấy ROI của ảnh:
    Ảnh có kích thước (x, y) - (width, height)
    ROI là hình chữ nhật 4 points a, b, c, d:
        - a là điểm bên trái phía dưới
        - b là điểm bên phải phía dưới
        - c là điểm bên phải phía trên
        - d là điểm bên trái phía trên
    shape là array chứa 4 điểm HCN(a, b, c, d) - kích thước của ROI

    muốn tìm a, b, c, d dùng matplotlib sẽ show ảnh với tọa độ điểm trên ảnh:
        plt.imshow(image)
        plt.show()
    """
    x = int(img.shape[1])
    y = int(img.shape[0])
    shape = np.array([[int(0), int(170)], [int(1600), int(170)],
                      [int(1600), int(0)], [int(0), int(0)]])

    #define a numpy array with the dimensions of img, but comprised of zeros
    mask = np.zeros_like(img)
    #Uses 3 channels or 1 channel for color depending on input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]
        ignore_mask_color = (255, ) * channel_count
    else:
        ignore_mask_color = 255
    #creates a polygon with the mask color
    cv2.fillPoly(mask, np.int32([shape]), ignore_mask_color)
    #returns the image only where the mask pixels are not zero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
Example No. 27
def findend():
    global endpoint

    hsv_image = cv2.cvtColor(img_maze, cv2.COLOR_BGR2HSV)
  
    # define range of red color in HSV
    lower_red = np.array([0, 100, 100])
    upper_red = np.array([135, 255, 255])

    # Threshold the HSV image to get only red colors
    mask = cv2.inRange(hsv_image, lower_red, upper_red)

    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(hsv_image, hsv_image, mask=mask)
    cimg = res
    res = cv2.cvtColor(res, cv2.COLOR_RGB2GRAY)

    circles = cv2.HoughCircles(res,cv2.HOUGH_GRADIENT,1,30,param1=50,param2=10,minRadius=0,maxRadius=0)
    #circles = np.uint16(np.around(circles))

    for i in circles[0, :]:
        # draw the outer circle
        cv2.circle(cimg, (i[0], i[1]), i[2], (255, 0, 0), 2)
        # draw the center of the circle
        cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
        endpoint = (i[0], i[1])
Example No. 28
    def traitement_image(img_bw):
        global Photo_traitee

        global canvas_photoTraitee
        global hsv
        couleur = "#3F875C"

        hsv = [75, 130, 90]
        H = hsv[0]
        S = hsv[1]
        V = hsv[2]

        HMin = H - 15
        HMax = H + 15
        SMin = S - 70
        SMax = S + 70
        VMin = V - 90
        VMax = V + 90
        minHSV = np.array([HMin, SMin, VMin])
        maxHSV = np.array([HMax, SMax, VMax])
        #img = cv2.imread(img_bw)
        imageHSV = cv2.cvtColor(img_bw, cv2.COLOR_BGR2HSV)
        maskHSV = cv2.inRange(imageHSV, minHSV, maxHSV)
        resultHSV = cv2.bitwise_and(img_bw, img_bw, mask=maskHSV)
        img_gray = cv2.cvtColor(resultHSV, cv2.COLOR_RGB2GRAY)
        (thresh, img_bw) = cv2.threshold(img_gray, 128, 255,
                                         cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        #path_file=('static/%s.jpg' %uuid.uuid4().hex),
        cv2.imwrite('static/photo.jpg', img_bw)
        return json.dumps('static/photo.jpg')
Example No. 29
 def maskDisplay(self, hsvarr, img):
     imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
     lower = np.array([hsvarr[0], hsvarr[2], hsvarr[4]])
     upper = np.array([hsvarr[1], hsvarr[3], hsvarr[5]])
     mask = cv2.inRange(imgHSV, lower, upper)
     imgresult = cv2.bitwise_and(img, img, mask=mask)
     cv2.imshow("Masked Feed", imgresult)
Example No. 30
def region_of_interest(image):
    height = image.shape[0]
    triangle = np.array([[(200, height), (550, 250), (1100, height)]], np.int32)
    mask = np.zeros_like(image)
    cv2.fillPoly(mask, triangle, 255)
    masked_image = cv2.bitwise_and(image, mask)
    return masked_image