Example #1
    def preprocess(self, image):
        # split the image into channels
        B, G, R = cv2.split(image.astype('float32'))

        # subtract the means for each channel
        R -= self.rmean
        G -= self.gmean
        B -= self.bmean

        # merge the channels back together and return the image
        return cv2.merge([B, G, R])
Example #2
    def increase_brightness(self, img, value=30):
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(hsv)

        lim = 255 - value
        v[v > lim] = 255
        v[v <= lim] += value

        final_hsv = cv2.merge((h, s, v))
        img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
        return img
Example #3
def hisEqulColor(img):

    if len(img.shape) == 2:
        return hisEqul(img)

    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    channels = cv2.split(ycrcb)
    cv2.equalizeHist(channels[0], channels[0])
    cv2.merge(channels, ycrcb)
    cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR, img)
    return img
Example #4
 def reshape(self,blocks):
     _,gImage,rImage = cv2.split(self.image)
     bImage = []
     for chunk in self.chunkRows(blocks, self.col // 8):  # integer division (Python 3) so chunkRows gets an int
         for numRow in range(8):
             for block in chunk:
                 bImage.extend(block[numRow])
     bImage = np.array(bImage).reshape(self.row,self.col)
     bImage = np.uint8(bImage)
     img = cv2.merge((bImage,gImage,rImage))
     return img
Example #5
def identify_rivers(image):
    image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower_blue = np.array([90, 20, 38])
    upper_blue = np.array([110, 255, 255])
    rivers_mask = cv2.inRange(image_hsv, lower_blue, upper_blue)
    rivers_bgr = cv2.bitwise_and(image, image, mask=rivers_mask)
    rivers_gray = cv2.cvtColor(rivers_bgr, cv2.COLOR_BGR2GRAY)
    _, rivers_alpha = cv2.threshold(rivers_gray, 0, 255, cv2.THRESH_BINARY)
    rivers_b, rivers_g, rivers_r = cv2.split(rivers_bgr)
    rivers_bgra = [rivers_b, rivers_g, rivers_r, rivers_alpha]
    return cv2.merge(rivers_bgra, 4)
Example #6
def adapt_hist_equilization(img):

    #img = cv2.imread("test_001.jpg")
    R, G, B = cv2.split(img)  # note: cv2.split actually yields B, G, R, so these labels are swapped; re-merging in the same order keeps the layout intact

    output1_R = cv2.equalizeHist(R)
    output1_G = cv2.equalizeHist(G)
    output1_B = cv2.equalizeHist(B)

    equ = cv2.merge((output1_R, output1_G, output1_B))
    res = np.hstack((img, equ))  #stacking images side-by-side
    return res
Example #7
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed
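
A minimal way to exercise augment_hsv, assuming a placeholder image path of your own; since the function writes the result back into img via the dst argument, it is called for its side effect rather than a return value. The gain values below are only illustrative.

import cv2

img = cv2.imread("sample.jpg")  # placeholder path
assert img is not None, "could not read the image"
original = img.copy()           # keep an untouched copy for comparison
augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4)  # illustrative gains; img is modified in place
cv2.imwrite("sample_hsv_aug.jpg", img)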
Example #8
def remove_background(image):
    """
    Removes background from image
    """
    # Parameters.
    BLUR = 21
    CANNY_THRESH_1 = 10
    CANNY_THRESH_2 = 30
    MASK_DILATE_ITER = 10
    MASK_ERODE_ITER = 10
    MASK_COLOR = (0.0, 0.0, 1.0)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Edge detection.
    edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)
    edges = cv2.dilate(edges, None)
    edges = cv2.erode(edges, None)

    # Find contours in edges, sort by area
    contour_info = []
    contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)  # OpenCV 4.x signature; OpenCV 3.x also returns the modified image first

    for c in contours:
        contour_info.append((
            c,
            cv2.isContourConvex(c),
            cv2.contourArea(c),
        ))
    contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
    max_contour = contour_info[0]

    # Create empty mask.
    mask = np.zeros(edges.shape)
    cv2.fillConvexPoly(mask, max_contour[0], (255))

    # Smooth mask and blur it.
    mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER)
    mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER)
    mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0)
    mask_stack = np.dstack([mask] * 3)

    # Blend masked img into MASK_COLOR background
    mask_stack = mask_stack.astype('float32') / 255.0
    image = image.astype('float32') / 255.0

    masked = (mask_stack * image) + ((1 - mask_stack) * MASK_COLOR)
    masked = (masked * 255).astype('uint8')

    c_blue, c_green, c_red = cv2.split(image)  # image is BGR, so channels come out B, G, R
    img_a = cv2.merge((c_blue, c_green, c_red, mask.astype('float32') / 255.0))

    return img_a * 255
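
A short sketch of calling remove_background, with placeholder file names; the function returns a 4-channel float array scaled to 0-255, so it is cast to uint8 before saving as PNG to keep the alpha channel.

import cv2
import numpy as np

image = cv2.imread("input.jpg")                     # placeholder input
result = remove_background(image)                   # float BGRA-like array in the 0-255 range
cv2.imwrite("output.png", result.astype(np.uint8))  # PNG preserves the alpha channel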
Example #9
 def _clahe_rgb(rgb_array, clip_limit=2.0, tile_grid_size=(8, 8)):
     # convert RGB to LAB
     lab = cv2.cvtColor(rgb_array, cv2.COLOR_RGB2LAB)
     # apply clahe on LAB's L component.
     lab_planes = list(cv2.split(lab))  # list(), since newer OpenCV returns a tuple and item assignment below would fail
     clahe = cv2.createCLAHE(clipLimit=clip_limit,
                             tileGridSize=tile_grid_size)
     lab_planes[0] = clahe.apply(lab_planes[0])
     lab = cv2.merge(lab_planes)
     # map LAB back to RGB.
     rgb = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)
     return rgb
Example #10
 def pre_dispose(self):  # compute the average colour of one face (HSV / RGB)
         img = cv2.GaussianBlur(self.frame,(7,7),0)
         b,g,r = cv2.split(img)
         avgb = cv2.mean(b)[0]
         avgg = cv2.mean(g)[0]
         avgr = cv2.mean(r)[0]
         k = (avgb+avgg+avgr)/3
         kb = k/avgb
         kg = k/avgg
         kr = k/avgr
         b = cv2.addWeighted(src1=b, alpha=kb, src2=0, beta=0, gamma=0)
         g = cv2.addWeighted(src1=g, alpha=kg, src2=0, beta=0, gamma=0)
         r = cv2.addWeighted(src1=r, alpha=kr, src2=0, beta=0, gamma=0)
         img = cv2.merge([b,g,r])
         img_hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
         h,s,v = cv2.split(img_hsv)
         v = cv2.equalizeHist(v)
         img_hsv =  cv2.merge([h,s,v])
         self.img_hsv = img_hsv
         img =  cv2.cvtColor(img_hsv,cv2.COLOR_HSV2BGR)
         self.img_bgr = img
         self.frame = img
Example #11
def split_object_demo():  # cv.split(): split into three channels; cv.merge(): merge channels back
    capture = cv.VideoCapture(0)
    while True:
        ret, frame = capture.read()
        if ret == False:
            break
        b, g, r = cv.split(frame)
        cv.imshow("image", frame)
        cv.imshow("blue", b)
        cv.imshow("green", g)
        cv.imshow("red", r)
        if cv.waitKey(1) & 0xFF == ord('q'):  # waitKey(0) would block on every frame; press 'q' to quit instead
            break
    capture.release()
    cv.destroyAllWindows()
Example #12
 def _get_risk_map(self, seg_img, gaussian_sigma=25):
     if self.SIMULATE:
         image, _, _ = cv.split(seg_img)
     else:
         image = seg_img
     risk_array = image.astype("float32")
     for label in self.labels:
         risk_array = np.where(risk_array == self.labels[label],
                               risk_table[label], risk_array)  # np.where returns a new array; assign it back
     risk_array = gaussian_filter(risk_array, sigma=gaussian_sigma)
     risk_array = (risk_array / risk_array.max()) * 255
     risk_array = np.uint8(risk_array)
     return risk_array
Example #13
def get_fg_color_hists(fg):
    # returns normalized colour histograms for the foreground (pixels where alpha > 0)
    b, g, r, a = cv2.split(fg)  # OpenCV stores BGRA, so unpack in that order
    bData = numpy.extract(a > 0, b)
    gData = numpy.extract(a > 0, g)
    rData = numpy.extract(a > 0, r)
    fgHist = {}
    for chan, col in zip([rData, gData, bData], ['red', 'green', 'blue']):
        fgHist[col] = cv2.calcHist([chan], [0], None, [256], [0, 256])
        fgHist[col] /= fgHist[col].sum(
        )  # normalize to compare images of different sizes

    return fgHist
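
Because each histogram is normalised to sum to 1, foregrounds of different sizes can be compared directly, e.g. with cv2.compareHist. A sketch, where fg1 and fg2 are hypothetical BGRA foreground images produced elsewhere:

import cv2

hist1 = get_fg_color_hists(fg1)  # fg1, fg2: hypothetical 4-channel foregrounds
hist2 = get_fg_color_hists(fg2)
for col in ('red', 'green', 'blue'):
    score = cv2.compareHist(hist1[col], hist2[col], cv2.HISTCMP_CORREL)
    print(col, round(float(score), 3))  # 1.0 means identical colour distributions for that channel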
Example #14
def equalized(image: np.ndarray) -> np.ndarray:
    """Equalizes the image using CLAHE."""
    clahe = cv2.createCLAHE(clipLimit=4)

    if is_colored(image):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
        lab_planes = list(cv2.split(image))  # list(), since newer OpenCV returns a tuple
        lab_planes[0] = clahe.apply(lab_planes[0])
        image = cv2.merge(lab_planes)
        image = cv2.cvtColor(image, cv2.COLOR_LAB2BGR)
    else:
        image = clahe.apply(image)

    return image
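
The colour branch above can also be written as a standalone snippet, which is easier to test without the project's is_colored helper; this is only a sketch of the same CLAHE-on-L idea, with a placeholder file name.

import cv2

bgr = cv2.imread("photo.jpg")  # placeholder path
lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
clahe = cv2.createCLAHE(clipLimit=4)
l_eq = clahe.apply(l)          # equalise only the lightness channel
result = cv2.cvtColor(cv2.merge((l_eq, a, b)), cv2.COLOR_LAB2BGR)
cv2.imwrite("photo_clahe.jpg", result)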
Example #15
def ponerTransparente():
    src = cv2.imread(
        r'/home/juan-rios/Documentos/python/trackMove/sin_fondo/foto_sin_fondo.png',
        1)
    tmp = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    _, alpha = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY)
    b, g, r = cv2.split(src)
    rgba = [b, g, r, alpha]
    dst = cv2.merge(rgba, 4)
    size = 15
    chars = string.ascii_uppercase
    cv2.imwrite(
        r'/home/juan-rios/Documentos/python/trackMove/transparente/trasparente'
        + ''.join(random.choice(chars) for _ in range(size)) + ".png", dst)
Example #16
    def __init__(self, file, mode=cv2.IMREAD_COLOR):
        self.file = file
        try:
            self.image = self.loadImg(file,mode)
        except Exception as exc:
            print("Load image error!", exc)

        print(self.image.shape)
        #assert self.image != None #"Load image error!"
        
        """#change b g r channel order avoid color not correct when plot"""
        if mode == cv2.IMREAD_COLOR:  
            b,g,r = cv2.split(self.image)       # get b,g,r
            self.image = cv2.merge([r,g,b])     # switch it to rgb
Example #17
    def changeBrightness(self, img, value):
        """ This function will take an image (img) and the brightness
            value. It will perform the brightness change using OpenCv
            and after split, will merge the img and return it.
        """

        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(hsv)
        lim = 255 - value
        v[v > lim] = 255
        v[v <= lim] += value
        final_hsv = cv2.merge((h, s, v))
        img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
        return img
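
A usage sketch, assuming obj is an instance of whatever class defines the method above and the file name is a placeholder. Note that the V channel is uint8 here, so only positive brightness values behave as expected; a negative value would wrap around instead of darkening.

import cv2

img = cv2.imread("frame.jpg")             # placeholder path
brighter = obj.changeBrightness(img, 40)  # obj: hypothetical instance of the class above
cv2.imwrite("frame_bright.jpg", brighter)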
Example #18
 def histogram_equalization(img):
     channels = cv2.split(img)
     max_h = []
     channel_size = img.shape[0] * img.shape[1]
     for channel in channels:
         # # cv2.equalizeHist(chan) could be used instead,
         # # i.e. channel[:] = cv2.equalizeHist(channel)
         # # the histogram could also be computed with cv2.calcHist(); see its documentation for usage
         h_c = []
         for i in range(256):
             h_c.append(channel[channel == i].size)
         c_c = np.cumsum(h_c) / channel_size
         channel[:] = 255 * c_c[channel]
         max_h.append((np.argmax(h_c), np.max(h_c), np.max(h_c) / channel_size))
     return cv2.merge(channels), max_h
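
As the comments above point out, the manual cumulative-distribution computation can be replaced with cv2.equalizeHist applied per channel. A compact sketch of that equivalent, without the max_h statistics and with a placeholder file name:

import cv2

img = cv2.imread("scene.jpg")  # placeholder path
equalised = cv2.merge([cv2.equalizeHist(c) for c in cv2.split(img)])
cv2.imwrite("scene_eq.jpg", equalised)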
Example #19
def pic_superpose(path_img_logo, path_img_out, RadarStationName):
    filepath, shotname, extension = get_filePath_fileName_fileExt(
        path_img_logo)
    ymdhis = shotname[15:29]
    thisdate = datetime.datetime.strptime(ymdhis, "%Y%m%d%H%M%S")
    thisdateBJT = thisdate + datetime.timedelta(hours=8)
    thisdateBJT_str = thisdateBJT.strftime("%Y{y}%m{m}%d{d}%H{H}%M{M}").format(
        y='年', m='月', d='日', H='时', M='分')

    img = cv2.imread(basemap)  # read the background (base map) image
    co = cv2.imread(path_img_logo, -1)  # read the overlay image, keeping its alpha channel
    scr_channels = cv2.split(co)
    dstt_channels = cv2.split(img)
    b, g, r, a = cv2.split(co)
    for i in range(3):
        dstt_channels[i] = dstt_channels[i] * (255.0 - a) / 255
        dstt_channels[i] += np.array(scr_channels[i] * (a / 255),
                                     dtype=np.uint8)
    imgout = cv2.merge(dstt_channels)
    cv2.imwrite(path_img_out, imgout)

    img1 = Image.open(path_img_out)
    img2 = Image.open(sbfile)
    img1.paste(img2, (10, 550))
    img1.save(path_img_out)

    imgout = cv2.imread(path_img_out)
    imgout = putText(imgout, "贵州省" + thisdateBJT_str + "雷达回波拼图", 5, 5, 46)
    rds = "站点:"
    for rd in RadarStationName:
        if rd != "":
            rds = rds + rd.replace(" ", "") + ","
    imgout = putText(imgout, rds[:-1], 1050, 0, 26)
    cv2.imwrite(path_img_out, imgout)

    print("生成:" + path_img_out)
Example #20
    def RandomBrightness(self, bgr):
        '''
        Random brightness
        :param bgr:
        :return:
        '''
        if random.random() < 0.5:
            hsv = self.BGR2HSV(bgr)
            h, s, v = cv2.split(hsv)
            adjust = random.choice([0.5, 1.5])
            v = v * adjust
            v = np.clip(v, 0, 255).astype(hsv.dtype)
            hsv = cv2.merge((h, s, v))
            bgr = self.HSV2BGR(hsv)
        return bgr
Example #21
 def RandomSaturation(self, bgr):
     '''
     Random saturation
     :param bgr:
     :return:
     '''
     if random.random() < 0.5:
         hsv = self.BGR2HSV(bgr)
         h, s, v = cv2.split(hsv)
         adjust = random.choice([0.5, 1.5])
         s = s * adjust
         s = np.clip(s, 0, 255).astype(hsv.dtype)
         hsv = cv2.merge((h, s, v))
         bgr = self.HSV2BGR(hsv)
     return bgr
Example #22
    def _get_saturation_edge(self, img):
        '''
        Get image edges for saturation channel in HSV model.

        @img:
            np.array, input image.
        @return:
            np.array, edge image.
        '''
        hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
        _, s, _ = cv.split(hsv)
        s = cv.equalizeHist(s)
        s = cv.GaussianBlur(s, self._gassian_kernel_size, self._gassian_sigma)
        s = cv.Canny(s, max(self._canny_param), min(self._canny_param))
        return s
Example #23
def getSaturation(img):
    # split the image into its B, G, R components (OpenCV channel order)
    (B, G, R) = cv.split(img.astype("float"))
    # compute rg = R - G
    rg = np.absolute(R - G)
    # compute yb = 0.5 * (R + G) - B
    yb = np.absolute(0.5 * (R + G) - B)
    # compute the mean and standard deviation of both `rg` and `yb`
    (rbMean, rbStd) = (np.mean(rg), np.std(rg))
    (ybMean, ybStd) = (np.mean(yb), np.std(yb))
    # combine the mean and standard deviations
    stdRoot = np.sqrt((rbStd**2) + (ybStd**2))
    meanRoot = np.sqrt((rbMean**2) + (ybMean**2))
    # derive the "colorfulness" metric and return it
    return stdRoot + (0.3 * meanRoot)
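
Despite its name, getSaturation returns an opponent-colour "colorfulness" score (the standard deviation plus 0.3 times the mean of the rg/yb magnitudes), which appears to follow the Hasler-Süsstrunk metric: higher values mean a more colourful image. A quick sketch with a placeholder path:

import cv2 as cv

img = cv.imread("landscape.jpg")  # placeholder path
print("colourfulness:", round(getSaturation(img), 2))  # near 0 for a grey image, much higher for vivid scenes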
Example #24
 def RandomHue(self, bgr):
     '''
     Random hue
     :param bgr:
     :return:
     '''
     if random.random() < 0.5:
         hsv = self.BGR2HSV(bgr)
         h, s, v = cv2.split(hsv)
         adjust = random.choice([0.5, 1.5])
         h = h * adjust
         h = np.clip(h, 0, 255).astype(hsv.dtype)  # note: OpenCV's 8-bit hue channel ranges 0-179, so this clip is looser than the real H range
         hsv = cv2.merge((h, s, v))
         bgr = self.HSV2BGR(hsv)
     return bgr
Example #25
    def skinMask(self, roi):
        """YCrCb颜色空间的Cr分量+Otsu法阈值分割算法

        :param res: 输入原图像
        :return: 肤色滤波后图像
        """
        YCrCb = cv2.cvtColor(roi, cv2.COLOR_BGR2YCR_CB)  # 转换至YCrCb空间
        (y, cr, cb) = cv2.split(YCrCb)  # 拆分出Y,Cr,Cb值
        cr1 = cv2.GaussianBlur(cr, (5, 5), 0)
        _, skin = cv2.threshold(cr1, 0, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)  # Ostu处理
        res = cv2.bitwise_and(roi, roi, mask=skin)
        plt.figure(figsize=(10, 10))
        plt.subplot(1, 2, 1)
        plt.imshow(cv2.cvtColor(roi, cv2.COLOR_BGR2RGB))
        plt.xlabel(u'原图', fontsize=20)
        plt.subplot(1, 2, 2)
        plt.imshow(cv2.cvtColor(res, cv2.COLOR_BGR2RGB))
        plt.xlabel(u'肤色滤波后的图像', fontsize=20)
        plt.show()

        plt.figure(figsize=(10, 4))
        plt.subplot(1, 3, 1)
        hist1 = cv2.calcHist([roi], [0], None, [256], [0, 256])  # histogram via OpenCV
        plt.xlabel(u'opencv直方图', fontsize=20)
        plt.plot(hist1)
        plt.subplot(1, 3, 2)
        hist2 = np.bincount(roi.ravel(), minlength=256)  # histogram via numpy
        hist2, bins = np.histogram(roi.ravel(), 256,
                                   [0, 256])  # numpy histogram; ravel() flattens 2-D to 1-D
        plt.plot(hist2)
        plt.xlabel(u'np直方图', fontsize=20)
        plt.subplot(1, 3, 3)
        plt.hist(roi.ravel(), 256, [0, 256])  # matplotlib's built-in histogram
        plt.xlabel(u'matlab直方图', fontsize=20)
        plt.show()

        #     gray= cv2.cvtColor(roi,cv2.IMREAD_GRAYSCALE)
        #     equ = cv2.equalizeHist(gray)
        #     cv2.imshow('equalization', np.hstack((roi, equ)))  # show side by side
        #     cv2.waitKey(0)
        # adaptive equalization (CLAHE); parameters are optional
        #     plt.figure()
        #     clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        #     cl1 = clahe.apply(roi)
        #     plt.show()

        return res
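
Stripped of the plotting, the segmentation above boils down to a few lines: blur the Cr channel of the YCrCb image and let Otsu pick the threshold. A minimal sketch of just that step, with a placeholder file name:

import cv2

roi = cv2.imread("hand.jpg")  # placeholder path
ycrcb = cv2.cvtColor(roi, cv2.COLOR_BGR2YCR_CB)
_, cr, _ = cv2.split(ycrcb)
cr_blur = cv2.GaussianBlur(cr, (5, 5), 0)
_, skin = cv2.threshold(cr_blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
segmented = cv2.bitwise_and(roi, roi, mask=skin)
cv2.imwrite("hand_skin.jpg", segmented)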
Example #26
def BGRtoLalphabeta(img_in):
    split_src = cv2.split(img_in)
    L = 0.3811*split_src[2]+0.5783*split_src[1]+0.0402*split_src[0]
    M = 0.1967*split_src[2]+0.7244*split_src[1]+0.0782*split_src[0]
    S = 0.0241*split_src[2]+0.1288*split_src[1]+0.8444*split_src[0]

    L = np.where(L == 0.0, 1.0, L)
    M = np.where(M == 0.0, 1.0, M)
    S = np.where(S == 0.0, 1.0, S)

    _L = (1.0 / math.sqrt(3.0)) * ((1.0000 * np.log10(L)) + (1.0000 * np.log10(M)) + (1.0000 * np.log10(S)))
    Alph = (1.0 / math.sqrt(6.0)) * ((1.0000 * np.log10(L)) + (1.0000 * np.log10(M)) + (-2.0000 * np.log10(S)))
    Beta = (1.0 / math.sqrt(2.0)) * ((1.0000 * np.log10(L)) + (-1.0000 * np.log10(M)) + (-0.0000 * np.log10(S)))

    img_out = cv2.merge((_L, Alph, Beta))
    return img_out
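
The three weighted sums above are a BGR-to-LMS matrix followed by a log10 and a second fixed matrix (the decorrelating step of the classic colour-transfer formulation). The same transform can be written more compactly with two matrix products; the constants below are copied from the function above, and the helper name is just for illustration.

import numpy as np

# BGR -> LMS; columns are ordered B, G, R to match cv2.split / OpenCV images
_BGR2LMS = np.array([[0.0402, 0.5783, 0.3811],
                     [0.0782, 0.7244, 0.1967],
                     [0.8444, 0.1288, 0.0241]])
# log10(LMS) -> (l, alpha, beta)
_LMS2LAB = np.array([[1 / np.sqrt(3),  1 / np.sqrt(3),  1 / np.sqrt(3)],
                     [1 / np.sqrt(6),  1 / np.sqrt(6), -2 / np.sqrt(6)],
                     [1 / np.sqrt(2), -1 / np.sqrt(2),  0.0]])

def bgr_to_lalphabeta(img_in):
    lms = img_in.astype(np.float64) @ _BGR2LMS.T  # per-pixel 3x3 product
    lms = np.where(lms == 0.0, 1.0, lms)          # avoid log10(0), as in the original
    return np.log10(lms) @ _LMS2LAB.T             # channels come out as (l, alpha, beta)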
Example #27
def split(img: np.ndarray) -> [np.ndarray]:
    """
    Splits the image into its channels

    Args:
        img: image as numpy array

    Returns:
        [R, G, B]
    """
    if not is_colored(img):
        raise BaseException(
            "Should be a colored image. Got grayscale instead.")

    b, g, r = cv2.split(img)
    return [r, g, b]
Example #28
    def show_histogram(self):
        # hist = cv2.calcHist([self.image],
        #                     [0],  # channel to use
        #                     None,  # no mask
        #                     [256],  # histSize
        #                     [0.0,255.0])

        b, g, r = cv2.split(self.image)
        numbins = 256
        ranges = [0.0, 256.0]

        b_hist = cv2.calcHist([b], [0], None, [numbins], ranges)
        g_hist = cv2.calcHist([g], [0], None, [numbins], ranges)
        r_hist = cv2.calcHist([r], [0], None, [numbins], ranges)

        print(b_hist.shape)

        width = 256
        height = 256

        hist_image = np.zeros([height, width, 3], np.uint8)

        cv2.normalize(b_hist, b_hist, 0, height * 0.9, cv2.NORM_MINMAX)
        cv2.normalize(g_hist, g_hist, 0, height * 0.9, cv2.NORM_MINMAX)
        cv2.normalize(r_hist, r_hist, 0, height * 0.9, cv2.NORM_MINMAX)

        for i in range(1, numbins, 1):
            cv2.line(hist_image,
                     (i - 1, height - np.int32(np.around(b_hist[i - 1][0]))),
                     (i, height - np.int32(np.around(b_hist[i][0]))),
                     (255, 0, 0)
                     )
            cv2.line(hist_image,
                     (i - 1, height - np.int32(np.around(g_hist[i - 1][0]))),
                     (i, height - np.int32(np.around(g_hist[i][0]))),
                     (0, 255, 0)
                     )
            cv2.line(hist_image,
                     (i - 1, height - np.int32(np.around(r_hist[i - 1][0]))),
                     (i, height - np.int32(np.around(r_hist[i][0]))),
                     (0, 0, 255)
                     )

        # cv2.imshow("Histogram", hist_image)
        demo_utils.show_cvimage_to_label(hist_image, self.histogram_label)

        self.show_histogram2()
Example #29
def detect_saturation(img, vp, config):
    # get thresholded saturation
    img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    _, S, _ = cv2.split(img_hsv)
    kernel_size = config['roi'].getint('kernel_size')
    S = cv2.GaussianBlur(S, (kernel_size, kernel_size), 0)
    _, S_threshold = cv2.threshold(S, config['roi'].getint('saturation_thr'),
                                   255, cv2.THRESH_BINARY_INV)
    # mask out sky using vp
    height = img.shape[0]
    width = img.shape[1]
    vertices = np.array([[(0, vp[1]), (width, vp[1]), (width, height),
                          (0, height)]])
    mask = np.zeros_like(S_threshold)
    cv2.fillPoly(mask, vertices, 255)
    S_threshold_masked = cv2.bitwise_and(S_threshold, mask)
    return S_threshold_masked
Example #30
def whiteBalance(img):
    b, g, r = cv2.split(img)  # OpenCV returns channels in B, G, R order
    r_avg = cv2.mean(r)[0]
    g_avg = cv2.mean(g)[0]
    b_avg = cv2.mean(b)[0]

    k = (r_avg + g_avg + b_avg) / 3
    kr = k / r_avg
    kg = k / g_avg
    kb = k / b_avg

    r = cv2.addWeighted(src1=r, alpha=kr, src2=0, beta=0, gamma=0)
    g = cv2.addWeighted(src1=g, alpha=kg, src2=0, beta=0, gamma=0)
    b = cv2.addWeighted(src1=b, alpha=kb, src2=0, beta=0, gamma=0)

    balanceImg = cv2.merge([b, g, r])
    return balanceImg
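
whiteBalance implements a grey-world white balance: each channel is scaled so its mean moves to the average of the three channel means. The same idea in plain NumPy, as a standalone sketch with a placeholder file name:

import cv2
import numpy as np

img = cv2.imread("indoor.jpg").astype(np.float32)  # placeholder path
b, g, r = cv2.split(img)
k = (b.mean() + g.mean() + r.mean()) / 3           # grey-world target mean
balanced = cv2.merge([b * (k / b.mean()), g * (k / g.mean()), r * (k / r.mean())])
balanced = np.clip(balanced, 0, 255).astype(np.uint8)
cv2.imwrite("indoor_wb.jpg", balanced)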