import cv2
import numpy as np


def clearImage(image):

    channels = cv2.split(image)

    # Record the maximum value of each channel
    # (used below as the atmospheric light estimate)
    a_max_dst = [float("-inf")] * len(channels)
    for idx in range(len(channels)):
        a_max_dst[idx] = channels[idx].max()

    # Dark channel: per-pixel minimum over the three channels
    dark_image = cv2.min(channels[0], cv2.min(channels[1], channels[2]))

    # Gaussian filtering the dark channel
    dark_image = cv2.GaussianBlur(dark_image, (25, 25), 0)

    image_t = (255. - 0.95 * dark_image) / 255.
    image_t = cv2.max(image_t, 0.5)

    # Calculate t(x) and get the clear image
    for idx in range(len(channels)):
        channels[idx] = cv2.max(
            cv2.add(
                cv2.subtract(channels[idx].astype(np.float32),
                             int(a_max_dst[idx])) / image_t,
                int(a_max_dst[idx])), 0.0) / int(a_max_dst[idx]) * 255
        channels[idx] = channels[idx].astype(np.uint8)

    return cv2.merge(channels)
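A minimal usage sketch for the routine above (the file names are placeholder assumptions, not from the source):

hazy = cv2.imread("hazy.jpg")  # any BGR uint8 image
if hazy is not None:
    cv2.imwrite("dehazed.jpg", clearImage(hazy))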
Example #2
def recolorRGV(src, dst):
    """
    BGRからRGV(赤、緑、値)への変換をシミュレートする
    コードの内容:
    dst.b = min(src.b, src.g, src.r)
    dst.g = src.g
    dst.r = src.r
    :param src: BGR形式の入力画像
    :param dst: BGR形式の出力画像
    :return: None
    """
    b, g, r = cv2.split(src)
    cv2.min(b, g, b)
    cv2.min(b, r, b)
    # Python: cv2.min(src1, src2[, dst]) → dst
    # Python: cv.Min(src1, src2, dst) → None
    # Python: cv.MinS(src, value, dst) → None
    # Parameters:
    # src1 – first input array.
    # src2 – second input array of the same size and type as src1.
    # value – real scalar value.
    # dst – output array of the same size and type as src1.
    # The functions min calculate the per-element minimum of two arrays:
    #     dst = min(src1, src2)
    # or array and a scalar:
    #     dst = min(src1, value)
    # In the second variant, when the input array is multi-channel, each channel is compared with value independently.
    cv2.merge((b, g, r), dst)
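The two cv2.min variants described in the comment above can be checked with a small sketch (arrays invented for illustration; assumes the usual import cv2 and import numpy as np):

a = np.array([[1, 7], [5, 3]], np.uint8)
b = np.array([[4, 2], [6, 0]], np.uint8)
print(cv2.min(a, b))  # array/array -> [[1 2] [5 0]]
print(cv2.min(a, 4))  # array/scalar -> [[1 4] [4 3]]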
Example #3
def DarkChannel(im, sz):
    b, g, r = cv2.split(im)  # split into B, G, R channels
    dc = cv2.min(cv2.min(r, g), b)  # per-pixel minimum of the three channels is the dark channel
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (sz, sz))
    dark = cv2.erode(dc, kernel)  # erode the dark channel (patch-wise minimum)
    # dark = dc
    return dark
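A usage sketch for the function above (the input path is a placeholder assumption; a 15x15 patch is the size commonly used with He et al.'s dark channel prior, which several of these examples implement):

img = cv2.imread("scene.jpg")  # hypothetical path, BGR uint8
dark = DarkChannel(img, 15)    # dark channel over 15x15 patches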
Example #4
def DarkChannel(im):
    b, g, r = cv.split(im)
    dc = cv.min(cv.min(r, g), b)
    kernel = cv.getStructuringElement(cv.MORPH_RECT,
                                      (im.shape[0], im.shape[1]))
    dark = cv.erode(dc, kernel)
    return dark
Example #5
def clearImage(frame):

    channels = cv.split(frame)
    # Get the maximum value of each channel
    # and get the dark channel of each image
    # record the maximum value of each channel
    a_max_dst = [float("-inf")] * len(channels)
    for idx in range(len(channels)):
        a_max_dst[idx] = channels[idx].max()

    dark_image = cv.min(channels[0], cv.min(channels[1], channels[2]))

    # Gaussian filtering the dark channel
    dark_image = cv.GaussianBlur(dark_image, (25, 25), 0)

    image_t = (255.0 - 0.95 * dark_image) / 255.0
    image_t = cv.max(image_t, 0.5)

    # Calculate t(x) and get the clear image
    for idx in range(len(channels)):
        channels[idx] = (cv.max(
            cv.add(
                cv.subtract(channels[idx].astype(np.float32),
                            int(a_max_dst[idx])) / image_t,
                int(a_max_dst[idx]),
            ),
            0.0,
        ) / int(a_max_dst[idx]) * 255)
        channels[idx] = channels[idx].astype(np.uint8)

    return cv.merge(channels)
Example #6
def _remove_table_lines_on_image(gray_np_image, horizontal_lines,
                                 vertical_lines):
    """Принимает на вход картинку и маску таблицы.
    Размалёвывает плюсы
    """
    table_mask = cv2.min(
        horizontal_lines,
        vertical_lines)  # Горизонтальные и вертикальные линии вместе
    table_mask[table_mask > TABLE_LINE_BLACK_THRESHOLD] = 255
    table_mask[table_mask <= TABLE_LINE_BLACK_THRESHOLD] = 0
    img_no_lines = (gray_np_image | ~table_mask
                    )  # Убрали из изображения сами линии
    # Замажем чёрным всё, в окрестности чего много точек.
    # Почти все плюсы превратятся в "жирные" кляксы
    # TODO: Здесь мутные константы, которые я подбирал руками для наших кондуитов. Это — треш
    img_no_lines = ~cv2.adaptiveThreshold(
        img_no_lines, DEFAULT_IMAGE_BW_THRESHOLD, cv2.ADAPTIVE_THRESH_MEAN_C,
        cv2.THRESH_BINARY, 27, -9)
    # Вернём наместро сами плюсы
    img_no_lines = cv2.min(img_no_lines, gray_np_image)
    # Очистим точки в границах таблицы, чтобы не мешались
    img_no_lines |= ~table_mask
    if DEBUG:
        cv2.imwrite("_plus.png", img_no_lines)
        cv2.imwrite("_table_mask.png", table_mask)
    return img_no_lines
Example #7
def DarkChannel(im, sz):
    b, g, r = cv2.split(im)
    dc = cv2.min(cv2.min(r, g), b)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (sz, sz))
    dark = cv2.erode(dc, kernel)

    return dark
Example #8
def DCP(im, sz):
    b, g, r = cv.split(im)  # split image into b, g, r channels
    dc = cv.min(cv.min(r, g), b)  # dark channel is the min of r, g, b
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (sz, sz))  # do this for all patches
    dcp = cv.erode(dc, kernel)  # erode takes the minimum over each patch
    # print(dcp.shape)
    return dcp
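The claim in the comment that erosion takes the minimum over each patch can be verified with a small check (random data, invented for illustration; assumes import cv2 as cv and import numpy as np):

a = (np.random.rand(8, 8) * 255).astype(np.uint8)
k = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
eroded = cv.erode(a, k)
# an interior pixel of the eroded image equals the minimum of the 3x3 window around it
assert eroded[4, 4] == a[3:6, 3:6].min()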
Example #9
def thresholdChannels(img,thresh):
	mergeAux=[]
	for channel in cv2.split(img):
		mergeAux.append(cv2.threshold(channel,thresh,255,cv2.THRESH_BINARY)[1])
	aux = cv2.min(mergeAux[0],mergeAux[1])
	aux = cv2.min(aux,mergeAux[2])
	return aux
Example #10
 def __find_edges(self, img, intensity_values=25):
     '''
     Adapted edge detector from Pupil Labs
     '''
     hist = np.bincount(img.ravel(), minlength=256)
     lowest_spike_index = 255
     highest_spike_index = 0
     max_intensity = 0
     found_section = False
     for i in range(len(hist)):
         intensity = hist[i]
         if intensity > intensity_values:
             max_intensity = np.maximum(intensity, max_intensity)
             lowest_spike_index = np.minimum(lowest_spike_index, i)
             highest_spike_index = np.maximum(highest_spike_index, i)
             found_section = True
     if not found_section:
         lowest_spike_index = 200
         highest_spike_index = 255
     bin_img = cv2.inRange(
         img, np.array(0),
         np.array(lowest_spike_index + self.intensity_range))
     kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
     bin_img = cv2.dilate(bin_img, kernel, iterations=2)
     spec_mask = cv2.inRange(img, np.array(0),
                             np.array(highest_spike_index - 5))
     spec_mask = cv2.erode(spec_mask, kernel)
     kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))
     img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
     edges = cv2.Canny(img, 160, 160 * 2, apertureSize=5)
     edges = cv2.min(edges, spec_mask)
     edges = cv2.min(edges, bin_img)
     return edges
Example #11
def peer(img):
    b, g, r = cv2.split(img)
    ret, m1 = cv2.threshold(r, 95, 255, cv2.THRESH_BINARY)
    ret, m2 = cv2.threshold(g, 30, 255, cv2.THRESH_BINARY)
    ret, m3 = cv2.threshold(b, 20, 255, cv2.THRESH_BINARY)
    mmax = cv2.max(r, cv2.max(g, b))
    mmin = cv2.min(r, cv2.min(g, b))

    ret, m4 = cv2.threshold(mmax - mmin, 15, 255, cv2.THRESH_BINARY)
    ret, m5 = cv2.threshold(cv2.absdiff(r, g), 15, 255, cv2.THRESH_BINARY)
    m6 = cv2.compare(r, g, cv2.CMP_GE)
    m7 = cv2.compare(r, b, cv2.CMP_GE)
    mask = m1 & m2 & m3 & m6 & m4 & m5 & m7
    cv2.imshow("b", b)
    cv2.imshow("g", g)
    cv2.imshow("r", r)
    cv2.imshow('r_thre', m1)
    cv2.imshow('g_thre',m2)
    cv2.imshow('b_thre',m3)
    cv2.imshow('max-min',m4)
    cv2.imshow('absdiff',m5)
    cv2.imshow('r_g',m6)
    cv2.imshow('r_b',m7)
    cv2.imshow('res',mask)
    return mask
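The name and thresholds closely match the Peer et al. RGB skin-colour rule; a usage sketch (the input path is a placeholder assumption):

frame = cv2.imread("face.jpg")  # hypothetical input image
skin = peer(frame)
cv2.waitKey(0)  # the imshow windows above stay open until a key is pressed
cv2.destroyAllWindows()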
Example #12
def multiply_3x3_mat(src, mat):
    """RGBの各ピクセルに対して3x3の行列演算を行う"""

    # 正規化用の係数を調査
    normalize_val = (2**(8 * src.itemsize)) - 1

    # 0 .. 1 に正規化して RGB分離
    b, g, r = np.dsplit(src / normalize_val, 3)

    # 行列計算
    ret_r = r * mat[0][0] + g * mat[0][1] + b * mat[0][2]
    ret_g = r * mat[1][0] + g * mat[1][1] + b * mat[1][2]
    ret_b = r * mat[2][0] + g * mat[2][1] + b * mat[2][2]

    # オーバーフロー確認(実は Matrixの係数を調整しているので不要)
    ret_r = cv2.min(ret_r, 1.0)
    ret_g = cv2.min(ret_g, 1.0)
    ret_b = cv2.min(ret_b, 1.0)

    # アンダーフロー確認(実は Matrixの係数を調整しているので不要)
    ret_r = cv2.max(ret_r, 0.0)
    ret_g = cv2.max(ret_g, 0.0)
    ret_b = cv2.max(ret_b, 0.0)

    # RGB結合
    ret_mat = np.dstack((ret_b, ret_g, ret_r))

    # 0 .. 255 に正規化
    ret_mat *= normalize_val

    return np.uint8(ret_mat)
Example #13
def detect_black_white_blobs(img, s_low=50, s_high=200, v_low=20, v_high=130):
    blurred = cv2.GaussianBlur(img, (5,5), 5)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    # low sat: grey shades (white not included)
    ret_val, low_sat_mask = cv2.threshold(s, s_low, 255, cv2.THRESH_BINARY_INV)
    # high sat
    ret_val, high_sat_mask = cv2.threshold(v, s_high, 255, cv2.THRESH_BINARY)
    # low val: dark/black shades
    ret_val, low_val_mask = cv2.threshold(v, v_low, 255, cv2.THRESH_BINARY_INV)
    # high val: includes white
    ret_val, high_val_mask = cv2.threshold(v, v_high, 255, cv2.THRESH_BINARY)
    grey_or_dark = cv2.min(high_sat_mask, low_val_mask) # grey or dark
    grey_and_bright = cv2.min(low_sat_mask, high_val_mask) # grey and bright (white)
    black_or_white = cv2.max(grey_or_dark, grey_and_bright)
    mask = img.copy()
    mask = cv2.merge([grey_or_dark, low_sat_mask, high_val_mask], mask)
    # Debug code here:
    collage = glue_2x2(*(map(half_size, (low_sat_mask, high_sat_mask, low_val_mask, high_val_mask))))
    collage = cv2.cvtColor(collage, cv2.COLOR_GRAY2BGR)
    height, width = h.shape
    put_text(collage, "Low sat", (0, 20))
    put_text(collage, "High sat", (width // 2, 20))
    put_text(collage, "Low val", (0, 20 + height // 2))
    put_text(collage, "High val", (width // 2, 20 + height // 2))
    return collage
Example #14
def dark_channel(img, tamanho=15):
    r, g, b = cv2.split(img)  # split the image
    min_img = cv2.min(r, cv2.min(g, b))  # per-pixel minimum
    kernel = cv2.getStructuringElement(
        cv2.MORPH_RECT, (tamanho, tamanho))  # rectangular structuring element
    dc_img = cv2.erode(min_img, kernel)  # erode the image
    return dc_img
Example #15
def findColorMarks(img,mask):
	cuttedImg = cv2.merge([cv2.min(mask,layer) for layer in cv2.split(img)])
	print('-FINDSPOT-findColorMarks')
	cuttedImgBP = getHistRGB(cuttedImg,mask)
	cuttedImgBP = cv2.min(cuttedImgBP,mask)
	#return cv2.merge([cuttedImgBP,]*3)
	return identifyLevels(cuttedImgBP)
Example #16
def darkChannel(image):
    size = 15
    b,g,r = cv2.split(image)
    dc = cv2.min(cv2.min(r, g), b)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (size, size))
    dark = cv2.erode(dc, kernel)
    return dark
Example #17
def dark_channl(frames, location_list, block_size):
    r, g, b = cv2.split(frames[-1])
    min_img = cv2.min(r, cv2.min(g, b))
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
    dc_img = cv2.erode(min_img, kernel)

    ret, thresh1 = cv2.threshold(dc_img, 170, 255, cv2.THRESH_BINARY)

    cv2.imshow("dark_channl", thresh1)

    int_diff = cv2.integral(thresh1)
    # This is a key parameter. Change this value can control motion_block number.
    # threshold = block_size * block_size / 2
    # threshold = 400
    result = list()
    for pt in iter(location_list):
        xx, yy, _bz, _bz = pt
        t11 = int_diff[xx, yy]
        t22 = int_diff[xx + block_size, yy + block_size]
        t12 = int_diff[xx, yy + block_size]
        t21 = int_diff[xx + block_size, yy]
        block_diff = t11 + t22 - t12 - t21
        if block_diff > 0:
            result.append((xx, yy, block_size, block_size))
    return result
Example #18
def Dark_channel(img, r):
    win_size = 2 * r + 1
    B, G, R = cv2.split(img)
    temp = cv2.min(cv2.min(B, G), R)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (win_size, win_size))
    dark = cv2.erode(temp, kernel)
    return dark
Example #20
def DarkChannel_Norm(image, size):
    # normalized input
    b, g, r = cv2.split(image)
    dc = cv2.min(cv2.min(r,g), b)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (size,size))
    # Erode: only for normalized pics
    darkch = cv2.erode(dc, kernel)
    return darkch
Example #21
 def computeSigmaMin(self):
     image0 = self.image[..., 0]
     image1 = self.image[..., 1]
     image2 = self.image[..., 2]
     # epsilon=0.000000000000001
     self.sigmaMin = cv2.min(cv2.min(image0, image1), image2)
     sumImage = image0 + image1 + image2
     self.sigmaMin = self.sigmaMin / (sumImage + self.epsilon)
Example #22
def BGR_2_HSV_(img):

    h = np.zeros((img.shape[0], img.shape[1]), np.float32)
    s = np.zeros((img.shape[0], img.shape[1]), np.float32)
    v = np.zeros((img.shape[0], img.shape[1]), np.float32)

    img_r = img.copy()
    b = img[:, :, 0]
    g = img[:, :, 1]
    r = img[:, :, 2]
    max_v = cv.max(cv.max(b, g), r)
    min_v = cv.min(cv.min(b, g), r)
    delta = max_v - min_v
    v = max_v
    zero_m = (max_v == 0.)
    zero_m = zero_m.astype(np.float32)
    nonzeor_m = (max_v != 0.)
    nonzeor_m = nonzeor_m.astype(np.float32)

    exp_m = zero_m * 10e-8

    s = delta / (max_v + exp_m)
    s = s * nonzeor_m

    rmax = (r == max_v)
    rmax = rmax.astype(np.float32)

    gmax = (g == max_v)
    gr = (g != r)
    gmax = gmax.astype(np.float32)
    gr = gr.astype(np.float32)
    gmax = gmax * gr

    bmax = (b == max_v)
    br = (b != r)
    bg = (b != g)
    bmax = bmax.astype(np.float32)
    br = br.astype(np.float32)
    bg = bg.astype(np.float32)
    bmax = bmax * br * bg

    h += ((g - b) / (delta + 10e-8)) * rmax
    h += (((b - r) / (delta + 10e-8)) + 2.) * gmax
    h += (((r - g) / (delta + 10e-8)) + 4.) * bmax

    h = h * (np.pi / 3.)

    neg_m = (h < 0.0)
    neg_m = neg_m.astype(np.float32)
    h += neg_m * (2. * np.pi)

    h = h / (2. * np.pi)

    img_r[:, :, 0] = h
    img_r[:, :, 1] = s
    img_r[:, :, 2] = v

    return img_r
Example #23
def recolor_rgv(src, dst):
    """Simulate conversion from BGR to RGV (red, green, value).

       (b, g, r) -> ((min(b, g, r), g, r)
    """
    b, g, r = cv2.split(src)
    cv2.min(b, g, b)  # b = min(g, b)
    cv2.min(b, r, b)
    cv2.merge((b, g, r), dst)
Example #24
def find_darkchannel(image, patch_win_size):

    patch_win_size = int(patch_win_size)
    b,g,r = cv2.split(image)
    dc = cv2.min(cv2.min(r,g),b)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(patch_win_size,patch_win_size))
    dark_channel = cv2.erode(dc,kernel)

    return dark_channel
Example #25
def split_chan(im, sz):
    b, g, r = cv2.split(im)
    bright = cv2.max(cv2.max(r, g), b)
    # dark = cv2.min(cv2.min(r, g), b)
    dc = cv2.min(cv2.min(r, g), b)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (sz, sz))
    dark = cv2.erode(dc, kernel)
    cv2.imwrite('dark.png', dark)
    return dark, bright, r, g, b
Example #26
def compute_dark_channel(img, radius=7):
    h, w = img.shape[:2]
    window_size = 2 * radius + 1
    b, g, r = cv2.split(img)
    bgr_min_img = cv2.min(cv2.min(b, g), r)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,
                                       (window_size, window_size))
    dark_channel_img = cv2.erode(bgr_min_img, kernel)
    return dark_channel_img
Example #27
def dark_channel(img, size=15):
    r, g, b = cv2.split(img)
    min_img = cv2.min(r, cv2.min(g, b))
    # The first argument selects the kernel shape; three are available:
    # rectangle: MORPH_RECT, cross: MORPH_CROSS, ellipse: MORPH_ELLIPSE
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (size, size))
    # gives a size*size window
    dc_img = cv2.erode(min_img, kernel)
    return dc_img
Example #28
 def toColoursRGV(self):
     
     if not self._isColour:
         return None
     else:
         b, g, r = cv2.split(self._image)
         cv2.min(b, g, b)
         cv2.min(b, r, b)
         return OpenCvImage(cv2.merge((b, g, r)))
Example #30
    def saliency_map(self, u1, d1, u1_dimensions):

        # Produce S - step 3 of the algorithm defined in
        # [Katramados / Breckon 2011]

        if self.multi_layer_map:

            # Initial MiR Matrix M0
            height, width = u1_dimensions
            mir = np.ones((height, width))

            # Convert pixels to 32-bit floats
            mir = mir.astype(np.float32)

            # Use T-API for hardware acceleration
            mir = cv2.UMat(mir)

            for layer in range(self.pyramid_height):

                # corresponding pyramid layers are in same index pos.
                un = self.u_layers[layer]
                dn = self.d_layers[layer]

                # scale layers to original dimensions
                un_scaled = cv2.resize(un, (width, height))
                dn_scaled = cv2.resize(dn, (width, height))

                # Calculate Minimum Ratio (MiR) Matrix
                matrix_ratio = cv2.divide(un_scaled, dn_scaled)
                matrix_ratio_inv = cv2.divide(dn_scaled, un_scaled)

                # Calculate pixelwise min
                pixelwise_min = cv2.min(matrix_ratio, matrix_ratio_inv)
                mir_n = cv2.multiply(pixelwise_min, mir)
                mir = mir_n

        else:

            # Check if u1 & d1 are the same size
            # (possible discrepancies from fractional height/width
            # when creating pyramids)

            # resize d1 to u1
            d1 = cv2.resize(d1, (u1_dimensions[1], u1_dimensions[0]))

            # Calculate Minimum Ratio (MiR) Matrix
            matrix_ratio = cv2.divide(u1, d1)
            matrix_ratio_inv = cv2.divide(d1, u1)

            # Calculate pixelwise min
            mir = cv2.min(matrix_ratio, matrix_ratio_inv)

        # Derive salience by subtracting from scalar 1
        s = cv2.subtract(1.0, mir)

        return s
Example #31
def dark_channel(img, size=15):
    r, g, b = cv2.split(img)
    # split the RGB image into its three channels
    min_img = cv2.min(r, cv2.min(g, b))
    # compute the dark channel
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (size, size))
    # define a size*size rectangle (MORPH_RECT)
    dc_img = cv2.erode(min_img, kernel)
    # erode with the structuring element: the element defines the neighbourhood,
    # and each pixel is replaced by the minimum over that neighbourhood
    return dc_img
Example #32
def compositeThreshold(gray, mode='com'):
    if mode == 'otsu':
        otsu = threshold_otsu(gray)
        otsu_bin = gray > otsu
        otsu_bin = otsu_bin.astype(np.uint8) * 255
        return otsu_bin
    elif mode == 'yen':
        yen = threshold_yen(gray)
        yen_bin = gray > yen
        yen_bin = yen_bin.astype(np.uint8) * 255
        return yen_bin
    elif mode == 'li':
        li = threshold_li(gray)
        li_bin = gray > li
        li_bin = li_bin.astype(np.uint8) * 255
        return li_bin
    elif mode == 'niblack':
        niblack = threshold_niblack(gray, window_size=13, k=0.8)
        niblack_bin = gray > niblack
        niblack_bin = niblack_bin.astype(np.uint8) * 255
        return niblack_bin
    elif mode == 'sauvola':
        sauvola = threshold_sauvola(gray, window_size=13)
        sauvola_bin = gray > sauvola
        sauvola_bin = sauvola_bin.astype(np.uint8) * 255
        return sauvola_bin
    elif mode == 'com':
        li = threshold_li(gray)
        li_bin = gray > li
        li_bin = li_bin.astype(np.uint8) * 255
        otsu = threshold_otsu(gray)
        otsu_bin = gray > otsu
        otsu_bin = otsu_bin.astype(np.uint8) * 255
        yen = threshold_yen(gray)
        yen_bin = gray > yen
        yen_bin = yen_bin.astype(np.uint8) * 255
        return cv2.min(cv2.min(otsu_bin, li_bin), yen_bin)
    elif mode == "niblack-multi":
        thr = np.zeros((gray.shape), dtype=np.uint8)
        thr[thr >= 0] = 255
        for k in np.linspace(-0.8, 0.2, 5):  #(-1.8,0.2,5)
            thresh_niblack = threshold_niblack(gray, window_size=25, k=k)
            binary_niblack = gray > thresh_niblack
            binary_niblack = binary_niblack.astype(np.uint8) * 255
            showResult("binary_niblack", binary_niblack)
            thr = cv2.min(thr, binary_niblack)
        return thr
    else:
        sauvola = threshold_sauvola(gray, window_size=25, k=0.25)
        sauvola_bin = gray > sauvola
        sauvola_bin = sauvola_bin.astype(np.uint8) * 255
        niblack = threshold_niblack(gray, window_size=25, k=0.25)
        niblack_bin = gray > niblack
        niblack_bin = niblack_bin.astype(np.uint8) * 255
        return cv2.max(sauvola_bin, niblack_bin)
Example #33
    def __getitem__(self, index):

        img_path, fix_path, map_path = self.imgs[index]

        img = self.loader(img_path)
        w, h = img.size
        fixpts = self.mat_loader(fix_path, (w, h))
        smap = self.map_loader(map_path)

        fixmap = self.pts2pil(fixpts, img)

        if self.train:
            if random.random() > 0.5:
                #img = img.transpose(Image.FLIP_LEFT_RIGHT)
                #smap = smap.transpose(Image.FLIP_LEFT_RIGHT)
                #fixmap = fixmap.transpose(Image.FLIP_LEFT_RIGHT)
                img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)
                smap = smap.transpose(PIL.Image.FLIP_LEFT_RIGHT)
                fixmap = fixmap.transpose(PIL.Image.FLIP_LEFT_RIGHT)

        if self.transform is not None:
            openImg = cv2.cvtColor(numpy.array(img), cv2.COLOR_RGB2BGR)
            b2, g2, r2 = cv2.split(openImg)
            minValue = cv2.min(cv2.min(r2, g2), b2)
            img = self.transform(img)
            r2 = self.transform(r2)
            g2 = self.transform(g2)
            b2 = self.transform(b2)
            minValue = self.transform(minValue)
            newImg = torch.cat((img, r2, g2, b2, minValue), 0)
            smap = self.transform(smap)
            fixmap = self.transform(fixmap)
            '''
            img = self.transform(img)
            r = self.transform(r)
            g = self.transform(g)
            b = self.transform(b)
            minData = []
            minData.append([])
            for i in range(480): 
                minData[0].append([])
                for j in range(640):
                   minData[0][i].append(min(r[0][i][j],g[0][i][j],b[0][i][j]).item())
            minData = torch.tensor(minData)
            newImg = torch.cat((img,r,g,b, minData),0)
            smap = self.transform(smap)
            fixmap = self.transform(fixmap)
        return newImg, fixmap, smap
        
        
        if self.transform is not None:
            img = self.transform(img)
            smap = self.transform(smap)
            fixmap = self.transform(fixmap)'''
        return newImg, fixmap, smap
Example #34
    def rgbfilter_gray(self, image, rgbthreshold):
        b,g,r = cv2.split(image)
        rd = rgbthreshold
        min1 = cv2.min(b,g)
        min1 = cv2.min(min1,r)
        max1 = cv2.max(b,g)
        max1 = cv2.max(max1,r)

        diff = cv2.absdiff(max1,min1)
        res = cv2.compare(diff,rd,cv2.CMP_LT)
        return res
Example #35
def rgb2gray(image):
    """
    Args:
        image (np.ndarray):

    Returns:
        np.ndarray:
    """
    r, g, b = cv2.split(image)
    return cv2.add(cv2.multiply(cv2.max(cv2.max(r, g), b), 0.5),
                   cv2.multiply(cv2.min(cv2.min(r, g), b), 0.5))
Example #36
def rgb2gray(image):
    """
    Args:
        image (np.ndarray): Shape (height, width, channel)

    Returns:
        np.ndarray: Shape (height, width)
    """
    r, g, b = cv2.split(image)
    return cv2.add(cv2.multiply(cv2.max(cv2.max(r, g), b), 0.5),
                   cv2.multiply(cv2.min(cv2.min(r, g), b), 0.5))
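This is the HSL "lightness" grey, (max + min) / 2, rather than a luminosity-weighted grey; a quick check on random data (invented for illustration):

img = (np.random.rand(4, 4, 3) * 255).astype(np.uint8)
gray = rgb2gray(img)
# agrees with (img.max(axis=2) / 2 + img.min(axis=2) / 2) up to uint8 rounding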
Example #37
def get_cmyk(r, g, b):
    def complement(ch):
        inv_ch = MAX - ch
        inv_k = MAX - k
        return ((inv_ch - k) / inv_k) * MAX

    k = 0.01 + cv2.min(cv2.min(MAX - r, MAX - g), MAX - b) * 0.99
    c = complement(r)
    m = complement(g)
    y = complement(b)
    return c, m, y, k
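MAX is defined elsewhere in the source module; a minimal usage sketch assuming 8-bit channels (MAX = 255.0 is an assumption, as is the input path):

MAX = 255.0  # assumed full-scale value for 8-bit channels
b, g, r = cv2.split(cv2.imread("photo.jpg").astype(np.float32))
c, m, y, k = get_cmyk(r, g, b)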
Example #38
	def getColorMask(self, colorHueMin, colorHueMax):
		if self.mask1 is None or self.mask1.shape != self.hueImg.shape:
			self.mask1 = numpy.zeros(self.hueImg.shape, numpy.uint8)
		if self.mask2 is None or self.mask2.shape != self.hueImg.shape:
			self.mask2 = numpy.zeros(self.hueImg.shape, numpy.uint8)
		if self.mask is None or self.mask.shape != self.hueImg.shape:
			self.mask = numpy.zeros(self.hueImg.shape, numpy.uint8)
		cv2.threshold(self.hueImg, colorHueMin, 255, cv2.THRESH_BINARY_INV, self.mask1)
		cv2.threshold(self.hueImg, colorHueMax, 255, cv2.THRESH_BINARY, self.mask2)
		if colorHueMin > colorHueMax: #hueImg wraps around; in this case we want the AND of the two masks
			cv2.min(self.mask1, self.mask2, self.mask)
		else: #min < max --> we want the OR of the two masks
			cv2.max(self.mask1, self.mask2, self.mask)
Example #39
def genColorMask(hue, colorHueMin, colorHueMax, colorMask=None, colorMask1=None, colorMask2=None):
	if colorMask1 is None or colorMask1.shape != hue.shape:
		colorMask1 = numpy.zeros(hue.shape, numpy.uint8)
	if colorMask2 is None or colorMask2.shape != hue.shape:
		colorMask2 = numpy.zeros(hue.shape, numpy.uint8)
	if colorMask is None or colorMask.shape != hue.shape:
		colorMask = numpy.zeros(hue.shape, numpy.uint8)
	cv2.threshold(hue, colorHueMin, 255, cv2.THRESH_BINARY_INV, colorMask1)
	cv2.threshold(hue, colorHueMax, 255, cv2.THRESH_BINARY, colorMask2)
	if colorHueMin > colorHueMax: #hue wraps around; in this case we want the AND of the two masks
		cv2.min(colorMask1, colorMask2, colorMask)
	else: #min < max --> we want the OR of the two masks
		cv2.max(colorMask1, colorMask2, colorMask)
	return (colorMask, colorMask1, colorMask2)
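On 0/255 masks, cv2.min behaves as a pixelwise AND and cv2.max as a pixelwise OR, which is why the wrap-around hue case above uses min; a tiny sketch (values invented):

m1 = numpy.array([[0, 0, 255, 255]], numpy.uint8)
m2 = numpy.array([[0, 255, 0, 255]], numpy.uint8)
print(cv2.min(m1, m2))  # [[  0   0   0 255]]  (AND)
print(cv2.max(m1, m2))  # [[  0 255 255 255]]  (OR)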
Example #40
def recolorRGV(src, dst):
    """ Simulate conversion from BGR to RGV (red, green, value)

    The source and destination images must both be in BGR format

    Blues are desaturated
    dst.b = min(src.b, src.g, src.r)
    dst.g = src.g
    dst.r = src.r

    """

    b,g,r = cv2.split(src)
    cv2.min(b,g,b)
    cv2.min(b,r,b)
    cv2.merge((b,g,r),dst)
Example #41
def recolorRGV(src,dst):
	"""Simulate conversion from BGR to RGV(red,green,value)
	The source and destination images must both be in BGR format.
	Blues are desaturated.
	Pseudocode:
	dst.b = min(src.b,src.g,src.r)
	dst.g = src.g
	dst.r = src.r
	"""

	b,g,r = cv2.split(src)
	#min() computes the per-element minimum of its first two arguments
	#and writes the result to the third argument
	cv2.min(b,g,b)
	cv2.min(b,r,b)
	cv2.merge((b,g,r),dst)
Example #42
    def test_cudaarithm_logical(self):
        npMat1 = (np.random.random((128, 128)) * 255).astype(np.uint8)
        npMat2 = (np.random.random((128, 128)) * 255).astype(np.uint8)

        cuMat1 = cv.cuda_GpuMat()
        cuMat2 = cv.cuda_GpuMat()
        cuMat1.upload(npMat1)
        cuMat2.upload(npMat2)

        self.assertTrue(np.allclose(cv.cuda.bitwise_or(cuMat1, cuMat2).download(),
                                         cv.bitwise_or(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.bitwise_and(cuMat1, cuMat2).download(),
                                         cv.bitwise_and(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.bitwise_xor(cuMat1, cuMat2).download(),
                                         cv.bitwise_xor(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.bitwise_not(cuMat1).download(),
                                         cv.bitwise_not(npMat1)))

        self.assertTrue(np.allclose(cv.cuda.min(cuMat1, cuMat2).download(),
                                         cv.min(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.max(cuMat1, cuMat2).download(),
                                         cv.max(npMat1, npMat2)))
Example #43
from math import ceil, floor


def simplest_color_balance(src, percent=1):
    # assert(input.channels() == 3)
    # assert(percent > 0 && percent < 100)

    half_percent = float(percent) / 200.0

    channels = cv2.split(src)
    out = []
    for channel in channels:
        # find the low and high percentile values (based on the input percentile)
        flat = channel.ravel().tolist()
        flat.sort()
        lowval = flat[int(floor(float(len(flat)) * half_percent))]
        highval = flat[int(ceil(float(len(flat)) * (1.0-half_percent)))]

        # saturate below the low percentile and above the high percentile
        # channel = cv2.threshold(channel, highval, -1, cv2.THRESH_TRUNC) # truncate values to max of highval
        # for row in channel:
        #     for c in xrange(len(row)):
        #         if row[c] < lowval
        channel = cv2.max(channel, lowval)
        channel = cv2.min(channel, highval)

        # scale the channel
        channel = cv2.normalize(channel, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
        out.append(channel)

    out = cv2.merge(out)
    return out
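A usage sketch (the paths are placeholder assumptions); with percent=1 the darkest and brightest 0.5% of each channel are clipped before the min-max stretch:

src = cv2.imread("photo.jpg")
balanced = simplest_color_balance(src, percent=1)
cv2.imwrite("balanced.jpg", balanced)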
Example #44
def findSpotsInRed(original,mask,levelNumber,level):

	redChannel = cv2.min(mask,cv2.split(original)[0])
	redChannel,contByLvl,imageList = greyValueSegmentation(redChannel,levelNumber)
	

	# rawContours,hierarchy = cv2.findContours(redChannel.copy(),
	# 	cv2.cv.CV_RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
	# bigCont = []
	# if len(rawContours)>0:
	# 	for cnt in zip(hierarchy[0],rawContours):
	# 		#contornos que (no tengan hijos o tengan hermano izquierdo) y no sean unipuntuales
	# 		#print cnt[0]
	# 		#if (cnt[0][2]<0 or cnt[0][0]>-1) and len(cnt[1])>1:
	# 		if len(cnt[1])>1:
	# 			bigCont.append(cv2.approxPolyDP(cnt[1],3,True))
	#cv2.drawContours(original,bigCont,-1,(0,255,0),2)
	
	redChannel = cv2.merge([np.clip(imageList[level],0,1)*255,]*3)

	#redChannel = cv2.merge([redChannel,]*3)
	
	cv2.drawContours(redChannel,contByLvl[level],-1,(0,255,0),2)
	
	#return original
	return redChannel
Example #45
def tantriggs(image):
    # Convert to float
    image = np.float32(image)

    image = cv2.pow(image, GAMMA)
    image = difference_of_gaussian(image)

    # mean 1
    tmp = cv2.pow(cv2.absdiff(image, 0), ALPHA)
    mean = cv2.mean(tmp)[0]
    image = cv2.divide(image, cv2.pow(mean, 1.0/ALPHA))

    # mean 2
    tmp = cv2.pow(cv2.min(cv2.absdiff(image, 0), TAU), ALPHA)
    mean = cv2.mean(tmp)[0]
    image = cv2.divide(image, cv2.pow(mean, 1.0/ALPHA))

    # tanh
    exp_x = cv2.exp(cv2.divide(image, TAU))
    exp_negx = cv2.exp(cv2.divide(-image, TAU))
    image = cv2.divide(cv2.subtract(exp_x, exp_negx), cv2.add(exp_x, exp_negx))
    image = cv2.multiply(image, TAU)

    image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)

    return image
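The constants and the difference_of_gaussian helper are defined elsewhere in the source; a sketch using the values from the Tan-Triggs paper (gamma 0.2, alpha 0.1, tau 10, DoG sigmas 1 and 2; these are assumptions, not taken from the snippet):

GAMMA, ALPHA, TAU = 0.2, 0.1, 10.0  # assumed Tan-Triggs defaults

def difference_of_gaussian(image, sigma0=1.0, sigma1=2.0):
    # band-pass filter: difference of two Gaussian blurs
    blur0 = cv2.GaussianBlur(image, (0, 0), sigma0)
    blur1 = cv2.GaussianBlur(image, (0, 0), sigma1)
    return cv2.subtract(blur0, blur1)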
Example #46
def whiteish_areas(img):
    h, s, v = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    h_close = hue_close_to(img, 0, 20)
    ret_val, v_high = cv2.threshold(v, 180, 255, cv2.THRESH_BINARY)
    whiteish = cv2.min(h_close, v_high)
    dilated = cv2.dilate(whiteish, kernel=None, iterations=7)
    closed = cv2.erode(dilated, kernel=None, iterations=7)
    return closed
Example #47
def draw_note(img, note_type, loc, accidental, measure_height):
    """ returns bounding box """
    #For now, just draw a circle
    if note_type == QUARTER:
        templ = cv2.imread('%s/quarter_masked.png'%TEMPLATES_FOLDER,0)
        offset = (20,91)
    elif note_type == HALF:
        templ = cv2.imread('%s/half_masked.png'%TEMPLATES_FOLDER,0)
        offset = (20,91)
    else:
        templ = cv2.imread('%s/whole_masked.png'%TEMPLATES_FOLDER,0)
        offset = (20,98)
    #Rescale, assuming template height is measure height
    scale = measure_height/float(templ.shape[0])
    new_size = (int(templ.shape[1]*scale),int(templ.shape[0]*scale))
    templ_resized = cv2.resize(templ,new_size).astype('uint8')
    off_resized = (offset[0]*scale,offset[1]*scale)
    #Apply the image plus a mask
    tl = (int(loc[0] - off_resized[0]),
          int(loc[1] - off_resized[1]))
    img[ tl[1]:tl[1]+templ_resized.shape[0],
        tl[0]:tl[0]+templ_resized.shape[1] ] = cv2.min( 
                            img[ tl[1]:tl[1]+templ_resized.shape[0],
                                 tl[0]:tl[0]+templ_resized.shape[1] ],
                            templ_resized )
    bb = BoundingBox(tl,templ_resized.shape[1],templ_resized.shape[0])
    if accidental != NONE:
        if accidental == FLAT:
            atempl = cv2.imread('%s/flat_masked.png'%TEMPLATES_FOLDER,0)
            aoff   = (20,52)
        else:
            atempl = cv2.imread('%s/sharp_masked.png'%TEMPLATES_FOLDER,0)
            aoff   = (20,40)
        atempl_resized = cv2.resize(atempl,(int(scale*atempl.shape[1]),int(scale*atempl.shape[0])) )
        aoff_resized = (aoff[0]*scale,aoff[1]*scale)
        atl = (int(loc[0] - off_resized[0] - atempl_resized.shape[1] * 0.8),
               int(loc[1] - aoff_resized[1]))
        img[ atl[1]:atl[1]+atempl_resized.shape[0],
             atl[0]:atl[0]+atempl_resized.shape[1] ] = cv2.min( 
                            img[ atl[1]:atl[1]+atempl_resized.shape[0],
                                 atl[0]:atl[0]+atempl_resized.shape[1] ],
                                 atempl_resized )
        abb = BoundingBox(atl,atempl_resized.shape[1],atempl_resized.shape[0])
    else:
        abb = None
    return bb,abb
Example #48
def recolourRGV(src, dst):
    """Simulate conversion from BGR to RGV (red, green, value).
    
    The source and destination images must both be in BGR format.
    
    Blues are desaturated. The effect is similar to Technicolor
    Process 1 (used in early colour movies).
    
    Pseudocode:
    dst.b = min(src.b, src.g, src.r)
    dst.g = src.g
    dst.r = src.r
    
    """
    b, g, r = cv2.split(src)
    cv2.min(b, g, b)
    cv2.min(b, r, b)
    cv2.merge((b, g, r), dst)
Example #49
def remove_logo(orig, child):
	print "Removing logo in square"
	
	im_o = cv2.imread(orig)
	im_c = cv2.imread(child)
	#cv2.imwrite(orig+'6.png',(im_o*0.5)+(im_c*0.5))    # 50% transparency
	#cv2.imwrite(orig+'7.png',(im_o*0.35)+(im_c*0.65))  # 65% transparency
	#cv2.imwrite(orig+'8.png',cv2.min(im_o,im_c))       # darken only?? http://gimp-savvy.com/BOOK/index.html?node55.html
	cv2.imwrite(orig,cv2.min(im_o,im_c))
	os.remove(child)
Example #50
def recon_l_pyr(pyr):
    nlevs=len(pyr)
    lowpass=np.array(pyr[nlevs-1],dtype=np.uint8)
    for i in range(nlevs-2,-1,-1):
        band=pyr[i]
        lowpass=cv2.pyrUp(lowpass)[:band.shape[0],:band.shape[1],:]
        highpass=cv2.add(np.array(lowpass,dtype=np.int16),band)
        highpass=cv2.min(highpass,np.array([255,255,255]))
        highpass=cv2.max(highpass,np.array([0,0,0]))
        lowpass=np.array(highpass,dtype=np.uint8)
    return lowpass
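For context, a sketch of building the Laplacian pyramid that recon_l_pyr inverts (an assumption about the encoder side, not from the source):

def build_l_pyr(img, nlevs):
    pyr = []
    lowpass = img
    for _ in range(nlevs - 1):
        down = cv2.pyrDown(lowpass)
        up = cv2.pyrUp(down)[:lowpass.shape[0], :lowpass.shape[1], :]
        # band = lowpass - upsampled(down), kept signed in int16
        band = cv2.subtract(np.array(lowpass, dtype=np.int16),
                            np.array(up, dtype=np.int16))
        pyr.append(band)
        lowpass = down
    pyr.append(lowpass)
    return pyr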
Example #51
 def gera_mapa_caract(self, R, G, B):
     tmp1 = cv2.max(R, G)
     RGBMax = cv2.max(B, tmp1)
     RGBMax[RGBMax <= 0] = 0.0001   
     RGMin = cv2.min(R, G)
     RG = (R - G) / RGBMax
     BY = (B - RGMin) / RGBMax
     RG[RG < 0] = 0
     BY[BY < 0] = 0
     RGFM = self.piram_gauss_CSD(RG)
     BYFM = self.piram_gauss_CSD(BY)
     return RGFM, BYFM
Example #52
 def final_fitting(c,edges):
     #use the real edge pixels to fit, not the approximated contours
     support_mask = np.zeros(edges.shape,edges.dtype)
     cv2.polylines(support_mask,c,isClosed=False,color=(255,255,255),thickness=2)
     # draw into the support mask with thickness 2
     new_edges = cv2.min(edges, support_mask)
     new_contours = cv2.findNonZero(new_edges)
     if self._window and visualize:
         new_edges[new_edges!=0] = 255
         overlay[:,:,1] = cv2.max(overlay[:,:,1], new_edges)
         overlay[:,:,2] = cv2.max(overlay[:,:,2], new_edges)
     new_e = cv2.fitEllipse(new_contours)
     return new_e,new_contours
Example #53
def refine(sigSquares,img,bigCont,minArea,maxArea,direction):#refDim
	lh,lw = img.shape[0]//len(sigSquares), img.shape[1]//len(sigSquares[0])
	mask = np.zeros((img.shape),np.uint8)
	#refSquares = np.zeros((len(sigSquares)*refDim[1],len(sigSquares[0])*refDim[0]),np.uint8)
	for row in enumerate(sigSquares):
		for col in enumerate(row[1]):
			#if the square value is greater than 0 (i.e there is a contour in this tile)
			#then we get the sigSquares of this particular tile
			if sigSquares[row[0]][col[0]]!=0:
				patch = img[row[0]*lh:(row[0]+1)*lh,col[0]*lw:(col[0]+1)*lw,:]
				patchCont = stapleContThresh(patch,minArea,maxArea,direction)[1]
				patchSquares = findFrame(patchCont,(lw,lh),2,255,5)
				removeNotConnected(patchSquares)
				mask[row[0]*lh:(row[0]+1)*lh,col[0]*lw:(col[0]+1)*lw,:] = cv2.merge([paintSQS(patchSquares,(lw,lh),255),]*3)
	return cv2.min(img,mask)
Example #54
def paintSQS(sqHist,img):
	totalH,totalW = img.shape[:2]
	rowNum,colNum = sqHist.shape[:2]

	canvas = np.zeros((totalH,totalW),np.uint8)
	lw = totalW//colNum
	lh = totalH//rowNum
	#paint the result
	for row in range(rowNum-1):
		for col in range(colNum-1):
			if sqHist[row][col]!=0:
				plane = np.ones((lh,lw),np.uint8)*255
				canvas[row*lh:(row+1)*lh,col*lw:(col+1)*lw]=plane	
	
	return cv2.merge([cv2.min(canvas,layer) for layer in cv2.split(img)])
Example #55
def doAndPack(img,dirList,thresh,cannyList,blatList,relevanceThresh,probThresh):
	print('-----------------------------')
	print('NEW IMAGE')
	print('-----------------------------')
	h, w = 375,450
	
	aux = []
	for channel in cv2.split(img):
		aux.append(cv2.equalizeHist(channel))

	eqImg = cv2.merge(aux)
	

	aux = []
	aux.append(stapleContCanny(eqImg,dirList,cannyList))
	aux.append(stapleContThresh(img,dirList,thresh))
	aux.append(stapleContBlurAT(eqImg,dirList,blatList))

	
	cpImg0,cpImg1,cpImg2 = eqImg.copy(),img.copy(),eqImg.copy()

	percent = 0.01
	line = int(min(img.shape[0]*percent,img.shape[1]*percent))

	cv2.drawContours(cpImg0, aux[0], -1, (0,0,255),line)
	cv2.drawContours(cpImg1, aux[1], -1, (0,0,255),line)
	cv2.drawContours(cpImg2, aux[2], -1, (0,0,255),line)


	img0 = getSigSquares([aux[0],],img.shape,(10,10),0)[0]
	img1 = getSigSquares([aux[1],],img.shape,(10,10),0)[0]
	img2 = getSigSquares([aux[2],],img.shape,(10,10),0)[0]

	intersecImg = cv2.min(img0,cv2.min(img1,img2))
	intersecImg = cv2.merge([cv2.min(intersecImg,layer) for layer in cv2.split(img)])

	img0 = cv2.merge([cv2.min(img0,layer) for layer in cv2.split(cpImg0)])
	img1 = cv2.merge([cv2.min(img1,layer) for layer in cv2.split(cpImg1)])
	img2 = cv2.merge([cv2.min(img2,layer) for layer in cv2.split(cpImg2)])

	background = np.zeros((h*2,w*3,3),np.uint8)
	background[0:h,0:w,0:3]=cv2.resize(img,(w,h))
	background[0:h,w:2*w,0:3]=cv2.resize(eqImg,(w,h))
	background[0:h,2*w:3*w,0:3]=cv2.resize(intersecImg,(w,h))

	# background[0:h,0:w,0:3]=cv2.resize(cpImg0,(w,h))
	# background[0:h,w:2*w,0:3]=cv2.resize(cpImg1,(w,h))
	# background[0:h,2*w:3*w,0:3]=cv2.resize(cpImg2,(w,h))

	background[h:2*h,0:w,0:3]=cv2.resize(img0,(w,h))
	background[h:2*h,w:2*w,0:3]=cv2.resize(img1,(w,h))
	background[h:2*h,2*w:3*w,0:3]=cv2.resize(img2,(w,h))
	return background
Example #56
def findYellowWalls(img, hsv, hue, sat, val, debug):
	displayImage("hue", hue)
	displayImage("sat", sat)
	#generate mask based on hue
	yellowMinHue = 15
	yellowMaxHue = 35
	hueMask = cv2.inRange(hue, numpy.array([yellowMinHue]), numpy.array([yellowMaxHue]))
	displayImage("hue mask", hueMask)
	yellowMinSat = 100
	yellowMaxSat = 255
	satMask = cv2.inRange(sat, numpy.array([yellowMinSat]), numpy.array([yellowMaxSat]))
	displayImage("sat mask", satMask)
	mask = cv2.min(hueMask, satMask) # AND of both masks
	displayImage("mask", mask)

	# find contours with at least [const] area 
	minArea = 400 # determined empirically

	contours, heirarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
	nextLargest = None
	nextArea = 0
	index = 0
	walls = []
	for contour in contours:
		area = cv2.contourArea(contour)
		if area > minArea:
			(x,y, width, height) = cv2.boundingRect(contour)
			angle = 78 * (x+width/2 - img.shape[0]/2) / img.shape[0]
			walls.append( (angle, x, y, width, height, area, contour) )
			if debug:
				print "area = {0}\tangle={1}".format(area, angle)
				cv2.rectangle(img, (x,y), (x+width,y+height), (255,0,0), 3)
		elif area>nextArea:
			nextArea = area
			nextLargest = index
		index += 1
	if nextLargest is not None:
		print("next largest: area = {0}".format(nextArea))
		(x,y, width, height) = cv2.boundingRect(contours[nextLargest])
		cv2.rectangle(img, (x,y), (x+width,y+height), (0,255,0), 3)


	displayImage("img", img)

	return walls
Example #57
	def findYellowWalls(self):
		#generate mask based on hue
		self.hueMask = cv2.inRange(self.hueImg, numpy.array([self.yellowMinHue]), numpy.array([self.yellowMaxHue]), self.hueMask)
		self.satMask = cv2.inRange(self.satImg, numpy.array([self.yellowMinSat]), numpy.array([self.yellowMaxSat]), self.satMask)
		self.mask = cv2.min(self.hueMask, self.satMask, self.mask) # AND of both masks

		# find contours with at least [const] area 

		contours, heirarchy = cv2.findContours(self.mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
		walls = []
		for contour in contours:
			area = cv2.contourArea(contour)
			if area > self.minWallArea:
				(x,y, width, height) = cv2.boundingRect(contour)
				angle = 78 * (x+width/2 - self.hueImg.shape[0]/2) / self.hueImg.shape[0]
				walls.append( wall.Wall(angle, x, y, width, height, area) )

		return walls
Example #58
 def CFMGetFM(self, R, G, B):
     # max(R,G,B)
     tmp1 = cv2.max(R, G)
     RGBMax = cv2.max(B, tmp1)
     RGBMax[RGBMax <= 0] = 0.0001    # prevent dividing by 0
     # min(R,G)
     RGMin = cv2.min(R, G)
     # RG = (R-G)/max(R,G,B)
     RG = (R - G) / RGBMax
     # BY = (B - min(R,G)) / max(R,G,B)
     BY = (B - RGMin) / RGBMax
     # clamp negative values to 0
     RG[RG < 0] = 0
     BY[BY < 0] = 0
     # obtain feature maps in the same way as intensity
     RGFM = self.FMGaussianPyrCSD(RG)
     BYFM = self.FMGaussianPyrCSD(BY)
     # return
     return RGFM, BYFM
Example #59
    def detect(self,frame,user_roi,visualize=False):
        u_r = user_roi
        if self.window_should_open:
            self.open_window((frame.img.shape[1],frame.img.shape[0]))
        if self.window_should_close:
            self.close_window()

        if self._window:
            debug_img = np.zeros(frame.img.shape,frame.img.dtype)


        #get the user_roi
        img = frame.img
        r_img = img[u_r.view]
        #        bias_field = preproc.EstimateBias(r_img)
        # r_img = preproc.Unbias(r_img, bias_field)
        r_img = preproc.GaussBlur(r_img)
        r_img = preproc.RobustRescale(r_img)
        frame.img[u_r.view] = r_img
        gray_img = cv2.cvtColor(r_img,cv2.COLOR_BGR2GRAY)


        # coarse pupil detection

        if self.coarse_detection.value:
            integral = cv2.integral(gray_img)
            integral =  np.array(integral,dtype=c_float)
            x,y,w,response = eye_filter(integral,self.coarse_filter_min,self.coarse_filter_max)
            p_r = Roi(gray_img.shape)
            if w>0:
                p_r.set((y,x,y+w,x+w))
            else:
                p_r.set((0,0,-1,-1))
        else:
            p_r = Roi(gray_img.shape)
            p_r.set((0,0,None,None))
            w = img.shape[0]/2

        coarse_pupil_width = w/2.
        padding = coarse_pupil_width/4.
        pupil_img = gray_img[p_r.view]



        # binary thresholding of pupil dark areas
        hist = cv2.calcHist([pupil_img],[0],None,[256],[0,256]) #(images, channels, mask, histSize, ranges[, hist[, accumulate]])
        bins = np.arange(hist.shape[0])
        spikes = bins[hist[:,0]>40] # every intensity seen in more than 40 pixels
        if spikes.shape[0] >0:
            lowest_spike = spikes.min()
            highest_spike = spikes.max()
        else:
            lowest_spike = 200
            highest_spike = 255

        offset = self.intensity_range.value
        spectral_offset = 5
        if visualize:
            # display the histogram
            sx,sy = 100,1
            colors = ((0,0,255),(255,0,0),(255,255,0),(255,255,255))
            h,w,chan = img.shape
            hist *= 1./hist.max()  # normalize for display

            for i,h in zip(bins,hist[:,0]):
                c = colors[1]
                cv2.line(img,(w,int(i*sy)),(w-int(h*sx),int(i*sy)),c)
            cv2.line(img,(w,int(lowest_spike*sy)),(int(w-.5*sx),int(lowest_spike*sy)),colors[0])
            cv2.line(img,(w,int((lowest_spike+offset)*sy)),(int(w-.5*sx),int((lowest_spike+offset)*sy)),colors[2])
            cv2.line(img,(w,int((highest_spike)*sy)),(int(w-.5*sx),int((highest_spike)*sy)),colors[0])
            cv2.line(img,(w,int((highest_spike- spectral_offset )*sy)),(int(w-.5*sx),int((highest_spike - spectral_offset)*sy)),colors[3])

        # create dark and spectral glint masks
        self.bin_thresh.value = lowest_spike
        binary_img = bin_thresholding(pupil_img,image_upper=lowest_spike + offset)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
        cv2.dilate(binary_img, kernel,binary_img, iterations=2)
        spec_mask = bin_thresholding(pupil_img, image_upper=highest_spike - spectral_offset)
        cv2.erode(spec_mask, kernel,spec_mask, iterations=1)

        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9))

        #open operation to remove eye lashes
        pupil_img = cv2.morphologyEx(pupil_img, cv2.MORPH_OPEN, kernel)

        if self.blur > 1:
            pupil_img = cv2.medianBlur(pupil_img,self.blur.value)

        edges = cv2.Canny(pupil_img,
                            self.canny_thresh,
                            self.canny_thresh*self.canny_ratio,
                            apertureSize= self.canny_aperture)


        # remove edges in areas not dark enough and where the glint is (spectral reflection from the IR LEDs)
        edges = cv2.min(edges, spec_mask)
        edges = cv2.min(edges,binary_img)

        overlay =  img[u_r.view][p_r.view]
        if visualize:
            b,g,r = overlay[:,:,0],overlay[:,:,1],overlay[:,:,2]
            g[:] = cv2.max(g,edges)
            b[:] = cv2.max(b,binary_img)
            b[:] = cv2.min(b,spec_mask)

            # draw a frame around the automatic pupil ROI in overlay.
            overlay[::2,0] = 255 #yeay numpy broadcasting
            overlay[::2,-1]= 255
            overlay[0,::2] = 255
            overlay[-1,::2]= 255
            # draw a frame around the area we require the pupil center to be.
            overlay[padding:-padding:4,padding] = 255
            overlay[padding:-padding:4,-padding]= 255
            overlay[padding,padding:-padding:4] = 255
            overlay[-padding,padding:-padding:4]= 255

        if visualize:
            c = (100.,frame.img.shape[0]-100.)
            e_max = ((c),(self.pupil_max.value,self.pupil_max.value),0)
            e_recent = ((c),(self.target_size.value,self.target_size.value),0)
            e_min = ((c),(self.pupil_min.value,self.pupil_min.value),0)
            cv2.ellipse(frame.img,e_min,(0,0,255),1)
            cv2.ellipse(frame.img,e_recent,(0,255,0),1)
            cv2.ellipse(frame.img,e_max,(0,0,255),1)

        #get raw edge pix for later
        raw_edges = cv2.findNonZero(edges)

        def ellipse_true_support(e,raw_edges):
            a,b = e[1][0]/2.,e[1][1]/2. # major minor radii of candidate ellipse
            ellipse_circumference = np.pi*abs(3*(a+b)-np.sqrt(10*a*b+3*(a**2+b**2)))
            distances = dist_pts_ellipse(e,raw_edges)
            support_pixels = raw_edges[distances<=1.3]
            # support_ratio = support_pixel.shape[0]/ellipse_circumference
            return support_pixels,ellipse_circumference

        # if we had a good ellipse before ,let see if it is still a good first guess:
        if self.strong_prior:
            e = p_r.sub_vector(u_r.sub_vector(self.strong_prior[0])),self.strong_prior[1],self.strong_prior[2]

            self.strong_prior = None
            if raw_edges is not None:
                support_pixels,ellipse_circumference = ellipse_true_support(e,raw_edges)
                support_ratio =  support_pixels.shape[0]/ellipse_circumference
                if support_ratio >= self.strong_perimeter_ratio_range[0]:
                    refit_e = cv2.fitEllipse(support_pixels)
                    if self._window:
                        cv2.ellipse(debug_img,e,(255,100,100),thickness=4)
                        cv2.ellipse(debug_img,refit_e,(0,0,255),thickness=1)
                    e = refit_e
                    self.strong_prior = u_r.add_vector(p_r.add_vector(e[0])),e[1],e[2]
                    goodness = min(1.,support_ratio)
                    pupil_ellipse = {}
                    pupil_ellipse['confidence'] = goodness
                    pupil_ellipse['ellipse'] = e
                    pupil_ellipse['roi_center'] = e[0]
                    pupil_ellipse['major'] = max(e[1])
                    pupil_ellipse['minor'] = min(e[1])
                    pupil_ellipse['apparent_pupil_size'] = max(e[1])
                    pupil_ellipse['axes'] = e[1]
                    pupil_ellipse['angle'] = e[2]
                    e_img_center =u_r.add_vector(p_r.add_vector(e[0]))
                    norm_center = normalize(e_img_center,(frame.img.shape[1], frame.img.shape[0]),flip_y=True)
                    pupil_ellipse['norm_pupil'] = norm_center
                    pupil_ellipse['center'] = e_img_center
                    pupil_ellipse['timestamp'] = frame.timestamp

                    self.target_size.value = max(e[1])

                    self.confidence.value = goodness
                    self.confidence_hist.append(goodness)
                    self.confidence_hist[:-200]=[]
                    if self._window:
                        #draw a little animation of confidence
                        cv2.putText(debug_img, 'good',(410,debug_img.shape[0]-100), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
                        cv2.putText(debug_img, 'threshold',(410,debug_img.shape[0]-int(self.final_perimeter_ratio_range[0]*100)), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
                        cv2.putText(debug_img, 'no detection',(410,debug_img.shape[0]-10), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
                        lines = np.array([[[2*x,debug_img.shape[0]-int(100*y)],[2*x,debug_img.shape[0]]] for x,y in enumerate(self.confidence_hist)])
                        cv2.polylines(debug_img,lines,isClosed=False,color=(255,100,100))
                        self.gl_display_in_window(debug_img)
                    return pupil_ellipse





        # from edges to contours
        contours, hierarchy = cv2.findContours(edges,
                                            mode=cv2.RETR_LIST,
                                            method=cv2.CHAIN_APPROX_NONE,offset=(0,0)) #TC89_KCOS
        # contours is a list containing array([[[108, 290]],[[111, 290]]], dtype=int32) shape=(number of points,1,dimension(2) )

        ### first we want to filter out the bad stuff
        # too short
        good_contours = [c for c in contours if c.shape[0]>self.min_contour_size.value]
        # now we learn things about each contour through looking at the curvature.
        # For this we need to simplify the contour so that pt-to-pt angles become more meaningful
        aprox_contours = [cv2.approxPolyDP(c,epsilon=1.5,closed=False) for c in good_contours]

        if self._window:
            x_shift = coarse_pupil_width*2
            color = zip(range(0,250,15),range(0,255,15)[::-1],range(230,250))
        split_contours = []
        for c in aprox_contours:
            curvature = GetAnglesPolyline(c)
            # we split whenever there is a real kink (abs(curvature) < right angle) or a change in the general direction
            kink_idx = find_kink_and_dir_change(curvature,80)
            segs = split_at_corner_index(c,kink_idx)

            #TODO: split at sharp inward turns
            for s in segs:
                if s.shape[0]>2:
                    split_contours.append(s)
                    if self._window:
                        c = color.pop(0)
                        color.append(c)
                        s = s.copy()
                        s[:,:,0] += debug_img.shape[1]-coarse_pupil_width*2
                        # s[:,:,0] += x_shift
                        # x_shift += 5
                        cv2.polylines(debug_img,[s],isClosed=False,color=map(lambda x: x,c),thickness = 1,lineType=4)#cv2.CV_AA

        split_contours.sort(key=lambda x:-x.shape[0])
        # print [x.shape[0]for x in split_contours]
        if len(split_contours) == 0:
            # not a single useful segment found -> no pupil found
            self.confidence.value = 0
            self.confidence_hist.append(0)
            if self._window:
                self.gl_display_in_window(debug_img)
            return {'timestamp':frame.timestamp,'norm_pupil':None}


        # removing stubs makes combinatorial search feasible
        split_contours = [c for c in split_contours if c.shape[0]>3]

        def ellipse_filter(e):
            in_center = padding < e[0][1] < pupil_img.shape[0]-padding and padding < e[0][0] < pupil_img.shape[1]-padding
            if in_center:
                is_round = min(e[1])/max(e[1]) >= self.min_ratio
                if is_round:
                    right_size = self.pupil_min.value <= max(e[1]) <= self.pupil_max.value
                    if right_size:
                        return True
            return False

        def ellipse_on_blue(e):
            center_on_dark = binary_img[e[0][1],e[0][0]]
            return bool(center_on_dark)

        def ellipse_support_ratio(e,contours):
            a,b = e[1][0]/2.,e[1][1]/2. # major minor radii of candidate ellipse
            ellipse_area =  np.pi*a*b
            ellipse_circumference = np.pi*abs(3*(a+b)-np.sqrt(10*a*b+3*(a**2+b**2)))
            actual_area = cv2.contourArea(cv2.convexHull(np.concatenate(contours)))
            actual_contour_length = sum([cv2.arcLength(c,closed=False) for c in contours])
            area_ratio = actual_area / ellipse_area
            perimeter_ratio = actual_contour_length / ellipse_circumference #we assume here that the contour lies close to the ellipse boundary
            return perimeter_ratio,area_ratio


        def final_fitting(c,edges):
            #use the real edge pixels to fit, not the approximated contours
            support_mask = np.zeros(edges.shape,edges.dtype)
            cv2.polylines(support_mask,c,isClosed=False,color=(255,255,255),thickness=2)
            # draw into the support mask with thickness 2
            new_edges = cv2.min(edges, support_mask)
            new_contours = cv2.findNonZero(new_edges)
            if self._window:
                new_edges[new_edges!=0] = 255
                overlay[:,:,1] = cv2.max(overlay[:,:,1], new_edges)
                overlay[:,:,2] = cv2.max(overlay[:,:,2], new_edges)
            new_e = cv2.fitEllipse(new_contours)
            return new_e,new_contours


        # finding potential candidates for ellipse seeds that describe the pupil.
        strong_seed_contours = []
        weak_seed_contours = []
        for idx, c in enumerate(split_contours):
            if c.shape[0] >=5:
                e = cv2.fitEllipse(c)
                # is this ellipse a plausible candidate for a pupil?
                if ellipse_filter(e):
                    distances = dist_pts_ellipse(e,c)
                    fit_variance = np.sum(distances**2)/float(distances.shape[0])
                    if fit_variance <= self.inital_ellipse_fit_threshhold:
                        # how much ellipse is supported by this contour?
                        perimeter_ratio,area_ratio = ellipse_support_ratio(e,[c])
                        # logger.debug('Ellipse no %s with perimeter_ratio: %s , area_ratio: %s'%(idx,perimeter_ratio,area_ratio))
                        if self.strong_perimeter_ratio_range[0]<= perimeter_ratio <= self.strong_perimeter_ratio_range[1] and self.strong_area_ratio_range[0]<= area_ratio <= self.strong_area_ratio_range[1]:
                            strong_seed_contours.append(idx)
                            if self._window:
                                cv2.polylines(debug_img,[c],isClosed=False,color=(255,100,100),thickness=4)
                                e = (e[0][0]+debug_img.shape[1]-coarse_pupil_width*4,e[0][1]),e[1],e[2]
                                cv2.ellipse(debug_img,e,color=(255,100,100),thickness=3)
                        else:
                            weak_seed_contours.append(idx)
                            if self._window:
                                cv2.polylines(debug_img,[c],isClosed=False,color=(255,0,0),thickness=2)
                                e = (e[0][0]+debug_img.shape[1]-coarse_pupil_width*4,e[0][1]),e[1],e[2]
                                cv2.ellipse(debug_img,e,color=(255,0,0))

        sc = np.array(split_contours)


        if strong_seed_contours:
            seed_idx = strong_seed_contours
        elif weak_seed_contours:
            seed_idx = weak_seed_contours

        if not (strong_seed_contours or weak_seed_contours):
            if self._window:
                self.gl_display_in_window(debug_img)
            self.confidence.value = 0
            self.confidence_hist.append(0)
            return {'timestamp':frame.timestamp,'norm_pupil':None}

        # if self._window:
        #     cv2.polylines(debug_img,[split_contours[i] for i in seed_idx],isClosed=False,color=(255,255,100),thickness=3)

        def ellipse_eval(contours):
            c = np.concatenate(contours)
            e = cv2.fitEllipse(c)
            d = dist_pts_ellipse(e,c)
            fit_variance = np.sum(d**2)/float(d.shape[0])
            return fit_variance <= self.inital_ellipse_fit_threshhold


        solutions = pruning_quick_combine(split_contours,ellipse_eval,seed_idx,max_evals=1000,max_depth=5)
        solutions = filter_subsets(solutions)
        ratings = []


        for s in solutions:
            e = cv2.fitEllipse(np.concatenate(sc[s]))
            if self._window:
                cv2.ellipse(debug_img,e,(0,150,100))
            support_pixels,ellipse_circumference = ellipse_true_support(e,raw_edges)
            support_ratio =  support_pixels.shape[0]/ellipse_circumference
            # TODO: refine the selection of the final candidate
            if support_ratio >=self.final_perimeter_ratio_range[0] and ellipse_filter(e):
                ratings.append(support_pixels.shape[0])
                if support_ratio >=self.strong_perimeter_ratio_range[0]:
                    self.strong_prior = u_r.add_vector(p_r.add_vector(e[0])),e[1],e[2]
                    if self._window:
                        cv2.ellipse(debug_img,e,(0,255,255),thickness = 2)
            else:
                #not a valid solution, bad rating
                ratings.append(-1)


        # selected ellipse
        if max(ratings) == -1:
            #no good final ellipse found
            if self._window:
                self.gl_display_in_window(debug_img)
            self.confidence.value = 0
            self.confidence_hist.append(0)
            return {'timestamp':frame.timestamp,'norm_pupil':None}

        best = solutions[ratings.index(max(ratings))]
        e = cv2.fitEllipse(np.concatenate(sc[best]))

        #final calculation of goodness of fit
        support_pixels,ellipse_circumference = ellipse_true_support(e,raw_edges)
        support_ratio =  support_pixels.shape[0]/ellipse_circumference
        goodness = min(1.,support_ratio)

        #final fitting and return of result
        new_e,final_edges = final_fitting(sc[best],edges)
        size_dif = abs(1 - max(e[1])/max(new_e[1]))
        if ellipse_filter(new_e) and size_dif < .3:
            if self._window:
                cv2.ellipse(debug_img,new_e,(0,255,0))
            e = new_e


        pupil_ellipse = {}
        pupil_ellipse['confidence'] = goodness
        pupil_ellipse['ellipse'] = e
        pupil_ellipse['pos_in_roi'] = e[0]
        pupil_ellipse['major'] = max(e[1])
        pupil_ellipse['apparent_pupil_size'] = max(e[1])
        pupil_ellipse['minor'] = min(e[1])
        pupil_ellipse['axes'] = e[1]
        pupil_ellipse['angle'] = e[2]
        e_img_center =u_r.add_vector(p_r.add_vector(e[0]))
        norm_center = normalize(e_img_center,(frame.img.shape[1], frame.img.shape[0]),flip_y=True)
        pupil_ellipse['norm_pupil'] = norm_center
        pupil_ellipse['center'] = e_img_center
        pupil_ellipse['timestamp'] = frame.timestamp

        self.target_size.value = max(e[1])

        self.confidence.value = goodness
        self.confidence_hist.append(goodness)
        self.confidence_hist[:-200]=[]
        if self._window:
            #draw a little animation of confidence
            cv2.putText(debug_img, 'good',(410,debug_img.shape[0]-100), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
            cv2.putText(debug_img, 'threshold',(410,debug_img.shape[0]-int(self.final_perimeter_ratio_range[0]*100)), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
            cv2.putText(debug_img, 'no detection',(410,debug_img.shape[0]-10), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
            lines = np.array([[[2*x,debug_img.shape[0]-int(100*y)],[2*x,debug_img.shape[0]]] for x,y in enumerate(self.confidence_hist)])
            cv2.polylines(debug_img,lines,isClosed=False,color=(255,100,100))
            self.gl_display_in_window(debug_img)
        return pupil_ellipse
Example #60
import sys
import cv2
import numpy as np

filename = sys.argv[1]
rect = tuple([int(x) for x in sys.argv[2:]])
#foto.jpg 700 300 400 500
im = cv2.imread(filename)

h,w = im.shape[:2]

mask = np.zeros((h,w),dtype='uint8')
tmp1 = np.zeros((1, 13 * 5))
tmp2 = np.zeros((1, 13 * 5))

cv2.grabCut(im,mask,rect,tmp1,tmp2,3,mode=cv2.GC_INIT_WITH_RECT)
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(mask)
#print maxVal, maxLoc
#print mask.dtype
flag, mask = cv2.threshold(mask, maxVal-1, 255, cv2.THRESH_BINARY)
cv2.imshow("mask", mask)

cv2.rectangle(im,(rect[0],rect[1]),(rect[0]+rect[2],rect[1]+rect[3]),color=255)
cv2.imshow("img+rect",im)


[b,g,r] = cv2.split(im)
cv2.imshow("result", cv2.merge([cv2.min(x,mask) for x in [b,g,r]]))

"""plt.figure()
plt.imshow(mask)
plt.colorbar()
plt.show()
"""
cv2.waitKey(0)