def distFromColor(self, distThreshold, hue, sat, val, hueWeight=1, satWeight=1, valWeight=1):
		if self.distImg is None or self.distImg.shape != self.hueImg.shape:
			self.distImg = numpy.zeros(self.hueImg.shape, numpy.uint8)
		if self.maskedDistImg is None or self.maskedDistImg.shape != self.hueImg.shape:
			self.maskedDistImg = numpy.zeros(self.hueImg.shape, numpy.uint8)
		if self.binImg is None or self.binImg.shape != self.hueImg.shape:
			self.binImg = numpy.zeros(self.hueImg.shape, numpy.uint8)
		#distance = weighted average of dhue**2, dsat**2, dval**2
		scale = 1.0/(hueWeight + satWeight + valWeight)
		hueWeight *= scale
		satWeight *= scale
		valWeight *= scale
		hi = self.hueImg.item
		si = self.satImg.item
		vi = self.valImg.item
		mi = self.mask.item
		di = self.distImg.itemset
		for row in range(0, self.distImg.shape[0]):
			for col in range(0, self.distImg.shape[1]):
				if mi(row, col) != 0:
					di((row, col), 255)
				else:
					h = hi(row, col) - hue
					s = si(row, col) - sat
					v = vi(row, col) - val
					if h < -90:
						h+=180
					elif h > 90:
						h-=180
					di((row, col), math.sqrt(h*h*hueWeight + s*s*satWeight + v*v*valWeight))
		cv2.max(self.distImg, self.mask, self.maskedDistImg)
		cv2.threshold(self.maskedDistImg, distThreshold, 255, cv2.THRESH_BINARY_INV, self.binImg)
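A hedged aside: the per-pixel itemset loop above is slow in Python; a minimal vectorized sketch of the same distance computation, assuming numpy is imported as in this example, 8-bit hue/sat/val planes with hue in [0, 180), and a mask that is nonzero where pixels should be ignored:

def distFromColorVectorized(hueImg, satImg, valImg, mask, hue, sat, val,
                            hueWeight=1, satWeight=1, valWeight=1):
    scale = 1.0 / (hueWeight + satWeight + valWeight)
    h = hueImg.astype(numpy.float32) - hue
    h = (h + 90.0) % 180.0 - 90.0  # wrap the circular hue difference into [-90, 90)
    s = satImg.astype(numpy.float32) - sat
    v = valImg.astype(numpy.float32) - val
    dist = numpy.sqrt((h * h * hueWeight + s * s * satWeight + v * v * valWeight) * scale)
    dist = numpy.minimum(dist, 255).astype(numpy.uint8)
    dist[mask != 0] = 255  # masked pixels get the maximum distance, as in the loop above
    return dist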
def getEdgeMap2x2(image):
    '''Detect edges with HED by resizing the image to 960x640, running HED on
    four separate, overlapping 480x320 windows, and stitching the results back
    into a single 960x640 edge map.'''

    w, h = 960, 640
    image_resized = cv2.resize(image, (960, 640))

    pad = 32
    img00 = image_resized[pad:320 + pad, pad:480 + pad]
    img01 = image_resized[pad:320 + pad, 480 - pad:960 - pad]
    img10 = image_resized[320 - pad:640 - pad, pad:480 + pad]
    img11 = image_resized[320 - pad:640 - pad, 480 - pad:960 - pad]

    edgemap00 = np.zeros((h - 2 * pad, w - 2 * pad), np.uint8)
    edgemap01 = np.zeros((h - 2 * pad, w - 2 * pad), np.uint8)
    edgemap10 = np.zeros((h - 2 * pad, w - 2 * pad), np.uint8)
    edgemap11 = np.zeros((h - 2 * pad, w - 2 * pad), np.uint8)

    ww, hh = 480, 320

    edgemap00[0:hh, 0:ww] = getEdgeMap(img00)
    edgemap01[0:hh, ww - 2 * pad:] = getEdgeMap(img01)
    edgemap10[hh - 2 * pad:, 0:ww] = getEdgeMap(img10)
    edgemap11[hh - 2 * pad:, ww - 2 * pad:] = getEdgeMap(img11)

    e0 = cv2.max(edgemap00, edgemap01)
    e1 = cv2.max(edgemap10, edgemap11)
    edgemap = cv2.max(e0, e1)

    edgemap_resized = cv2.resize(edgemap, (960, 640))
    return edgemap_resized
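A hypothetical usage sketch; getEdgeMap, the single-window HED runner, is assumed to exist in this module and to return a uint8 edge map the same size as its input, and the file names are placeholders:

img = cv2.imread("input.jpg")
edges = getEdgeMap2x2(img)       # 640x960 uint8 edge map
cv2.imwrite("edges.png", edges)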
def peer(img):
    b, g, r = cv2.split(img)
    ret, m1 = cv2.threshold(r, 95, 255, cv2.THRESH_BINARY)
    ret, m2 = cv2.threshold(g, 30, 255, cv2.THRESH_BINARY)
    ret, m3 = cv2.threshold(b, 20, 255, cv2.THRESH_BINARY)
    mmax = cv2.max(r, cv2.max(g, b))
    mmin = cv2.min(r, cv2.min(g, b))

    ret, m4 = cv2.threshold(mmax - mmin, 15, 255, cv2.THRESH_BINARY)
    ret, m5 = cv2.threshold(cv2.absdiff(r, g), 15, 255, cv2.THRESH_BINARY)
    m6 = cv2.compare(r, g, cv2.CMP_GE)
    m7 = cv2.compare(r, b, cv2.CMP_GE)
    mask = m1 & m2 & m3 & m6 & m4 & m5 & m7
    cv2.imshow("b", b)
    cv2.imshow("g", g)
    cv2.imshow("r", r)
    cv2.imshow('r_thre', m1)
    cv2.imshow('g_thre',m2)
    cv2.imshow('b_thre',m3)
    cv2.imshow('max-min',m4)
    cv2.imshow('absdiff',m5)
    cv2.imshow('r_g',m6)
    cv2.imshow('r_b',m7)
    cv2.imshow('res',mask)
    return mask
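The thresholds above follow a variant of the Peer et al. RGB skin-color rule (R > 95, G > 30, B > 20, max-min > 15, |R-G| > 15, R >= G, R >= B); an equivalent hedged NumPy sketch without the debug windows, assuming numpy is imported as np:

def peer_mask(img):
    b = img[..., 0].astype(np.int16)
    g = img[..., 1].astype(np.int16)
    r = img[..., 2].astype(np.int16)
    rule = ((r > 95) & (g > 30) & (b > 20)
            & (np.maximum(np.maximum(r, g), b) - np.minimum(np.minimum(r, g), b) > 15)
            & (np.abs(r - g) > 15) & (r >= g) & (r >= b))
    return rule.astype(np.uint8) * 255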
Example #4
def manga_filter(src):
    color = src[:, :, 0:3]
    alpha = src[:, :, 3]
    inv_alpha = 255 - alpha

    # Convert to grayscale
    gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)

    edge = cv2.adaptiveThreshold(cv2.max(gray, inv_alpha), 255,
                                 cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 3, 10)
    erode_edge = cv2.erode(edge, np.ones((1, 1), np.uint8))

    inv_alpha = 255 - cv2.erode(alpha, np.ones((1, 1), np.uint8))  # widen the alpha boundary
    no_edge = cv2.inpaint(color, cv2.max(255 - erode_edge, inv_alpha), 11,
                          cv2.INPAINT_NS)

    color_edge = color.copy()
    color_edge[edge > 128] = 0

    rgb = cv2.split(no_edge)
    result = cv2.merge([cv2.min(c, alpha) for c in rgb])

    result2 = blend(result, color_edge, 255 - edge)

    new_edge = extract_edge(gray)

    return {
        "orig_color": color,
        "no_edge": no_edge,
        "alpha": alpha,
        "edge_color": color_edge,
        "edge_alpha": new_edge,
    }
def peer_cost(img):
    b,g,r = cv2.split(img)

    ret, m1 = cv2.threshold(r, 95, 255, cv2.THRESH_BINARY)
    ret, m2 = cv2.threshold(g, 40, 255, cv2.THRESH_BINARY)
    ret, m3 = cv2.threshold(b, 20, 255, cv2.THRESH_BINARY)

    mmax = cv2.max(r, cv2.max(g, b))
    mmin = 255
    for tmp in b[0]:
        if (tmp != 0 and tmp < mmin):
            mmin = tmp
    for tmp in g[0]:
        if (tmp != 0 and tmp < mmin):
            mmin = tmp
    for tmp in r[0]:
        if (tmp != 0 and tmp < mmin):
            mmin = tmp
    print(mmin)
    ret, m4 = cv2.threshold(mmax - mmin, 15, 255, cv2.THRESH_BINARY)
    ret, m5 = cv2.threshold(cv2.absdiff(r, g), 15, 255, cv2.THRESH_BINARY)
    m6 = cv2.compare(r, g, cv2.CMP_GT)
    m7 = cv2.compare(r, b, cv2.CMP_GE)

    mask = m1 & m2 & m3 & m4 & m5 & m6 & m7

    return mask
def clearImage(frame):

    channels = cv.split(frame)
    # Get the maximum value of each channel
    # and get the dark channel of each image
    # record the maximum value of each channel
    a_max_dst = [float("-inf")] * len(channels)
    for idx in range(len(channels)):
        a_max_dst[idx] = channels[idx].max()

    dark_image = cv.min(channels[0], cv.min(channels[1], channels[2]))

    # Gaussian filtering the dark channel
    dark_image = cv.GaussianBlur(dark_image, (25, 25), 0)

    image_t = (255.0 - 0.95 * dark_image) / 255.0
    image_t = cv.max(image_t, 0.5)

    # Calculate t(x) and get the clear image
    for idx in range(len(channels)):
        channels[idx] = (cv.max(
            cv.add(
                cv.subtract(channels[idx].astype(np.float32),
                            int(a_max_dst[idx])) / image_t,
                int(a_max_dst[idx]),
            ),
            0.0,
        ) / int(a_max_dst[idx]) * 255)
        channels[idx] = channels[idx].astype(np.uint8)

    return cv.merge(channels)
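A hypothetical usage sketch for this dark-channel-based dehazing routine; the `cv` alias is assumed to come from `import cv2 as cv`, and the file names are placeholders:

frame = cv.imread("hazy_frame.png")
clear = clearImage(frame)
cv.imwrite("clear_frame.png", clear)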
def calc_manual_tolerance_mask(input_rgb, visualize_rgb, tolerance,
                               sample_step):
    input_h, input_w = input_rgb.shape[:2]

    combined_mask = unpad_mask(create_blank_mask(input_rgb))

    for x, y in generate_border_coords(input_rgb, sample_step):
        (x, y, w,
         h), background_mask = calc_flood_area(input_rgb, visualize_rgb, x, y,
                                               tolerance)

        if x == 0 or x == input_w - 1:
            if w == input_w:
                combined_mask = cv2.max(combined_mask, background_mask)

        if y == 0 or y == input_h - 1:
            if h == input_h:
                combined_mask = cv2.max(combined_mask, background_mask)

        # debug_show_img(background_mask)
        # debug_show_img(combined_mask)

    # visualize_rgb[:] = mask_to_rgb(best_background_mask)
    visualize_rgb[:] = mask_to_rgb(combined_mask)
    visualize_rgb[:, 0, :] = 0
    # x, y, w, h = best_box
    cv2.rectangle(visualize_rgb, (x, y), (x + w, y + h),
                  color=(255, 255, 0),
                  thickness=3)

    return combined_mask
   def make_long_frames(self):
      # Start the image producer
      self.imageQueue = queue.Queue(maxsize=2)
      self.imageProducer = ImageProducer("Producer", self.imageQueue)
      self.imageProducer.start()
      self.max_index = 0

      try:
         self.max_image = self.imageQueue.get(timeout = QUEUE_TIMEOUT)
         while True:
            # Get the next image from the queue and update the max
            self.im = self.imageQueue.get(timeout = QUEUE_TIMEOUT)
            cv2.max(self.im, self.max_image, self.max_image)

            self.max_index += 1

            # Write a long image
            if self.max_index >= NUM_LONG_MAX_IMAGES:
               self.max_index = 0
               cv2.imwrite(("long-" + self.imageProducer.get_currentimagefilename()), self.max_image)
               self.max_image.fill(0)

      except Exception:
         cv2.imwrite(("long-" + self.imageProducer.get_currentimagefilename()), self.max_image)
         print("Exception checking file ", sys.exc_info()[0])
         self.finish()
Example #10
def distFromColor(distImg, maskedDistImg, mask, binImg, distThreshold, hue, sat, val, colorHue, colorSat, colorVal, hueWeight=1, satWeight=1, valWeight=1):
	if distImg is None or distImg.shape != hue.shape:
		distImg = numpy.zeros(hue.shape, numpy.uint8)
	if maskedDistImg is None or maskedDistImg.shape != hue.shape:
		maskedDistImg = numpy.zeros(hue.shape, numpy.uint8)
	if binImg is None or binImg.shape != hue.shape:
		binImg = numpy.zeros(hue.shape, numpy.uint8)
	#distance = weighted average of dhue**2, dsat**2, dval**2
	scale = 1.0/(hueWeight + satWeight + valWeight)
	hueWeight *= scale
	satWeight *= scale
	valWeight *= scale
	hi = hue.item
	si = sat.item
	vi = val.item
	mi = mask.item
	di = distImg.itemset
	for row in range(0, distImg.shape[0]):
		for col in range(0, distImg.shape[1]):
			if mi(row, col) != 0:
				di((row, col), 255)
			else:
				h = hi(row, col) - colorHue
				s = si(row, col) - colorSat
				v = vi(row, col) - colorVal
				if h < -90:
					h+=180
				elif h > 90:
					h-=180
				di((row, col), math.sqrt(h*h*hueWeight + s*s*satWeight + v*v*valWeight))
	cv2.max(distImg, mask, maskedDistImg)
	cv2.threshold(maskedDistImg, distThreshold, 255, cv2.THRESH_BINARY_INV, binImg) 
	return distImg, maskedDistImg, binImg
Example #11
def multiply_3x3_mat(src, mat):
    """RGBの各ピクセルに対して3x3の行列演算を行う"""

    # Determine the normalization coefficient
    normalize_val = (2**(8 * src.itemsize)) - 1

    # Normalize to 0..1 and split into RGB
    b, g, r = np.dsplit(src / normalize_val, 3)

    # Matrix computation
    ret_r = r * mat[0][0] + g * mat[0][1] + b * mat[0][2]
    ret_g = r * mat[1][0] + g * mat[1][1] + b * mat[1][2]
    ret_b = r * mat[2][0] + g * mat[2][1] + b * mat[2][2]

    # Overflow check (actually unnecessary since the matrix coefficients are tuned)
    ret_r = cv2.min(ret_r, 1.0)
    ret_g = cv2.min(ret_g, 1.0)
    ret_b = cv2.min(ret_b, 1.0)

    # Underflow check (actually unnecessary since the matrix coefficients are tuned)
    ret_r = cv2.max(ret_r, 0.0)
    ret_g = cv2.max(ret_g, 0.0)
    ret_b = cv2.max(ret_b, 0.0)

    # Merge the RGB channels
    ret_mat = np.dstack((ret_b, ret_g, ret_r))

    # Scale back to 0..255
    ret_mat *= normalize_val

    return np.uint8(ret_mat)
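A hypothetical usage sketch with the identity matrix, so the output should equal the input up to rounding (note the matrix rows are in RGB order, per the code above; the file name is a placeholder):

mat = [[1.0, 0.0, 0.0],
       [0.0, 1.0, 0.0],
       [0.0, 0.0, 1.0]]
out = multiply_3x3_mat(cv2.imread("input.png"), mat)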
def clearImage(image):
    channels = cv2.split(image)

    # Get the maximum value of each channel
    # and get the dark channel of each image
    # record the maximum value of each channel
    a_max_dst = [float("-inf")] * len(channels)
    for idx in range(len(channels)):
        a_max_dst[idx] = channels[idx].max()

    dark_image = cv2.min(channels[0], cv2.min(channels[1], channels[2]))

    # Gaussian filtering the dark channel
    dark_image = cv2.GaussianBlur(dark_image, (25, 25), 0)

    image_t = (255. - 0.95 * dark_image) / 255.
    image_t = cv2.max(image_t, 0.5)

    # Calculate t(x) and get the clear image
    for idx in range(len(channels)):
        channels[idx] = cv2.max(
            cv2.add(
                cv2.subtract(channels[idx].astype(np.float32),
                             int(a_max_dst[idx])) / image_t,
                int(a_max_dst[idx])), 0.0) / int(a_max_dst[idx]) * 255
        channels[idx] = channels[idx].astype(np.uint8)

    return cv2.merge(channels)
def BGR_2_HSV_(img):

    h = np.zeros((img.shape[0], img.shape[1]), np.float32)
    s = np.zeros((img.shape[0], img.shape[1]), np.float32)
    v = np.zeros((img.shape[0], img.shape[1]), np.float32)

    img_r = img.copy()
    b = img[:, :, 0]
    g = img[:, :, 1]
    r = img[:, :, 2]
    max_v = cv.max(cv.max(b, g), r)
    min_v = cv.min(cv.min(b, g), r)
    delta = max_v - min_v
    v = max_v
    zero_m = (max_v == 0.)
    zero_m = zero_m.astype(np.float32)
    nonzero_m = (max_v != 0.)
    nonzero_m = nonzero_m.astype(np.float32)

    exp_m = zero_m * 10e-8

    s = delta / (max_v + exp_m)
    s = s * nonzero_m

    rmax = (r == max_v)
    rmax = rmax.astype(np.float32)

    gmax = (g == max_v)
    gr = (g != r)
    gmax = gmax.astype(np.float32)
    gr = gr.astype(np.float32)
    gmax = gmax * gr

    bmax = (b == max_v)
    br = (b != r)
    bg = (b != g)
    bmax = bmax.astype(np.float32)
    br = br.astype(np.float32)
    bg = bg.astype(np.float32)
    bmax = bmax * br * bg

    h += ((g - b) / (delta + 10e-8)) * rmax
    h += (((b - r) / (delta + 10e-8)) + 2.) * gmax
    h += (((r - g) / (delta + 10e-8)) + 4.) * bmax

    h = h * (np.pi / 3.)

    neg_m = (h < 0.0)
    neg_m = neg_m.astype(np.float32)
    h += neg_m * (2. * np.pi)

    h = h / (2. * np.pi)

    img_r[:, :, 0] = h
    img_r[:, :, 1] = s
    img_r[:, :, 2] = v

    return img_r
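A hedged check sketch, assuming a float32 BGR image in [0, 1]: for float input cv.cvtColor returns H in [0, 360) and S, V in [0, 1], while BGR_2_HSV_ stores H scaled to [0, 1), so OpenCV's hue is divided by 360 before comparing:

img = np.random.rand(64, 64, 3).astype(np.float32)
ours = BGR_2_HSV_(img)
ref = cv.cvtColor(img, cv.COLOR_BGR2HSV)
print(np.abs(ours[..., 0] - ref[..., 0] / 360.0).max())  # max hue difference
print(np.abs(ours[..., 1] - ref[..., 1]).max())          # max saturation difference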
Example #15
def split_chan(im, sz):
    b, g, r = cv2.split(im)
    bright = cv2.max(cv2.max(r, g), b)
    # dark = cv2.min(cv2.min(r, g), b)
    dc = cv2.min(cv2.min(r, g), b)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (sz, sz))
    dark = cv2.erode(dc, kernel)
    cv2.imwrite('dark.png', dark)
    return dark, bright, r, g, b
Example #16
def recolor_cmv(src, dst):
    """Simulate conversion from BGR to CMV (cyan, magenta, value).

       (b, g, r) -> (max(b, g, r), g, r)
    """
    b, g, r = cv2.split(src)
    cv2.max(b, g, b)  # b = max(b, g)
    cv2.max(b, r, b)
    cv2.merge((b, g, r), dst)
Example #17
def computaFiltroCMV(img):
    altura, largura, canais = img.shape
    dstImg = np.zeros((altura, largura, canais), np.uint8)
    b, g, r = cv2.split(img)
    cv2.max(b, g, b)
    cv2.max(b, r, b)
    cv2.merge((b, g, r), dstImg)

    return dstImg
Example #18
 def toColoursCMV(self):
     
     if not self._isColour:
         return None
     else:
         b, g, r = cv2.split(self._image)
         b = cv2.max(b, g)
         b = cv2.max(b, r)
         return OpenCvImage(cv2.merge((b, g, r)))
 def initSigmaMax(self):
     image0 = self.image[..., 0]
     image1 = self.image[..., 1]
     image2 = self.image[..., 2]
     # epsilon=0.000000000000001
     self.sigmaMax = cv2.max(cv2.max(image0, image1), image2)
     # epsilon=0.00000001# just to avoid invalid value on division
     sumImage = image0 + image1 + image2
     self.sigmaMax = self.sigmaMax / (sumImage + self.epsilon)
     self.sigmaMaxInit = self.sigmaMax
def computaFiltroCGA(img):
    larg, alt, _ = img.shape
    dst = inicializaImagem(larg, alt, 3)

    b, g, r = cv2.split(img)
    cv2.max(b, g, b)
    cv2.max(b, r, b)
    cv2.merge((b, g, r), dst)

    return dst
def rgb2gray(image):
    """
    Args:
        image (np.ndarray): Shape (height, width, channel)

    Returns:
        np.ndarray: Shape (height, width)
    """
    r, g, b = cv2.split(image)
    return cv2.add(cv2.multiply(cv2.max(cv2.max(r, g), b), 0.5),
                   cv2.multiply(cv2.min(cv2.min(r, g), b), 0.5))
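This is (max + min) / 2 per pixel, i.e. HSL lightness; an equivalent hedged NumPy sketch, assuming numpy is imported as np and a uint8 image:

def rgb2gray_np(image):
    # lightness = (max(r, g, b) + min(r, g, b)) / 2, matching the cv2 version up to rounding
    image = image.astype(np.float32)
    return ((image.max(axis=2) + image.min(axis=2)) / 2).astype(np.uint8)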
Example #24
    def rgbfilter_gray(self, image, rgbthreshold):
        b,g,r = cv2.split(image)
        rd = rgbthreshold
        min1 = cv2.min(b,g)
        min1 = cv2.min(min1,r)
        max1 = cv2.max(b,g)
        max1 = cv2.max(max1,r)

        diff = cv2.absdiff(max1,min1)
        res = cv2.compare(diff,rd,cv2.CMP_LT)
        return res
Example #25
 def gera_mapa_caract(self, R, G, B):
     tmp1 = cv2.max(R, G)
     RGBMax = cv2.max(B, tmp1)
     RGBMax[RGBMax <= 0] = 0.0001   
     RGMin = cv2.min(R, G)
     RG = (R - G) / RGBMax
     BY = (B - RGMin) / RGBMax
     RG[RG < 0] = 0
     BY[BY < 0] = 0
     RGFM = self.piram_gauss_CSD(RG)
     BYFM = self.piram_gauss_CSD(BY)
     return RGFM, BYFM
    def computeSpecularPart(self):
        self.specularPart = np.zeros(self.image[..., 0].shape)
        image0 = self.image[..., 0]
        image1 = self.image[..., 1]
        image2 = self.image[..., 2]
        imageMax = cv2.max(cv2.max(image0, image1), image2)
        imageSum = self.computeSum()

        # epsilon=0.000000000000001
        self.specularPart = imageMax - cv2.multiply(self.sigmaMax, imageSum)
        self.specularPart = cv2.divide(self.specularPart,
                                       1 - 3 * self.sigmaMax)
Example #27
 def final_fitting(c,edges):
     #use the real edge pixels to fit, not the approximated contours
     support_mask = np.zeros(edges.shape,edges.dtype)
     cv2.polylines(support_mask,c,isClosed=False,color=(255,255,255),thickness=2)
     # draw into the support mask with thickness 2
     new_edges = cv2.min(edges, support_mask)
     new_contours = cv2.findNonZero(new_edges)
     if self._window and visualize:
         new_edges[new_edges!=0] = 255
         overlay[:,:,1] = cv2.max(overlay[:,:,1], new_edges)
         overlay[:,:,2] = cv2.max(overlay[:,:,2], new_edges)
     new_e = cv2.fitEllipse(new_contours)
     return new_e,new_contours
Example #28
def recolorCMV(src, dst):
    """Simulate conversion from BGR to CMV (cyan, magenta, value).
    The source and destination images must both be in BGR format.
    Yellows are desaturated.
    Pseudocode:
        dst.b = max(src.b, src.g, src.r)
        dst.g = src.g
        dst.r = src.r
        """
    b, g, r = cv2.split(src)
    cv2.max(b, g, b)
    cv2.max(b, r, b)
    cv2.merge((b, g, r), dst)
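A hypothetical usage sketch; note that cv2.max(src1, src2, dst) writes the per-element maximum into dst in place, which is why the function can fill a preallocated destination (numpy assumed imported as np, file names are placeholders):

src = cv2.imread("input.png")
dst = np.empty_like(src)
recolorCMV(src, dst)
cv2.imwrite("cmv.png", dst)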
	def getColorMask(self, colorHueMin, colorHueMax):
		if self.mask1 is None or self.mask1.shape != self.hueImg.shape:
			self.mask1 = numpy.zeros(self.hueImg.shape, numpy.uint8)
		if self.mask2 is None or self.mask2.shape != self.hueImg.shape:
			self.mask2 = numpy.zeros(self.hueImg.shape, numpy.uint8)
		if self.mask is None or self.mask.shape != self.hueImg.shape:
			self.mask = numpy.zeros(self.hueImg.shape, numpy.uint8)
		cv2.threshold(self.hueImg, colorHueMin, 255, cv2.THRESH_BINARY_INV, self.mask1)
		cv2.threshold(self.hueImg, colorHueMax, 255, cv2.THRESH_BINARY, self.mask2)
		if colorHueMin > colorHueMax: #hueImg wraps around; in this case we want the AND of the two masks
			cv2.min(self.mask1, self.mask2, self.mask)
		else: #min < max --> we want the OR of the two masks
			cv2.max(self.mask1, self.mask2, self.mask)
Example #30
def CMVChannelMixer(src, dest):
    '''This function transfers the source image color channels from BGR
        to CMV (Cyan, Magenta, Value) by setting each blue pixel
        to the maximum of the red, green, and blue values of that pixel.
        Pseudocode:
        dest.blue = max(src.blue, src.red, src.green)
        dest.red = src.red
        dest.green = src.green '''

    b, g, r = cv2.split(src)
    cv2.max(b, g, b)
    cv2.max(b, r, b)
    cv2.merge((b, g, r), dest)
def draw_line(image, points, color):
    """ Draw line across several points with OpenCV

      Args:
        image (np.array): image to draw on
        points (np.array): 2d coordinates to draw line between
        color (np.array): color of line
    """

    line = np.zeros(image.shape, dtype=np.uint8)
    cv2.polylines(line, np.int32([points]), False, color, 1, cv2.LINE_AA, )
    line = np.array(line, dtype='float') / 255.
    cv2.max(image, line, image)
Example #33
def recolorCMV(src,dst):
    """ Simulate conversion from BGR to CMV (Cyan, Magenta, value)

    Yellows are desaturated.
    dst.b = max(src.b,src.g,src.r)
    dst.g = src.g
    dst.r = src.r
    """

    b,g,r = cv2.split(src)
    cv2.max(b,g,b)
    cv2.max(b,r,b)
    cv2.merge((b,g,r),dst)
Example #34
def nevatia_babu(gray):
    kernel1 = np.array([[-100, -100, 0, 100, 100], [-100, -100, 0, 100, 100],
                        [-100, -100, 0, 100, 100], [-100, -100, 0, 100, 100],
                        [-100, -100, 0, 100, 100]],
                       dtype=np.float32)  #0-deg orientation

    kernel2 = np.array([[-100, 32, 100, 100, 100], [-100, 78, 92, 100, 100],
                        [-100, -100, 0, 100, 100], [-100, -100, -92, 78, 100],
                        [-100, -100, -100, -32, 100]],
                       dtype=np.float32)  #30-deg orientation

    kernel3 = np.array([[100, 100, 100, 100, 100], [-32, 78, 100, 100, 100],
                        [-100, -92, 0, 92, 100], [-100, -100, -100, -78, 32],
                        [-100, -100, -100, -100, -100]],
                       dtype=np.float32)  #60-deg orientation

    kernel4 = np.array(
        [[100, 100, 100, 100, 100], [100, 100, 100, 100, 100], [0, 0, 0, 0, 0],
         [-100, -100, -100, -100, -100], [-100, -100, -100, -100, -100]],
        dtype=np.float32)  #90-deg orientation

    kernel5 = np.array([[100, 100, 100, 100, 100], [100, 100, 100, 78, -32],
                        [100, 92, 0, -92, -100], [32, -78, -100, -100, -100],
                        [-100, -100, -100, -100, -100]],
                       dtype=np.float32)  #120-deg orientation

    kernel6 = np.array([[100, 100, 100, 32, -100], [100, 100, 92, -78, -100],
                        [100, 100, 0, -100, -100], [100, 78, -92, -100, -100],
                        [100, -32, -100, -100, -100]],
                       dtype=np.float32)  #150-deg orientation

    #CV_32F - the pixel can have any value between 0-1.0
    #NORM_MINMAX
    #CV_8UC1 - number of channels
    # https://docs.opencv.org/2.4/modules/core/doc/operations_on_arrays.html#normalize

    k1 = cv2.normalize(cv2.filter2D(gray, cv2.CV_32F, kernel1), None, 0, 255,
                       cv2.NORM_MINMAX, cv2.CV_8UC1)
    k2 = cv2.normalize(cv2.filter2D(gray, cv2.CV_32F, kernel2), None, 0, 255,
                       cv2.NORM_MINMAX, cv2.CV_8UC1)
    k3 = cv2.normalize(cv2.filter2D(gray, cv2.CV_32F, kernel3), None, 0, 255,
                       cv2.NORM_MINMAX, cv2.CV_8UC1)
    k4 = cv2.normalize(cv2.filter2D(gray, cv2.CV_32F, kernel4), None, 0, 255,
                       cv2.NORM_MINMAX, cv2.CV_8UC1)
    k5 = cv2.normalize(cv2.filter2D(gray, cv2.CV_32F, kernel5), None, 0, 255,
                       cv2.NORM_MINMAX, cv2.CV_8UC1)
    k6 = cv2.normalize(cv2.filter2D(gray, cv2.CV_32F, kernel6), None, 0, 255,
                       cv2.NORM_MINMAX, cv2.CV_8UC1)

    magn = cv2.max(k1, cv2.max(k2, cv2.max(k3, cv2.max(k4, cv2.max(k5, k6)))))
    return magn
Example #35
def genColorMask(hue, colorHueMin, colorHueMax, colorMask=None, colorMask1=None, colorMask2=None):
	if colorMask1 is None or colorMask1.shape != hue.shape:
		colorMask1 = numpy.zeros(hue.shape, numpy.uint8)
	if colorMask2 is None or colorMask2.shape != hue.shape:
		colorMask2 = numpy.zeros(hue.shape, numpy.uint8)
	if colorMask is None or colorMask.shape != hue.shape:
		colorMask = numpy.zeros(hue.shape, numpy.uint8)
	cv2.threshold(hue, colorHueMin, 255, cv2.THRESH_BINARY_INV, colorMask1)
	cv2.threshold(hue, colorHueMax, 255, cv2.THRESH_BINARY, colorMask2)
	if colorHueMin > colorHueMax: #hue wraps around; in this case we want the AND of the two masks
		cv2.min(colorMask1, colorMask2, colorMask)
	else: #min < max --> we want the OR of the two masks
		cv2.max(colorMask1, colorMask2, colorMask)
	return (colorMask, colorMask1, colorMask2)
Example #36
def recolourCMV(source, destination):
    """
    Simulate conversion from BGR to CMV (cyan, magenta, value).
    Source and destination images must both be in BGR format.

    Yellows are desaturated. Pseudocode:
    destination.blue  = max(source.blue, source.green, source.red)
    destination.green = source.green
    destination.red   = source.red
    """
    blue, green, red = cv2.split(source)
    cv2.max(blue, green, blue)
    cv2.max(blue, red, blue)
    cv2.merge((blue, green, red), destination)
Example #37
def color_similarity_2d(image, color):
    """
    Args:
        image: 2D array.
        color: (r, g, b)

    Returns:
        np.ndarray: uint8
    """
    r, g, b = cv2.split(cv2.subtract(image, (*color, 0)))
    positive = cv2.max(cv2.max(r, g), b)
    r, g, b = cv2.split(cv2.subtract((*color, 0), image))
    negative = cv2.max(cv2.max(r, g), b)
    return cv2.subtract(255, cv2.add(positive, negative))
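A hypothetical usage sketch: higher values mean a pixel is closer to the target color, so thresholding the result gives a color-match mask (the 235 cutoff is an illustrative value):

sim = color_similarity_2d(image, color=(255, 255, 255))  # per-pixel similarity to white
mask = sim > 235                                          # boolean match mask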
Example #38
def Canny_maxtrix_parallel_imp(src, thresh1, thresh2):
    assert thresh1 < thresh2

    Gx = cv2.Sobel(src=src, ddepth=cv2.CV_32F, dx=1, dy=0)
    Gy = cv2.Sobel(src=src, ddepth=cv2.CV_32F, dx=0, dy=1)
    magnitude, angle = cv2.cartToPolar(Gx, Gy, angleInDegrees=True)

    angle %= 180
    angle_0_mask = (angle < 22.5) | (angle >= 157.5)
    angle_45_mask = (67.5 > angle) & (angle >= 22.5)
    angle_90_mask = (112.5 > angle) & (angle >= 67.5)
    angle_135_mask = (157.5 > angle) & (angle >= 112.5)

    dsize = (src.shape[1], src.shape[0])
    M = np.array([[1, 0, -1], [0, 1, 0]], dtype=np.float32)
    shift_left = cv2.warpAffine(magnitude, M, dsize)
    M = np.array([[1, 0, 1], [0, 1, 0]], dtype=np.float32)
    shift_right = cv2.warpAffine(magnitude, M, dsize)

    M = np.array([[1, 0,  0], [0, 1, -1]], dtype=np.float32)
    shift_up = cv2.warpAffine(magnitude, M, dsize)
    M = np.array([[1, 0,  0], [0, 1, 1]], dtype=np.float32)
    shift_down = cv2.warpAffine(magnitude, M, dsize)

    M = np.array([[1, 0,  1], [0, 1, 1]], dtype=np.float32)
    shift_right_down = cv2.warpAffine(magnitude, M, dsize)
    M = np.array([[1, 0,  -1], [0, 1, -1]], dtype=np.float32)
    shift_left_up = cv2.warpAffine(magnitude, M, dsize)

    M = np.array([[1, 0,  1], [0, 1, -1]], dtype=np.float32)
    shift_right_up = cv2.warpAffine(magnitude, M, dsize)
    M = np.array([[1, 0,  -1], [0, 1, 1]], dtype=np.float32)
    shift_left_down = cv2.warpAffine(magnitude, M, dsize)

    shift_left_right_max = cv2.max(shift_left, shift_right)
    shift_up_down_max = cv2.max(shift_up, shift_down)
    shift_rd_lu_max = cv2.max(shift_right_down, shift_left_up)
    shift_ru_lf_max = cv2.max(shift_right_up, shift_left_down)

    magnitude[angle_0_mask] *= (magnitude[angle_0_mask] >= shift_left_right_max[angle_0_mask])
    magnitude[angle_45_mask] *= (magnitude[angle_45_mask] >= shift_rd_lu_max[angle_45_mask])
    magnitude[angle_90_mask] *= (magnitude[angle_90_mask] >= shift_up_down_max[angle_90_mask])
    magnitude[angle_135_mask] *= (magnitude[angle_135_mask] >= shift_ru_lf_max[angle_135_mask])

    strong_edge = ((magnitude > thresh2) * 255).astype('uint8')
    full_edge = ((magnitude > thresh1) * 255).astype('uint8')
    optimized_edge = geodesicDilation(strong_edge, full_edge, kernel=np.ones((3, 3)))

    return cv2.convertScaleAbs(src=optimized_edge)
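A hypothetical usage sketch; geodesicDilation, the hysteresis-linking step, is assumed to be defined elsewhere in the module, and the file name is a placeholder:

gray = cv2.imread("input.jpg", cv2.IMREAD_GRAYSCALE)
edges = Canny_maxtrix_parallel_imp(gray, thresh1=50, thresh2=150)
reference = cv2.Canny(gray, 50, 150)  # built-in Canny for comparison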
Example #39
def extract_white_letters(image, threshold=128):
    """Set letter color to black, set background color to white.
    This function will discourage color pixels (Non-gray pixels)

    Args:
        image: Shape (height, width, channel)
        threshold (int):

    Returns:
        np.ndarray: Shape (height, width)
    """
    r, g, b = cv2.split(cv2.subtract((255, 255, 255, 0), image))
    minimum = cv2.min(cv2.min(r, g), b)
    maximum = cv2.max(cv2.max(r, g), b)
    return cv2.multiply(cv2.add(maximum, cv2.subtract(maximum, minimum)), 255.0 / threshold)
Example #40
def Example_ImConv2D_Kirsch():
    ip = cv2IP.ConvIP()
    Img = ip.ImRead(srcImg)
    ip.ImShow("original", Img)
    src_gray = ip.ImBGR2Gray(Img)
    kernels = ip.GetKirschKernel()
    grad_planes = []
    for i in range(0, len(kernels)):
        grad_planes.append(ip.Conv2D(src_gray, kernels[i]))
    temp_1 = cv2.max(grad_planes[0], cv2.max(grad_planes[1], grad_planes[2]))
    temp_2 = cv2.max(grad_planes[3], cv2.max(grad_planes[4], grad_planes[5]))
    temp_3 = cv2.max(grad_planes[6], grad_planes[7])
    final = cv2.max(temp_1, cv2.max(temp_2, temp_3))
    ip.ImShow("Kirsch Images", final)
    del ip
Example #41
def recolorCMV(src, dst):
    """
    BGRからCMV(シアン、マゼンタ、値)への変換をシミュレートする
    コードの内容:
    dst.b = max(src.b, src.g, src.r)
    dst.g = src.g
    dst.r = src.r
    :param src: BGR形式の入力画像
    :param dst: BGR形式の出力画像
    :return: None
    """
    b, g, r = cv2.split(src)
    cv2.max(b, g, b)
    cv2.max(b, r, b)
    cv2.merge((b, g, r), dst)
Example #42
def stitch(new_img, base_img, H):
    """
    Stitch 2 images together
    :param new_img: image to stitch
    :param base_img: base image
    :param H: homography matrix
    :return: stitched image
    """
    # do the invert because we want to transform new_image to the base_image
    H_inv = np.linalg.pinv(H)

    min_x, max_x, min_y, max_y = findDimensions(new_img, H_inv)

    # Adjust max_x and max_y by base img size
    max_x = max(max_x, base_img.shape[1])
    max_y = max(max_y, base_img.shape[0])

    # create translation transform matrix
    move_h = np.matrix(np.identity(3), np.float32)

    if min_x < 0:
        move_h[0, 2] -= math.ceil(min_x)
        max_x -= min_x

    if min_y < 0:
        move_h[1, 2] -= math.ceil(min_y)
        max_y -= min_y

    mod_inv_h = move_h * H_inv

    img_w = int(math.ceil(max_x))
    img_h = int(math.ceil(max_y))

    base_img_warp = cv2.warpPerspective(base_img, move_h, (img_w, img_h))
    new_img_warp = cv2.warpPerspective(new_img, mod_inv_h, (img_w, img_h))

    # create empty matrix
    canvas = np.zeros((img_h, img_w, 3), np.uint8)

    # combining
    canvas = cv2.max(canvas, base_img_warp)
    canvas = cv2.max(canvas, new_img_warp)

    # remove black edges
    canvas = remove_black_edges(canvas)

    return canvas
Example #43
def simplest_color_balance(src, percent=1):
    # assert(input.channels() ==3)
    # assert(percent > 0 && percent < 100)

    half_percent = float(percent) / 200.0

    channels = cv2.split(src)
    out = []
    for channel in channels:
        # find the low and high percentile values (based on the input percentile)
        flat = channel.ravel().tolist()
        flat.sort()
        lowval = flat[int(floor(float(len(flat)) * half_percent))]
        highval = flat[int(ceil(float(len(flat)) * (1.0-half_percent)))]

        # saturate below the low percentile and above the high percentile
        # channel = cv2.threshold(channel, highval, -1, cv2.THRESH_TRUNC) # truncate values to max of highval
        # for row in channel:
        #     for c in xrange(len(row)):
        #         if row[c] < lowval
        channel = cv2.max(channel, lowval)
        channel = cv2.min(channel, highval)

        # scale the channel
        channel = cv2.normalize(channel, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
        out.append(channel)

    out = cv2.merge(out)
    return out
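A hypothetical usage sketch; floor and ceil in the function body are assumed to come from `from math import floor, ceil`, and the file names are placeholders:

balanced = simplest_color_balance(cv2.imread("photo.jpg"), percent=1)
cv2.imwrite("balanced.jpg", balanced)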
Example #44
def recolorCMV(src,dst):
	"""Simulate conversion from BGR to CMV(cyan,magenta,value)
	The source and destination images must both be in BGR format.
	Yellows are desaturated.
	Pseudocode:
	dst.b = max(src.b,src.g,src.r)
	dst.g = src.g
	dst.r = src.r
	"""

	b,g,r = cv2.split(src)
	#max() computes the per-element maximum of the first two arguments
	#and writes it to the third argument
	cv2.max(b,g,b)
	cv2.max(b,r,b)
	cv2.merge((b,g,r),dst)
    def compute_boxscore(self, boxsize=17):
        """
        Basically, if we count the number of dark pixels on the image should
        give us a clue if the box is checked or not. But sometimes the
        sponsor write around the box, which increase the number of dark
        pixels. This is why we start by cropping precisely around the box.
        Then we compute the Canny Edge and Canny Curve and merge them.
        :param boxsize:
        :return:
        """
        # Negative image
        img = 255 - np.copy(self.img)

        # detect the box
        left, right, top, bottom = self._box_coordinates(
            img, squarsize=boxsize)

        # Detect the line borders with Canny Edge
        canny1 = cv2.Canny(img, 20, 20)
        # Detect line itself with Canny Curve
        canny2 = self._canny_curve_detector(
            img, low_thresh=20, high_thresh=20)
        # Merge the two Cannys by keeping the maximum for each pixel
        canny = cv2.max(canny1, canny2)
        # Crop around the box
        canny = canny[top:bottom, left:right]
        self.canny = canny
        # Compute the integral of the image.
        count = cv2.sumElems(canny)[0] / 255
        return count
Example #46
    def test_cudaarithm_logical(self):
        npMat1 = (np.random.random((128, 128)) * 255).astype(np.uint8)
        npMat2 = (np.random.random((128, 128)) * 255).astype(np.uint8)

        cuMat1 = cv.cuda_GpuMat()
        cuMat2 = cv.cuda_GpuMat()
        cuMat1.upload(npMat1)
        cuMat2.upload(npMat2)

        self.assertTrue(np.allclose(cv.cuda.bitwise_or(cuMat1, cuMat2).download(),
                                         cv.bitwise_or(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.bitwise_and(cuMat1, cuMat2).download(),
                                         cv.bitwise_and(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.bitwise_xor(cuMat1, cuMat2).download(),
                                         cv.bitwise_xor(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.bitwise_not(cuMat1).download(),
                                         cv.bitwise_not(npMat1)))

        self.assertTrue(np.allclose(cv.cuda.min(cuMat1, cuMat2).download(),
                                         cv.min(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.max(cuMat1, cuMat2).download(),
                                         cv.max(npMat1, npMat2)))
def best_outline(image_580_360, display=False):
    '''Returns the result (contour outlines), a list of parameters for saving, plus a binary mask image'''
    h, w = image_580_360.shape[0:2]
    img = image_580_360
    im = img.astype(np.float32)+0.001 #to avoid division by 0
    c1c2c3 = np.arctan(im/np.dstack((cv2.max(im[...,1], im[...,2]), cv2.max(im[...,0], im[...,2]), cv2.max(im[...,0], im[...,1]))))
    bimg,gimg,rimg = cv2.split(c1c2c3)
    rimg = cv2.normalize(rimg, rimg, 0,255,cv2.NORM_MINMAX,dtype=cv2.CV_8UC1)
    gimg = cv2.normalize(gimg, gimg, 0,255,cv2.NORM_MINMAX,dtype=cv2.CV_8UC1)
    bimg = cv2.normalize(bimg, bimg, 0,255,cv2.NORM_MINMAX,dtype=cv2.CV_8UC1)
    accumulation_mask = np.zeros((img.shape[0], img.shape[1]), np.uint8)
    plot_circular_contour(gimg, accumulation_mask,  "green" if display else None)
    plot_circular_contour(bimg, accumulation_mask,  "blue" if display else None)
    params = plot_circular_contour(rimg, accumulation_mask, "red" if display else None)
    contours = cv2.findContours(accumulation_mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)[0]
    return(contours, params, accumulation_mask)
Example #48
def greyValueSegmentation(img,segNum):
	aux = cv2.minMaxLoc(img)
	step = int((aux[1]-aux[0])/segNum)
	segMask = np.zeros(img.shape,np.uint8)
	retList = []
	imageList = []
	for i in range(segNum):
		if i==segNum-1:
			valRange = (int(aux[0]+i*step),-1)
		else:	
			valRange = (int(aux[0]+i*step),int(aux[0]+(i+1)*step))
		auxMask = intervalThreshold(img,valRange)
		auxMask = cv2.threshold(auxMask,1,valRange[1],cv2.THRESH_BINARY)[1]
		
		rawContours,hierarchy = cv2.findContours(auxMask.copy(),
		cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
		bigCont = []
		if len(rawContours)>0:
			for cnt in zip(hierarchy[0],rawContours):
				#contours that (have no children or have a left sibling) and are not single-point
				#print cnt[0]
				#if (cnt[0][2]<0 or cnt[0][0]>-1) and len(cnt[1])>1:
				if len(cnt[1])>1:
					bigCont.append(cv2.approxPolyDP(cnt[1],3,True))
		retList.append(bigCont)
		imageList.append(auxMask)
		segMask = cv2.max(segMask,auxMask)
	return segMask,retList,imageList
def detect_black_white_blobs(img, s_low=50, s_high=200, v_low=20, v_high=130):
    blurred = cv2.GaussianBlur(img, (5,5), 5)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    # low sat: grey shades (white not included)
    ret_val, low_sat_mask = cv2.threshold(s, s_low, 255, cv2.THRESH_BINARY_INV)
    # high sat
    ret_val, high_sat_mask = cv2.threshold(v, s_high, 255, cv2.THRESH_BINARY)
    # low val: dark/black shades
    ret_val, low_val_mask = cv2.threshold(v, v_low, 255, cv2.THRESH_BINARY_INV)
    # high val: includes white
    ret_val, high_val_mask = cv2.threshold(v, v_high, 255, cv2.THRESH_BINARY)
    grey_or_dark = cv2.min(high_sat_mask, low_val_mask) # grey or dark
    grey_and_bright = cv2.min(low_sat_mask, high_val_mask) # grey and bright (white)
    black_or_white = cv2.max(grey_or_dark, grey_and_bright)
    mask = img.copy()
    mask = cv2.merge([grey_or_dark, low_sat_mask, high_val_mask], mask)
    # Debug code here:
    collage = glue_2x2(*(map(half_size, (low_sat_mask, high_sat_mask, low_val_mask, high_val_mask))))
    collage = cv2.cvtColor(collage, cv2.COLOR_GRAY2BGR)
    height, width = h.shape
    put_text(collage, "Low sat", (0,20))
    put_text(collage, "High sat", (width/2,20))
    put_text(collage, "Low val", (0,20 + height/2))
    put_text(collage, "High val", (width/2,20 + height/2))
    return collage
Example #51
def greyValueSegmentation(img,segNum):
	aux = cv2.minMaxLoc(img)
	step = int((aux[1]-aux[0])/segNum)
	segMask = np.zeros(img.shape,np.uint8)
	retList = []
	imageList = []
	for i in range(segNum):
		if i==segNum-1:
			print('HIGHEST VALUE')
			valRange = (int(aux[0]+i*step),-1)
			auxMask = intervalThreshold(img,valRange)
			auxMask = cv2.threshold(auxMask,1,255,cv2.THRESH_BINARY)[1]
		else:	
			valRange = (int(aux[0]+i*step),int(aux[0]+(i+1)*step))
			auxMask = intervalThreshold(img,valRange)
			auxMask = cv2.threshold(auxMask,1,valRange[1],cv2.THRESH_BINARY)[1]
		


		rawContours,hierarchy = cv2.findContours(auxMask.copy(),
		cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
		bigCont = []
		if len(rawContours)>0:
			for cnt in zip(hierarchy[0],rawContours):
				if len(cnt[1])>1:
					bigCont.append(cv2.approxPolyDP(cnt[1],3,True))
		retList.append(bigCont)
		imageList.append(auxMask)
		segMask = cv2.max(segMask,auxMask)
	return segMask,retList,imageList
Example #52
def faceSwapImages(im1):
    im1 = ensureImageLessThanMax(im1)
    im1_all_landmarks = get_landmarks(im1)
    im1 = im1.astype(numpy.float64)

    for im1_face_landmarks in im1_all_landmarks:
        im2_direction, im2,im2_landmarks,im2_flipped,im2_landmarks_flipped = random.choice(FACESWAPS)

        #swap the face if they're pointing the wrong direction
        im1_direction = getFaceDirection(im1_face_landmarks)
        if im2_direction != im1_direction:
            im2 = im2_flipped
            im2_landmarks = im2_landmarks_flipped

        M = transformation_from_points(im2_landmarks[ALIGN_POINTS],
                                       im1_face_landmarks[ALIGN_POINTS])

        mask = get_face_mask(im2, im2_landmarks)
        warped_mask = cv2.warpPerspective(mask,
                                          M,
                                          (im1.shape[1], im1.shape[0]))
        combined_mask = cv2.max(get_face_mask(im1, im1_face_landmarks), warped_mask)

        #warp onto im1 to try and reduce any color correction issues around the edge of im2
        warped_im2 = cv2.warpPerspective(im2,
                                         M,
                                         (im1.shape[1], im1.shape[0]),
                                         dst=im1.copy(),
                                         borderMode=cv2.BORDER_TRANSPARENT)

        warped_corrected_im2 = correct_colours(im1, warped_im2, im1_face_landmarks)

        im1 = im1 * (1.0 - combined_mask) + warped_corrected_im2 * combined_mask
    im1 = numpy.clip(im1, 0, 255, out=im1).astype(numpy.uint8)
    return im1
def bpSignificantSquares(img,sigSquares,probThresh):
	print('-BP-')
	#get a list of the regions of the image for which the histogram for backprojection will be calculated
	print('-BP-getRegionList')
	myTime = clock()
	regionList = getSignificantRegions(img,sigSquares)
	print('-BP- len of regionList: '+str(len(regionList)))
	print('-BP- ===>takes '+str(clock()-myTime))
	myTime = clock()

	mask = np.zeros((img.shape[:2]),np.uint8)

	print('-BP-gethist and bp')
	myTime = clock()
	for region in regionList:
		hist = getHist(region)[0]
		aux = cv2.calcBackProject([cv2.cvtColor(img,cv2.COLOR_BGR2HSV)],
			[0,1], hist, [0,180,0,256],1)
		bp = cv2.threshold(aux,probThresh,255,cv2.THRESH_BINARY)[1]
		mask = cv2.max(mask,bp)
	print('-BP- ===>takes '+str(clock()-myTime))
	myTime = clock()

	aux = np.zeros((mask.shape),np.uint8)
	print('-BP-getComponentOf ')
	aux = getComponentOf(mask,(mask.shape[1]//2,mask.shape[0]//2),True)
	print('-BP- ===>takes '+str(clock()-myTime))
	myTime = clock()

	return cv2.merge([mask,]*3) , cv2.merge([aux*255,]*3)
Example #54
def main():

    '''
    print("Python    : %s " % sys.version)
    print("OpenCV    : %s " % cv2.__version__)
    print("Numpy     : %s " % numpy.__version__)
    print("Matplotlib: %s " % matplotlib.__version__)
    '''
    # Display the image
    img = cv2.imread("data/face.jpg")
    #cv2.imshow('res', img)

    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    #cv2.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    #show()

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Turn off the axis

    sobeled_x = cv2.Sobel(gray, cv2.CV_32F, 1, 0)
    sobeled_y = cv2.Sobel(gray, cv2.CV_32F, 0, 1)

    cv2.imshow('gray_sobel_edge', sobeled_x)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    cv2.imshow('gray_sobel_edge', sobeled_y)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    gray_abs_sobelx = cv2.convertScaleAbs(sobeled_x)
    gray_abs_sobely = cv2.convertScaleAbs(sobeled_y)

    gray_sobel_edge = cv2.addWeighted(gray_abs_sobelx,0.5,gray_abs_sobely,0.5,0)
    cv2.imshow('gray_sobel_edge',gray_sobel_edge)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


    laplace = cv2.Laplacian(gray, cv2.CV_64F)

    img64 = numpy.double(img)
    img64[:,:,2] += cv2.max(abs(laplace) - 200, 0) * 4
    numpy.clip(img64, 0, 255, out = img64)
    img = img64.astype('uint8')

    gray_sobel_edge = cv2.cvtColor(gray_sobel_edge, cv2.COLOR_GRAY2BGR)
    img = cv2.addWeighted(img, 0.5,gray_sobel_edge,0.5,0)

    plt.axis('off')
    plt.title("Input Stream")
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.show()



def recolorCMV(src, dst):
    """Simulate conversion from BGR to CMV (cyan, magenta, value).
    
    The source and destination images must both be in BGR format.
    
    Yellows are desaturated.
    
    Pseudocode:
    dst.b = max(src.b, src.g, src.r)
    dst.g = src.g
    dst.r = src.r
    
    """
    b, g, r = cv2.split(src)
    cv2.max(b, g, b)
    cv2.max(b, r, b)
    cv2.merge((b, g, r), dst)
def plate_text_image(img):
    '''Extract the plate text coloured area, everything else is set to white.'''
    blackish_mask = cv2.inRange(img, (0,0,0), (50,50,50))
    dilated_black = cv2.dilate(blackish_mask, kernel=None, iterations=3)
    whiteish_mask = cv2.inRange(img, (150,150,150), (255,255,255))
    dilated_white = cv2.dilate(whiteish_mask, kernel=None, iterations=3)
    plate_mask = cv2.bitwise_or(dilated_black, dilated_white)
    result = cv2.max(select_mask_area(img, plate_mask), non_mask)
    cv2.imshow('plate_text_mask', np.ma.vstack((img, cv2.cvtColor(plate_mask, cv2.COLOR_GRAY2BGR), result)))
    return result
Example #57
 def CFMGetFM(self, R, G, B):
     # max(R,G,B)
     tmp1 = cv2.max(R, G)
     RGBMax = cv2.max(B, tmp1)
     RGBMax[RGBMax <= 0] = 0.0001    # prevent dividing by 0
     # min(R,G)
     RGMin = cv2.min(R, G)
     # RG = (R-G)/max(R,G,B)
     RG = (R - G) / RGBMax
     # BY = (B-min(R,G)/max(R,G,B)
     BY = (B - RGMin) / RGBMax
     # clamp negative values to 0
     RG[RG < 0] = 0
     BY[BY < 0] = 0
     # obtain feature maps in the same way as intensity
     RGFM = self.FMGaussianPyrCSD(RG)
     BYFM = self.FMGaussianPyrCSD(BY)
     # return
     return RGFM, BYFM
Example #58
def recon_l_pyr(pyr):
    nlevs=len(pyr)
    lowpass=np.array(pyr[nlevs-1],dtype=np.uint8)
    for i in range(nlevs-2,-1,-1):
        band=pyr[i]
        lowpass=cv2.pyrUp(lowpass)[:band.shape[0],:band.shape[1],:]
        highpass=cv2.add(np.array(lowpass,dtype=np.int16),band)
        highpass=cv2.min(highpass,np.array([255,255,255]))
        highpass=cv2.max(highpass,np.array([0,0,0]))
        lowpass=np.array(highpass,dtype=np.uint8)
    return lowpass
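A hypothetical companion sketch that builds the Laplacian pyramid recon_l_pyr inverts, assuming cv2 and numpy (as np) are imported and a 3-channel uint8 image; each band stores the int16 difference between a level and the upsampled next level, matching the cv2.add reconstruction above:

def build_l_pyr(img, nlevs):
    pyr = []
    lowpass = img
    for _ in range(nlevs - 1):
        down = cv2.pyrDown(lowpass)
        up = cv2.pyrUp(down)[:lowpass.shape[0], :lowpass.shape[1], :]
        # band = current level minus upsampled coarser level, in int16 to allow negatives
        band = cv2.subtract(np.array(lowpass, dtype=np.int16),
                            np.array(up, dtype=np.int16))
        pyr.append(band)
        lowpass = down
    pyr.append(lowpass)
    return pyr

# round trip: recon_l_pyr(build_l_pyr(img, 4)) should reproduce img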
def main():
#    c_main()

#cdef c_main():

    if len(sys.argv) != 3:
        print "./binarization_for_ocr.py <in_file> <out_file>"
        quit()

    # cdef unicode
    filename = sys.argv[1]
    # cdef unicode
    outfile = sys.argv[2]

    # cv2.ocl.setUseOpenCL(True)

    # cdef np.ndarray[DTYPE_t, ndim=2]
    image_color = cv2.imread(filename, cv2.IMREAD_COLOR)

    if image_color is None:
        print "input file is not found"
        quit()

    channels = cv2.split(image_color)
    image = channels[1] # drop blue channel for yellowish books

    mode_0 = mode(channels[0].flat)[0]
    mode_1 = mode(channels[1].flat)[0]
    mode_2 = mode(channels[2].flat)[0]

    channels[0] = cv2.absdiff(channels[0], mode_0)
    channels[1] = cv2.absdiff(channels[1], mode_1)
    channels[2] = cv2.absdiff(channels[2], mode_2)
    img_diff = cv2.max(channels[0], channels[1])
    img_diff = cv2.max(img_diff, channels[2])

    img_diff = cv2.fastNlMeansDenoising(img_diff, None, 100, 7, 21) ###

    process(image, img_diff, outfile, True, 3)
    def update(self,frame,events):
        img = frame.img
        height = img.shape[0] 
        width = img.shape[1] 

        #blur = cv2.GaussianBlur(img,(5,5),0)
        blur = frame.img
        edges = []
        blue, green, red = 0, 1, 2
        
        # thresh_mode
        if self.thresh_mode == "BINARY":
            cv2_thresh_mode = cv2.THRESH_BINARY

        if self.thresh_mode == "BINARY_INV":
            cv2_thresh_mode = cv2.THRESH_BINARY_INV

        if self.thresh_mode == "TRUNC":
            cv2_thresh_mode = cv2.THRESH_TRUNC

        if self.thresh_mode == "TOZERO":
            cv2_thresh_mode = cv2.THRESH_TOZERO

        # apply the threshold to each channel 
        for channel in (blur[:,:,blue], blur[:,:,green], blur[:,:,red]):
          retval, edg = cv2.threshold(channel, self.threshold, 255, cv2_thresh_mode)
          edges.append(edg)

        # lets merge the channels again
        edges.append(np.zeros((height, width, 1), np.uint8))
        edges_edt = cv2.max(edges[blue], edges[green])
        edges_edt = cv2.max(edges_edt, edges[red])
        merge = [edges_edt, edges_edt, edges_edt]

        # lets check the result
        frame.img = cv2.merge(merge)