def warmer(img, amount):
	"""Warming filter: boost reds, suppress blues, then boost saturation."""
	if amount < 0 or amount > 1:
		raise ValueError('amount must be between 0 and 1')

	incr_ch_lut = create_LUT_8UC1([0, 64, 192, 256],
	                              [0, 64 + 40*amount, 192 + 45*amount, 256])
	decr_ch_lut = create_LUT_8UC1([0, 64, 192, 256],
	                              [0, 64 - 52*amount, 192 - 85*amount, 192])

	# Warm the RGB channels: push red up, pull blue down.
	c_r, c_g, c_b = cv2.split(img)
	c_r = cv2.LUT(c_r, incr_ch_lut).astype(np.uint8)
	c_b = cv2.LUT(c_b, decr_ch_lut).astype(np.uint8)
	img_rgb = cv2.merge((c_r, c_g, c_b))

	# Increase saturation in HSV space for a richer look.
	c_h, c_s, c_v = cv2.split(cv2.cvtColor(img_rgb, cv2.COLOR_RGB2HSV))
	c_s = cv2.LUT(c_s, incr_ch_lut).astype(np.uint8)

	img_warmer = cv2.cvtColor(cv2.merge((c_h, c_s, c_v)), cv2.COLOR_HSV2RGB)

	return img_warmer
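Note: create_LUT_8UC1 is not defined in this snippet; a minimal sketch, assuming it interpolates a 256-entry uint8 lookup table through the given control points (the original may use a spline rather than linear interpolation):

import numpy as np

def create_LUT_8UC1(x, y):
    # Hypothetical helper: 256-entry uint8 LUT interpolated through (x, y).
    lut = np.interp(np.arange(256), x, y)
    return np.clip(lut, 0, 255).astype(np.uint8)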
def bpSignificantSquares(img, sigSquares, probThresh):
	print('-BP-')
	# Get the list of image regions whose histograms will drive the back projection.
	print('-BP-getRegionList')
	myTime = clock()
	regionList = getSignificantRegions(img, sigSquares)
	print('-BP- len of regionList: ' + str(len(regionList)))
	print('-BP- ===>takes ' + str(clock() - myTime))

	mask = np.zeros(img.shape[:2], np.uint8)

	print('-BP-gethist and bp')
	myTime = clock()
	hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
	for region in regionList:
		hist = getHist(region)[0]
		aux = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
		bp = cv2.threshold(aux, probThresh, 255, cv2.THRESH_BINARY)[1]
		mask = cv2.max(mask, bp)
	print('-BP- ===>takes ' + str(clock() - myTime))
	myTime = clock()

	print('-BP-getComponentOf ')
	aux = getComponentOf(mask, (mask.shape[1] // 2, mask.shape[0] // 2), True)
	print('-BP- ===>takes ' + str(clock() - myTime))

	return cv2.merge([mask, ] * 3), cv2.merge([aux * 255, ] * 3)
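Note: getHist and the other helpers are project code not shown here; a plausible sketch of getHist, assuming it returns a tuple whose first element is a normalized hue-saturation histogram (matching the [0] indexing and the calcBackProject ranges above):

import cv2

def getHist(region):
    # Hypothetical helper: normalized H-S histogram of a BGR region.
    hsv = cv2.cvtColor(region, cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
    return hist, hsv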
Example #3
    def execute(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Detect edges in place, then copy the edge map into all three channels.
        cv2.Canny(gray, self.threshold1.get(), self.threshold2.get(), gray)
        cv2.merge((gray, gray, gray), image)

        return image
def HistEqual(imgBGR):
    # histogram equalization in BGR
    b,g,r = cv2.split(imgBGR)
    b = cv2.equalizeHist(b)
    g = cv2.equalizeHist(g)
    r = cv2.equalizeHist(r)
    imgBGRHist = cv2.merge((b,g,r))

    # histogram equalization in HSV (value channel only)
    imgHSV = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2HSV)
    h,s,v = cv2.split(imgHSV)
##    h = cv2.equalizeHist(h)
##    s = cv2.equalizeHist(s)
    v = cv2.equalizeHist(v)
    imgHSV = cv2.cvtColor(cv2.merge((h,s,v)), cv2.COLOR_HSV2BGR)

    # histogram equalization in LAB (lightness channel only)
    imgLAB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2LAB)
    l,a,b = cv2.split(imgLAB)
    l = cv2.equalizeHist(l)
##    a = cv2.equalizeHist(a)
##    b = cv2.equalizeHist(b)
    imgLAB = cv2.cvtColor(cv2.merge((l,a,b)), cv2.COLOR_LAB2BGR)

    # normalization
##    imgBGRL1 = imgBGR.copy().astype(np.float)
##    imgBGRL2 = imgBGR.copy().astype(np.float)
##    imgBGRINF = imgBGR.copy().astype(np.float)
##    cv2.normalize(imgBGRL1, imgBGRL1, 255, 0, cv2.NORM_L1)
##    cv2.normalize(imgBGRL2, imgBGRL2, 255, 0, cv2.NORM_L2)
##    cv2.normalize(imgBGRINF, imgBGRINF, 255, 0, cv2.NORM_INF)

    return imgBGRHist, imgHSV, imgLAB
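A quick usage sketch (the file path is a placeholder) comparing the three equalization strategies side by side:

import cv2

img = cv2.imread('images/sample.jpg')  # hypothetical input
bgr_eq, hsv_eq, lab_eq = HistEqual(img)
cv2.imshow('original', img)
cv2.imshow('BGR equalized', bgr_eq)
cv2.imshow('V-channel equalized', hsv_eq)
cv2.imshow('L-channel equalized', lab_eq)
cv2.waitKey(0)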
def CannyEdge(frame, color='all'):
	# http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_canny/py_canny.html
	frame = cv2.medianBlur(frame, 5)
	b, g, r = cv2.split(frame)
	if color == 'all':
		b = cv2.Canny(b, 70, 200)
		g = cv2.Canny(g, 70, 200)
		r = cv2.Canny(r, 70, 200)
	elif color == 'b':
		b = cv2.Canny(b, 70, 200)
	elif color == 'g':
		g = cv2.Canny(g, 70, 200)
	elif color == 'r':
		r = cv2.Canny(r, 70, 200)
	# For an unknown color the blurred frame is returned unchanged.
	return cv2.merge((b, g, r))
def Harris_match(img1, img2):
    """算法主程序"""
    # 下面执行一点颜色空间的转换
    r, g, b = cv2.split(img1)
    img1 = cv2.merge((b, g, r))
    r, g, b = cv2.split(img2)
    img2 = cv2.merge((b, g, r))
    G_img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    G_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    # Detect and describe corner points
    harrisim1 = Harris.Compute_Harris_Response(G_img1)
    filtered_coords1 = numpy.array(Harris.Get_Harris_Points(harrisim1), dtype=int)
    patches1 = Harris.get_descriptors(G_img1, filtered_coords1)

    harrisim2 = Harris.Compute_Harris_Response(G_img2)
    filtered_coords2 = numpy.array(Harris.Get_Harris_Points(harrisim2), dtype=int)
    patches2 = Harris.get_descriptors(G_img2, filtered_coords2)

    # Harris.Plot_harris_points(img1, filtered_coords1)
    # Harris.Plot_harris_points(img2, filtered_coords2)
    matches = Harris.match_twosided(patches1, patches2)

    plt.figure()
    plt.gray()
    Harris.plot_matches(img1, img2, filtered_coords1, filtered_coords2, matches, show_below=False)
    plt.show()
Example #7
def ORBit(local_colour_camera_image, local_colour_training_image):
	# Convert Original Image of Scene into Grayscale
	b,g,r = cv2.split(local_colour_camera_image)
	local_colour_camera_image = cv2.merge([r,g,b])
	local_camera_image = cv2.cvtColor(local_colour_camera_image, cv2.COLOR_RGB2GRAY)

	# Convert Training Image into Grayscale
	b,g,r = cv2.split(local_colour_training_image)
	local_colour_training_image = cv2.merge([r,g,b])
	local_training_image = cv2.cvtColor(local_colour_training_image, cv2.COLOR_RGB2GRAY)

	# Initiate ORB detector (cv2.ORB in OpenCV 2.x, cv2.ORB_create since 3.x)
	orb = cv2.ORB_create(100, 1.2)

	# Find the keypoints and descriptors with ORB
	kp1, des1 = orb.detectAndCompute(local_training_image, None)
	kp2, des2 = orb.detectAndCompute(local_camera_image, None)

	# create BFMatcher object
	bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

	# Match descriptors.
	matches = bf.match(des1,des2)

	# Sort them in the order of their distance.
	matches = sorted(matches, key = lambda x:x.distance)

	# Draw first 30 matches.
	out = drawMatches(local_colour_training_image,kp1,local_colour_camera_image,kp2,matches[:30])
	return out
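Note: drawMatches above is a project helper (OpenCV 2.x shipped no match drawer for Python). With OpenCV 3+ it can simply wrap the built-in; a minimal sketch, assuming default colors are acceptable:

import cv2

def drawMatches(img1, kp1, img2, kp2, matches):
    # Hypothetical stand-in: delegate to the built-in drawer (OpenCV >= 3).
    return cv2.drawMatches(img1, kp1, img2, kp2, matches, None)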
    def splitmergeOp(self):
        # read image
        im = cv2.imread(self.Image)

        (B, G, R) = cv2.split(im)
        print(G[5, 80])

        # show each channel individually
        cv2.imshow("Red", R)
        cv2.imshow("Green", G)
        cv2.imshow("Blue", B)
        cv2.waitKey(0)

        # merge the image back together again
        merged = cv2.merge([B, G, R])
        cv2.imshow("Merged", merged)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        # visualize each channel in color
        zeros = np.zeros(im.shape[:2], dtype = "uint8")
        cv2.imshow("Red", cv2.merge([zeros, zeros, R]))
        cv2.imshow("Green", cv2.merge([zeros, G, zeros]))
        cv2.imshow("Blue", cv2.merge([B, zeros, zeros]))
        cv2.waitKey(0)

        return
Example #9
 def removeBackground(self, image):
     discValue = 10
     threshold = 1
     hsvt = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
     
     for roiHist in self.negHistograms:
         dst = cv2.calcBackProject([hsvt],[0,1],roiHist,[0,180,0,256],1)
         cv2.imshow('dst', dst)
         disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(discValue,discValue))
         cv2.filter2D(dst, -1,disc,dst)
         ret,thresh = cv2.threshold(dst,threshold,255,cv2.THRESH_BINARY_INV)
         thresh = cv2.merge((thresh,thresh,thresh))
         image = cv2.bitwise_and(image,thresh)
     
     
     for roiHist in self.posHistograms:
         dst = cv2.calcBackProject([hsvt],[0,1],roiHist,[0,180,0,256],1)
         #cv2.imshow('dst', dst)
         disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(discValue,discValue))
         cv2.filter2D(dst, -1,disc,dst)
         ret,thresh = cv2.threshold(dst,threshold,255,cv2.THRESH_BINARY)
         thresh = cv2.merge((thresh,thresh,thresh))
         image = cv2.bitwise_and(image,thresh)
         
         
         #res = np.hstack((thresh,res))
     
     cv2.imshow('backProj', image)
     return image
Example #10
def color_distort (image0):

  # save alpha channel separately
  assert is_bgra(image0), image0.shape
  b, g, r, a = cv2.split(image0)
  image0 = cv2.merge((b, g, r))

  hsv = cv2.cvtColor(image0, cv2.COLOR_BGR2HSV)

  def make_lut(a, n=256):
    '''Create a lookup table that saturates the image histogram,
       either toward white (for a > 1) or toward black (for 0 < a < 1).'''
    assert a > 0
    if a >= 1:
      lut = np.power(np.arange(n, dtype=float) / (n-1), a) * (n-1)
    else:
      lut = (n-1) - make_lut(1/a, n)[::-1]
    return lut.astype(np.uint8)

  # change hue pixelwise (note: OpenCV 8-bit hue spans 0..179, so wrap at 180)
  dhue = (np.random.rand() - 0.5) * COEF_HUE
  hsv[:,:,0] = np.mod(hsv[:,:,0].astype(int) + dhue, 180).astype(np.uint8)
  # change histogram of values
  dval = np.exp((np.random.rand() - 0.5) * COEF_INTENSITY)
  lut = make_lut(dval)
  hsv[:,:,2] = cv2.LUT(hsv[:,:,2], lut)

  image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

  # restore saved alpha channel
  b, g, r = cv2.split(image)
  image = cv2.merge((b, g, r, a))

  return image
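Note: is_bgra and the COEF_* constants come from the surrounding project; plausible definitions, offered as assumptions only:

COEF_HUE = 60         # assumed magnitude of the random hue shift
COEF_INTENSITY = 1.0  # assumed magnitude of the random intensity exponent

def is_bgra(img):
    # True for an image with an explicit 4-channel (BGRA) axis.
    return img.ndim == 3 and img.shape[2] == 4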
Example #11
 def apply(self, src, dst):
     """ Apply the filter with a BGR source/destination"""
     b,g,r = cv2.split(src)
     utils.applyLookupArray(self._bLookupArray,b,b)
     utils.applyLookupArray(self._gLookupArray,g,g)
     utils.applyLookupArray(self._rLookupArray,r,r)
     cv2.merge((b,g,r), dst)
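Note: utils.applyLookupArray is not shown; a minimal sketch, assuming it maps every pixel of src through a 256-entry lookup array into dst in place:

def applyLookupArray(lookupArray, src, dst):
    # Hypothetical helper: dst[i, j] = lookupArray[src[i, j]].
    if lookupArray is None:
        return
    dst[:] = lookupArray[src]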
Example #12
    def preprocess(self, image):
        self.shape=image.shape
        b, g, r = cv2.split(image)

        if self.convert:
            img = cv2.merge([r, g, b])
        else:
            img = cv2.merge([b,g,r])

        self.original=img

        if self.size!=(-1,-1):
            img = cv2.resize(img,self.size)

        if self.idx == 0:
            self.average = img.astype(np.float64)
        else:
            self.average *= self.idx / (self.idx + 1.0)
            self.average += img * (1.0 / (self.idx + 1.0))

        # Subtract the running mean (result is float; in-place uint8 math would fail).
        img = img - self.average


        self.idx+=1
        return self.filter_func(img)
Example #13
def blend(foregroundSrc, backgroundSrc, dst, alphaMask):
    
    # Calculate the normalized alpha mask.
    maxAlpha = numpy.iinfo(alphaMask.dtype).max
    normalizedAlphaMask = (1.0 / maxAlpha) * alphaMask
    
    # Calculate the normalized inverse alpha mask.
    normalizedInverseAlphaMask = \
        numpy.ones_like(normalizedAlphaMask)
    normalizedInverseAlphaMask[:] = \
        normalizedInverseAlphaMask - normalizedAlphaMask
    
    # Split the channels from the sources.
    foregroundChannels = cv2.split(foregroundSrc)
    backgroundChannels = cv2.split(backgroundSrc)
    
    # Blend each channel.
    numChannels = len(foregroundChannels)
    i = 0
    while i < numChannels:
        backgroundChannels[i][:] = \
            normalizedAlphaMask * foregroundChannels[i] + \
            normalizedInverseAlphaMask * backgroundChannels[i]
        i += 1
    
    # Merge the blended channels into the destination.
    cv2.merge(backgroundChannels, dst)
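A usage sketch (file names are placeholders; all three images must share the same size):

import cv2
import numpy

fg = cv2.imread('fg.png')                             # hypothetical foreground
bg = cv2.imread('bg.png')                             # hypothetical background
alpha = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)  # 8-bit alpha mask
out = numpy.empty_like(bg)
blend(fg, bg, out, alpha)
cv2.imwrite('blended.png', out)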
Example #14
def to_bgra(img):
    # Assumes img always carries an explicit channel axis (H x W x C).
    # Already BGRA
    if img.shape[2] == 4:
        return img
    # Greyscale
    elif img.shape[2] == 1:
        return cv2.merge(
            (
                img[:, :, 0],
                img[:, :, 0],
                img[:, :, 0],
                np.ones(img.shape[:2], img.dtype)  # opaque only for float images in [0, 1]; use 255 for uint8
            )
        )
    # Greyscale w/ alpha
    elif img.shape[2] == 2:
        return cv2.merge(
            (
                img[:, :, 0],
                img[:, :, 0],
                img[:, :, 0],
                img[:, :, 1]
            )
        )
    # BGR
    else:
        return cv2.merge(
            (
                img[:, :, 0],
                img[:, :, 1],
                img[:, :, 2],
                np.ones(img.shape[:2], img.dtype)  # opaque only for float images in [0, 1]; use 255 for uint8
            )
        )
Example #15
def hist(img):
    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    channels = cv2.split(ycrcb)
    cv2.equalizeHist(channels[0], channels[0])  # equalize Y in place (input channel, output channel)
    cv2.merge(channels, ycrcb)  # merge the result channels
    cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR, img)
    return img
Example #16
	def obtainLine(self, imgRaw, imgLas):
		u1 = u2 = 0

		height, width, depth = imgRaw.shape
		imgLine = np.zeros((height,width,depth), np.uint8)

		diff = cv2.subtract(imgLas, imgRaw)
		r, g, b = cv2.split(diff)  # caution: for BGR frames the first channel is actually blue
		kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
		r = cv2.morphologyEx(r, cv2.MORPH_OPEN, kernel)
		imgGray = cv2.merge((r,r,r))
		edges = cv2.threshold(r, 20.0, 255.0, cv2.THRESH_BINARY)[1]
		edges3 = cv2.merge((edges,edges,edges))
		lines = cv2.HoughLines(edges, 1, np.pi/180, 200)

		if lines is not None:
			rho, theta = lines[0][0]
			#-- Calculate coordinates
			u1 = rho / np.cos(theta)
			u2 = u1 - height * np.tan(theta)

			#-- Draw line
			cv2.line(imgLine,(int(round(u1)),0),(int(round(u2)),height-1),(255,0,0),5)

		return [[u1, u2], [imgLas, imgGray, edges3, imgLine]]
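Note on the intercepts above: from the Hough normal form rho = x*cos(theta) + y*sin(theta), setting y = 0 gives u1 = rho/cos(theta), and setting y = height gives u2 = (rho - height*sin(theta))/cos(theta) = u1 - height*tan(theta), which is exactly what the code computes.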
def save_images(dataset_folder, save_frequency):
    print('Saving images')
    import cv2
    import pickle
    import os

    # saves all the files in the dataset folder as image files in a different folder
    image_folder = 'images/'

    # all files
    prefix='image_'
    prefixed = sorted([filename for filename in os.listdir(dataset_folder) if filename.startswith(prefix)])

    # only keep some of the files (at a regular interval)
    sparse_files = [i for i in prefixed if int(i.split('_')[1]) % save_frequency == 0]

    for image_pickle_file in sparse_files:
        image = pickle.load( open( dataset_folder+'/'+image_pickle_file, "rb" ) )

        red, green, blue, depth = cv2.split(image)
        rgb_image = cv2.merge((red,green,blue))
        depth_image = cv2.merge((depth,depth,depth))

        image_name = image_pickle_file.replace('.pickle','')

        print('saving image {}'.format(image_name))

        save_image2(image_folder+'rgb/'+image_name , rgb_image)
        save_image2(image_folder+'depth/'+image_name , depth_image)
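Note: save_image2 is project code not shown here; a minimal sketch, assuming it creates the target folder and writes a PNG:

import os
import cv2

def save_image2(path_without_extension, image):
    # Hypothetical helper: ensure the folder exists, then save as PNG.
    os.makedirs(os.path.dirname(path_without_extension), exist_ok=True)
    cv2.imwrite(path_without_extension + '.png', image)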
Example #18
def redAreaDetection(image, name, show=False):
    img = image.copy()
    test3 = getBinaryInvMask(img)
    pp = cv2.bitwise_and(img, img, mask=test3)
    # convert color space BGR -> HSV
    img = image.copy()
    b, g, r = cv2.split(img)
    r = preProcessImage(r)
    g = preProcessImage(g)
    b = preProcessImage(b)
    rgb = [b, g, r]
    prc = cv2.merge(rgb)
    prueba = cv2.cvtColor(prc, cv2.COLOR_BGR2HSV)
    imag2 = cv2.cvtColor(pp, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(imag2)
    # s += 50
    s = cv2.equalizeHist(s)
    v = cv2.equalizeHist(v)
    chs = [h, s, v]
    imgRes = cv2.merge(chs)
    # cv2.imshow("source",img)
    # cv2.imshow("imag2",imag2)
    # imgRes = imag2.copy()
    test = cv2.cvtColor(imgRes, cv2.COLOR_HSV2BGR)
    im = cv2.inRange(imgRes, (0, 100, 30), (15, 255, 255))
    im2 = cv2.inRange(imgRes, (160, 100, 30), (180, 255, 255))
    #### imgF = im + im2
    imgF = cv2.bitwise_or(im, im2)
    imP = cv2.inRange(prueba, (0, 100, 30), (15, 255, 255))
    imP2 = cv2.inRange(prueba, (160, 100, 30), (180, 255, 255))
    ##### imgFP = imP + imP2
    imgFP = cv2.bitwise_or(imP, imP2)
    # cv2.imshow("PRUEBA", test)
    #
    # printHSV_Values(imgRes)
    # printHSV_Values(prueba)

    ##### final = imgF + imgFP
    final = cv2.bitwise_or(imgF, imgFP)
    # cv2.imshow("PRUEBA2", test3)
    # cv2.imshow("pr",imP)
    # cv2.imshow("ORIGINAL", imP2)
    # cv2.imshow("imgF",imgF)
    # cv2.imshow("imFP",imgFP)
    # cv2.imshow("final",final)
    # cv2.waitKey(800)
    # cv2.destroyAllWindows()

    if show:
        cv2.imshow("image", image)
        cv2.imshow("win1", im)
        cv2.imshow("win2", im2)
        cv2.imshow("win3", imgF)
        cv2.waitKey()
        cv2.destroyWindow("image")
        cv2.destroyWindow("win1")
        cv2.destroyWindow("win2")
        cv2.destroyWindow("win3")
    return final
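Note: getBinaryInvMask and preProcessImage are project helpers not shown; one plausible reading, offered purely as an assumption, is an inverted Otsu mask and a blur-then-equalize step:

import cv2

def getBinaryInvMask(img_bgr):
    # Hypothetical: inverted binary mask via Otsu thresholding on grayscale.
    gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    return mask

def preProcessImage(channel):
    # Hypothetical: smooth then equalize a single channel.
    return cv2.equalizeHist(cv2.GaussianBlur(channel, (5, 5), 0))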
Example #19
def afilter(src, dst):
    graySrc = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(graySrc, 40, 50)
    # Darken edges: scale each channel by the normalized inverse edge mask
    # (multiplying by the raw 0/255 edge map would overflow uint8).
    normalizedInverseAlpha = (1.0 / 255) * (255 - edges)
    channels = cv2.split(src)
    for channel in channels:
        channel[:] = channel * normalizedInverseAlpha
    cv2.merge(channels, dst)
Example #20
def recolor_rc(src, dst):
    """Simulate conversion from BGR to RC (red, cyan).

       (b, g, r) -> (0.5b + 0.5g, 0.5b + 0.5g, r)
    """
    b, g, r = cv2.split(src)
    cv2.addWeighted(b, 0.5, g, 0.5, 0, b)  # b = 0.5*b + 0.5*g, written in place
    cv2.merge((b, b, r), dst)
Example #21
def switchBR(img_src):
    if len(img_src.shape) == 3:
        b, g, r = cv2.split(img_src)
        img_dest = img_src.copy()
        cv2.merge((r, g, b), img_dest)
        return img_dest
    else:
        return img_src
Example #22
def split_and_show(img):
    ''' split into multiple channels and show '''
    img = cv2.resize(img, (THUMBNAIL_WIDTH, THUMBNAIL_HEIGHT))
    b, g, r = cv2.split(img)
    zeros = np.zeros(img.shape[:2], dtype=img.dtype)
    cv2.imshow('red', cv2.merge([zeros, zeros, r]))
    cv2.imshow('blue', cv2.merge([b, zeros, zeros]))
    cv2.imshow('green', cv2.merge([zeros, g, zeros]))
 def Apply(self, slidePath, maskPath, outputDir, tumorFolderName, maskFolderName):
     '''Create a dataset by dividing the slide into patches.
        The result will be stored in outputDir/folderName.
     '''
     slide = openslide.open_slide(slidePath)
     mask = openslide.open_slide(maskPath)
     max_level = mask.level_count - 1  if mask.level_count < slide.level_count else slide.level_count - 1
     if self._fetchingLevel > max_level or self._fetchingLevel < 0:
         print("the level to fetch data is out of the range of the TIFF image")
         return 0
     
     splits = slidePath.split("/")
     tiffImgName = splits[-1]
     dataName = tiffImgName.split('.tif')[0]
     
     slidePathDir = outputDir + '/' +tumorFolderName
     if not os.path.exists(slidePathDir):
         os.makedirs(slidePathDir)
         
     maskPathDir = outputDir + '/' +maskFolderName
     if not os.path.exists(maskPathDir):
         os.makedirs(maskPathDir)
     
     level_size = slide.level_dimensions[self._fetchingLevel]
     zero_level_size = slide.level_dimensions[0]
     
     window_H = window_W = int(level_size[0]/self._win_propotion)
     windowShape = (window_H, window_W)
     
     h = w = 0
     step = int(zero_level_size[0]/self._win_propotion)
     while(h<zero_level_size[0]):
         while(w<zero_level_size[1]):
             if ( h + step > zero_level_size[0] ):
                 h = zero_level_size[0] - step
             if ( w + step > zero_level_size[1] ):
                 w = zero_level_size[1] - step
                 
             slideTile = self._GetPatch(slide, h, w, windowShape, self._fetchingLevel)
             maskTile = self._GetPatch(mask, h, w, windowShape, self._fetchingLevel)
             
             b,g,r,a = cv2.split(slideTile)
             slideTile = cv2.merge([b,g,r])
             
             b,g,r,a = cv2.split(maskTile)
             maskTile = cv2.merge([b,g,r])
             
             if ( maskTile.max()>100 ):
                 slidePathFile = slidePathDir + '/' + dataName + '_' + str(self._patchID) + '.tif'  
                 maskPathFile = maskPathDir + '/' + dataName + '_Mask_' + str(self._patchID) + '.tif'
                 
                 cv2.imwrite( slidePathFile, slideTile )
                 cv2.imwrite( maskPathFile, maskTile )
                 self._patchID = self._patchID + 1
             
             w = w + step
         w = 0
         h = h + step
Example #24
def recolor_rgv(src, dst):
    """Simulate conversion from BGR to RGV (red, green, value).

       (b, g, r) -> (min(b, g, r), g, r)
    """
    b, g, r = cv2.split(src)
    cv2.min(b, g, b)  # b = min(g, b)
    cv2.min(b, r, b)
    cv2.merge((b, g, r), dst)
Example #25
def equalize_bgr(img, dst=None):
    if img is None:
        raise ValueError('img is None')

    dst = cv2.cvtColor(img, cv2.COLOR_BGR2HLS, dst)
    h, l, s = cv2.split(dst)
    cv2.equalizeHist(l, dst=l)
    cv2.merge((h, l, s), dst=dst)
    return cv2.cvtColor(dst, cv2.COLOR_HLS2BGR, dst=dst)
Example #26
def recolor_cmv(src, dst):
    """Simulate conversion from BGR to CMV (cyan, magenta, value).

       (b, g, r) -> (max(b, g, r), g, r)
    """
    b, g, r = cv2.split(src)
    cv2.max(b, g, b)  # b = max(b, g)
    cv2.max(b, r, b)  # b = max(b, g, r)
    cv2.merge((b, g, r), dst)
def norm_rgb(image):
    ycrcb = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)
    channels = cv2.split(ycrcb)
    cv2.equalizeHist(channels[0], channels[0])  # equalize the luma channel in place
    cv2.merge(channels, ycrcb)
    return cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR)
Example #28
    def split(self,img):
        b,g,r = cv2.split(img)

        n = np.zeros_like(b)
        b = cv2.merge((b,n,n))
        g = cv2.merge((n,g,n))
        r = cv2.merge((n,n,r))
        cv2.imshow('b',b)
        cv2.imshow('g',g)
        cv2.imshow('r',r)
Example #29
    def externalCall(self):
        d_red = (100, 100, 200)    # BGR tuples replacing the removed cv2.cv.RGB(...) helpers
        l_red = (200, 200, 250)
        d_green = (100, 200, 100)
        l_green = (200, 250, 200)

        orig = self.inputOrginalImageName.data
        img = orig.copy()
        #img2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img2 = self.inputImageName.data

        detector = cv2.MSER_create()  # was cv2.FeatureDetector_create('MSER') in OpenCV 2.x
        fs = detector.detect(img2)
        #print dir(detector)

        fs.sort(key=lambda x: -x.size)


        def supress(x):
            # Suppress x when a larger feature overlaps it.
            for f in fs:
                distx = f.pt[0] - x.pt[0]
                disty = f.pt[1] - x.pt[1]
                dist = math.sqrt(distx*distx + disty*disty)
                if (f.size > x.size) and (dist < f.size/2):
                    # could also filter on self.maxRadius.value / self.minRadius.value here
                    return True

        sfs = [x for x in fs if not supress(x)]

        mask = np.zeros_like(img2)

        for f in sfs:
                cv2.circle(img, (int(f.pt[0]), int(f.pt[1])), int(2), d_red, 1)
                cv2.circle(img, (int(f.pt[0]), int(f.pt[1])), int(f.size/2), d_green, 1)
                cv2.circle(mask, (int(f.pt[0]), int(f.pt[1])), int(f.size/1.5*self.maskScale.value), 255, -1)

        h, w = orig.shape[:2]
        #vis = np.zeros((h, w*2+5), np.uint8)
        #vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        #vis[:h, :w] = orig
        #vis[:h, w+5:w*2+5] = img

        filt = np.zeros_like(orig)

        cv2.merge((mask, mask, mask), filt)

        filt = cv2.bitwise_and(orig, filt)

        self.outputImageMask.data = mask
        self.outputImageName.data = img
        self.outputOrginalImageFilt.data = filt
Example #30
    def execute(self, image):
        morph = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.morphologyEx(
            morph,
            cv2.MORPH_CLOSE,
            self._kernel,
            dst=morph,
            iterations=self.iterations.get())
        cv2.merge((morph, morph, morph), image)

        return image
print("channels: {}".format(img.shape[2]))

(height, width) = img.shape[:2]
center = (width // 2, height // 2)

cv2.imshow("nomadProgramer", img)

(Blue, Green, Red) = cv2.split(img)

cv2.imshow("Red Channel", Red)
cv2.imshow("Green Channel", Green)
cv2.imshow("Blue Channel", Blue)
cv2.waitKey(0)

zeros = np.zeros(img.shape[:2], dtype = "uint8")
cv2.imshow("Red", cv2.merge([zeros, zeros, Red]))
cv2.imshow("Green", cv2.merge([zeros, Green, zeros]))
cv2.imshow("Blue", cv2.merge([Blue, zeros, zeros]))
cv2.waitKey(0)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow("Gray Filter", gray)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
cv2.imshow("HSV Filter", hsv)
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
cv2.imshow("LAB Filter", lab)
cv2.waitKey(0)

BGR = cv2.merge([Blue, Green, Red])
cv2.imshow("Blue, Green and Red", BGR)
Example #32
def lab_contrast(image):
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    cl = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8)).apply(l)
    limg = cv2.merge((cl, a, b))
    return cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
def show_hsv_equalized(image):
    H, S, V = cv2.split(cv2.cvtColor(image, cv2.COLOR_BGR2HSV))
    eq_V = cv2.equalizeHist(V)
    eq_image = cv2.cvtColor(cv2.merge([H, S, eq_V]), cv2.COLOR_HSV2RGB)  # RGB because matplotlib expects RGB order
    plt.imshow(eq_image)
    plt.show()
Example #34
import cv2
import numpy as np

img = cv2.imread('output1-2.jpg', 0)
img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)[1]  # ensure binary
ret, labels = cv2.connectedComponents(img)

# Map component labels to hue val
label_hue = np.uint8(250 * labels / np.max(labels))
blank_ch = 255 * np.ones_like(label_hue)
labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])

# convert from HSV to RGB for display
labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2RGB)

# set bg label to black
labeled_img[label_hue == 0] = 0
#select car number
for i in range(labeled_img.shape[0]):
    for j in range(labeled_img.shape[1]):
        if (j > 330 and j < 395 and i > 230 and i < 270):
            if (labeled_img[i][j][0] == 255 and labeled_img[i][j][1] == 110
                    and labeled_img[i][j][2] == 0):
                labeled_img[i][j] = [255, 255, 255]
            elif (labeled_img[i][j][1] > 110 and labeled_img[i][j][1] != 204):
                labeled_img[i][j] = [255, 255, 255]
            else:
                labeled_img[i][j] = [0, 0, 0]
        else:
            labeled_img[i][j] = [0, 0, 0]
img_IOA = labeled_img[230:270, 330:395]
Example #35
import cv2
import numpy as np
from PIL import Image

# In OpenCV the channel order is BGR; in PIL it is RGB.
img = cv2.imread("../images/1.jpg")  # read the image
# print(img.shape)

# Channel split, method 1: index slicing
b = img[:, :, 0:1]
g = img[:, :, 1:2]
r = img[:, :, 2:]
# img_new = np.concatenate([r, g, b], axis=2)                   # concatenate to merge

# Channel split, method 2: cv2.split
b, g, r = cv2.split(img)
img_new = cv2.merge([r, g, b])
print(b.shape)  # note: after splitting, each channel has only height and width
print(g.shape)
print(r.shape)

# Channel split, method 3: merge each channel with zeros to display it in isolation
zeros = np.zeros([img.shape[0], img.shape[1]], dtype=np.uint8)  # dtype must be uint8
print(zeros.shape)
img_b = cv2.merge([b, zeros, zeros])  # blue channel shown in isolation
img_g = cv2.merge([zeros, g, zeros])  # green channel shown in isolation
img_r = cv2.merge([zeros, zeros, r])  # red channel shown in isolation
print(img_b.shape)

# Method 4: zero out channels by direct assignment
img_new[..., 0] = 0
img_new[..., 1] = 0
Example #36
	if K.image_data_format() == "channels_first":
		image = (testX[i][0] * 255).astype("uint8")
 
	# otherwise we are using "channels_last" ordering
	else:
		image = (testX[i] * 255).astype("uint8")

	# initialize the text label color as green (correct)
	color = (0, 255, 0)
 
	# otherwise, the class label prediction is incorrect
	if prediction[0] != np.argmax(testY[i]):
		color = (0, 0, 255)
 
	# merge the channels into one image and resize the image from
	# 28x28 to 96x96 so we can better see it and then draw the
	# predicted label on the image
	image = cv2.merge([image] * 3)
	image = cv2.resize(image, (96, 96), interpolation=cv2.INTER_LINEAR)
	cv2.putText(image, label, (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
		color, 2)
 
	# add the image to our list of output images
	images.append(image)
 
# construct the montage for the images
montage = build_montages(images, (96, 96), (4, 4))[0]
 
# show the output montage
cv2.imshow("Fashion MNIST", montage)
cv2.waitKey(0)
#Splitting RGB components of an image
#Individual channel investigation helps in understanding edge detection and thresholding

(B, G, R) = cv2.split(img) # as RGB image is stored in reverse channel order
#Showing individual channels in grayscale format
cv2.imshow("Red_Component", R)
cv2.imshow('Blue_Component', B)
cv2.imshow('Green_Component', G)
cv2.waitKey(0)


# In[4]:

#Merging the channels back
merged = cv2.merge([B, G, R])
cv2.imshow("Merged_Image", merged)
cv2.waitKey(0)


# In[5]:

#Merging the channels in reverse order - changes the color component of original image
cv2.imshow("Merged_Image2", cv2.merge([R, G, B]))
cv2.waitKey(0)


# In[8]:

#Visualize each channel in its corresponding color
zeros = np.zeros(img.shape[:2], dtype='uint8')
Example #38
import os
import cv2
import numpy as np
import pyrealsense2 as rs  # assumed: the rs.* calls below match the pyrealsense2 API

def main():
    # if not os.path.exists(args.directory):
    #     os.mkdir(args.directory)
    try:
        for i in range(10, 12):
            index = 0
            config = rs.config()
            config.enable_stream(rs.stream.color)
            config.enable_stream(rs.stream.depth)
            pipeline = rs.pipeline()
            # rs.config.enable_device_from_file(config, "realsense_bag/kist_scene/scene2.bag")
            rs.config.enable_device_from_file(
                config, "realsense_bag/kist_scene/scene" + str(i) + ".bag")
            # rs.config.enable_device_from_file(config, "bag/20200811_141534.bag")
            # rs.config.enable_device_from_file(config, "bag/20200727_195335.bag")
            # config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
            profile = pipeline.start(config)

            align_to = rs.stream.color
            align = rs.align(align_to)

            depth_sensor = profile.get_device().first_depth_sensor()
            depth_scale = depth_sensor.get_depth_scale()
            # print("scale : " + str(depth_scale))
            # compare_img = cv2.imread("/home/user/real_scene_color2.png")

            profile = pipeline.get_active_profile()

            color_stream = profile.get_stream(rs.stream.color)
            color_profile = rs.video_stream_profile(color_stream)
            color_intrinsics = color_profile.get_intrinsics()

            depth_profile = rs.video_stream_profile(
                profile.get_stream(rs.stream.depth))
            depth_intrinsics = depth_profile.get_intrinsics()

            print("*** color intrinsics ***")
            print(color_intrinsics)

            print("*** depth intrinsics ***")
            print(depth_intrinsics)

            if not os.path.exists("/home/user/kist_scene/scene" + str(i)):
                os.mkdir("/home/user/kist_scene/scene" + str(i))

            while True:
                # print("frame:", index)
                # print("scale : " + str(depth_scale))
                frames = pipeline.wait_for_frames()
                # print("frames type : " + str(type(frames)))
                # align the deph to color frame
                aligned_frames = align.process(frames)
                # get aligned frames
                aligned_depth_frame = aligned_frames.get_depth_frame()
                aligned_color_frame = aligned_frames.get_color_frame()
                aligned_depth_image = np.asanyarray(
                    aligned_depth_frame.get_data())
                # scaled_depth_image = depth_image * depth_scale
                aligned_color_image = np.asanyarray(
                    aligned_color_frame.get_data())
                # convert color image to BGR for OpenCV
                r, g, b = cv2.split(aligned_color_image)
                aligned_color_image = cv2.merge((b, g, r))

                # depth_intrinsics = rs.video_stream_profile(
                #     depth_image.profile).get_intrinsics()
                #
                # print(depth_intrinsics)

                depth_frame = frames.get_depth_frame()
                color_frame = frames.get_color_frame()
                depth_image = np.asanyarray(depth_frame.get_data())
                color_image = np.asanyarray(color_frame.get_data())
                r, g, b = cv2.split(color_image)
                color_image = cv2.merge((b, g, r))

                # print("frame_image type : " + str(type(color_image)))
                # print("frame_image shape : " + str(color_image.shape))
                # print("depth_image type : " + str(type(depth_image)))
                # print("depth_image shape : " + str(depth_image.shape))
                # depth_image *= 10

                # print(depth_image)
                # print(scaled_depth_image)
                # print(compare_img)
                # print(color_image)

                # row, col = depth_image.shape
                # flag = True
                #
                # for i in range(row):
                #     if flag is False:
                #         break
                #     for j in range(col):
                #         if flag is False:
                #             break
                #         if img[i][j] != depth_image[i][j]:
                #             flag = False
                #             break
                # if flag is True:
                #     print("index : " + str(index))
                #     exit()

                # if compare_img is color_image:
                #     print("index : " + str(i))
                #     break
                # print("true")
                # else:
                # print("no")
                # print(aligned_depth_image)
                # print(aligned_depth_image.shape)
                # print(min(aligned_depth_image[:,:,0]))

                # print(scaled_depth_image)
                # scaled_depth_image *= 1000
                # scaled_depth_image = scaled_depth_image.astype(np.uint64)
                # print(scaled_depth_image)

                # cv2.imwrite("/home/user/sample_image/image0_depth_raw.png", depth_image)
                # cv2.imwrite("/home/user/bag_images2/depth_image" + str(index) + ".png", depth_image)
                # cv2.imwrite("/home/user/bag_images2/color_image" + str(index) + ".png", color_image)
                # break
                # if index % 10 == 0:
                # cv2.imwrite("/home/user/kist_scene/depth_image" + str(index) + ".png", aligned_depth_image)
                # cv2.imwrite("/home/user/kist_scene/color_image" + str(index) + ".png", color_image)

                # cv2.imshow("color", color_image)
                # # cv2.imshow("depth", depth_image)
                # cv2.imshow("aligned_depth_image", aligned_depth_image)
                # cv2.waitKey(0)
                print("index : " + str(index))
                # cv2.imwrite("/home/user/kist_scene/depth_image" + str(index) + ".png", aligned_depth_image)
                # cv2.imwrite("/home/user/kist_scene/color_image" + str(index) + ".png", color_image)
                cv2.imwrite(
                    "/home/user/kist_scene/scene" + str(i) + "/depth_image" +
                    str(index) + ".png", aligned_depth_image)
                cv2.imwrite(
                    "/home/user/kist_scene/scene" + str(i) + "/color_image" +
                    str(index) + ".png", color_image)
                if index == 100:
                    break
                index += 1
    finally:
        pass
Example #39
# Complete the code for dilation

# Erosion

erosion = cv2.erode(dilation, kernel, iterations=1)
cv2.imwrite(filename + '/6. Erosion.jpg', erosion)
cv2.imshow('Erosion', erosion)

# Find differences between two images
diff = cv2.absdiff(dilation, erosion)
cv2.imwrite(filename + '/7. Difference.jpg', diff)

# Show the difference between dilation and erosion
cv2.imshow('Difference', diff)

# splitting the channels of maze
b, g, r = cv2.split(img)
mask_inv = cv2.bitwise_not(diff)
cv2.imwrite(filename + '/8. Mask.jpg', mask_inv)
cv2.imshow('Mask', mask_inv)

# masking out the green and red colour from the solved path
r = cv2.bitwise_and(r, r, mask=mask_inv)
b = cv2.bitwise_and(b, b, mask=mask_inv)

res = cv2.merge((b, g, r))
cv2.imwrite(filename + '/9. SolvedMaze.jpg', res)
cv2.imshow('Solved Maze', res)
cv2.waitKey(0)
cv2.destroyAllWindows()