def estimateHSflow(frame1, frame2, lam = 80):
    H, W = frame1.shape

    # build the image pyramid
    pyramid_spacing = 1.0/0.8
    pyramid_levels = int(1 + np.floor(np.log(min(W, H) / 16.0) / np.log(pyramid_spacing * 1.0)))
    #pyramid_levels = 1
    smooth_sigma = np.sqrt(2.0)
    #  use cv2.GaussianBlur
    f = ft.fspecialGauss(2 * round(1.5 * smooth_sigma) + 1, smooth_sigma)

    pyramid1 = []
    pyramid2 = []

    pyramid1.append(frame1)
    pyramid2.append(frame2)
    for m in range(1, pyramid_levels):
        # TODO #1: build Gaussian pyramid for coarse-to-fine optical flow
        # estimation
        ph = int(np.ceil(pyramid1[-1].shape[0] * 0.8))
        pw = int(np.ceil(pyramid1[-1].shape[1] * 0.8))
        pyramid1[-1] = cv2.filter2D(pyramid1[-1], -1, f, borderType=cv2.BORDER_CONSTANT)
        pyramid1.append(cv2.resize(pyramid1[-1], (pw, ph), interpolation = cv2.INTER_CUBIC))
        pyramid2[-1] = cv2.filter2D(pyramid2[-1], -1, f, borderType=cv2.BORDER_CONSTANT)
        pyramid2.append(cv2.resize(pyramid2[-1], (pw, ph), interpolation = cv2.INTER_CUBIC))
    # coarse-to-fine flow computation
    uv = np.zeros((H, W, 2))

    for levels in range(pyramid_levels - 1, -1, -1):
        print "level %d" % (levels)
        H1, W1 = pyramid1[levels].shape
        uv = ft.resample_flow(uv, H1, W1)
        uv = estimateHSflowlayer(pyramid1[levels], pyramid2[levels], uv, lam, 10)

    return uv
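
estimateHSflow leans on the external ft module for resample_flow, which isn't shown here. As a rough sketch only, a helper playing that role (name and exact behavior assumed, not taken from the source) has to resize the flow field and rescale the vectors:

import cv2

def resample_flow_sketch(uv, H, W):
    # hypothetical stand-in for ft.resample_flow: resize the flow field to
    # (H, W) and rescale u/v by the size ratio so the vectors stay consistent
    h, w = uv.shape[:2]
    out = cv2.resize(uv, (W, H), interpolation=cv2.INTER_LINEAR)
    out[:, :, 0] *= float(W) / w  # u components scale with width
    out[:, :, 1] *= float(H) / h  # v components scale with height
    return out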
def HS(im1, im2, alpha, ite):

	#set up initial velocities
	uInitial = np.zeros([im1.shape[0],im1.shape[1]])
	vInitial = np.zeros([im1.shape[0],im1.shape[1]])

	# Set initial value for the flow vectors
	u = uInitial
	v = vInitial

	# Estimate derivatives
	[fx, fy, ft] = computeDerivatives(im1, im2)

	# Averaging kernel
	kernel = np.array([[1/12., 1/6., 1/12.], [1/6., 0., 1/6.], [1/12., 1/6., 1/12.]])

	print(fx[100,100], fy[100,100], ft[100,100])

	# Iteration to reduce error
	for i in range(ite):
		# Compute local averages of the flow vectors
		uAvg = cv2.filter2D(u,-1,kernel)
		vAvg = cv2.filter2D(v,-1,kernel)

		# Horn-Schunck update; all operations are elementwise
		common = (fx*uAvg + fy*vAvg + ft) / (alpha + fx**2 + fy**2)
		u = uAvg - fx*common

		# print np.linalg.norm(u)

		v = vAvg - fy*common
	return (u,v)
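
HS calls computeDerivatives, which is not included above. A minimal sketch in the usual Horn-Schunck formulation (2x2 averaged difference stencils; assumes im1/im2 are float arrays so negative derivatives aren't clipped):

import cv2
import numpy as np

def computeDerivatives(im1, im2):
    # averaged forward differences over both frames (Horn-Schunck style)
    kx = np.array([[-1., 1.], [-1., 1.]]) * 0.25
    ky = np.array([[-1., -1.], [1., 1.]]) * 0.25
    kt = np.ones((2, 2)) * 0.25
    fx = cv2.filter2D(im1, -1, kx) + cv2.filter2D(im2, -1, kx)
    fy = cv2.filter2D(im1, -1, ky) + cv2.filter2D(im2, -1, ky)
    ft = cv2.filter2D(im2, -1, kt) - cv2.filter2D(im1, -1, kt)
    return fx, fy, ft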
Example #3
 def OFMGetFM(self, src):
     # creating a Gaussian pyramid
     GaussianI = self.FMCreateGaussianPyr(src)
     # convolving a Gabor filter with the intensity image to extract orientation features
     GaborOutput0   = [ np.empty((1,1)), np.empty((1,1)) ]  # dummy data: any kinds of np.array()s are OK
     GaborOutput45  = [ np.empty((1,1)), np.empty((1,1)) ]
     GaborOutput90  = [ np.empty((1,1)), np.empty((1,1)) ]
     GaborOutput135 = [ np.empty((1,1)), np.empty((1,1)) ]
     for j in range(2,9):
         GaborOutput0.append(   cv2.filter2D(GaussianI[j], cv2.CV_32F, self.GaborKernel0) )
         GaborOutput45.append(  cv2.filter2D(GaussianI[j], cv2.CV_32F, self.GaborKernel45) )
         GaborOutput90.append(  cv2.filter2D(GaussianI[j], cv2.CV_32F, self.GaborKernel90) )
         GaborOutput135.append( cv2.filter2D(GaussianI[j], cv2.CV_32F, self.GaborKernel135) )
     # calculating center-surround differences for every orientation
     CSD0   = self.FMCenterSurroundDiff(GaborOutput0)
     CSD45  = self.FMCenterSurroundDiff(GaborOutput45)
     CSD90  = self.FMCenterSurroundDiff(GaborOutput90)
     CSD135 = self.FMCenterSurroundDiff(GaborOutput135)
     # concatenate
     dst = list(CSD0)
     dst.extend(CSD45)
     dst.extend(CSD90)
     dst.extend(CSD135)
     # return
     return dst
Example #4
    def compute_gradients(cls, img):
        gradient_filter_x = numpy.ones((3,1), numpy.float32)

        gradient_filter_x[0][0] = -1
        gradient_filter_x[1][0] = 0
        gradient_filter_x[2][0] = 1

        gradient_filter_y = numpy.ones((1,3), numpy.float32)

        gradient_filter_y[0][0] = -1
        gradient_filter_y[0][1] = 0
        gradient_filter_y[0][2] = 1

        dst_x = cv2.filter2D(img, -1, gradient_filter_x)
        dst_y = cv2.filter2D(img, -1, gradient_filter_y)

        dst = numpy.zeros(img.shape, numpy.float32)

        w,h,_ = img.shape
        for x in range(w):
            for y in range(h):
                x_c = dst_x[x][y]
                y_c = dst_y[x][y]
                x_co = max(x_c)
                y_co = max(y_c)
                dst[x][y] = math.atan2(x_co, y_co)

        return dst
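
The nested pixel loop above is very slow in Python. A vectorized sketch of the same per-pixel computation (assuming, as the loop does, a 3-channel float-convertible input) would be:

import cv2
import numpy

def compute_gradients_vectorized(img):
    # same 3x1 / 1x3 difference kernels as above, applied once per channel
    kx = numpy.array([[-1], [0], [1]], numpy.float32)
    ky = numpy.array([[-1, 0, 1]], numpy.float32)
    dst_x = cv2.filter2D(img.astype(numpy.float32), -1, kx)
    dst_y = cv2.filter2D(img.astype(numpy.float32), -1, ky)
    # channel-wise max, then the per-pixel angle the loop computes
    return numpy.arctan2(dst_x.max(axis=2), dst_y.max(axis=2))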
Example #5
    def _divide(self):        
        block_size = self.spec.block_size # shortcut
        half_block = (block_size-1)//2  # integer division so indexing stays int
        
        rows, columns = self.dividing.nonzero()
        for i in range(len(rows)):
            row = rows[i]
            column = columns[i]

            write_block(self._cell_block, self.cells, row, column, block_size)
            cv2.filter2D(self._cell_block, cv2.CV_32F, self._tension_kernel,
                         self._probability, borderType=cv2.BORDER_CONSTANT)
            cv2.threshold(self._probability, self._tension_min, 0, 
                          cv2.THRESH_TOZERO, self._probability)
            self._probability[self._cell_block] = 0
            self._probability **= self.spec.tension_power
            self._probability *= self._distance_kernel
            
            # optimized version of np.random.choice
            np.cumsum(self._probability.flat, out=self._cumulative)
            total = self._cumulative[-1]
            if total < 1.0e-12:
                # no viable placements, we'll have precision problems anyways
                continue 
            self._cumulative /= total
            
            index = self._indices[np.searchsorted(self._cumulative, 
                                                  rdm.random())]
            local_row, local_column = np.unravel_index(index, 
                                                       self._probability.shape)
            self.set_alive(row+(local_row-half_block), 
                           column+(local_column-half_block))
Example #6
def find_content(img_hsv, hist_sample):
    """ img hsv, hist_sample as np.array, -> 1 channel distance """
    src_img_cp = img_hsv
    # normalize the sample histogram
    cv2.normalize(hist_sample, hist_sample, 0, 179, cv2.NORM_MINMAX)
    distance = cv2.calcBackProject([img_hsv], [0], hist_sample, [0, 180], 0.5)

    print('---- back-projection distance ----')
    # show the distance
    ava.cv.utl.show_image_wait_2(distance) # ------------

    # convolve with a circular disc (morphological kernel)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    cv2.filter2D(distance, -1, kernel, distance)

    print('---- distance after convolution ----')
    # show the smoothed distance
    ava.cv.utl.show_image_wait_2(distance) # ------------

    # threshold
    ret, thresh = cv2.threshold(distance, 55, 180, cv2.THRESH_BINARY)
    # thresh = cv2.merge([thresh, thresh, thresh])

    # do the bitwise_and
    #result = cv2.bitwise_and(src_img_cp, thresh)
    return thresh
Example #7
    def apply_skin_mask(self, frame):
        # convert from BGR to HSV
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # detect skin using the hand histogram
        self.skin_mask = cv2.calcBackProject([hsv], [0, 1], self.hand_histogram, [0, 180, 0, 256], 1)
        # create an elliptical kernel (11 worked best in my case)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
        cv2.filter2D(self.skin_mask, -1, kernel, self.skin_mask)
        # Apply gaussian filter on the result to give better result
        cv2.GaussianBlur(self.skin_mask, (3, 3), 0, self.skin_mask)
        # change the threshold to suit the brightness (20-30 gave me best results so far)
        _, thresh = cv2.threshold(self.skin_mask, 20, 255, 0)
        # Apply gaussian filter on the result to give better result
        cv2.GaussianBlur(self.skin_mask, (5, 5), 0, self.skin_mask)

        # Other threshold types were tried (none of them worked well):
        # thresh = cv2.adaptiveThreshold(skin_mask, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
        # _, thresh = cv2.threshold(skin_mask, 0, 255, 0+cv2.THRESH_OTSU)  # Otsu didn't give satisfying results

        # Multichannel
        thresh = cv2.merge((thresh, thresh, thresh))
        # Mask the hand from the original frame
        self.skin_mask = cv2.bitwise_and(frame, thresh)
        # remove faulty skin patches (opening with a 31x31 kernel)
        self.skin_mask = cv2.morphologyEx(self.skin_mask, cv2.MORPH_OPEN, np.ones((31, 31), np.uint8), iterations=5)
        # reduce black holes in the hand (closing with a 9x9 kernel)
        cv2.morphologyEx(self.skin_mask, cv2.MORPH_CLOSE, np.ones((9, 9), np.uint8), self.skin_mask, iterations=5)
        # Draw Skin masking (JFD)
        if self.DEBUGGING:
            cv2.imshow('skin mask', self.skin_mask)
        return self.skin_mask
Example #8
def analisis(imgRecorte, punto, umbral):
	# Step 1: apply the 90-degree motion-blur operator
	imgRecorte = cv2.filter2D(imgRecorte, -1, motionBlur90)

	# Step 2: apply a threshold filter
	_, imgRecorte = cv2.threshold(imgRecorte,umbral,255,cv2.THRESH_BINARY)
	
	# Step 3: apply two iterations of the 0-degree motion blur
	imgRecorte = cv2.filter2D(imgRecorte, -1, motionBlur0)	
	imgRecorte = cv2.filter2D(imgRecorte, -1, motionBlur0)


	# Step 4: detection
	puntoAImagen, puntoBImagen = Punto(punto.x, 0), Punto(punto.x,ancho - 1)

	for fila in range(punto.y, -1, -1):
		if imgRecorte[fila, 4] == 0:
			puntoAImagen = Punto(punto.x, fila + 1)
			break

	for fila in range(punto.y, alto):
		if imgRecorte[fila, 4] == 0:
			puntoBImagen = Punto(punto.x, fila - 1)
			break
	
	return [puntoAImagen, puntoBImagen]
Example #9
def function():
    """
        We will use this function to apply some transformations on our camera feed
    :return:
    """

    cam = cv2.VideoCapture(1)
    # while cam.read()[1].empty:
    #     pass

    kernel_1 = np.asarray([[-1,-2,-1],[0,0,0],[1,2,1]])
    kernel_2 = np.transpose(kernel_1)
    while True:
        frame = cam.read()[1]
        # frame = np.flip(cam.read()[1], axis=1)
        trans_frame_1 = cv2.filter2D(src=frame, ddepth=-1, kernel=kernel_1)
        trans_frame_2 = cv2.filter2D(src=frame, ddepth=-1, kernel=kernel_2)
        combined_trans = np.add(trans_frame_1, trans_frame_2)
        edge_enhanced_image = np.add(frame, combined_trans)
        # show_frame = np.hstack((frame, combined_trans))
        show_frame = np.hstack((frame, combined_trans))
        # cv2.imshow('Video feed', show_frame)
        cv2.imshow('Video feed', detect_lines(frame))
        if cv2.waitKey(1) == 27: # esc key
            break
    pass
Example #10
    def locateMarker(self, frame):
        self.frameReal = frame
        self.frameImag = frame
        self.frameRealThirdHarmonics = frame
        self.frameImagThirdHarmonics = frame

        # Calculate convolution and determine response strength.
        self.frameReal = cv2.filter2D(self.frameReal, cv2.CV_32F, self.matReal)
        self.frameImag = cv2.filter2D(self.frameImag, cv2.CV_32F, self.matImag)
        
        
        
        self.frameRealSq = np.multiply(self.frameReal, self.frameReal)
        self.frameImagSq = np.multiply(self.frameImag, self.frameImag)
        self.frameSumSq = self.frameRealSq + self.frameImagSq

        # Calculate convolution of third harmonics for quality estimation.
        self.frameRealThirdHarmonics = cv2.filter2D(self.frameRealThirdHarmonics, cv2.CV_32F, self.matRealThirdHarmonics)
        self.frameImagThirdHarmonics = cv2.filter2D(self.frameImagThirdHarmonics, cv2.CV_32F, self.matImagThirdHarmonics)
        
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(self.frameSumSq)
        self.lastMarkerLocation = max_loc
        (xm, ym) = max_loc
        self.determineMarkerOrientation(frame)
        self.determineMarkerQuality()
        return max_loc
Example #11
def convolution(img, edges, bars, rots, schmids):
    '''
        Convolve with 38 kinds of filters and, for each group, return the
        8 responses showing the maximum reaction.
        filter -> float64

        edges -> gabor filters: 6 orientations per scale x 3 scales

    '''
    responses = []
    sums = []
    max_responses = []

    # 3 responses from the gabor filters
    for i,kernel in enumerate(edges):

        response = cv2.filter2D(img, cv2.CV_64F, kernel)
        responses.append(response)
        sums.append(response.sum())
            
        if (i+1)%6 == 0 :
            # keep the response with the largest summed activation
            # print np.argmax(sums), np.max(sums)
            max_responses.append(responses[np.argmax(sums)])

            # reset for the next group
            responses = []
            sums = []

    for i,kernel in enumerate(bars):
        response = cv2.filter2D(img, cv2.CV_64F, kernel)
        responses.append(response)
        sums.append(response.sum())
            
        if (i+1)%6 == 0 :
            # keep the response with the largest summed activation
            # print np.argmax(sums), np.max(sums)
            max_responses.append(responses[np.argmax(sums)])

            # reset for the next group
            responses = []
            sums = []
    

    for i,kernel in enumerate(rots):
        max_responses.append(cv2.filter2D(img, cv2.CV_64F, kernel))
    
    ## If use Schmid filter bank
    # max_responses = []
    # for i,kernel in enumerate(schmids):
        # max_responses.append(cv2.filter2D(img, cv2.CV_64F, kernel))



    ## display the results
    # for i in range(8):
        # plt.imshow(max_responses[i])
        # plt.pause(1)

    return max_responses
Example #12
def create_image_backprops(images, **kwargs):
    """
    """
    pathdir = kwargs.get("path", "")
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

    for index, path in images["Path"].items():
        rect = images["Crop"][index]
        path = os.path.join(pathdir, path)
        im_base = open_if_path(path, hsv=True)
        im_crop = get_image_crop(im_base, rect)

        histogram = cv2.calcHist([im_crop], [0, 1], None, [180, 256], [0, 180, 0, 256])
        cv2.normalize(histogram, histogram, 0, 255, cv2.NORM_MINMAX)
        backprop = cv2.calcBackProject([im_base], [0, 1], histogram, [0, 180, 0, 256], 1)
        cv2.filter2D(backprop, -1, kernel, backprop)
        _, im_thresh = cv2.threshold(backprop, 50, 255, 0)

        im_mask = cv2.merge((im_thresh, im_thresh, im_thresh))
        im_clean = cv2.bitwise_and(im_base, im_mask)
        examine = np.vstack([im_base, im_mask, im_clean])
        cv2.imshow(path, examine)

        while cv2.waitKey(5) != ord(" "):
            pass
    cv2.destroyAllWindows()
Example #13
def main(argv):

    img = cv2.imread("IMAG4184.jpg",cv2.IMREAD_GRAYSCALE)

    #kernel = numpy.array([[1,-1,-1] ,[-1,1,-1] ,[-1,-1,1]])

    kernel = numpy.array([[1,0,0,0,-1],
                          [0,1,0,-1,0],
                          [0,0,-0.8,0,0],
                          [0,-1,0,1,0],
                          [-1,0,0,0,1]])


    cv2.filter2D(img,-1,kernel,img)

    #th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
    #        cv2.THRESH_BINARY,11,2)
    #kernel = numpy.ones((2,2),numpy.uint8)
    #erosion = cv2.erode(img,kernel,iterations = 1)
    #dilation = cv2.dilate(erosion,kernel,iterations = 1)


#    cv2.imshow("da",img)
#    cv2.waitKey(0)
#    cv2.destroyAllWindows()

    cv2.imwrite("copie.jpg",img)
Example #14
    def _divide(self):
        blockSize = self.params.blockSize # shortcut
        halfBlock = (blockSize - 1)//2  # integer division for indexing
        rows, columns = map(list, self.dividing.nonzero())
        distanceKernel = _generateDistanceKernel(blockSize)
        connectedKernel = np.array([[0, 1, 0],
                                    [1, 0, 1],
                                    [0, 1, 0]], dtype=np.uint8)
        probability = np.empty((blockSize, blockSize), dtype=np.float32)

        for row, column in zip(rows, columns):
            biofilm = _getBlock(self.biofilm, row, column, blockSize)

            cv2.filter2D(biofilm, cv2.CV_32F, connectedKernel, probability)
            cv2.threshold(probability, 0.1, 1.0, cv2.THRESH_BINARY, probability)
            probability[biofilm] = 0
            probability *= distanceKernel**self.params.distancePower
            probability *= _getBlock(self.surfaceTension, row, column, 
                                     blockSize, dtype=float)\
                           **self.params.tensionPower

            # now select at random
            flattened = probability.flatten()
            total = flattened.sum()
            if total < 1.0e-12:
                # no viable placements, we'll have precision problems anyways
                continue 
            flattened /= total

            index = np.random.choice(np.arange(len(flattened)), p=flattened)
            localRow, localColumn = np.unravel_index(index, biofilm.shape)

            self.biofilm[row + (localRow - halfBlock),
                        column + (localColumn - halfBlock)] = 1
Example #15
def buf() :

    current_dset = dataset.dataset('data/basil/front', dtype=float)
    comp_dset = dataset.dataset('data/basil/front', dtype=float)


    kernel = np.array([[0,0,0], 
                       [0,18,0], 
                       [0,0,0]], dtype=float)

    time_kernel = np.array([[-1,-1,-1], 
                            [-1,-1,-1], 
                            [-1,-1,-1]], dtype=float)


    for i in comp_dset :
        i[...] = cv2.filter2D(i, -1, time_kernel)


    it = dataset.buffered_iterator(current_dset, 'movement3', bufsize = 30, outtype=np.uint8, start=1, end_offset=1)
    for i in it :

        i[...] = cv2.filter2D(i, -1, kernel)

        i[...] = comp_dset[it.index-1] + i + comp_dset[it.index+1] 

        mask = ((i > 100) | (i < -100))
        i[...][mask] = 0

        mask = (i != 0)
        i[...][mask] = 255

        mask = ((i[:,:,0] == 0) & (i[:,:,1] == 0) & (i[:,:,2] == 0))
        mask = np.invert(mask)
        i[...][mask] = 255 
Example #16
def Pyramid(img):
	YUV = cv2.cvtColor(img,cv2.COLOR_BGR2YCR_CB)
	YUV = cv2.resize(YUV,(40,40))
	Y,U,V = cv2.split(YUV)
	YUV = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	img = cv2.resize(YUV,(26,26))
	kernel1 = np.ones((3,1),np.float32)
	kernel2 = np.ones((1,3),np.float32)
	kernel1[0] = -1
	kernel1[1] = 0
	kernel2[0] = [-1,0,1]
	dst = cv2.filter2D(img,cv2.CV_16S,kernel1)
	dstv1 = np.int16(dst)
	dstv2 = cv2.pow(dstv1,2)
	dst = cv2.filter2D(img,cv2.CV_16S,kernel2)
	dsth1 = np.int16(dst)
	dsth2 = cv2.pow(dsth1,2)
	dst1 = dsth2 + dstv2
	dst1 = np.float32(dst1)
	dstfinal = cv2.sqrt(dst1).astype(np.uint8)
	finalh =  dsth1
	finalv = dstv1
	finalm = dstfinal
	UporDown = (finalv > 0 ).astype(int)
	LeftorRight = 2*(finalh > 0).astype(int)
	absh = np.float32(np.abs(finalh))
	absv = np.float32(np.abs(finalv)) * 1.732
	high = 4*(absv > absh).astype(int)
	out = high + LeftorRight + UporDown
	features = []
	for x in range(6):
		hrt = np.zeros(out.shape[:2],np.uint8)
		features.append(hrt)
	for x in range(out.shape[:2][0]):
		for y in range(out.shape[:2][1]):
			z = out[x][y]
			if z == 4 or z == 6:
#				print "a",z
				features[4][x][y] = finalm[x][y]
			elif z == 5 or z == 7:
				features[5][x][y] = finalm[x][y]
#				print "b",z
			else:
				features[z][x][y] = finalm[x][y]
#				print z
	kernelg1 = 0.125*np.ones((4,4),np.float32)
	kernelg2 = 0.25*np.ones((2,2),np.float32)
	lastFeatures = []	
	for img in features:
		tote = cv2.sumElems(img)
		tote = tote/img.size
		img = img/tote
		print img
		print cv2.sumElems(img)
		print img.size
		lastFeatures.append(img1)
	return lastFeatures
Example #17
def findFeatures(img, maxima, blob_img, corn_img):

    kernel = np.array([[-1, -1, 0, 1, 1],
                       [-1, -1, 0, 1, 1],
                       [ 0,  0, 0, 0, 0],
                       [ 1,  1, 0,-1,-1],
                       [ 1,  1, 0,-1,-1]], np.float32)

    kernel2 = np.array([[-1,-1,-1,-1, -1],
                        [-1, 1, 1, 1, -1],
                        [-1, 1, 8, 1, -1],
                        [-1, 1, 1, 1, -1],
                        [-1,-1,-1,-1, -1]], np.float32)

    blob_img = cv2.filter2D(img, -1, kernel2)
    corn_img = cv2.filter2D(img, -1, kernel)

    dims = img.shape[0:2]  # dims[0] = height (rows), dims[1] = width (cols)
   # extract maxima via non-maximum suppression

    M= np.zeros((dims[0], dims[1]), dtype=np.int32)
    nonMaximumSuppression(blob_img,M, dims, True, 0, maxima)
    nonMaximumSuppression(blob_img,M,dims, False, 1, maxima)
    nonMaximumSuppression(corn_img,M, dims, True, 2, maxima)
    nonMaximumSuppression(corn_img,M,dims, False, 3,maxima)
  #  print dims
  #  filter with sobel
  #  compute descriptor    computeDescriptors(img, dims, maxima)
    if len(maxima) == 0:
        return None
    return blob_img, corn_img
Example #18
def thinning_Hi(img):
	kpw = []
	kpb = []

	Init(kpw, kpb)
	src_w = np.array(img, dtype=np.float32)/255.
	thresh, src_b = cv.threshold(src_w, 0.5, 1.0, cv.THRESH_BINARY_INV)
	thresh, src_f = cv.threshold(src_w, 0.5, 1.0, cv.THRESH_BINARY)
	thresh, src_w = cv.threshold(src_w, 0.5, 1.0, cv.THRESH_BINARY)
	th = 1.
	while th > 0:
		th = 0.
		for i in range(8):
			src_w = cv.filter2D(src_w, cv.CV_32F, kpw[i])
			src_b = cv.filter2D(src_b, cv.CV_32F, kpb[i])
			thresh, src_w = cv.threshold(src_w, 2.99, 1, cv.THRESH_BINARY)
			thresh, src_b = cv.threshold(src_b, 2.99, 1, cv.THRESH_BINARY)
			src_w = np.array(np.logical_and(src_w,src_b), dtype=np.float32)
			th += np.sum(src_w)
			src_f = np.array(np.logical_xor(src_f, src_w), dtype=np.float32)
			src_w = src_f.copy()
			thresh, src_b = cv.threshold(src_f, 0.5, 1.0, cv.THRESH_BINARY_INV)

	thresh, ret_img = cv.threshold(src_f, 0.5, 255.0, cv.THRESH_BINARY)
	return ret_img.astype(np.uint8)
Example #19
def CalcGridient(): 

	# Create the operator vectors:

	# mx: multiply each element's left neighbour by -1, itself by 0, its right neighbour by 1, then sum
	# my: multiply each element's upper neighbour by -1, itself by 0, its lower neighbour by 1, then sum
	mx = np.array([[-1, 0, 1]])
	my = np.array([[-1, 0, 1]]).T # transpose

	# Read the image as grayscale and convert it to a numpy array for easier computation
	img1_gray = cv2.imread('img1.png',0) 
	im = np.array(img1_gray).astype(np.uint8)

	# Compute the gradients in the x and y directions
	gx = cv2.filter2D(im, cv2.CV_32F, mx)
	gy = cv2.filter2D(im, cv2.CV_32F, my)

	# Histogram of gradient magnitudes
	M = [0]*360
	
	# Iterate over all pixels and compute their gradient magnitude
	for x in range(1,gx.shape[0]):
		for y in range(1,gx.shape[1]):
			# count
			M[math.trunc( math.sqrt((gx[x][y]**2+gy[x][y]**2)) ) ]+=1;

	# Print the distribution of gradient magnitudes
	s = sum(M)
	for m in M:
		print((m + 0.0) / s)
Example #20
def hdr_do(src):
    assert(src.dtype == np.dtype('uint8'))
    src = np.array(src, dtype='float32') / 255
    
    L_cone = np.squeeze(src.dot(BGR2L_cone))
    L_rod = np.squeeze(src.dot(BGR2L_rod))

    ret, L_cone = \
        cv2.threshold(L_cone, 0, 0,type=cv2.THRESH_TOZERO, dst=L_cone)
    ret, L_rod = \
        cv2.threshold(L_rod, 0, 0, type=cv2.THRESH_TOZERO, dst=L_rod)

    sigma_cone = beta_cone * np.power(L_cone, alpha)
    sigma_rod = beta_rod * np.power(L_rod, alpha)
    
    eps = np.finfo(np.float32).eps
    
    R_cone = R_max*np.divide(np.power(L_cone, n), eps + \
                             np.power(sigma_cone, n) + np.power(L_cone, n))
    R_rod = R_max*np.divide(np.power(L_rod, n), eps + \
                            np.power(sigma_rod, n) + np.power(L_rod, n))
    
    G_cone = cv2.filter2D(R_cone, -1, hh)
    G_rod = cv2.filter2D(R_rod, -1, hh)

    # cv2.GaussianBlur(src,(21,21),1) - cv2.GaussianBlur(src,(21,21),4)
    # not linearly separable, thus cannot use sepFilter2D

    a = np.power(L_cone, -t)
    a = np.subtract(a, np.min(a.ravel()), out=a)
    w = 1/(1 + a)
    Lout = w*G_cone + (1-w)*G_rod
    rst = src*np.expand_dims(Lout/np.power(L_cone,s), axis=2)  # add a channel axis
    
    return rst
Example #21
def hist1(img, samimg):
	oriimg = img
	img = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
	M = cv2.calcHist([samimg],[0, 1], None, [180, 256], [0, 180, 0, 256] )
	I = cv2.calcHist([img],[0, 1], None, [180, 256], [0, 180, 0, 256] )
	# h,s,v = cv2.split(img)
	# M = np.histogram2d(h.ravel(),s.ravel(),256,[[0,180],[0,256]])[0]
	# h,s,v = cv2.split(samimg)
	# I = np.histogram2d(h.ravel(),s.ravel(),256,[[0,180],[0,256]])[0]

	R = M/(I+1)

	h,s,v = cv2.split(img)
	B = R[h.ravel(), s.ravel()]
	B = np.minimum(B,1)
	B = B.reshape(img.shape[:2])
	disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
	cv2.filter2D(B,-1,disc,B)
	B = np.uint8(B)
	cv2.normalize(B,B,0,255,cv2.NORM_MINMAX)

	ret,thresh = cv2.threshold(B,0,255,0)

	for i in range(oriimg.shape[0]):
		for j in range(oriimg.shape[1]):
			if thresh[i, j] == 0:
				oriimg[i, j] = 0

	cv2.imshow('out', oriimg)
	cv2.waitKey(0)
Example #22
 def apply_skin_mask(self, frame):
     # convert from BGR to HSV
     hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
     # detect skin using the hand histogram
     self.skin_mask = cv2.calcBackProject([hsv], [0, 1], self.hand_histogram, [0, 180, 0, 256], 1)
     # create an elliptical kernel (11 worked best in my case)
     kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
     cv2.filter2D(self.skin_mask, -1, kernel, self.skin_mask)
     # Apply gaussian filter to give much better result
     cv2.GaussianBlur(self.skin_mask, (3, 3), 0, self.skin_mask)
     # change the threshold to suit the brightness (20-30 gave me best results so far)
     _, thresh = cv2.threshold(self.skin_mask, 20, 255, 0)
     thresh = cv2.merge((thresh, thresh, thresh))
     # Mask the hand from the original frame
     self.skin_mask = cv2.bitwise_and(frame, thresh)
     # Apply gaussian filter to give much cleaner result
     cv2.GaussianBlur(self.skin_mask, (5, 5), 0, self.skin_mask)
     # remove faulty skin patches (opening with a 31x31 kernel)
     cv2.morphologyEx(self.skin_mask, cv2.MORPH_OPEN, np.ones((31, 31), np.uint8), self.skin_mask, iterations=5)
     # reduce black holes in the hand (closing with a 9x9 kernel)
     cv2.morphologyEx(self.skin_mask, cv2.MORPH_CLOSE, np.ones((9, 9), np.uint8), self.skin_mask, iterations=5)
     # Show skin detection result if DEBUGGING
     if self.DEBUGGING:
         cv2.imshow('SKIN', self.skin_mask)
     return self.skin_mask
Example #23
    def _canny_curve_detector(self, img, low_thresh=35, high_thresh=60):
        # We start by blurring the image a little bit
        img = cv2.GaussianBlur(np.copy(img), (3, 3), 1)

        kernel1 = np.array([[0, 0, 0], [-1, 1, 0], [0, 0, 0]])
        kernel2 = np.array([[0, 0, 0], [0, 1, -1], [0, 0, 0]])
        # a signed ddepth (CV_32F) keeps negative differences from being clipped
        localmax_x = filter2D(img, cv2.CV_32F, kernel1) > 0
        localmax_x = and2(
            localmax_x, filter2D(img, cv2.CV_32F, kernel2) > 0)

        kernel1 = kernel1.transpose()
        kernel2 = kernel2.transpose()
        localmax_y = filter2D(img, cv2.CV_32F, kernel1) > 0
        localmax_y = and2(
            localmax_y, filter2D(img, cv2.CV_32F, kernel2) > 0)

        localmax = or2(localmax_x, localmax_y)

        # We drop local maxima which are under an intensity threshold
        strong = and2(localmax, (high_thresh < img))

        # We finally take back local maxima which are not so weak, and which
        # are just next to a strong local maximum
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        strong.dtype = 'uint8'
        touch = cv2.morphologyEx(strong, cv2.MORPH_DILATE, kernel)
        touch.dtype = 'bool'

        edge = and2(touch, (low_thresh < img))
        edge = and2(edge, localmax)
        edge.dtype = 'uint8'
        return edge * 255
Example #24
 def removeBackground(self, image):
     discValue = 10
     threshold = 1
     hsvt = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
     
     for roiHist in self.negHistograms:
         dst = cv2.calcBackProject([hsvt],[0,1],roiHist,[0,180,0,256],1)
         cv2.imshow('dst', dst)
         disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(discValue,discValue))
         cv2.filter2D(dst, -1,disc,dst)
         ret,thresh = cv2.threshold(dst,threshold,255,cv2.THRESH_BINARY_INV)
         thresh = cv2.merge((thresh,thresh,thresh))
         image = cv2.bitwise_and(image,thresh)
     
     
     for roiHist in self.posHistograms:
         dst = cv2.calcBackProject([hsvt],[0,1],roiHist,[0,180,0,256],1)
         #cv2.imshow('dst', dst)
         disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(discValue,discValue))
         cv2.filter2D(dst, -1,disc,dst)
         ret,thresh = cv2.threshold(dst,threshold,255,cv2.THRESH_BINARY)
         thresh = cv2.merge((thresh,thresh,thresh))
         image = cv2.bitwise_and(image,thresh)
         
         
         #res = np.hstack((thresh,res))
     
     cv2.imshow('backProj', image)
     return image
Example #25
def harris_response(Ix, Iy, kernel, alpha):
    """Compute Harris reponse map using given image gradients.

    Parameters
    ----------
        Ix: image gradient in X direction, values in [-1.0, 1.0]
        Iy: image gradient in Y direction, same size and type as Ix
        kernel: 2D windowing kernel with weights, typically square
        alpha: Harris detector parameter multiplied with square of trace

    Returns
    -------
        R: Harris response map, same size as inputs, floating-point
    """

    # TODO: Your code here

    Ixx = Ix**2
    Iyy = Iy**2
    Ixy = Ix*Iy

    Sxx = cv2.filter2D(Ixx, -1, kernel)
    Syy = cv2.filter2D(Iyy, -1, kernel)
    Sxy = cv2.filter2D(Ixy, -1, kernel)
    # R = det(M) - alpha * trace(M)^2
    R = (Sxx*Syy - Sxy**2) - alpha*((Sxx + Syy)**2)
    return R
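
For context, a hypothetical way to drive harris_response (Sobel gradients, a small Gaussian window, alpha around 0.04-0.06; the file name is a placeholder):

import cv2
import numpy as np

img = cv2.imread('input.jpg', cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.0
Ix = cv2.Sobel(img, cv2.CV_32F, 1, 0, ksize=3)
Iy = cv2.Sobel(img, cv2.CV_32F, 0, 1, ksize=3)
g = cv2.getGaussianKernel(5, 1.0)
window = g @ g.T                           # 5x5 Gaussian windowing kernel
R = harris_response(Ix, Iy, window, alpha=0.05)
corners = np.argwhere(R > 0.01 * R.max())  # crude thresholded corner picks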
Example #26
def conv_cv2(img, kernel):
        if kernel.dtype == 'complex':
            r = cv2.filter2D(img, -1, np.real(kernel))
            i = cv2.filter2D(img, -1, np.imag(kernel))
            return r + i * 1j
        else:
            return cv2.filter2D(img, -1, kernel)
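
A possible use of conv_cv2 with a complex kernel, e.g. a quadrature Gabor pair built from two real cv2.getGaborKernel calls (all parameters are illustrative):

import cv2
import numpy as np

k_real = cv2.getGaborKernel((21, 21), 4.0, 0.0, 10.0, 0.5, psi=0)
k_imag = cv2.getGaborKernel((21, 21), 4.0, 0.0, 10.0, 0.5, psi=np.pi / 2)
kernel = k_real + 1j * k_imag   # complex dtype triggers the first branch
img = cv2.imread('input.jpg', cv2.IMREAD_GRAYSCALE).astype(np.float64)
resp = conv_cv2(img, kernel)    # complex-valued response
magnitude = np.abs(resp)        # per-pixel filter energy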
Example #27
    def get_pupil_pos(self):
        # Prewitt filter masks
        dx = np.array([[1.0, 0.0, -1.0], [1.0, 0.0, -1.0], [1.0, 0.0, -1.0], ])
        dy = np.transpose(dx)
        # filter with Gaussian
        for (x, y, w, h) in self.faces:
            roi_gray = self.gray[y:y + h, x:x + w]
            roi_gray_blurred = cv2.GaussianBlur(roi_gray, (5, 5), 0.05*h, sigmaY=0.05*w)  # keyword avoids hitting the dst slot
            cv2.imshow('roi', roi_gray_blurred)
            x_derivative = cv2.filter2D(roi_gray_blurred, cv2.CV_32F, dx)
            y_derivative = cv2.filter2D(roi_gray_blurred, cv2.CV_32F, dy)
            # magic starts here
            for (ex, ey, ew, eh) in self.eyes:
                for outer_cols in range(ex, ex+ew):
                    for outer_rows in range(ey, ey+eh):
                        response_matrix = np.zeros((ew, eh))
                        for inner_cols in range(ex, ex+ew):
                            for inner_rows in range(ey, ey+eh):
                                center_vector = [outer_cols - inner_cols, outer_rows - inner_rows]
                                gradient_vector = [x_derivative[inner_cols, inner_rows], y_derivative[inner_cols, inner_rows]]
                                center_vector_norm = self._normalize_vector(center_vector)
                                gradient_vector_norm = self._normalize_vector(gradient_vector)
                                response_raw = np.dot(center_vector_norm, gradient_vector_norm)
                                response_normalized = (float(255 - roi_gray_blurred[inner_cols, inner_rows])/255) * response_raw
                                response_matrix[inner_cols-ex, inner_rows-ey] = response_normalized

                    response_matrix_disp = (response_matrix/np.max(response_matrix))
                    cv2.imshow("pupil", response_matrix_disp)
                    cv2.waitKey(1)
Example #28
def Background_remove(img_trimmed,sample_path):
    roi = cv2.imread(sample_path)
    hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)   
 
    target = img_trimmed
    hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)
 
    # calculating object histogram
    roihist = cv2.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
 
    # normalize histogram and apply backprojection
    cv2.normalize(roihist,roihist,0,255,cv2.NORM_MINMAX)
    dst = cv2.calcBackProject([hsvt],[0,1],roihist,[0,180,0,256],1)
 
    # Now convolve with a circular disc
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
    cv2.filter2D(dst,-1,disc,dst)
 
    # threshold and binary AND
    ret,thresh = cv2.threshold(dst,5,255,0)
    #invert to get the object of interest
    cv2.bitwise_not(thresh,thresh)
    thresh = cv2.merge((thresh,thresh,thresh))
    res = cv2.bitwise_and(target,thresh)
 
    #res = np.vstack((target,thresh,res))
    return res
Example #29
    def GetEdges(self,imgs):
        '''
            Get and return the edges of the input img
            args : imgs   -> an image, or a list of images
            dst  : edges  -> image(s) containing the edges of the input
            param: threshold1 -> the lower threshold
                   threshold2 -> not sure
                   apertureSize -> param for filling in disconnected edges
        '''
        # is this img or tmp?
        if len(imgs) == len(self.scale):
            
            '''processing for tmp'''
            for i in range(len(self.scale)):
                # imgs[i] = cv2.GaussianBlur(imgs[i], (9,9), 2**1)
                imgs[i] = cv2.medianBlur(imgs[i],9)
                imgs[i] = cv2.filter2D(imgs[i], cv2.CV_8U, self.sharpenKernel)
                imgs[i] = cv2.Canny(imgs[i], threshold1= 90, threshold2= 200,apertureSize = 3)

        else:

            '''processing for img'''
            # imgs = cv2.GaussianBlur(imgs, (9,9), 2**1)
            imgs = cv2.medianBlur(imgs,9)
            imgs = cv2.filter2D(imgs, cv2.CV_8U, self.sharpenKernel)
            imgs = cv2.Canny(imgs, threshold1= 90, threshold2= 200,apertureSize = 3)

        edges = imgs
        return edges
Example #30
def get_vert_grad(img):
    sharp_filter = np.array([[-1, 0, 1], [-1, 0, 1], [-1,0,1]])
    edges = cv2.filter2D(img, -1, sharp_filter)
    sharp_filter = np.array([[1, 0, -1], [1, 0, -1], [1,0,-1]])
    edges_2 = cv2.filter2D(img, -1, sharp_filter)
    vert_edges = edges | edges_2
    return vert_edges
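
The two oppositely-signed kernels are needed because ddepth=-1 on a uint8 image clips negative responses to zero; OR-ing the two passes recovers edges of both polarities. A roughly equivalent sketch using a signed depth instead:

import cv2
import numpy as np

def get_vert_grad_signed(img):
    # one pass at a signed depth keeps negative responses; |.| then saturates
    # back to uint8, roughly what the OR of the two clipped passes produces
    k = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]], np.float32)
    g = cv2.filter2D(img, cv2.CV_16S, k)
    return cv2.convertScaleAbs(g)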
Example #31
# Convert to uint8 and fuse the images
absX = cv2.convertScaleAbs(x)
absY = cv2.convertScaleAbs(y)
Sobel = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
# ======= if you want to write the sobel image for use =================
lp = cv2.imwrite('/mnt/c/linuxmirror/laplace1.jpg', laplacian)
sx = cv2.imwrite('/mnt/c/linuxmirror/sobelX.jpg', sobelx)
sy = cv2.imwrite('/mnt/c/linuxmirror/sobelY.jpg', sobely)
so = cv2.imwrite('/mnt/c/linuxmirror/sobel.jpg', Sobel)
# ======== prewitt edge detection ======================================
prewit = ndimage.prewitt(im)
pw = cv2.imwrite('/mnt/c/linuxmirror/prewitt.jpg', prewit)
# Prewitt operator
kernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]], dtype=int)
kernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]], dtype=int)
x = cv2.filter2D(grayImage, cv2.CV_16S, kernelx)
y = cv2.filter2D(grayImage, cv2.CV_16S, kernely)
# Convert to uint8 and fuse the images
absX = cv2.convertScaleAbs(x)
absY = cv2.convertScaleAbs(y)
Prewitt = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
pw = cv2.imwrite('/mnt/c/linuxmirror/prewitt2.jpg', Prewitt)
# ======== roberts edge detector =======================================
grayImage = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# Roberts operator
kernelx = np.array([[-1, 0], [0, 1]], dtype=int)
kernely = np.array([[0, -1], [1, 0]], dtype=int)
x = cv2.filter2D(grayImage, cv2.CV_16S, kernelx)
y = cv2.filter2D(grayImage, cv2.CV_16S, kernely)
# Convert to uint8 and fuse the images
absX = cv2.convertScaleAbs(x)
Example #32
for i in range(4):
    plt.subplot(2, 2, i + 1)
    plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
plt.show()


res = cv2.resize(image, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
print(f'original image shape: {image.shape}')
print(f'changed image shape: {res.shape}')


kernel = np.ones((5, 5), np.float32) / 25
dst = cv2.filter2D(image, -1, kernel)

plt.subplot(121)
plt.imshow(image)
plt.title('Original')
plt.xticks([]), plt.yticks([])

plt.subplot(122)
plt.imshow(dst)
plt.title('Averaging')
plt.xticks([]), plt.yticks([])

plt.show()

blur = cv2.blur(image, (5, 5))
Example #33
roi = cv2.imread('roi.jpg')
hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)

# the target image to search
target = cv2.imread('tar.jpg')
hsvt = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)

# compute the target histogram
roihist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
# normalize: the arguments are the source and destination; values end up in the 0-255 range
cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)
dst = cv2.calcBackProject([hsvt], [0, 1], roihist, [0, 180, 0, 256], 1)

# convolve to connect the scattered points
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
dst = cv2.filter2D(dst, -1, disc)

ret, thresh = cv2.threshold(dst, 50, 255, 0)
# use merge to build a 3-channel image
thresh = cv2.merge((thresh, thresh, thresh))

# mask
res = cv2.bitwise_and(target, thresh)
# stack target, thresh and res side by side
res = np.hstack((target, thresh, res))

cv2.imwrite('res.jpg', res)
# show the image
cv2.imshow('1', res)
cv2.waitKey(0)
Example #34
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
_, im = cap.read()
cv2.imwrite("image.png", im)
while (1):  #change to if for camera

    # Take each frame
    #_, im = cap.read()

    n = 5
    kernel = np.ones((n, n), np.float32) / (n * n)
    im = cv2.filter2D(im, -1, kernel)

    grey = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

    k = cv2.waitKey(5) & 0xFF
    cv2.imshow("tote.jpg", im)
    ret, thresh = cv2.threshold(grey, 127, 255, 0)

    img, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    img = cv2.drawContours(img, contours, 3, (0, 255, 0), 3)
    cv2.imshow("image", img)

    if k == 27:
        break

cv2.destroyAllWindows()
Example #35
import numpy as np
import cv2 as openCV

input_image = openCV.imread("./images/input.jpg")

openCV.imshow("Original Image", input_image)

openCV.waitKey()

kernel_matrix = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])

sharpen_image = openCV.filter2D(input_image, -1, kernel_matrix)

openCV.imshow("Sharpen Image", sharpen_image)

openCV.waitKey()

openCV.destroyAllWindows()
Example #36
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt

img = cv.imread("D:/vision computing/First Class/car.jpg", 0)
# cv.imshow("before conv", img)
# # cv.waitKey(0)
# kernel = np.array([[1, 1, 1, 1, 1, 1],
#                    [1, 1, 1, 1, 1, 1],
#                    [1, 1, 1, 1, 1, 1],
#                    [1, 1, 1, 1, 1, 1],
#                    [1, 1, 1, 1, 1, 1], ])
kernel = np.ones((3, 3), np.float64)

kernel = kernel / kernel.sum()  # normalize the box filter so its weights sum to 1
result = cv.filter2D(img, -1, kernel=kernel)

# cv.imshow('result',result)
# cv.imwrite('result.png',result)
# cv.waitKey(0)
# below: subplot(121) selects image 1 of a 1x2 grid
plt.subplot(121), plt.imshow(img, 'gray'), plt.title("Original Image")
plt.subplot(122), plt.imshow(result, 'gray'), plt.title("Filtered Image")
plt.show()
Example #37
@author: lord_rhandy
"""


#def segmented(digit):
 
import cv2
import numpy as np

image=cv2.imread("../meter.jpeg")
#print (image.shape)
#image = cv2.imread("../meter.jpeg")
kernel = np.array([[-1,-1,-1], 
                   [-1, 9,-1],
                   [-1,-1,-1]])
sharpened = cv2.filter2D(image, -1, kernel) # applying the sharpening kernel to the input image & displaying it.
#cv2.imshow('Image Sharpening', sharpened)


#ratio adjustment
#r = 100.0 / image.shape[1]
#dim = (100, int(image.shape[0] * r))
 
# perform the actual resizing of the image and show it
#resized = cv2.resize(sharpened, dim, interpolation = cv2.INTER_AREA)
#cv2.imshow("resized", resized)



def crop_image(image):
    digit1 = image[7:15, 4:9]
Example #38
import cv2
import numpy as np



cap=cv2.VideoCapture(0)
cap.set(3,1080) #set resolution if necessary
cap.set(4,720) #

while(True):
    ret,image=cap.read()

    kernel_sharpening = np.array([[0,-1,0], # sharpening the image for better edges
                              [-1, 4,-1],
                              [0,-1,0]])

    image1 = cv2.filter2D(image, -1, kernel_sharpening)
    image=cv2.add(image,image1) # filtering
    orig = image.copy() 
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 
    gray = cv2.GaussianBlur(gray, (5, 5), 0) #noise removal
     
    kernel = np.ones((15, 15), np.uint8)
    Opened=cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel) #opening for clear edges
    
    
    
    edged = cv2.Canny(Opened, 75, 200) #detecting edges using canny
    k,l,m=image.shape
   
    cv2.imshow("Edged", edged)
Example #39
    def sepia(self):
        kernel = np.array([[0.272, 0.534, 0.131], [0.349, 0.686, 0.168],
                           [0.393, 0.769, 0.189]])

        self.filtered_image = cv2.filter2D(self.original_image, -1, kernel)
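
Worth noting: cv2.filter2D applies this 3x3 matrix spatially to each channel, so the result is a blur-like effect rather than a true colour remap. If a classic per-pixel sepia mix is intended, cv2.transform is the usual tool; a hedged sketch (BGR channel order assumed, placeholder file name):

import cv2
import numpy as np

sepia_matrix = np.array([[0.272, 0.534, 0.131],
                         [0.349, 0.686, 0.168],
                         [0.393, 0.769, 0.189]])
img = cv2.imread('input.jpg')
sepia = cv2.transform(img, sepia_matrix)  # mixes B, G, R per pixel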
Example #40
# Show output image
cv.imshow('Black Background Image', src)
## [black_bg]

## [sharp]
# Create a kernel that we will use to sharpen our image
# an approximation of second derivative, a quite strong kernel
kernel = np.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]], dtype=np.float32)

# do the laplacian filtering as it is
# well, we need to convert everything to something deeper than CV_8U
# because the kernel has some negative values,
# and we can expect in general to have a Laplacian image with negative values
# BUT an 8-bit unsigned int (the one we are working with) can hold values from 0 to 255
# so the possible negative numbers will be truncated
imgLaplacian = cv.filter2D(src, cv.CV_32F, kernel)
sharp = np.float32(src)
imgResult = sharp - imgLaplacian

# convert back to 8bits gray scale
imgResult = np.clip(imgResult, 0, 255)
imgResult = imgResult.astype('uint8')
imgLaplacian = np.clip(imgLaplacian, 0, 255)
imgLaplacian = np.uint8(imgLaplacian)

#cv.imshow('Laplace Filtered Image', imgLaplacian)
cv.imshow('New Sharped Image', imgResult)
## [sharp]

## [bin]
# Create binary image from source image
Пример #41
0
import matplotlib.pyplot as plt

SHOW = True

img = cv2.imread('board.jpg', cv2.IMREAD_GRAYSCALE)

#if SHOW:
# cv2.namedWindow('image',cv2.WINDOW_NORMAL)
# cv2.resizeWindow('image', 600,600)
# cv2.imshow('image', img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()

blur = cv2.medianBlur(img, 5)
sharpen_kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
sharpen = cv2.filter2D(blur, -1, sharpen_kernel)
ret, thresh = cv2.threshold(sharpen, 110, 255, cv2.THRESH_BINARY)

kernel = np.ones((5, 5), np.uint8)
dilation = cv2.dilate(thresh, kernel, iterations=1)

kern = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
closing = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kern)

squares = cv2.findContours(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
squares = squares[0] if len(squares) == 2 else squares[1]
print(squares)
for square in squares:
    print('in')
    x, y, w, h = cv2.boundingRect(square)
    cv2.rectangle(closing, (x, y), (x + w, y + h), [36, 255, 12], 2)
Example #42
    def emboss(self):
        kernel = np.array([[0, -1, -1], [1, 0, -1], [1, 1, 0]])

        self.filtered_image = cv2.filter2D(self.original_image, -1, kernel)
Example #43
def find_color_card(rgb_img,
                    threshold_type='adaptgauss',
                    threshvalue=125,
                    blurry=False,
                    background='dark',
                    record_chip_size="median"):
    """Automatically detects a color card and output info to use in create_color_card_mask function

    Algorithm written by Brandon Hurr. Updated and implemented into PlantCV by Haley Schuhl.

        Inputs:
    rgb_img          = Input RGB image data containing a color card.
    threshold_type   = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss')
    threshvalue      = Thresholding value, optional (default 125)
    blurry           = Bool (default False) if True then image sharpening applied
    background       = Type of image background either 'dark' or 'light' (default 'dark'); if 'light' then histogram
                        expansion applied to better detect edges, but histogram expansion will be hindered if there
                        is a dark background
    record_chip_size = Optional str for choosing chip size measurement to be recorded, either "median",
                        "mean", or None

    Returns:
    df             = Dataframe containing information about the filtered contours
    start_coord    = Two element tuple of starting coordinates, location of the top left pixel detected
    spacing        = Two element tuple of spacing between centers of chips

    :param rgb_img: numpy.ndarray
    :param threshold_type: str
    :param threshvalue: int
    :param blurry: bool
    :param background: str
    :param record_chip_size: str
    :return df: pandas.core.frame.DataFrame
    :return start_coord: tuple
    :return spacing: tuple
    """
    # Imports
    import skimage
    import pandas as pd
    from scipy.spatial.distance import squareform, pdist

    # Get image attributes
    height, width, channels = rgb_img.shape
    total_pix = float(height * width)

    # Minimum and maximum square size based upon 12 MP image
    min_area = 1000. / 12000000. * total_pix
    max_area = 8000000. / 12000000. * total_pix

    # Create gray image for further processing
    gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)

    # Laplacian Fourier Transform detection of blurriness
    blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()

    # If image is blurry then try to deblur using kernel
    if blurry:
        # from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
        kernel = np.array([[-1, -1, -1, -1, -1], [-1, 2, 2, 2, -1],
                           [-1, 2, 8, 2, -1], [-1, 2, 2, 2, -1],
                           [-1, -1, -1, -1, -1]]) / 8.0
        # Store result back out for further processing
        gray_img = cv2.filter2D(gray_img, -1, kernel)

    # In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu
    # thresholding. If your image has a bright background then apply
    if background == 'light':
        clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
        # apply CLAHE histogram expansion to find squares better with canny edge detection
        gray_img = clahe.apply(gray_img)
    elif background != 'dark':
        fatal_error('Background parameter ' + str(background) +
                    ' is not "light" or "dark"!')

    # Thresholding
    if threshold_type.upper() == "OTSU":
        # Blur slightly so defects on card squares and background patterns are less likely to be picked up
        gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
        ret, threshold = cv2.threshold(gaussian, 0, 255,
                                       cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    elif threshold_type.upper() == "NORMAL":
        # Blur slightly so defects on card squares and background patterns are less likely to be picked up
        gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
        ret, threshold = cv2.threshold(gaussian, threshvalue, 255,
                                       cv2.THRESH_BINARY)
    elif threshold_type.upper() == "ADAPTGAUSS":
        # Blur slightly so defects on card squares and background patterns are less likely to be picked up
        gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
        threshold = cv2.adaptiveThreshold(gaussian, 255,
                                          cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                          cv2.THRESH_BINARY_INV, 51, 2)
    else:
        fatal_error('Input threshold_type=' + str(threshold_type) +
                    ' but should be "otsu", "normal", or "adaptgauss"!')

    # Apply automatic Canny edge detection using the computed median
    canny_edges = skimage.feature.canny(threshold)
    canny_edges.dtype = 'uint8'

    # Compute contours to find the squares of the card
    contours, hierarchy = cv2.findContours(canny_edges, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)[-2:]
    # Variable of which contour is which
    mindex = []
    # Variable to store moments
    mu = []
    # Variable to x,y coordinates in tuples
    mc = []
    # Variable to x coordinate as integer
    mx = []
    # Variable to y coordinate as integer
    my = []
    # Variable to store area
    marea = []
    # Variable to store whether something is a square (1) or not (0)
    msquare = []
    # Variable to store square approximation coordinates
    msquarecoords = []
    # Variable to store child hierarchy element
    mchild = []
    # Fitted rectangle height
    mheight = []
    # Fitted rectangle width
    mwidth = []
    # Ratio of height/width
    mwhratio = []

    # Extract moments from contour image
    for x in range(0, len(contours)):
        mu.append(cv2.moments(contours[x]))
        marea.append(cv2.contourArea(contours[x]))
        mchild.append(int(hierarchy[0][x][2]))
        mindex.append(x)

    # Cycle through moment data and compute location for each moment
    for m in mu:
        if m['m00'] != 0:  # This is the area term for a moment
            mc.append((int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])))
            mx.append(int(m['m10'] / m['m00']))
            my.append(int(m['m01'] / m['m00']))
        else:
            mc.append((0, 0))
            mx.append((0))
            my.append((0))

    # Loop over our contours and extract data about them
    for index, c in enumerate(contours):
        # Area isn't 0, but greater than min-area and less than max-area
        if marea[index] != 0 and min_area < marea[index] < max_area:
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.1 * peri, True)
            center, wh, angle = cv2.minAreaRect(c)  # Rotated rectangle
            mwidth.append(wh[0])
            mheight.append(wh[1])
            # In different versions of OpenCV, width and height can be listed in a different order
            # To normalize the ratio we sort them and take the ratio of the longest / shortest
            wh_sorted = list(wh)
            wh_sorted.sort()
            mwhratio.append(wh_sorted[1] / wh_sorted[0])
            msquare.append(len(approx))
            # If the approx contour has 4 points then we can assume we have 4-sided objects
            if len(approx) == 4 or len(approx) == 5:
                msquarecoords.append(approx)
            else:  # It's not square
                # msquare.append(0)
                msquarecoords.append(0)
        else:  # Contour has area of 0, not interesting
            msquare.append(0)
            msquarecoords.append(0)
            mwidth.append(0)
            mheight.append(0)
            mwhratio.append(0)

    # Make a pandas df from data for filtering out junk
    all_contours = {
        'index': mindex,
        'x': mx,
        'y': my,
        'width': mwidth,
        'height': mheight,
        'res_ratio': mwhratio,
        'area': marea,
        'square': msquare,
        'child': mchild
    }
    df = pd.DataFrame(all_contours)

    # Add calculated blur factor to output
    df['blurriness'] = blurfactor

    # Filter df for attributes that would isolate squares of reasonable size
    df = df[(df['area'] > min_area) & (df['area'] < max_area) &
            (df['child'] != -1) & (df['square'].isin([4, 5])) &
            (df['res_ratio'] < 1.2) & (df['res_ratio'] > 0.85)]

    # Filter nested squares from dataframe, was having issues with median being towards smaller nested squares
    df = df[~(df['index'].isin(df['index'] + 1))]

    # Count up squares that are within a given radius, more squares = more likelihood of them being the card
    # Median square width times 6 gives the proximity radius for searching for similar squares
    median_sq_width_px = df["width"].median()

    # Squares that are within 6 widths of the current square
    pixeldist = median_sq_width_px * 6
    # Computes euclidean distance matrix for the x and y contour centroids
    distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']])))
    # Count, for each square, how many others lie within pixeldist pixels
    distmatrixflat = distmatrix.apply(
        lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)

    # Append distprox summary to dataframe
    df = df.assign(distprox=distmatrixflat.values)

    # Compute how similar in area the squares are. lots of similar values indicates card isolate area measurements
    filtered_area = df['area']
    # Create empty matrix for storing comparisons
    sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
    # Double loop through all areas to compare to each other
    for p in range(0, len(filtered_area)):
        for o in range(0, len(filtered_area)):
            big = max(filtered_area.iloc[p], filtered_area.iloc[o])
            small = min(filtered_area.iloc[p], filtered_area.iloc[o])
            pct = 100. * (small / big)
            sizecomp[p][o] = pct

    # How many comparisons given 90% square similarity
    sizematrix = pd.DataFrame(sizecomp).apply(
        lambda sim: sim[sim >= 90].count() - 1, axis=1)

    # Append sizeprox summary to dataframe
    df = df.assign(sizeprox=sizematrix.values)

    # Reorder dataframe for better printing
    df = df[[
        'index', 'x', 'y', 'width', 'height', 'res_ratio', 'area', 'square',
        'child', 'blurriness', 'distprox', 'sizeprox'
    ]]

    # Loosely filter for size and distance (relative size to median)
    minsqwidth = median_sq_width_px * 0.80
    maxsqwidth = median_sq_width_px * 1.2
    df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) &
            (df['width'] > minsqwidth) & (df['width'] < maxsqwidth)]

    # Filter for proximity again to root out stragglers. Find and count up squares that are within a given radius,
    # more squares = more likelihood of them being the card. Median square width times 5 gives the proximity
    # radius for searching for similar squares
    median_sq_width_px = df["width"].median()

    # Squares that are within 5 widths of the current square
    pixeldist = median_sq_width_px * 5
    # Computes euclidean distance matrix for the x and y contour centroids
    distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']])))
    # Count, for each square, how many others lie within pixeldist pixels
    distmatrixflat = distmatrix.apply(
        lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)

    # Append distprox summary to dataframe
    df = df.assign(distprox=distmatrixflat.values)

    # Filter results for distance proximity to other squares
    df = df[(df['distprox'] >= 4)]
    # Remove all not numeric values use to_numeric with parameter, errors='coerce' - it replace non numeric to NaNs:
    df['x'] = pd.to_numeric(df['x'], errors='coerce')
    df['y'] = pd.to_numeric(df['y'], errors='coerce')

    # Remove NaN
    df = df.dropna()

    if np.isnan(df['x'].min()) or np.isnan(df['y'].min()):
        fatal_error('No color card found under current parameters')
    else:
        # Extract the starting coordinate
        start_coord = (df['x'].min(), df['y'].min())

        # start_coord = (int(df['X'].min()), int(df['Y'].min()))
        # Calculate the range
        spacingx_short = (df['x'].max() - df['x'].min()) / 3
        spacingy_short = (df['y'].max() - df['y'].min()) / 3
        spacingx_long = (df['x'].max() - df['x'].min()) / 5
        spacingy_long = (df['y'].max() - df['y'].min()) / 5
        # Chip spacing since 4x6 card assumed
        spacing_short = min(spacingx_short, spacingy_short)
        spacing_long = max(spacingx_long, spacingy_long)
        # Smaller spacing measurement might have a chip missing
        spacing = int(max(spacing_short, spacing_long))
        spacing = (spacing, spacing)

    if record_chip_size is not None:
        if record_chip_size.upper() == "MEDIAN":
            chip_size = df.loc[:, "area"].median()
            chip_height = df.loc[:, "height"].median()
            chip_width = df.loc[:, "width"].median()
        elif record_chip_size.upper() == "MEAN":
            chip_size = df.loc[:, "area"].mean()
            chip_height = df.loc[:, "height"].mean()
            chip_width = df.loc[:, "width"].mean()
        else:
            print(
                str(record_chip_size) +
                " Is not a valid entry for record_chip_size." +
                " Must be either 'mean', 'median', or None.")
            chip_size = None
            chip_height = None
            chip_width = None
        # Store into global measurements
        outputs.add_observation(
            variable='color_chip_size',
            trait='size of color card chips identified',
            method='plantcv.plantcv.transform.find_color_card',
            scale='none',
            datatype=float,
            value=chip_size,
            label=str(record_chip_size))
        method = record_chip_size.lower()
        outputs.add_observation(
            variable=f'{method}_color_chip_height',
            trait=f'{method} height of color card chips identified',
            method='plantcv.plantcv.transform.find_color_card',
            scale='none',
            datatype=float,
            value=chip_height,
            label=str(record_chip_size))
        outputs.add_observation(
            variable=f'{method}_color_chip_width',
            trait=f'{method} width of color card chips identified',
            method='plantcv.plantcv.transform.find_color_card',
            scale='none',
            datatype=float,
            value=chip_width,
            label=str(record_chip_size))

    return df, start_coord, spacing
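
# A self-contained sketch of the proximity count used above, on hypothetical
# toy centroids (illustration only, not part of the card detector):
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist, squareform

toy = pd.DataFrame({'x': [0, 1, 2, 50], 'y': [0, 0, 0, 50]})
dm = pd.DataFrame(squareform(pdist(toy[['x', 'y']])))
# for each point, count neighbors within 5 px (minus 1 excludes the point itself)
counts = dm.apply(lambda d: d[d <= 5].count() - 1, axis=1)
# counts -> [2, 2, 2, 0]: the isolated point at (50, 50) would be filtered out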
Example #44
def cv_filter2d(img):
    kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])

    dst = cv2.filter2D(img, -1, kernel)
    return dst
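
# Note on the kernel above (a sketch, not from the original example): it is the
# identity kernel minus the 4-neighbour Laplacian, i.e. sharpened = img - Laplacian(img).
import numpy as np
identity = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
laplacian = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
assert ((identity - laplacian) == np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])).all()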
Example #45
def genStroke(img, dirNum, verbose = False):
    height , width = img.shape[0], img.shape[1]
    img = np.float32(img) / 255.0
    print("Input  height: %d, width: %d"%(height,width)) 

    print("PreProcessing Images, denoising ...") 
    img = cv2.medianBlur(img, 3)
    # if verbose == True:
    #     cv2.imshow('blurred image', np.uint8(img*255))
    #     cv2.waitKey(0)

    print("Generating Gradient Images ...")
    imX = np.append(np.absolute(img[:, 0 : width - 1]  - img[:, 1 : width]),  np.zeros((height, 1)), axis = 1)
    imY = np.append(np.absolute(img[0 : height - 1, :] - img[1 : height, :]), np.zeros((1, width)), axis = 0)
##############################################################
#####  There are several ways to generate the gradient   #####
##############################################################
    # L2 magnitude (computed but immediately overwritten by the cheaper
    # L1 approximation below):
    # img_gradient = np.sqrt(imX ** 2 + imY ** 2)
    img_gradient = imX + imY
    if verbose == True:
        # cv2.imshow('gradient image', np.uint8(255-img_gradient*255))
        cv2.imwrite('output/grad.jpg',np.uint8(255-img_gradient*255))
        cv2.waitKey(0)



    #filter kernel size
    tempsize = 0 
    if height > width:
        tempsize = width
    else:
        tempsize = height
    tempsize /= 30
#####################################################################
# according to the paper, the kernel size is 1/30 of the shorter side
#####################################################################
    halfKsize = int(tempsize / 2)
    if halfKsize < 1:
        halfKsize = 1
    if halfKsize > 9:
        halfKsize = 9
    kernalsize = halfKsize * 2 + 1
    print("Kernel Size = %s" %(kernalsize)) 



##############################################################
############### Here we generate the kernel ##################
##############################################################
    kernel = np.zeros((dirNum, kernalsize, kernalsize))
    kernel[0, halfKsize, :] = 1.0
    for i in range(0,dirNum):
        kernel[i,:,:] = temp = rotateImg(kernel[0,:,:], i * 180 / dirNum)
        kernel[i,:,:] *= kernalsize/np.sum(kernel[i])
        # print(np.sum(kernel[i]))
        if verbose == True:
            # print(kernel[i])
            title = 'line kernel %d'%i
            # cv2.imshow( title, np.uint8(temp*255))
            cv2.waitKey(0)

#####################################################
# cv2.filter2D() actually computes correlation, not convolution.
# Correlation equals convolution with the kernel rotated by 180 degrees,
# but our kernel is centrally symmetric, so the result is the same.
#####################################################

    #filter gradient map in different directions
    print("Filtering Gradient Images in different directions ...") 
    response = np.zeros((dirNum, height, width))
    for i in range(dirNum):
        ker = kernel[i, :, :]
        response[i, :, :] = cv2.filter2D(img_gradient, -1, ker)
    if verbose == True:
        for i in range(dirNum):
            title = 'response %d'%i
            # cv2.imshow(title, np.uint8(response[i,:,:]*255))
            cv2.waitKey(0)



    #divide gradient map into different sub-map
    print("Caculating Gradient classification ...")
    Cs = np.zeros((dirNum, height, width))
    for x in range(width):
        for y in range(height):
            i = np.argmax(response[:,y,x])
            Cs[i, y, x] = img_gradient[y,x]
    if verbose == True:
        for i in range(dirNum):
            title = 'max_response %d'%i
            # cv2.imshow(title, np.uint8(Cs[i,:,:]*255))
            cv2.waitKey(0)



    #generate line shape
    print("Generating shape Lines ...")
    spn = np.zeros((dirNum, height, width))
    for i in range(dirNum):
        ker = kernel[i, :, :]
        spn[i, :, :] = cv2.filter2D(Cs[i], -1, ker)
    sp = np.sum(spn, axis = 0)

    sp = sp * np.power(img_gradient, 0.4) 
    ################# See the paper for how to interpret this step #################
    sp = (sp - np.min(sp)) / (np.max(sp) - np.min(sp))
    S = 1 - sp
    # if verbose == True:
    #     cv2.imshow('raw stroke', np.uint8(S*255))
    #     cv2.waitKey(0)

    return S
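
# rotateImg is an external helper assumed to rotate an image about its center;
# a minimal sketch with OpenCV (an assumption, not the original helper):
def rotateImg(img, angle):
    h, w = img.shape[:2]
    M = cv2.getRotationMatrix2D(((w - 1) / 2.0, (h - 1) / 2.0), angle, 1.0)
    return cv2.warpAffine(img, M, (w, h))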
# Visualize the first conv layer's weights (weights1 is assumed to be the
# layer's weight tensor, e.g. model.conv1.weight.data)
import matplotlib.pyplot as plt

w = weights1.numpy()
filter_index = 0

print("first conv layer")
print(w[filter_index][0])
print(w[filter_index][0].shape)
plt.imshow(w[filter_index][0], cmap="gray")
## TODO: load in and display any image from the transformed test dataset
import cv2
img=cv2.imread('./images/mona_lisa.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
plt.xticks([]), plt.yticks([])
plt.title("Original Image")

## TODO: Using cv's filter2D function,
## apply a specific set of filter weights (like the one displayed above) to the test image
filtered = cv2.filter2D(img, -1, w[filter_index][0])
fig = plt.figure()
ax = fig.add_subplot(121, xticks = [], yticks = [])
ax.imshow(filtered)
ax.set_title("Feature Map")
ax = fig.add_subplot(122, xticks = [], yticks = [])
ax.imshow(w[filter_index][0], cmap = 'gray')

plt.show()
def sharpen(img):
    kern = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
    return cv2.filter2D(img, -1, kern)
Пример #48
0
def process(img, filters):
    accum = np.zeros_like(img)
    for kern in filters:
        fimg = cv2.filter2D(img, cv2.CV_8UC3, kern)
        np.maximum(accum, fimg, accum)
    return accum
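
# The `filters` argument is assumed to be a Gabor filter bank; a common way to
# build one, following OpenCV's gabor_threads.py sample (a sketch, with assumed
# parameter values):
def build_filters(ksize=31, n_orientations=16):
    filters = []
    for theta in np.arange(0, np.pi, np.pi / n_orientations):
        kern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F)
        kern /= 1.5 * kern.sum()  # normalize so responses stay comparable
        filters.append(kern)
    return filters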
Example #49
import cv2
import numpy as np
from matplotlib import pyplot as plt
from filters import apply_filter

#
# STILL TO DO: APPLY THE LOW-PASS AND HIGH-PASS FILTERS. POINT 2 IS DONE
#

def create_filter(rows, columns):
  return np.ones((rows, columns), np.float32)/(columns*rows)

image = cv2.cvtColor(cv2.imread('../imagenes/diegote.jpg'), cv2.COLOR_BGR2RGB)

filter = create_filter(5, 5)

new_image = apply_filter(image, filter)

# OpenCV reference result (the 90-degree rotation is a no-op for this uniform kernel)
cv_image = cv2.filter2D(image, -1, cv2.rotate(filter, cv2.ROTATE_90_CLOCKWISE))

plt.subplot(121),plt.imshow(image),plt.title('Original')
plt.xticks([]), plt.yticks([])
#plt.subplot(132),plt.imshow(cv_image),plt.title('OpenCV')
#plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(new_image),plt.title('My filter')
plt.xticks([]), plt.yticks([])
plt.show()
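
# `apply_filter` comes from the local `filters` module, which is not shown
# here; a plausible sketch that does the correlation by hand (an assumption):
def apply_filter(image, kernel):
    kh, kw = kernel.shape
    ph, pw = kh // 2, kw // 2
    padded = np.pad(image, ((ph, ph), (pw, pw), (0, 0)), mode='edge')
    out = np.zeros(image.shape, dtype=np.float32)
    for i in range(kh):
        for j in range(kw):
            out += kernel[i, j] * padded[i:i + image.shape[0], j:j + image.shape[1]]
    return np.clip(out, 0, 255).astype(np.uint8)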
    "..\\BMG\\11august\\13\\DSC06090.JPG",
    "..\\BMG\\11august\\13\\DSC06094.JPG",
    "..\\BMG\\11august\\13\\IMG_6097.JPG"
]

files = files_new_set1

for f in files:
    print("Image file: " + f)

    img = cv2.imread(f)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Prewitt-style kernel: responds to left-right gradients (vertical edges)
    kernel = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])

    d = cv2.filter2D(img, -1, kernel)
    dg = cv2.cvtColor(d, cv2.COLOR_BGR2GRAY)

    #print("before")

    rect_image = np.zeros(gray.shape, np.uint8)

    circles = cv2.HoughCircles(gray,
                               cv2.HOUGH_GRADIENT,
                               1,
                               285,
                               param1=51,
                               param2=48,
                               minRadius=100,
                               maxRadius=175)
    # To Do: Decrease min radius and max radius in steps of 5 until exactly 48 circles are identified - no less, no more.
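    # A sketch of the To Do above (an assumption): step both radii down by 5
    # until exactly 48 circles are found, 48 being the assumed target count.
    min_r, max_r = 100, 175
    while (circles is None or circles.shape[1] != 48) and min_r > 5:
        min_r -= 5
        max_r -= 5
        circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 285,
                                   param1=51, param2=48,
                                   minRadius=min_r, maxRadius=max_r)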
Example #51
 def apply_emboss(self, word_img):
     return cv2.filter2D(word_img, -1, self.emboss_kernal)
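 # self.emboss_kernal is defined elsewhere in the class; a commonly used emboss
 # kernel looks like this (a sketch, not necessarily the original):
 # emboss_kernal = np.array([[-2, -1, 0],
 #                           [-1,  1, 1],
 #                           [ 0,  1, 2]])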
def sharpen_image(image):
    kernel = numpy.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
    image = cv2.filter2D(image, -1, kernel)
    return image
Example #53
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Image gradients — functions used: cv2.Sobel(), cv2.Scharr(), cv2.Laplacian()
# The first two compute first- or second-order derivatives; Scharr is an
# optimization of Sobel (gradient estimation with small kernels); Laplacian
# computes the second derivative.
# High-pass filtering: removes the low-frequency signal and keeps the high
# frequencies (the three gradient filters above)
src = cv2.imread("../images/1.jpg")
# 1. Sharpening — makes the gradients more pronounced
# 1. Custom sharpening kernel
kernel = np.float32([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
dst1 = cv2.filter2D(src, ddepth=-1, kernel=kernel)

# USM sharpening (UnsharpMask): dst = 2*src - blurred
gaussian = cv2.GaussianBlur(src, ksize=(7, 7), sigmaX=3, sigmaY=3)  # Gaussian blur
dst2 = cv2.addWeighted(src, alpha=2, src2=gaussian, beta=-1, gamma=0)

# cv2.imshow("dst1", dst1)
# cv2.imshow("gaussian", gaussian)
# cv2.imshow("dst2", dst2)

# 2. Gradient operations / high-pass filtering: find contours
gray = cv2.imread("../images/6.jpg", cv2.IMREAD_GRAYSCALE)  # single-channel image
print(gray.shape)

# 1. Sobel operator: dx and dy are the derivative orders (0 means no derivative
# in that direction; typical values are 0, 1, or 2)
sobel_x = cv2.Sobel(gray, ddepth=-1, dx=1, dy=0,
                    ksize=3)  # first derivative along x: strong left-right gradients, so edges look vertical
sobel_y = cv2.Sobel(gray, ddepth=-1, dx=0, dy=1,
                    ksize=3)  # first derivative along y: strong up-down gradients, so edges look horizontal
sobel_x_abs = cv2.convertScaleAbs(sobel_x, alpha=2, beta=1)  # enhance the result; larger values overexpose
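# A typical next step (an assumption): take the absolute y response and blend
# the two gradient directions:
# sobel_y_abs = cv2.convertScaleAbs(sobel_y, alpha=2, beta=1)
# sobel_xy = cv2.addWeighted(sobel_x_abs, 0.5, sobel_y_abs, 0.5, 0)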
Example #54
 def apply_sharp(self, word_img):
     return cv2.filter2D(word_img, -1, self.sharp_kernel)
Example #55
#align face image
faceAligned = faceAlign(image, size, faceLandmarks)
#print(faceAligned)



#read the aligned face

#im = cv2.imread(faceAligned)
im = cv2.cvtColor(faceAligned, cv2.COLOR_BGR2RGB)
#print(im)
#gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)

# sharpening kernel (defined here but not applied below)
sKernel = np.array(([0, -1, 0],[-1, 5, -1],[0, -1, 0]), dtype="int")

# smoothing (averaging) filter
kernel = np.ones((5,5),np.float32)/25
output = cv2.filter2D(im, -1, kernel)

#cv2.imshow("image", image)
#cv2.imshow("face aligned", output)

#save image
imazh = cv2.imwrite("aligned"+filename,output)

#cv2.waitKey(0)
#cv2.destroyAllWindows()
Example #56
import numpy as np
import cv2
# READ THE IMAGE IN B-G-R ORDER
img=cv2.imread("imagen5.png")
# SHOW THE IMAGE SHAPE: ROWS, COLUMNS, AND CHANNELS
print(img.shape)
# CREATE A NEW IMAGE WITH DIMENSIONS 500 x 500
# INTER_CUBIC FOR UPSCALING
img2=cv2.resize(img,(500,500),interpolation=cv2.INTER_CUBIC)
# convert from BGR to grayscale
gris=cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
# ENHANCE THE IMAGE CONTRAST (HISTOGRAM EQUALIZATION)
gris2=cv2.equalizeHist(gris)
# edge-detection kernel (horizontal gradient)
kernel=np.array([[-1,0,1],[0,0,0],[-1,0,1]])
#filter2D
filt=cv2.filter2D(gris2,-1,kernel=kernel)
#Canny
bordes1=cv2.Canny(gris,10,200)
bordes2=cv2.Canny(gris2,10,200)

cv2.imshow("GRISES",gris)
cv2.imshow("GRISES 2",gris2)
cv2.imshow("IMAGEN BORDES 1 DE LA IMAGEN SIN ECUALIZAR",bordes1)
cv2.imshow("IMAGEN BORDES 2 DE LA IMAGEN ECUALIAZADA",bordes2)

# COMPUTE THE HISTOGRAM OF A GRAYSCALE IMAGE
# A PIXEL HAS AT MOST 256 INTENSITY LEVELS (UNSIGNED 8 BITS)
histo=cv2.calcHist([gris],[0],None,[256],[0,256])
histo2=cv2.calcHist([gris2],[0],None,[256],[0,256])

from matplotlib import pyplot as plt
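
# A minimal plotting sketch for the two histograms computed above (an assumed
# continuation):
plt.plot(histo, label='original')
plt.plot(histo2, label='equalized')
plt.legend()
plt.show()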
Example #57
        mask = cv2.absdiff(f.astype(np.uint8), BG.astype(np.uint8))

        ret, mask = cv2.threshold(mask.astype(np.uint8), 40, 255,
                                  cv2.THRESH_BINARY)

        # disc = cv2.getStructuringElement(cv2.MORPH_RECT,(7,7))

        # cv2.filter2D(mask,-1,disc,mask)

        kernel = np.ones((3, 3), np.uint8)
        mask = cv2.erode(mask, kernel, iterations=1)
        # mask = cv2.dilate(mask,np.ones((11,11),np.uint8),iterations = 1)

        # Now convolve with a circular disc
        disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
        cv2.filter2D(mask, -1, disc, mask)

        # reigon1.drawBoundary(frame)
        cv2.imshow('fore', mask)

        _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_NONE)

        # get the contour with the greatest area
        # max_area = -1
        # ci = -1
        for i in range(len(contours)):
            cnt = contours[i]
            area = cv2.contourArea(cnt)

            if area > 45:
Example #58
def onePhotoExecution(imgEye, params):
    # assign the parameters to variables
    scale = params[0]
    rmin = params[1]
    rmax = params[2]
    numberOfPoints = params[3]
    searchPixels = params[4]
    noiseMethod = params[5]
    segMethod = params[6]

    rminScaled = int(rmin * scale)
    rmaxScaled = int(rmax * scale)

    imgEyeGrey = imgEye.copy()  # grayscale copy of the image
    imgSrc = imgEye.copy()  # copy of the original image
    imgIrisPupil = imgEye.copy()  # copy of the original image
    imgSrcMask = imgEye.copy()  # copy of the original image
    imgSrcGrey = imgEyeGrey.copy()

    kernelSr2 = np.ones((5, 5), np.float32) / 17
    kernelSr = np.ones((5, 5), np.float32) / 25
    # prepare the images for the parabola method
    if(noiseMethod == 'parabolicApproximation'):
        imgSrcParabole = imgEye.copy()  # copy of the original image

        # prepare the image for finding upper-eyelid points (parabola method)
        imgEyeReadyFindPoints = cv2.medianBlur(imgEyeGrey,11)
        imgEyeReadyFindPoints = cv2.filter2D(imgEyeReadyFindPoints, -1, kernelSr2)

        # prepare the image for finding lower-eyelid points (parabola method)
        imgParaboleBottom = cv2.filter2D(imgEyeGrey, -1, kernelSr2)
        imgEyeGreyIrisTemp = cv2.equalizeHist(imgEyeGrey)
        imgEyeReadyIris = cv2.filter2D(imgEyeGreyIrisTemp, -1, kernelSr)

####################    SEGMENTATION     #####################
    if(segMethod == "Daugman"):

        #preprocessing
        kernelSr = np.ones((5, 5), np.float32) / 25

        # prepare the image for locating the iris (center and radius)
        imgEyeGreyIrisTemp = cv2.equalizeHist(imgEyeGrey)
        imgEyeReadyIris = cv2.filter2D(imgEyeGreyIrisTemp, -1, kernelSr)

        # prepare the image for locating the pupil (center and radius)
        imgEyeReadyPupil = cv2.filter2D(imgEyeGrey, -1, kernelSr)

        # scale the image
        imgResizedIris, imgResizedPupil = imageScale(imgEyeReadyIris, imgEyeReadyPupil, scale)

        # find the pupil parameters: center point and radius
        center, radius = daugman.findPupilBoundary(imgEyeReadyPupil)


        # apply scaling to the found parameters (the pupil search runs on the unscaled image)
        x_pup_center = int(center[0] * scale)
        y_pup_center = int(center[1] * scale)


        # search for the parameters describing the iris
        yIrisCenterScaled, xIrisCenterScaled, RIrisScaled = daugman.searchIrisBoundary(imgResizedIris, rminScaled, rmaxScaled, int(y_pup_center),
                                int(x_pup_center),numberOfPoints,searchPixels,0.5)


        # rescale the iris parameters (the method returns them in scaled-image coordinates)
        xIrisCenter = int(xIrisCenterScaled / scale)
        yIrisCenter = int(yIrisCenterScaled / scale)
        RIris = int(RIrisScaled / scale)

    elif(segMethod == "ActiveContours"):

        iris_left = -1
        iris_right = -1
        iris_up = -1
        iris_down = -1

        pupil_left = -1
        pupil_right = -1
        pupil_up = -1
        pupil_down = -1

    # Load the images
        img = imgEye.copy()

        if img.ndim == 3:
            img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            img_gray = img

        raw_img = imgEye.copy()
        raw_img_gray = img_gray.copy()
        # copies of the original image

        cx,cy,max_t = morphsnakes.findPupilBoundary_AverageDarkPix(img_gray)

        iris_bw = np.zeros(img_gray.shape,np.uint8)
        pupil_bw = np.zeros(img_gray.shape,np.uint8)

        iris_bw,iris_down, iris_up, iris_right,iris_left=morphsnakes.test_iris(img_gray,cx,cy,max_t)
        pupil_bw,pupil_down,pupil_up,pupil_left,pupil_right=morphsnakes.test_pupil(img_gray,cx,cy,max_t)

    # check whether the segmentation went well; if not, correct it manually

    # if it drifted too far left, shorten the left iris boundary by the difference between the two sides
        if (pupil_left - iris_left) > 1.2*(iris_right - pupil_right):
            iris_left = iris_left + int((pupil_left - iris_left)-(iris_right - pupil_right))
    # if it drifted too far right, shorten the right iris boundary by the difference between the two sides
        elif (iris_right - pupil_right) > 1.2*(pupil_left - iris_left):
            iris_right = iris_right - int((iris_right - pupil_right)-(pupil_left - iris_left))

    # candidate radii of the iris ellipse
        iris_radius = [0,0,0,0]
        iris_radius[0] = abs(iris_right-cx)
        iris_radius[1] = abs(cx-iris_left)
        iris_radius[2] = abs(cy-iris_up)
        iris_radius[3] = abs(iris_down - cy)

    # fit the iris ellipse
        mask = np.zeros(img_gray.shape,np.uint8)
        cv2.ellipse(mask,(cx,cy),(min(iris_radius[0],iris_radius[1]),max(iris_radius[2],iris_radius[3])),0,0,360,255,-1)

    # candidate radii of the pupil ellipse
        pupil_radius = [0,0,0,0]
        pupil_radius[0] = abs(pupil_right-cx)
        pupil_radius[1] = abs(cx-pupil_left)
        pupil_radius[2] = abs(cy-pupil_up)
        pupil_radius[3] = abs(pupil_down - cy)

    # fit the pupil ellipse
        pupil_mask = np.zeros(img_gray.shape,np.uint8)
        cv2.ellipse(pupil_mask,(cx,cy),(min(pupil_radius[0],pupil_radius[1]),max(pupil_radius[2],pupil_radius[3])),0,0,360,255,-1)
        pupil_mask=cv2.bitwise_not(pupil_mask)

    # combine the two masks
        mask=cv2.bitwise_and(mask,pupil_mask)
        #cv2.imshow("okno",mask)
        #cv2.waitKey(0)
    # final image
        seg = mask
        segPreview = cv2.bitwise_and(mask, raw_img_gray)

        xIrisCenter = cx
        yIrisCenter = cy
        RIris = int((max(iris_radius)+min(iris_radius))/2)
        center = (cx,cy)
        radius = min(pupil_radius)

    elif(segMethod == "Hough"):
        pupil_circle = hough.detect_inner_circle(imgEye)
        center = (pupil_circle[0],pupil_circle[1])
        radius = pupil_circle[2]

        iris_circle = hough.detect_outer_circle(imgEye,center,radius)
        yIrisCenter = iris_circle[1]
        xIrisCenter = iris_circle[0]
        RIris = iris_circle[2]


##################  NOISE REMOVAL  #########################

    # common (intersecting) points method
    if(noiseMethod == 'commonPoints'):
        commonPointsMask = lines.eyelidDetection(imgEyeGrey,xIrisCenter,yIrisCenter,RIris,100)

        if(segMethod == 'Daugman'or segMethod == 'Hough'):
            maska = imageMask(commonPointsMask,center,radius,xIrisCenter,yIrisCenter,RIris)
        if(segMethod == 'ActiveContours'):
            maska = cv2.bitwise_or(commonPointsMask,commonPointsMask,mask=seg)

    # eyelid approximation using parabolic functions
    elif(noiseMethod == 'parabolicApproximation'):
        # upper eyelid detection using a parabola
        pL, pR, iN,imgParaboleEdited = parabolas.generateEdgelsTop(imgEyeReadyFindPoints,xIrisCenter,yIrisCenter,RIris,imgEyeReadyIris)
        aTop, bTop, cTop = parabolas.parabolicCurveTop(imgParaboleEdited,yIrisCenter,xIrisCenter,RIris,pL,pR,iN)

        # lower eyelid detection using a parabola
        parabolaValues, iNd = parabolas.generateEdgelsBottom(imgParaboleBottom,xIrisCenter,yIrisCenter,RIris)
        aBot, bBot, cBot = parabolas.parabolicCurveBottom(imgParaboleBottom,yIrisCenter,xIrisCenter,RIris,parabolaValues,iNd)

        imgSrcParabole = parabolas.rysujParaboleMask(imgSrcParabole,aTop,bTop,cTop,aBot,bBot,cBot)

        if(segMethod == 'Daugman' or segMethod == 'Hough'):
            maska = imageMask(imgSrcParabole,center,radius,xIrisCenter,yIrisCenter,RIris)
        if(segMethod == 'ActiveContours'):
            maska = cv2.bitwise_or(imgSrcParabole,imgSrcParabole,mask=seg)

    # radial cutoff method using two circles
    elif(noiseMethod == 'radialCutoff'):
        wynik = cutoff.radialCutoff(imgEyeGrey,radius,RIris,center[0],center[1],60)

        if(segMethod == 'Daugman' or segMethod == 'Hough'):
            maska = imageMask(wynik,center,radius,xIrisCenter,yIrisCenter,RIris)
        if(segMethod == 'ActiveContours'):
            maska = cv2.bitwise_or(wynik,wynik,mask=seg)

    elif(noiseMethod == 'none'):
        rows, cols = imgSrc.shape[:2]
        emptyMask = np.full((rows, cols), 255, dtype=np.uint8)

        if(segMethod == 'Daugman' or segMethod == 'Hough'):
            maska = imageMask(emptyMask,center,radius,xIrisCenter,yIrisCenter,RIris)
        if(segMethod == 'ActiveContours'):
            maska = seg

    maskaPreview = cv2.bitwise_and(imgSrc, maska)

    irisCenterX = xIrisCenter
    irisCenterY = yIrisCenter
    irisR = RIris

    pupilCenterX = center[0]
    pupilCenterY = center[1]
    pupilR = radius

    return irisCenterX, irisCenterY, irisR, pupilCenterX, pupilCenterY, pupilR, maska, maskaPreview
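
# imageMask is an external helper that is not shown; a plausible sketch (an
# assumption): keep the iris disc, cut out the pupil disc, then AND with the
# noise mask.
def imageMask(noiseMask, pupilCenter, pupilR, xIrisCenter, yIrisCenter, RIris):
    mask = np.zeros(noiseMask.shape[:2], np.uint8)
    cv2.circle(mask, (xIrisCenter, yIrisCenter), RIris, 255, -1)  # iris disc
    cv2.circle(mask, (int(pupilCenter[0]), int(pupilCenter[1])), pupilR, 0, -1)  # remove the pupil
    return cv2.bitwise_and(mask, noiseMask)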
Example #59
 def apply(self,src,dst):
     """Apply filter with BRG or gray source/destination"""
     cv2.filter2D(src, -1, self._kernel, dst)
Example #60
def random_sharpening(img):
    if random.randint(0, 1):
        kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)
        img = cv2.filter2D(img, -1, kernel=kernel)
        img = np.clip(img, 0., 255.)
    return img
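
# Usage note (a sketch): np.clip with float bounds promotes the result to a
# float array, so cast back before display or saving, e.g.:
# out = random_sharpening(img).astype(np.uint8)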