Code Example #1
File: cartoon.py Project: yshao/cs6475
def cartoon(srcColor):
    srcGray=cv2.cvtColor(srcColor,cv2.COLOR_BGR2GRAY)
    print(srcGray.shape, srcColor.shape)
    cv2.medianBlur(srcGray,5,srcGray)

    mask=srcGray.copy().astype(np.uint8)
    edges=srcGray.copy().astype(np.uint8)

    ### sketch detection
    cv2.Laplacian(srcGray,cv2.CV_8U,edges,5)
    cv2.threshold(edges,60,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU,mask)
    outImg=srcColor.copy()
    tmp=outImg.copy()

    ### bilateral filtering ###
    rep=10
    for i in range(rep):
        size=9;sigmaColor=9;sigmaSpace=7

        cv2.bilateralFilter(outImg,size,sigmaColor,sigmaSpace,tmp)
        cv2.bilateralFilter(tmp,size,sigmaColor,sigmaSpace,outImg)

    output=cv2.bitwise_and(srcColor,srcColor,mask=mask)
    cv2.edgePreservingFilter(output,output)

    return output
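A quick driver for this example, as a sketch: it assumes the module-level imports the project relies on (cv2 and numpy) and uses a placeholder input path.

import cv2
import numpy as np

src = cv2.imread("input.png")  # placeholder path; any BGR image works
if src is not None:
    out = cartoon(src)
    cv2.imwrite("cartoon_out.png", out)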
Code Example #2
 def detectRover(self, argFrame):
     frame    = self.frame
     hsvFrame = self.frame
     thresh   = self.frame[:,:,0]
     rGreen = (38,67,155,198,0,255)
     rPink = (165,182,155,192,0,255)
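     # erodeElem and dilateElem are assumed to be structuring elements
     # (e.g. from cv2.getStructuringElement) defined elsewhere in the class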
     hsvFrame  = cv2.cvtColor(self.frame.copy(), cv2.COLOR_BGR2HSV)
     thresh = cv2.inRange(hsvFrame.copy(),np.array([rGreen[0],rGreen[2],rGreen[4]]),np.array([rGreen[1],rGreen[3],rGreen[5]]))
     thresh = cv2.medianBlur(thresh.copy(),5)
     thresh = cv2.erode(thresh.copy(), erodeElem)
     #thresh = cv2.erode(thresh.copy(), erodeElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
     if len(contours) != 1:
         return -1
     (x,y,w,h) = cv2.boundingRect(contours[0])
     greenPt = (int((x+x+w)/2),int((y+y+h)/2))
     thresh = cv2.inRange(hsvFrame.copy(),np.array([rPink[0],rPink[2],rPink[4]]),np.array([rPink[1],rPink[3],rPink[5]]))
     thresh = cv2.medianBlur(thresh.copy(),5)
     thresh = cv2.erode(thresh.copy(), erodeElem)
     #thresh = cv2.erode(thresh.copy(), erodeElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
     if len(contours) != 1:
         return -1
     (x,y,w,h) = cv2.boundingRect(contours[0])
     pinkPt = (int((x+x+w)/2),int((y+y+h)/2))
     self.roverPos = (int((greenPt[0]+pinkPt[0])/2),int((greenPt[1]+pinkPt[1])/2))
     angle = getAngle(pinkPt[0],pinkPt[1],greenPt[0],greenPt[1])
     self.roverHeading = 360+angle[2]*-1
     return greenPt, pinkPt
Code Example #3
File: dreamer.py Project: chipgarner/CamDreams
 def input_filter(img):
     img = cv2.medianBlur(img, 3)
     img = cv2.medianBlur(img, 3)
     img = cv2.medianBlur(img, 3)
     img = cv2.medianBlur(img, 5)
     img = cv2.bilateralFilter(img, 20, 50, 10)
     return img
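This example stacks several small median filters rather than one large pass; each small pass is cheap and together they smooth progressively. A rough side-by-side sketch (the path and kernel sizes are illustrative, not from the project):

import cv2

img = cv2.imread("noisy.png")         # placeholder path
single = cv2.medianBlur(img, 7)       # one larger kernel
multi = img
for _ in range(3):
    multi = cv2.medianBlur(multi, 3)  # several small kernels
cv2.imwrite("median_single.png", single)
cv2.imwrite("median_multi.png", multi)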
Code Example #4
    def smooth(self,image):
        l,a,b = cv2.split(cv2.cvtColor(image, cv2.COLOR_BGR2LAB))

        a_new = cv2.medianBlur(a, 15)
        b_new = cv2.medianBlur(b, 15)

        return cv2.cvtColor(cv2.merge((l, a_new, b_new)), cv2.COLOR_LAB2BGR)
Code Example #5
File: analysis.py Project: ivars-silamikelis/retino
def get_vessels(img,side):
#convert to grayscale
    #img_gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #equilized = cv2.equalizeHist(img_gray)
    green_channel = img[:,:,1]
    threshold =np.max(green_channel)*0.9
    
    #crop image
    gch_crop1=green_channel[:, (green_channel != 0).sum(axis=0) != 0]
    gch_crop2=gch_crop1[(gch_crop1 != 0).sum(axis=1) != 0,:]
    green_channel=gch_crop2
    
    #rotate by optical disc
    dummy,gch_bin = cv2.threshold(green_channel, threshold,255 ,cv2.THRESH_BINARY)
    i,j = np.unravel_index(gch_bin.argmax(), gch_bin.shape)
    if ((gch_bin.shape[1]/2 < j) and side=='left') or ((gch_bin.shape[1]/2 > j) and side=='right'):
        green_channel=np.rot90(green_channel,2)
#25 x 25 median filter
    gch_mf = cv2.medianBlur(green_channel,35)
#gch_nl = cv2.fastNlMeansDenoising(green_channel,h=10)
    gch_norm = green_channel - gch_mf

    gch_norm_norm = cv2.medianBlur(gch_norm,35)
#convert to binary image with a Gaussian-weighted adaptive threshold
    gch_norm_bin = cv2.adaptiveThreshold(gch_norm, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                         cv2.THRESH_BINARY, 11, 2)


    gch_norm_bin_norm = cv2.medianBlur(gch_norm_bin,35)
    return gch_norm_bin_norm
Code Example #6
def medianFilter(path, i=0):
        import cv2
        import os
        spath = path+'Batch260615-001-#17-100FPS-50mlPmin-2-%04d.png'  %i
#        spath = path+'Check\\imgSubbed-%d.jpg'  %i
        if os.path.exists(spath):   
            print("File found!")
        else:
            print("File not found!")
            print(os.listdir(path))
        img = cv2.imread(spath,0)
        im1 =img.copy()
        im2 = img.copy()
        
        for ii in range(40):
            im1 = cv2.medianBlur(im1, ksize=5)
            if ii%2 == 0:
                im2 = cv2.medianBlur(im2, ksize=1+ii)
        cv2.imshow('im1', im1)
        cv2.imshow('im2', im2)
#        cv2.waitKey(5)
        


#        img = cv2.imread('die.png')
        print('Denoising')
        dst = cv2.fastNlMeansDenoising(img,None,10,7,21)
        cv2.imshow('img', img)
        cv2.imshow('dst', dst)
        cv2.imwrite(path + 'img.jpg', img)
        cv2.imwrite(path + 'dst.jpg', dst)
        cv2.waitKey(5)
        print('finished')
Code Example #7
File: Util.py Project: Daguerreo/ppd
    def getMaskHSV(self, frame, background, channel='s'):
        if channel == 'h':
            c = 0
        elif channel == 's':
            c = 1
        elif channel == 'v':
            c = 2
        else:
            c = 0
            print('warning: wrong channel on getMaskHSV')

        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        background = cv2.cvtColor(background, cv2.COLOR_BGR2HSV)
        ch = frame[:,:,c]
        bg = background[:,:,c]

        mask = cv2.absdiff(ch,bg)
        mask = cv2.threshold(mask,30,255,cv2.THRESH_BINARY)[1]
        mask = cv2.medianBlur(mask, 7)

        element = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
        mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, element)
        element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(13,13))
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, element)
        mask = cv2.medianBlur(mask, 9)

        return mask
Code Example #8
File: filter.py Project: dhuadaar/PicFilter
	def render(self,frame):
		canvas = cv2.imread("pen.jpg", cv2.IMREAD_GRAYSCALE)
		numDownSamples = 2
		img_rgb = frame
		# number of downscaling steps
		numBilateralFilters = 3
		# number of bilateral filtering steps
		# -- STEP 1 --
		# downsample image using Gaussian pyramid
		img_color = img_rgb
		for _ in range(numDownSamples):
			img_color = cv2.pyrDown(img_color)
		# repeatedly apply small bilateral filter instead of applying
		# one large filter
		for _ in range(numBilateralFilters):
			img_color = cv2.bilateralFilter(img_color, 9, 9, 3)

		# upsample image to original size
		for _ in range(numDownSamples):
			img_color = cv2.pyrUp(img_color)
		# convert to grayscale and apply median blur
		img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
		img_blur = cv2.medianBlur(img_gray, 3)

		# detect and enhance edges
		img_edge = cv2.adaptiveThreshold(img_blur, 255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 9, 2)
		return  cv2.multiply(cv2.medianBlur(img_edge,7), canvas, scale=1./256)
Code Example #9
File: EyeCanSee.py Project: kendricktan/cv-lane
    def filter_smooth_thres(self, RANGE, color):
        for (lower, upper) in RANGE:
            lower = np.array(lower, dtype='uint8')
            upper = np.array(upper, dtype='uint8')

            mask_bottom = cv2.inRange(self.img_roi_bottom_hsv, lower, upper)
            mask_top = cv2.inRange(self.img_roi_top_hsv, lower, upper)

        blurred_bottom = cv2.medianBlur(mask_bottom, 5)
        blurred_top = cv2.medianBlur(mask_top, 5)

        # Morphological transformation
        kernel = np.ones((2, 2), np.uint8)
        smoothen_bottom = blurred_bottom #cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel, iterations=5)
        smoothen_top = blurred_top  # cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel, iterations=5)

        """
        if self.debug:
            cv2.imshow('mask bottom ' + color, mask_bottom)
            cv2.imshow('blurred bottom' + color, blurred_bottom)

            cv2.imshow('mask top ' + color, mask_top)
            cv2.imshow('blurred top' + color, blurred_top)
        """

        return smoothen_bottom, smoothen_top
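Note that when RANGE contains more than one (lower, upper) pair, the loop above overwrites mask_bottom and mask_top on every pass, so only the last range survives. If the intent were to union several ranges, a hedged variant could OR the masks together:

import cv2
import numpy as np

def union_of_ranges(img_hsv, ranges):
    # Hypothetical helper, not part of the project: accumulate every range.
    total = np.zeros(img_hsv.shape[:2], dtype='uint8')
    for lower, upper in ranges:
        mask = cv2.inRange(img_hsv, np.array(lower, dtype='uint8'),
                           np.array(upper, dtype='uint8'))
        total = cv2.bitwise_or(total, mask)
    return cv2.medianBlur(total, 5)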
Code Example #10
File: gui2.py Project: saminaji/CellMigration
def basic_seg(img, images):
    noOfFrames = len(images)
    bgFrame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    for i in range(1, 4):
        bgFrame = bgFrame / 2 + \
                  cv2.cvtColor((images[i]),
                               cv2.COLOR_BGR2GRAY) / 2

    # Array to save the object locations
    objLocs = np.array([None, None])

    # Kernel for morphological operations
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

    # Display the frames like a video
    # Read each frame
    frame = images[1]

    # Perform background subtraction after median filter
    diffFrame = cv2.absdiff(cv2.cvtColor(cv2.medianBlur(frame, 7), \
                                         cv2.COLOR_BGR2GRAY), cv2.medianBlur(bgFrame, 7))

    # Otsu thresholding to create the binary image
    [th, bwFrame] = cv2.threshold(diffFrame, 0, 255, cv2.THRESH_OTSU)

    # Morphological opening operation to remove small blobs
    bwFrame = cv2.morphologyEx(bwFrame, cv2.MORPH_OPEN, kernel)

    return bwFrame
Code Example #11
File: detect_utils.py Project: yxlao/ball-robot
def hsv_to_im_mask(im_hsv, hsv_lows, hsv_highs, is_bucket=False, is_arm=False):
    if is_bucket:
        # mask by threshold
        im_mask = cv2.inRange(im_hsv, hsv_lows, hsv_highs)
        im_mask = cv2.medianBlur(im_mask, 7)
        # erode
        im_mask = cv2.erode(im_mask, None, iterations=2)
        # dilate
        im_mask = cv2.dilate(im_mask, None, iterations=3)
    elif is_arm:
        # mask by threshold
        im_mask = cv2.inRange(im_hsv, hsv_lows, hsv_highs)
        im_mask = cv2.medianBlur(im_mask, 9)
        # erode
        # im_mask = cv2.erode(im_mask, None, iterations=2)
        # dilate
        im_mask = cv2.dilate(im_mask, None, iterations=3)
    else:
        # mask by threshold
        im_mask = cv2.inRange(im_hsv, hsv_lows, hsv_highs)
        im_mask = cv2.medianBlur(im_mask, 5)
        # erode
        # im_mask = cv2.erode(im_mask, None, iterations=2)
        # dilate
        im_mask = cv2.dilate(im_mask, None, iterations=3)
    return im_mask
Code Example #12
def extractFromVideo(inPath,out_paths=['train.txt','test.txt','val.txt'],MIN_FRAMES = 10,DO_RESIZE=True,new_sz = (40,80)):
	#Default values
	MAX_FRAMES = 100
	ALPHA = 0.25
	
	print "Starting background subtraction and object tracking........... %s"%inPath
	
	cap = cv2.VideoCapture(inPath)
	ret, prev_frame = cap.read()	

	w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)); h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
	w_crop = 80; h_crop = 160
	
	def cap_read():
		if cap.isOpened():
			ret, frame = cap.read()
			if ret:
				return frame
		return None
		
	bgsubImpl = bgsub.get_instance(bgsub.BGMethod.EIGEN_SUBSTRACTION,cap_read)
	bgsubImpl.setShape((h,w))
	
	N = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
	
	prv_mean = None; prev_img=None; i=0;
	frames =[];
	while(True):
		mask_frame =  bgsubImpl.process()
		mask_frame = cv2.medianBlur(mask_frame,5)
		mask_frame = cv2.medianBlur(mask_frame,3)
		print('Processing ... {0}%\r'.format(i * 100 // N), end='')
		if bgsubImpl.isFinish():
			break	
					
		# labelling
		#img = np.zeros((h,w,3), np.uint8)	# Create a black image
		img = bgsubImpl.cur_frame
		
		points =  np.column_stack(np.where(mask_frame==1))
		if points.shape[0] > 0:
			mean = points.mean(axis=0)
			if not prv_mean is None:
				mean = ALPHA*mean + (1-ALPHA)*prv_mean
			(y,x)=np.int32(mean);
			if x-w_crop//2 > 0 and x+w_crop//2 < w and y-h_crop//2 > 0 and y+h_crop//2 < h:
				img = img[y-h_crop//2:y+h_crop//2, x-w_crop//2:x+w_crop//2]
				if DO_RESIZE:
					i=i+1;
					img = cv2.resize(img,new_sz);
					img  = np.asarray(img,dtype='float64')/256;
					frames.append(img.flatten());
			prv_mean = np.int32(mean);
			
		if i % MAX_FRAMES == 0:
			write(frames,out_paths,min_frames=MIN_FRAMES)
	cap.release();
	
	if len(frames) > MIN_FRAMES*2:
		write(frames,out_paths,min_frames=MIN_FRAMES)
Code Example #13
def read_images(fpath):
    lines = utils.read_image_list(fpath)

    logger.info('loading data: {}'.format(fpath))
    X_data, y_data = [], []
    for inst_path, truth_path in lines:
        inst, truth = [cv2.imread(p, cv2.IMREAD_GRAYSCALE)
                for p in (inst_path, truth_path)]
        assert inst is not None and truth is not None, (inst_path, truth_path)

        pad_h, pad_w = [x // 2 for x in MODEL_INPUT_SHAPE]
        padded = cv2.copyMakeBorder(inst, pad_h, pad_h, pad_w, pad_w,
                               cv2.BORDER_REFLECT)

        m7  = cv2.medianBlur(padded, 7)
        m15 = cv2.medianBlur(padded, 15)

        c7 = 255 - cv2.subtract(m7, padded)
        c15 = 255 - cv2.subtract(m15, padded)

        # (c, h, w) layout
        input = np.array((padded, m7, m15, c7, c15))
        truth = truth.reshape((1,) + truth.shape)

        # pad input image
        X_data.append(input)
        y_data.append(truth)

    return X_data, y_data
Code Example #14
    def GetEdges(self,imgs):
        '''
            Get the edges of the input img(s) and return them.
            args : imgs   -> an image, or a list of images
            dst  : edges  -> an image (or list) containing the edges of the input
            param: threshold1 -> lower hysteresis threshold for Canny
                   threshold2 -> upper hysteresis threshold for Canny
                   apertureSize -> aperture size of the Sobel operator used by Canny
        '''
        # img or tmp?
        if len(imgs) == len(self.scale):
            
            '''processing for tmp'''
            for i in range(len(self.scale)):
                # imgs[i] = cv2.GaussianBlur(imgs[i], (9,9), 2**1)
                imgs[i] = cv2.medianBlur(imgs[i],9)
                imgs[i] = cv2.filter2D(imgs[i], cv2.CV_8U, self.sharpenKernel)
                imgs[i] = cv2.Canny(imgs[i], threshold1= 90, threshold2= 200,apertureSize = 3)

        else:

            '''processing for img'''
            # imgs = cv2.GaussianBlur(imgs, (9,9), 2**1)
            imgs = cv2.medianBlur(imgs,9)
            imgs = cv2.filter2D(imgs, cv2.CV_8U, self.sharpenKernel)
            imgs = cv2.Canny(imgs, threshold1= 90, threshold2= 200,apertureSize = 3)

        edges = imgs
        return edges
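A self-contained sketch of the same median-blur / sharpen / Canny chain; the sharpening kernel and input path are assumptions standing in for the class's self.sharpenKernel and its inputs:

import cv2
import numpy as np

sharpen_kernel = np.array([[ 0, -1,  0],
                           [-1,  5, -1],
                           [ 0, -1,  0]])  # assumed sharpening kernel

img = cv2.imread("sample.png", cv2.IMREAD_GRAYSCALE)  # placeholder path
img = cv2.medianBlur(img, 9)
img = cv2.filter2D(img, cv2.CV_8U, sharpen_kernel)
edges = cv2.Canny(img, threshold1=90, threshold2=200, apertureSize=3)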
Code Example #15
    def update_disparity_map(self):
        """
        Update disparity map in GUI.

        The disparity image is normalized to the range 0-255 and then divided by
        255, because OpenCV multiplies it by 255 when displaying. This is
        because the pixels are stored as floating points.
        """
        #key = ord('a')
        #while not key == ord('n'): 
        print "Getting disparity map"
        disparity = self.block_matcher.get_disparity(self.pair)
        print "Got disparity map"
        norm_coeff = 255 / disparity.max()
        new_disp = cv2.resize(disparity,None,fx=0.6, fy=0.6, interpolation = cv2.INTER_CUBIC)

        print "Bluring disparity map"
        disp = (new_disp * norm_coeff / 255)
        filter_disp = cv2.medianBlur(disp, 5)      
        filter_disp = cv2.medianBlur(filter_disp, 5)      

        self.cvImage = filter_disp

        cv2.imshow(self.window_name, self.cvImage)
        print "repainted image"
Code Example #16
File: stats.py Project: lpigou/chalearn2014
def proc_user(user):
    user[user==1]=255
    for i,u in enumerate(user):
        u = cv2.medianBlur(u, 3)
        user[i] = u

    user = user.swapaxes(0,1)
    for i,u in enumerate(user):
        u = cv2.medianBlur(u, 9)
        user[i] = u
    user = user.swapaxes(0,1)

    #---------------------CONTOUR--------------------------------------------
    # for i,u in enumerate(user):
    #     # u = cv2.medianBlur(u, 3)
    #     contours, hierarchy = cv2.findContours(u.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    #     im = zeros(u.shape)
    #     # cnt = contours[4]
    #     maxl = 0
    #     biggest_cnt = None
    #     for cnt in contours:
    #         if len(cnt) > maxl:
    #             maxl = len(cnt)
    #             biggest_cnt = cnt

    #     cv2.drawContours(im,[biggest_cnt],0,255,-1)

    #     user[i] = im
    #---------------------CONTOUR--------------------------------------------
    user[user>0] = 1
    return user
Code Example #17
 def getGestureRegion(self, frameNum):
     """ Get gesture region for the given frame """
     # get Depth frame
     depthData = self.getFrame(self.depth, frameNum)
     depthGray = cv2.cvtColor(depthData, cv2.COLOR_RGB2GRAY)
     
     # get user segmentation frame
     userSeg = self.getFrame(self.user, frameNum)
     userSegGray = cv2.cvtColor(userSeg, cv2.COLOR_RGB2GRAY)
     userSegGray = cv2.medianBlur(userSegGray, 5)    # Median filter on original user image
     
     # Convert user to binary image
     threshold = 128
     _, userBinImg = cv2.threshold(userSegGray, threshold, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
     
     depthGray[np.where(userBinImg == 0)] = 0
     depthGray = cv2.medianBlur(depthGray, 5)
     
     depthRealValue = depthGray.astype(np.float32) # depth value of real world (0-maxDepth)
     # Convert to depth values
     depthRealValue = depthRealValue / 255.0 * float(self.data['maxDepth'])
     depthRealValue = depthRealValue.round()
     depthRealValue = depthRealValue.astype(np.uint16)
     
     # scale depthGray to 0-255
     depthGray = depthGray.astype(np.uint16)
     depthGray = bytescale(depthGray)
     depthImgValue = np.copy(depthGray)  
 
     return depthImgValue, depthRealValue
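bytescale here comes from scipy.misc, which dropped the function in newer SciPy releases; a minimal stand-in that linearly rescales an array into the 0-255 range might look like this (an assumption, not the project's code):

import numpy as np

def bytescale(arr, low=0, high=255):
    # Linearly map [arr.min(), arr.max()] onto [low, high] as uint8.
    arr = arr.astype(np.float64)
    lo, hi = arr.min(), arr.max()
    if hi == lo:
        return np.full(arr.shape, low, dtype=np.uint8)
    return ((arr - lo) / (hi - lo) * (high - low) + low).astype(np.uint8)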
Code Example #18
File: filter.py Project: dhuadaar/PicFilter
	def render(self,frame):
		numDownSamples = 2
		img_rgb = frame
		# number of downscaling steps
		numBilateralFilters = 7
		# number of bilateral filtering steps
		# -- STEP 1 --
		# downsample image using Gaussian pyramid
		img_color = img_rgb
		for _ in range(numDownSamples):
			img_color = cv2.pyrDown(img_color)
		# repeatedly apply small bilateral filter instead of applying
		# one large filter
		for _ in range(numBilateralFilters):
			img_color = cv2.bilateralFilter(img_color, 9, 9, 7)

		# upsample image to original size
		for _ in range(numDownSamples):
			img_color = cv2.pyrUp(img_color)
		# convert to grayscale and apply median blur
		img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
		img_blur = cv2.medianBlur(img_gray, 7)

		# detect and enhance edges
		img_edge = cv2.adaptiveThreshold(img_blur, 255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 9, 2)
		# -- STEP 5 --
		# convert back to color so that it can be bit-ANDed with color image
		img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
		final = cv2.bitwise_and(img_color, img_edge)
		return cv2.medianBlur(final,7)
Code Example #19
  def track_ball(self,cv_image):
    # Image is a np array (not a ros msg)
    imgHSV = cv2.cvtColor(cv_image,cv2.COLOR_BGR2HSV)
    #cv2.imshow("image_view", imgHSV)

    imgHSV = cv2.medianBlur(imgHSV,3)
    #cv2.imshow("image_view_blur1", imgHSV)

    #hsv_min = np.array([150,100,70])
    #hsv_max = np.array([255,255,255])
    hsv_min = np.array([150,100,70])
    hsv_max = np.array([245,255,255])

    img_thr = cv2.inRange(imgHSV,hsv_min,hsv_max)
    #cv2.imshow("image_thr", img_thr)

    img_thr = cv2.medianBlur(img_thr,5)
    #cv2.imshow("image_thr_blur", img_thr)

    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()
     
    params.filterByColor = True
    params.blobColor = 255
     
    # Filter by Area.
    params.filterByArea = True
    params.minArea = 100
    params.maxArea = 300
     
    # Filter by Circularity
    params.filterByCircularity = True
    params.minCircularity = 0.5
     
    # Filter by Convexity
    params.filterByConvexity = True
    params.minConvexity = 0.87
     
    # Filter by Inertia
    params.filterByInertia = True
    params.minInertiaRatio = 0.5
     
    # Create a detector with the parameters
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3 :
        detector = cv2.SimpleBlobDetector(params)
    else :
        detector = cv2.SimpleBlobDetector_create(params)

    keypoints = detector.detect(img_thr)

    if keypoints:
        x = keypoints[0].pt[0]
        y = keypoints[0].pt[1]
        t = rospy.get_time()
        if x and y:
            self.pub.publish(x = x, y = y, t = t)

        im_with_keypoints = cv2.drawKeypoints(img_thr, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
Code Example #20
    def saveCroppedImage(self):
        im_color = None
        im_mask = None
        if self.new_color_image and self.new_mask_image:
            rospy.loginfo("Cropping image " + str(self.img_num))
            im_color = copy.deepcopy(self.current_color_image)
            im_mask = copy.deepcopy(self.current_mask_image)
            self.new_color_image = False
            self.new_mask_image = False

            # Gray scale please, findContours needs it like that
            mask_gray = cv2.cvtColor(im_mask, cv2.COLOR_BGR2GRAY)
            mask_gray = cv2.medianBlur(mask_gray, 7)
            # Get the contours
            contours, hierarchy = cv2.findContours(mask_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # Get the biggest one (noise is little ones)
            biggestcont = contours[0]
            for cont in contours:
                if len(biggestcont) < len(cont):
                    biggestcont = cont
            cnt = biggestcont
            # Get the rect
            x, y, w, h = cv2.boundingRect(cnt)
            # Crop stuff
            crop = im_color[y : y + h, x : x + w]
            # Save image
            if self.img_num > 0:  # First image is not useful, it's too dark
                rospy.loginfo(
                    "Saving image "
                    + str(self.img_num)
                    + " to "
                    + self.store_path
                    + "/"
                    + self.object_name
                    + "/"
                    + self.object_name
                    + "_image_"
                    + str(self.img_num).zfill(3)
                    + ".png"
                )
                cv2.imwrite(
                    self.store_path
                    + "/"
                    + self.object_name
                    + "/"
                    + self.object_name
                    + "_image_"
                    + str(self.img_num).zfill(3)
                    + ".png",
                    crop,
                )
            else:  # at least create the directory to store the images
                rospy.loginfo("Skipping first image as it's too dark. Creating directory to store images.")
                os.mkdir(self.store_path + "/" + self.object_name)
            self.img_num += 1

        # else:  # there are 2 calls, one won't do anything. Don't hit me
        #     rospy.loginfo("Fake call...")
        return
Code Example #21
File: __init__.py Project: sudhargk/video-annotator
	def __morph_ops__(self,mask):
		mask = cv2.medianBlur(np.uint8(mask),3)
		mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, KERNEL)
		mask = cv2.medianBlur(mask,3)
		mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE,KERNEL)
		mask = binary_fill_holes(mask)
		mask = remove_small_objects(mask,min_size=128,connectivity=2)
		return np.uint8(mask)
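This method leans on names defined elsewhere in the project (KERNEL, plus SciPy and scikit-image helpers). A self-contained sketch with the imports it needs and an assumed 3x3 kernel:

import cv2
import numpy as np
from scipy.ndimage import binary_fill_holes
from skimage.morphology import remove_small_objects

KERNEL = np.ones((3, 3), np.uint8)  # assumed; the project defines its own

def morph_ops(mask):
    mask = cv2.medianBlur(np.uint8(mask), 3)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, KERNEL)
    mask = cv2.medianBlur(mask, 3)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, KERNEL)
    mask = binary_fill_holes(mask)  # boolean array
    mask = remove_small_objects(mask, min_size=128, connectivity=2)
    return np.uint8(mask)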
Code Example #22
File: background.py Project: 4nc3str4l/VA_GiC
	def __segment(self, gray):
		# Substract BG to current frame
		mostChanges = np.abs(sum(self.W-self.BG))
		
		# Normalize max-min values
		mostChanges = (mostChanges.clip(0, max=1)*255).astype('uint8')
		self.__recognition = mostChanges.copy()

		# Apply a threshold
		_,mostChanges = cv2.threshold(mostChanges, 150, 255, cv2.THRESH_BINARY)
		mostChanges = cv2.medianBlur(mostChanges, 9)
		self.__mostChanges = mostChanges.copy()

		# Apply median filter and multiply with the current scene
		mask = cv2.medianBlur(mostChanges, 3)
		mask = np.multiply(gray, mask)
		mask = (mask*255).astype('uint8')

		# Apply distance filter to minimize possible collisions
		mask = cv2.distanceTransform(mask, cv2.DIST_L1, 3)
		# Normalize between 0 and 255
		mask = cv2.normalize(mask, mask, 0, 255, cv2.NORM_MINMAX).astype('uint8')
		# Reduce noise
		mask = cv2.GaussianBlur(mask, (3, 3), 0.5)
		# Apply threshold again to mask out real low values 
		# (Almost black zones, thus possible unions between objects)
		_,mask = cv2.threshold(mask, 10, 255, cv2.THRESH_BINARY)
		self.__mask = mask.copy()

		contours,_ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

		objs = []
		for c in contours:
			leftmost = c[:,:,0].min()
			rightmost = c[:,:,0].max()
			topmost = c[:,:,1].min()
			bottommost = c[:,:,1].max()

			area = ((rightmost - leftmost)*(bottommost-topmost))
			if area < self.min_area:
				continue

			objs.append((leftmost, rightmost, topmost, bottommost))

		found = 1
		while found > 0:
			found = 0

			for a in objs:
				for b in objs:
					if a != b:
						if self.__contains(a, b):
							objs.remove(b)
							found = 1
							break

		return objs
Code Example #23
File: ps3.py Project: RitterGT/ComputerVision
def step5_2():
    L = cv2.imread(os.path.join('input', 'pair2-L.png'))
    R = cv2.imread(os.path.join('input', 'pair2-R.png'))

    #median filter both
    L = cv2.medianBlur(L, 3)
    R = cv2.medianBlur(R, 3)

    apply_disparity_norm(L, R, '5-a-1-ncorr', 7)
Code Example #24
def blobSmoothing(immask):
    imfilter = cv2.medianBlur(immask,7)
    imfilter = cv2.medianBlur(imfilter,5)
    imfilter = cv2.medianBlur(imfilter,3)

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(5,5))

    imfilter = cv2.dilate(imfilter,0.5*kernel)
    imfilter = cv2.erode(imfilter,kernel)
    return imfilter
Code Example #25
def gen_multi_channel(padded):
    m7  = cv2.medianBlur(padded, 7)
    m15 = cv2.medianBlur(padded, 15)

    c7 = 255 - cv2.subtract(m7, padded)
    c15 = 255 - cv2.subtract(m15, padded)


    # (c, h, w) layout
    input = np.array((padded, m7, m15, c7, c15))
    return input
Code Example #26
File: imtools.py Project: elmonkey/Python_OpenCV
def subtract_bgn(im, bgn):
    """ Subtract the backgrounbd (bgn) from the image (im). Both have the same 
    dimensions (including number of channels)"""
    # do a simple pre-filtering
    bgn = cv2.medianBlur(bgn,3)
    im  = cv2.medianBlur(im,3)
    
    fgbg = cv2.BackgroundSubtractorMOG()
#     fgmask = fgbg.apply(bgn)
    fgmask = fgbg.apply(im)
    return fgmask
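cv2.BackgroundSubtractorMOG() is the OpenCV 2.4 constructor; in OpenCV 3 and later, MOG moved to the contrib module (cv2.bgsegm.createBackgroundSubtractorMOG) while MOG2 ships in the core. A hedged modern variant of the same idea:

import cv2

def subtract_bgn_v3(im, bgn):
    # Same pre-filtering as above, using the OpenCV 3+ factory API.
    bgn = cv2.medianBlur(bgn, 3)
    im = cv2.medianBlur(im, 3)
    fgbg = cv2.createBackgroundSubtractorMOG2()
    fgbg.apply(bgn)        # seed the model with the background frame
    return fgbg.apply(im)  # foreground mask for the image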
Code Example #27
def Split_Blur_Thresh(frame):
	
	b,g,r = cv2.split(frame)
	b = cv2.medianBlur(b,7)
	g = cv2.medianBlur(g,7)
	r = cv2.medianBlur(r,7)
	b = cv2.adaptiveThreshold(b,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
	g = cv2.adaptiveThreshold(g,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
	r = cv2.adaptiveThreshold(r,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
	
	return cv2.merge((b,g,r))
Code Example #28
def different_filters(im):
    
    im_median13 = cv2.medianBlur(im, 13)
    im_median15 = cv2.medianBlur(im, 15)
    
    images = [im, im_median13, im_median15]
    for image in images:
        plt.figure()
        output.plot_image(image)
    
    plt.figure()
    output.plot_several_image_histograms(images, ['original', 'median13', 'median15'])
Code Example #29
    def CleanImage(self, thresh=30):

        # Image processing

        footprint = np.array([[-1,-1,-1],[-1,8,-1], [-1,-1,-1]])
        self.clean_image = cv2.medianBlur(self.image, 5)
        self.clean_image = cv2.filter2D(self.clean_image,-1,footprint)
        self.clean_image = cv2.medianBlur(self.clean_image, 5)
        self.markers = np.zeros_like(self.image)
        self.markers[self.clean_image < threshold_otsu(self.image)] = 1
        self.markers[self.clean_image >= ((threshold_otsu(self.image)*thresh)/100)] = 2
        self.markers[self.clean_image >= ((threshold_otsu(self.image)*50)/100)] = 3
Code Example #30
def median_blur(frame, blocksize = 3, debug = False):
    """
    median blur with blocksize
    """
    blocksize = int((blocksize // 2) * 2 + 1)
    
    if debug:
        print "median_blur: blocksize: ", blocksize
    
    if blocksize in [3, 5]:
        return cv2.medianBlur(frame, ksize = blocksize)
    else:
        return cv2.medianBlur(convto8(frame), ksize = blocksize)
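cv2.medianBlur requires ksize to be an odd integer greater than 1, which is exactly what the (blocksize // 2) * 2 + 1 line enforces: even inputs are bumped to the next odd value, odd inputs pass through unchanged. A quick check of the rounding:

for blocksize in (2, 3, 4, 5):
    print(blocksize, "->", (blocksize // 2) * 2 + 1)  # 2 -> 3, 3 -> 3, 4 -> 5, 5 -> 5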
Code Example #31
plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(blur_2dconv), plt.title('Averaging')
plt.xticks([]), plt.yticks([])
plt.show()

# Image Blurring (Image Smoothing)
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_filtering/py_filtering.html#image-blurring-image-smoothing

# 1. Averaging
blur_ave = cv2.blur(img, (5, 5))

# 2. Gaussian Filtering
blur_gaus = cv2.GaussianBlur(img, (5, 5), 0)

# 3. Median Filtering (highly effective in removing salt-and-pepper noise)
blur_median = cv2.medianBlur(img, 5)

# 4. Bilateral Filtering (highly effective at noise removal while preserving edges)
blur_bilat = cv2.bilateralFilter(img, 9, 75, 75)

# Plot all these filterings
titles = [
    'Original Image', '2D Convolution', 'Averaging', 'Gaussian Filtering',
    'Median Filtering', 'Bilateral Filtering'
]
images = [img, blur_2dconv, blur_ave, blur_gaus, blur_median, blur_bilat]

for i in range(6):
    plt.subplot(2, 3, i + 1), plt.imshow(images[i])
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
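To see the salt-and-pepper claim from the comments concretely, a short sketch (noise level and path are illustrative): median filtering removes the outlier pixels outright, while a Gaussian blur of the same size only smears them.

import cv2
import numpy as np

img = cv2.imread("sample.png", cv2.IMREAD_GRAYSCALE)  # placeholder path
noisy = img.copy()
coords = np.random.rand(*img.shape)
noisy[coords < 0.02] = 0    # pepper
noisy[coords > 0.98] = 255  # salt

median = cv2.medianBlur(noisy, 5)              # outliers vanish
gaussian = cv2.GaussianBlur(noisy, (5, 5), 0)  # outliers get smeared instead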
Code Example #32
import cv2 as cv
import numpy as np

img = cv.imread('../Images & Videos/russian_license_plate.jfif')
img_grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
license_cascade = cv.CascadeClassifier(
    '../Images & Videos/russin_license_plate2.xml')
license_plate = license_cascade.detectMultiScale(img,
                                                 scaleFactor=1.3,
                                                 minNeighbors=2)
black_img = np.zeros((img.shape), dtype=np.uint8)
for (x, y, w, h) in license_plate:
    blur = cv.medianBlur(img[y:y + h, x:x + w], 15)
    img[y:y + h, x:x + w] = blur
cv.imshow('show', img)
cv.waitKey(0)
cv.destroyAllWindows()
Code Example #33
def median_blur_demo(image):
    #中值模糊
    dst = cv.medianBlur(image, 5)
    cv.imshow(" median_blur_demo", dst)
Code Example #34
File: test8.py Project: 907597029/back_up
def median_blur_demo(image):
    dst = cv.medianBlur(image, 9)
    cv.imshow("dog_m_b",dst)
    cv.imwrite("dog_m_b.jpg",dst)
Code Example #35
import cv2
import numpy as np

img = cv2.imread('images/lines_target.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.medianBlur(gray, 21)
edges = cv2.Canny(blur, 200, 400)

lines = cv2.HoughLines(edges, 1, np.pi / 180, 63)
for x in range(0, len(lines)):
    for rho, theta in lines[x]:

        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))

        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)

cv2.imshow('lab5-b', img)
cv2.waitKey(0)
Code Example #36
    def __call__(self, results):
        """Call functions to load image and get image meta information.

        Args:
            results (dict): Result dict from :obj:`mmseg.CustomDataset`.

        Returns:
            dict: The dict contains loaded image and meta information.
        """

        if results.get('img_prefix') is not None:
            filename = osp.join(results['img_prefix'],
                                results['img_info']['filename'])
        else:
            filename = results['img_info']['filename']
        hdr = dict()
        with open(filename) as f:
            for line in f.readlines():
                if '=' not in line:
                    continue
                else:
                    key, value = line.split('=')
                    key = key.strip()
                    value = value.strip()
                    hdr[key] = value
        assert hdr['file type'] == 'ENVI Standard', \
            'Require ENVI data: file type = ENVI Standard'
        assert hdr['byte order'] == '0', 'Require ENVI data: byte order = 0'
        assert hdr['x start'] == '0', 'Require ENVI data: x start = 0'
        assert hdr['y start'] == '0', 'Require ENVI data: y start = 0'
        assert hdr['interleave'].lower() == 'bsq', \
            'Require ENVI data: interleave = bsq'
        assert int(hdr['data type']) <= len(self.ENVI_data_type) and \
            self.ENVI_data_type[int(hdr['data type'])] is not None

        data_type = int(hdr['data type'])
        header_offset = int(hdr['header offset'])
        height = int(hdr['lines'])
        width = int(hdr['samples'])
        bands = int(hdr['bands'])
        if hdr['interleave'].lower() == 'bsq':
            img_bytes = np.fromfile(filename.replace('.hdr', '.raw'),
                                    dtype=self.ENVI_data_type[data_type],
                                    offset=header_offset)
            img_bytes = img_bytes.reshape((bands, height, width))
            img_bytes = img_bytes[self.channel_select, :, :]
            if self.dataset_name == 'cholangiocarcinoma':
                img_bytes = img_bytes[:, ::-1, :]
            img_bytes = np.transpose(img_bytes, (1, 2, 0))
        else:
            img_bytes = np.zeros((height, width, bands),
                                 dtype=self.ENVI_data_type[data_type])
            pass
        if self.to_float32:
            img_bytes = img_bytes.astype(np.float32)
            if self.normalization:

                img_bytes -= self.mean[..., self.channel_select]
                img_bytes /= self.std[..., self.channel_select]
                #############################################
                # img_bytes *= 16
                # img_bytes += 128
                # img_bytes = img_bytes.astype(np.uint8)
                # img_bytes = img_bytes.astype(np.float32)
                # img_bytes -= 128
                # img_bytes /= 16
                ##############################################
        if self.median_blur:
            for band in range(img_bytes.shape[2]):
                img_bytes[:, :, band] = cv2.medianBlur(img_bytes[:, :, band],
                                                       ksize=3)

        results['filename'] = filename.replace('.hdr', '.png')
        results['ori_filename'] = results['img_info']['filename'].replace(
            '.hdr', '.png')
        results['img'] = img_bytes
        results['img_shape'] = img_bytes.shape
        results['ori_shape'] = img_bytes.shape
        # Set initial values for default meta_keys
        results['pad_shape'] = img_bytes.shape
        results['scale_factor'] = 1.0
        results['channel_select'] = self.channel_select
        results['channel_to_show'] = self.channel_to_show
        num_channels = 1 if len(img_bytes.shape) < 3 else img_bytes.shape[2]
        mean = np.ones(num_channels, dtype=np.float32) * 128
        std = np.ones(num_channels, dtype=np.float32) * 16
        results['img_norm_cfg'] = dict(mean=mean, std=std, to_rgb=False)
        return results
Code Example #37
File: blurring.py Project: vqc/open_cv_3_learning
    cv2.blur(image, (7, 7)),
])

cv2.imshow("averaged", blurred)

blurred = np.hstack([
    cv2.GaussianBlur(image, (3, 3), 0),  # last val is std dev in x axis
    cv2.GaussianBlur(image, (5, 5), 0),  # when set to zero, it is
    cv2.GaussianBlur(image, (7, 7), 0),  # calculated for us
])

cv2.imshow("Gaussian", blurred)
# gaussian is more natural than averaged

blurred = np.hstack([
    cv2.medianBlur(image, 3),
    cv2.medianBlur(image, 5),
    cv2.medianBlur(image, 7),
])

cv2.imshow("Median", blurred)
# Median replaces the center with the median pixel from surroundings
# instead of a mean. The median is an actual pixel value taken from the
# surroundings.
# median is good for reducing detail noise.
# whereas the other blurs have more of a "motion blur"

blurred = np.hstack([
    cv2.bilateralFilter(image, 5, 21, 21),
    cv2.bilateralFilter(image, 7, 31, 31),
    cv2.bilateralFilter(image, 9, 41, 41),
])
Code Example #38
img_HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#skin color range for hsv color space
HSV_mask = cv2.inRange(img_HSV, (0, 15, 0), (17, 170, 255))
HSV_mask = cv2.morphologyEx(HSV_mask, cv2.MORPH_OPEN, np.ones((3, 3),
                                                              np.uint8))

#converting from BGR to YCbCr color space
img_YCrCb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
#skin color range for YCbCr color space
YCrCb_mask = cv2.inRange(img_YCrCb, (0, 135, 85), (255, 180, 135))
YCrCb_mask = cv2.morphologyEx(YCrCb_mask, cv2.MORPH_OPEN,
                              np.ones((3, 3), np.uint8))

#merge skin detection (YCbCr and hsv)
global_mask = cv2.bitwise_and(YCrCb_mask, HSV_mask)
global_mask = cv2.medianBlur(global_mask, 3)
global_mask = cv2.morphologyEx(global_mask, cv2.MORPH_OPEN,
                               np.ones((4, 4), np.uint8))

HSV_result = cv2.bitwise_not(HSV_mask)

YCrCb_result = cv2.bitwise_not(YCrCb_mask)

global_result = cv2.bitwise_not(global_mask)

cv2.imwrite("1_HSV.jpg", HSV_result)
cv2.imwrite("2_YCbCr.jpg", YCrCb_result)
cv2.imwrite("3_global_result.jpg", global_result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Code Example #39
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread('dave.jpg', 0)
img2 = cv2.medianBlur(img, 5)

img = img2

ret, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
th2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                            cv2.THRESH_BINARY, 11, 2)
th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                            cv2.THRESH_BINARY, 11, 2)

titles = [
    'Original Image', 'Global Thresholding (v = 127)',
    'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding'
]
images = [img, th1, th2, th3]

for i in range(4):
    plt.subplot(2, 2, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
plt.show()
Code Example #40
import numpy as np
import cv2 as cv
img = cv.imread('c://Intel//python//test1//blobs.jpg')
output = img.copy()
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
gray = cv.medianBlur(gray, 5)
circles = cv.HoughCircles(gray,
                          cv.HOUGH_GRADIENT,
                          1,
                          20,
                          param1=50,
                          param2=30,
                          minRadius=0,
                          maxRadius=0)
detected_circles = np.uint16(np.around(circles))
for (x, y, r) in detected_circles[0, :]:
    cv.circle(output, (x, y), r, (0, 0, 0), 3)
    cv.circle(output, (x, y), 2, (0, 255, 255), 3)

cv.imshow('output', output)
cv.waitKey(0)
cv.destroyAllWindows()
Code Example #41
import matplotlib.pyplot as plt
import cv2
import numpy as np
import Func

Func.create_canvas_matplotlib(500, 500)
image = cv2.imread('keyboard.jpg')

kernel_averaging_10_10 = np.ones((10, 10), np.float32) / 100
kernel_averaging_5_5 = np.array([[0.04, 0.04, 0.04, 0.04, 0.04],
                                 [0.04, 0.04, 0.04, 0.04, 0.04],
                                 [0.04, 0.04, 0.04, 0.04, 0.04],
                                 [0.04, 0.04, 0.04, 0.04, 0.04],
                                 [0.04, 0.04, 0.04, 0.04, 0.04]])
smooth_image_f2D_5_5 = cv2.filter2D(image, -1, kernel_averaging_5_5)
smooth_image_f2D_10_10 = cv2.filter2D(image, -1, kernel_averaging_10_10)
smooth_image_b = cv2.blur(image, (10, 10))
smooth_image_bfi = cv2.boxFilter(image, -1, (10, 10), normalize=True)
smooth_image_gb = cv2.GaussianBlur(image, (9, 9), 0)
smooth_image_mb = cv2.medianBlur(image, 9)

while True:
    smooth_image_bf = cv2.bilateralFilter(image, 5, 10, 10)

    image_stack = Func.stackImages(
        0.2, ([smooth_image_bf, smooth_image_f2D_5_5, smooth_image_gb],
              [smooth_image_gb, smooth_image_mb]))
    cv2.imshow("Stack", image_stack)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    break
Code Example #42
img = cv2.imread('images/image1.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
blur3 = cv2.GaussianBlur(hsv, (3, 3), 0)
blur5 = cv2.GaussianBlur(hsv, (5, 5), 0)
blur7 = cv2.GaussianBlur(hsv, (7, 7), 0)
height, width, channels = hsv.shape
mask = np.zeros_like(img)

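# gnb is assumed to be a pixel classifier trained elsewhere
# (e.g. an sklearn GaussianNB fitted on labeled HSV samples)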
for j in range(height):
    for i in range(width):
        pixel = [
            np.array([
                hsv.item(j, i, 0),
                hsv.item(j, i, 1),
                hsv.item(j, i, 2),
                blur3.item(j, i, 0),
                blur5.item(j, i, 0),
                blur7.item(j, i, 0)
            ],
                     dtype=np.float32)
        ]
        results = gnb.predict(pixel)
        if (results == 1):
            mask[j, i] = [0, 0, 255]

mask = cv2.medianBlur(mask, 5)
img = cv2.addWeighted(img, 0.8, mask, 0.5, 0.2)
cv2.imshow('result', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Code Example #43
import cv2

camera = cv2.VideoCapture(0)

prev_frame = None
while camera.isOpened():

    success, frame = camera.read()
    if not success:
        break

    frame = cv2.medianBlur(frame, 5)
    if prev_frame is not None:
        mask = cv2.absdiff(frame, prev_frame)
        _, mask = cv2.threshold(mask, 50, 255, cv2.THRESH_BINARY)
        cv2.imshow("mask", mask)
    else:
        prev_frame = frame

    cv2.imshow("prev", prev_frame)
    cv2.imshow("frame", frame)
    key_code = cv2.waitKey(1)

    if key_code in [ord('q'), 27]:
        break

camera.release()
cv2.destroyAllWindows()
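As written, prev_frame is captured once and never refreshed, so every frame is differenced against the first frame rather than the previous one. If frame-to-frame differencing is the intent, a sketch of the loop body with the refresh added (an assumption about intent, not a fix from the original author):

    frame = cv2.medianBlur(frame, 5)
    if prev_frame is not None:
        mask = cv2.absdiff(frame, prev_frame)
        _, mask = cv2.threshold(mask, 50, 255, cv2.THRESH_BINARY)
        cv2.imshow("mask", mask)
    prev_frame = frame  # refresh so the next pass diffs against this frame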
Code Example #44
File: Label_Split.py Project: ThomasJames/CNN_for_EO
        split_image(dim_pix=244, im=WI_Hflip, location=region,
                    dtype=f"Mask", filename=f"Region_{region_number}_{Hflip}")
        split_image(dim_pix=244, im=TC_Hflip, location=region, dtype=f"TC",
                    filename=f"Region_{region_number}_{Hflip}")

        # Vertical Flip
        TC_Vflip = np.flip(TC, 0)
        WI_Vflip = np.flip(UWI, 0)
        Vflip = "Vflip"
        split_image(dim_pix=244, im=WI_Vflip, location=region,
                    dtype=f"Mask", filename=f"Region_{region_number}_{Vflip}")
        split_image(dim_pix=244, im=TC_Vflip, location=region, dtype=f"TC",
                    filename=f"Region_{region_number}_{Vflip}")

        # Blur filter
        TC_Blur = cv2.medianBlur(TC, 5)
        Blur = "Blur"
        split_image(dim_pix=244, im=UWI, location=region, dtype=f"Mask",
                    filename=f"Region_{region}_{Blur}")
        split_image(dim_pix=244, im=TC_Blur, location=region, dtype=f"TC",
                    filename=f"Region_{region}_{Blur}")

        # Noise Filter
        noise = sp_noise(TC, 0.05)
        TC_noise = noise + TC
        Noise = "Noise"
        split_image(dim_pix=244, im=UWI, location=region, dtype=f"Mask",
                    filename=f"Region_{region_number}_{Noise}")
        split_image(dim_pix=244, im=TC_noise, location=region, dtype=f"TC",
                    filename=f"Region_{region_number}_{Noise}")
    else:
Code Example #45
File: defect_seperate_1.py Project: ty9071/suzly2
        def zangwu_detect(self, mesh_bgr, model_type, name=None):
            zangwu_dict = {
                'defect': DEFECTS.LIANGPIN,
                'defect_pts_list': [],  # list of pts; each element holds the pts of one defect group
            }
            if self.redis_db.get('cam1_zangwu_flag') == 0:
                if model_type != self.current_model:
                    self.current_model = model_type
                    self.mesh_setting = self.mesh_settings[model_type]
                    self.liner_setting = self.liner_settings[model_type]

                # gray = cv2.cvtColor(mesh_bgr, cv2.COLOR_BGR2GRAY)
                gray = mesh_bgr

                # t_start = time.time()
                sh, sw = gray.shape[:2]
                # img_gray = gray[55:sh - 50, 55:sw - 50]
                img_gray = gray[85:sh - 80, 85:sw - 80]

                # # box (mean) filtering
                img_blur = cv2.blur(img_gray, (39, 39))
                img_gray = cv2.blur(img_gray, (3, 3))

                img_diff1 = cv2.subtract(img_gray, img_blur)
                img_diff2 = cv2.subtract(img_blur, img_gray)
                ret, img_binary1 = cv2.threshold(img_diff1, 30, 255, cv2.THRESH_BINARY)
                ret, img_binary2 = cv2.threshold(img_diff2, 15, 255, cv2.THRESH_BINARY)
                img_binary2 = cv2.bitwise_or(img_binary1, img_binary2)

                sh, sw = img_binary1.shape[:2]

                # for images shot under coaxial light, median-filter the binary image again to remove salt-and-pepper noise
                img_binary_blur = cv2.medianBlur(img_binary2, 5)
                # mask out the interference regions in the four corners
                # sh, sw = img_binary_blur.shape[:2]
                img_binary_blur[sh - 123:sh, 0:134] = 0
                img_binary_blur[0:130, sw - 125:sw] = 0
                img_binary_blur[0:90, 0:90] = 0
                img_binary_blur[sh - 90:sh, sw - 90:sw] = 0
                kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (6, 6))
                img_binary_blur = cv2.morphologyEx(img_binary_blur, cv2.MORPH_CLOSE, kernel)

                # filter by connected-component area
                mask = np.zeros(img_binary2.shape)
                cnts, hierarchy = cv2.findContours(img_binary_blur, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
                for cnt in cnts:
                    cv2.drawContours(mask, [cnt], -1, (255, 255, 255), -1)
                if len(cnts) > 0:
                    cnt_max = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
                    num_p_max_zangwu = cv2.contourArea(cnt_max)
                else:
                    num_p_max_zangwu = 0

                num_p_all_zangwu = np.count_nonzero(mask)

                # print("num_p_all_zangwu:", num_p_all_zangwu, "num_p_max_zangwu:", num_p_max_zangwu, "num_p_all_moque:",
                #       num_p_all_moque)
                # origin 3500/350
                if (num_p_all_zangwu > 13500) or (num_p_max_zangwu > 2080):
                    # zangwu_judge = True
                    zangwu_dict['defect'] = DEFECTS.ZANGWU
                else:
                    # zangwu_judge = False
                    zangwu_dict['defect'] = DEFECTS.LIANGPIN
                # print("zangwu_judge:", zangwu_judge)
                # print("time of total:", time.time() - t_start)
                # t_start = time.time()

                # cv2.namedWindow('img_gray', cv2.WINDOW_NORMAL)
                # cv2.resizeWindow('img_gray', 400, 400)
                # cv2.imshow("img_gray", img_gray)
                # cv2.namedWindow('mask', cv2.WINDOW_NORMAL)
                # cv2.resizeWindow('mask', 400, 400)
                # cv2.imshow("mask", mask)
                # cv2.namedWindow('img_binary_blur', cv2.WINDOW_NORMAL)
                # cv2.resizeWindow('img_binary_blur', 400, 400)
                # cv2.imshow("img_binary_blur", img_binary_blur)
                # # cv2.imwrite("./img_gray.png", img_gray)
                # cv2.waitKey()
            return zangwu_dict
Code Example #46
        #w=w+tempw+dis
        res.append([x,y,w,h])
        count+=2
    else:
        res.append(temp[count])
        count+=1    
output=[]   
for i in range(len(temp)):
    x,y,w,h=temp[i]
    num=image[y-5:y+h+5,x-5:x+w+5]
    #kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    #num=cv2.morphologyEx(num,cv2.MORPH_OPEN,kernel)
    num=cv2.cvtColor(num,cv2.COLOR_BGR2GRAY)
    #num=cv2.GaussianBlur(num,(5,5),1)
    ret,th=cv2.threshold(num,220,255,cv2.THRESH_BINARY)#+cv2.THRESH_OTSU)
    th=cv2.medianBlur(th,3)

    output.append(th)
for i in range(len(output)):
    plt.figure(i+1)
    plt.imsave('C:/Users/gd/Desktop/redDigits/train/'+str(i)+'.jpg',output[i])
    #plt.imsave('C:/Users/gd/Desktop/1'+str(i)+'.jpg',mask)

#plt.imsave('C:/Users/gd/Desktop/1.jpg',mask)

#ret,th=cv2.threshold(gray,130,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
#hsv=cv2.cvtColor(image,cv2.COLOR_BGR2HSV)

#redLower=np.array([0,10,100])
#redHigher=np.array([180,160,245])
#mask=cv2.inRange(hsv,redLower,redHigher)
Code Example #47
out = cv2.VideoWriter(sys.argv[2], fourcc, 30.0, (vidWidth, vidHeight), True)

while (cap.isOpened()):
    ret, frame = cap.read()
    if ret == True:

        #       ORIGINAL GREEN
        #        lw = np.array([65,60,60])
        #        up = np.array([80,255,255])

        lw = np.array([40, 30, 60])  #40 30 60
        up = np.array([80, 255, 255])

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lw, up)
        mask_blur = cv2.medianBlur(mask, 5)
        mask_blur = cv2.cvtColor(mask_blur, cv2.COLOR_GRAY2RGB)
        #        mask_inv = cv2.bitwise_not(mask_blur)

        out.write(mask_blur)

        cv2.imshow('res', mask_blur)
        if cv2.waitKey(1) & 0XFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
Code Example #48
def gauss_process(image):
    img = cv2.medianBlur(image, 5)
    th_gauss = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                     cv2.THRESH_BINARY, 11, 2)
    cv2.fastNlMeansDenoising(th_gauss, th_gauss, 4)
    view_image(th_gauss, 'gauss image')
Code Example #49
import cv2
import numpy as np


# gray = cv2.imread('numbers_A4.png', cv2.IMREAD_GRAYSCALE)
gray = cv2.imread('numbers_gray_erode.png', cv2.IMREAD_GRAYSCALE)


gray = cv2.medianBlur(gray,3)


numbers_bin = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2)
cv2.imwrite('numbers_bin_gaussian.png', numbers_bin)

numbers_bin = cv2.medianBlur(numbers_bin,3)
cv2.imwrite('numbers_bin_gaussian_filter_by_meidan.png', numbers_bin)
Code Example #50
            img_tensor = img_transform(img2)

            # predict
            with torch.no_grad():
                img2 = img_tensor.unsqueeze(0).cuda().cpu()
                pred = net(img2)

            pred = pred.cpu().numpy().squeeze()
            pred = np.argmax(pred, axis=0)

            colorized = args.dataset_cls.colorize_mask(pred)
            img = np.array(colorized.convert('RGB'))
            kernel = np.ones((15, 15), np.uint8)
            img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)

            median = cv2.medianBlur(img, 17)

            #write data to disk for training
            writeTrainData(steeringValue, throttleValue, median,
                           int(framecount / 5))

            #endTime = datetime.datetime.now()

            #elapsedTime = endTime - beginTime
            #if(elapsedTime.microseconds > 0.0):
            #    fps = round(1 / (elapsedTime.microseconds * 10**-6),2)
            #cv2.putText(img,"fps: " + str(fps),(10,90), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),4,cv2.LINE_AA)

            #print(str(steeringValue) + ", " + str(throttleValue))
            median = draw_user_angle(steeringValue, throttleValue, median)
Code Example #51
File: image_edge2.py Project: jay9z/CV_python
    mask = skip_margin(mask)
    #plt.imshow(mask)
    return mask


img = cv2.imread('./Data and Image/Image_Question4.bmp')
#newimg = img
newimg = img[480:1450, 150:1200]
cv2.imwrite('image_roi.bmp', newimg)

gray = cv2.cvtColor(newimg, cv2.COLOR_BGR2GRAY)
color = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
#cv2.imshow("color",color)

#define ROI
gray = cv2.medianBlur(gray, 3)
#cv2.imshow("gray",gray)
circles1 = cv2.HoughCircles(gray,
                            cv2.HOUGH_GRADIENT,
                            2,
                            100,
                            param1=160,
                            param2=80,
                            minRadius=65,
                            maxRadius=75)
for circle in circles1[0]:
    cv2.circle(color, (circle[0], circle[1]), circle[2], (0, 255, 0), 3)

circles2 = cv2.HoughCircles(gray,
                            cv2.HOUGH_GRADIENT,
                            2,
Code Example #52
def recognize(cam=None):
    global prediction
    if cam is None:
        cam = cv2.VideoCapture(1)
        if cam.read()[0] == False:
            cam = cv2.VideoCapture(0)
    hist = get_hand_hist()
    x, y, w, h = 300, 100, 300, 300
    while True:
        text = ""
        img = cam.read()[1]
        img = cv2.flip(img, 1)
        img = cv2.resize(img, (640, 480))
        imgCrop = img[y:y + h, x:x + w]
        imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([imgHSV], [0, 1], hist, [0, 180, 0, 256], 1)
        disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
        cv2.filter2D(dst, -1, disc, dst)
        blur = cv2.GaussianBlur(dst, (11, 11), 0)
        blur = cv2.medianBlur(blur, 15)
        thresh = cv2.threshold(blur, 0, 255,
                               cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
        thresh = cv2.merge((thresh, thresh, thresh))
        thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
        thresh = thresh[y:y + h, x:x + w]
        (openCV_ver, _, __) = cv2.__version__.split(".")
        if openCV_ver == '3':
            contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE,
                                        cv2.CHAIN_APPROX_NONE)[1]
        elif openCV_ver == '4':
            contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE,
                                        cv2.CHAIN_APPROX_NONE)[0]
        if len(contours) > 0:
            contour = max(contours, key=cv2.contourArea)
            # print(cv2.contourArea(contour))
            if cv2.contourArea(contour) > 10000:
                x1, y1, w1, h1 = cv2.boundingRect(contour)
                save_img = thresh[y1:y1 + h1, x1:x1 + w1]

                if w1 > h1:
                    save_img = cv2.copyMakeBorder(save_img, int((w1 - h1) / 2),
                                                  int((w1 - h1) / 2), 0, 0,
                                                  cv2.BORDER_CONSTANT,
                                                  (0, 0, 0))
                elif h1 > w1:
                    save_img = cv2.copyMakeBorder(save_img, 0, 0,
                                                  int((h1 - w1) / 2),
                                                  int((h1 - w1) / 2),
                                                  cv2.BORDER_CONSTANT,
                                                  (0, 0, 0))

                pred_probab, pred_class = keras_predict(model, save_img)

                if pred_probab * 100 > 80:
                    text = get_pred_text_from_db(pred_class)
                    if text.lower() == 'f**k':
                        text = 'Love'
                    print(text)
        blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
        splitted_text = split_sentence(text, 2)
        put_splitted_text_in_blackboard(blackboard, splitted_text)
        # cv2.putText(blackboard, text, (30, 200), cv2.FONT_HERSHEY_TRIPLEX, 1.3, (255, 255, 255))
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        res = np.hstack((img, blackboard))
        # cv2.imshow("Recognizing gesture", res)
        return res
Code Example #53
File: main.py Project: GaEsRaRe/Musical_Sheet_reader
def get_body(image):
    gray = cv2.medianBlur(image, 5)
    dst2 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 11, 2)
    return dst2
Code Example #54
def cartoonizer(now):
    num_down = 2  # number of downsampling steps
    num_bilateral = 7  # number of bilateral filtering steps

    img_rgb = cv2.imread("./images/screen_capture_%s.jpg" % now)

    # downsample image using Gaussian pyramid
    img_color = img_rgb
    for _ in range(num_down):
        img_color = cv2.pyrDown(img_color)

    # repeatedly apply small bilateral filter instead of
    # applying one large filter
    for _ in range(num_bilateral):
        img_color = cv2.bilateralFilter(img_color,
                                        d=9,
                                        sigmaColor=9,
                                        sigmaSpace=7)

    # upsample image to original size
    for _ in range(num_down):
        img_color = cv2.pyrUp(img_color)

    # convert to grayscale and apply median blur
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
    img_blur = cv2.medianBlur(img_gray, 7)
    rows, cols, channels = img_rgb.shape
    roi = img_color[0:rows, 0:cols]

    # detect and enhance edges
    img_edge = cv2.adaptiveThreshold(img_blur,
                                     255,
                                     cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY,
                                     blockSize=9,
                                     C=2)

    img_median = cv2.medianBlur(img_edge, 5)
    edge_compare = np.concatenate((img_edge, img_median), axis=1)
    cv2.imshow("edge_compare", edge_compare)
    ret, mask = cv2.threshold(img_median, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    # convert back to color, bit-AND with color image
    img_edge = cv2.cvtColor(img_median, cv2.COLOR_GRAY2RGB)
    #img_cartoon = cv2.bitwise_and(img_color, img_edge)

    img_cartoon_bg = cv2.bitwise_and(roi, roi, mask=mask)
    img_cartoon_fg = cv2.bitwise_and(img_edge, img_edge, mask=mask_inv)
    dst = cv2.add(img_cartoon_fg, img_cartoon_bg)
    img_color[0:rows, 0:cols] = dst

    # display
    #cv2.imshow("original", img_rgb)
    #cv2.imshow("edge", img_edge)
    #cv2.imshow("mask_inv", mask_inv)
    #cv2.imshow("color", img_color)
    #cv2.imshow("mask", mask)
    #cv2.imshow("cartoon", img_rgb)
    result_compare = np.concatenate((img_rgb, img_color), axis=1)
    cv2.imshow("result_compare", result_compare)
    cv2.waitKey(0)

    cv2.imwrite("./images/screen_capture_cartoonized_mask_%s.jpg" % now, mask)
    cv2.imwrite("./images/screen_capture_cartoonized_%s.jpg" % now, img_rgb)
    cv2.destroyAllWindows()
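One caveat in the pyrDown/pyrUp round trip above: when the frame dimensions are not multiples of four, pyrUp overshoots and img_color comes back slightly larger than img_rgb, which would make the final np.concatenate fail. A small demonstration plus a defensive resize (an assumption, not part of the original function):

import cv2
import numpy as np

img_rgb = np.zeros((481, 641, 3), np.uint8)  # odd-sized frame (hypothetical)
img_color = img_rgb
for _ in range(2):
    img_color = cv2.pyrDown(img_color)
for _ in range(2):
    img_color = cv2.pyrUp(img_color)
print(img_color.shape)  # (484, 644, 3) -- no longer matches the source

# force the result back to the exact source size before concatenating
h, w = img_rgb.shape[:2]
img_color = cv2.resize(img_color, (w, h), interpolation=cv2.INTER_LINEAR)
print(img_color.shape)  # (481, 641, 3)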
コード例 #55
0
import cv2
import numpy as np

bs = cv2.createBackgroundSubtractorKNN()  # background subtractor based on the k-nearest-neighbour algorithm
camera = cv2.VideoCapture("1.avi")  # open a video file; passing 0 instead opens the built-in webcam

while True:
    ret, frame = camera.read()  # ret becomes False once the last frame has been read
    if not ret:  # stop instead of feeding None into the subtractor
        break
    # frame is the 3-D array holding the current frame
    fgmask = bs.apply(frame)
    fg2 = fgmask.copy()
    blur = cv2.medianBlur(fg2, 5)  # median filtering
    th = cv2.threshold(
        blur, 254, 255, cv2.THRESH_BINARY
    )[1]  # simple thresholding: source image, threshold, value assigned above the threshold, thresholding mode
    # cv2.threshold returns (threshold value, thresholded image); only the image
    # is kept, hence the [1]. Thresholding at 254 drops the shadow pixels,
    # which the KNN subtractor marks as 127, keeping only sure foreground.
    dilated = cv2.dilate(th,
                         cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),
                         iterations=2)  # number of dilation passes
    # dilate the mask to merge nearby foreground blobs
    image, contours, gihr = cv2.findContours(
        dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
    )  # extract contours; cv2.RETR_EXTERNAL keeps only outer contours, cv2.RETR_TREE would keep nested ones as well
    # cv2.CHAIN_APPROX_SIMPLE stores few contour points; cv2.CHAIN_APPROX_NONE stores all of them
    # (the three-value return is OpenCV 3.x; OpenCV 4.x returns only contours and hierarchy)
    # compute a bounding box for each sufficiently large contour and draw it on the frame
    for c in contours:
        if cv2.contourArea(c) > 100:
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(
                frame, (x, y), (x + w, y + h), (255, 255, 0),
                2)  # draw the box: image, top-left corner, bottom-right corner, colour, thickness
コード例 #56
0
ファイル: artistic.py プロジェクト: zhangfx123/imgaug
def _blur_median(image, ksize):
    if ksize % 2 == 0:
        ksize += 1
    if ksize <= 1:
        return image
    return cv2.medianBlur(_normalize_cv2_input_arr_(image), ksize)
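_normalize_cv2_input_arr_ is an imgaug-internal helper; outside the library a plain contiguous uint8 array can be passed straight to cv2.medianBlur. A short sketch of the rounding behaviour the wrapper implements (stand-alone, not imgaug code):

import cv2
import numpy as np

image = np.random.randint(0, 256, (32, 32, 3), np.uint8)

# _blur_median(image, 4) behaves like cv2.medianBlur(image, 5): even kernel
# sizes are rounded up to the next odd value, since cv2.medianBlur only
# accepts odd ksize, and ksize <= 1 returns the input unchanged
blurred = cv2.medianBlur(image, 5)
print(blurred.shape)  # (32, 32, 3)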
コード例 #57
0
"""
@author: anshu
"""

import cv2
import numpy as np
def get_frame(cap, scaling_factor):
    _, frame = cap.read()
    frame = cv2.resize(frame, None, fx=scaling_factor, 
            fy=scaling_factor, interpolation=cv2.INTER_AREA)

    return frame

if __name__=='__main__':
    cap = cv2.VideoCapture(0)
    scaling_factor = 0.5
    while True:
        frame = get_frame(cap, scaling_factor) 
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # rough skin-tone band in HSV; everything outside it is masked away
        lower = np.array([0, 70, 60])
        upper = np.array([50, 150, 255])
        mask = cv2.inRange(hsv, lower, upper)
        img_bitwise_and = cv2.bitwise_and(frame, frame, mask=mask)
        img_median_blurred = cv2.medianBlur(img_bitwise_and, 5)  # smooth the masked result
        cv2.imshow('Input', frame)
        cv2.imshow('Output', img_median_blurred)
        c = cv2.waitKey(5) 
        if c == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
コード例 #58
0
ファイル: testingGUI.py プロジェクト: kbruett/CompArchProject
        # will assume this contour corresponds to the area of the bottle cap
        cnt = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
        # Get the radius of the enclosing circle around the found contour
        ((x, y), radius) = cv2.minEnclosingCircle(cnt)
        # Draw the circle around the contour
        cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
        # Get the moments to calculate the center of the contour (in this case Circle)
        M = cv2.moments(cnt)
        center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))

        points.appendleft(center)

    elif len(cnts) == 0:
        if len(points) != 0:
            blackboard_gray = cv2.cvtColor(blackboard, cv2.COLOR_BGR2GRAY)
            blur1 = cv2.medianBlur(blackboard_gray, 15)
            blur1 = cv2.GaussianBlur(blur1, (5, 5), 0)
            thresh1 = cv2.threshold(blur1, 0, 255,
                                    cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
            # index [1] picks the contour list from OpenCV 3.x's
            # (image, contours, hierarchy) return value; OpenCV 4.x
            # returns (contours, hierarchy) instead
            blackboard_cnts = cv2.findContours(thresh1.copy(), cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_NONE)[1]
            if len(blackboard_cnts) >= 1:
                cnt = sorted(blackboard_cnts,
                             key=cv2.contourArea,
                             reverse=True)[0]

                if cv2.contourArea(cnt) > 1000:
                    x, y, w, h = cv2.boundingRect(cnt)
                    alphabet = blackboard_gray[y - 10:y + h + 10,
                                               x - 10:x + w + 10]
                    newImage = cv2.resize(alphabet, (28, 28))
コード例 #59
0
ファイル: median_filter.py プロジェクト: mauliknshah/kampf
"""
This block iterates through all the images with png format in the directory or subdirectory under the source path. 
Also, it skips iterating through the mask file. The open cv's medianBlur method applies the filter to the image, with
two kernels sized 3 and 5. 

The newly found filtered images are stored in the new folders named after appending _m3 or _m5 to the original folder.
"""
for root, dirs, files in os.walk(path):
    for file in files:
        if file.endswith('.png') and (not file.startswith('m')):

            print(os.path.join(root, file))

            #Apply median filter of kernel size 5.
            img = cv2.imread(os.path.join(root, file))
            medianImg = cv2.medianBlur(img, 5)

            #Save the file. Create the required directory if it does not exist.
            op_path5 = root + "_m5"
            try:
                if not os.path.exists(op_path5):
                    os.makedirs(op_path5)
            except OSError as e:
                raise

            cv2.imwrite(os.path.join(op_path5, file), medianImg)

            #Apply median filter of kernel size 3.
            medianImg = cv2.medianBlur(img, 3)

            #Save the file. Create the required directory if it does not exist.
            op_path3 = root + "_m3"
            try:
                if not os.path.exists(op_path3):
                    os.makedirs(op_path3)
            except OSError as e:
                raise

            cv2.imwrite(os.path.join(op_path3, file), medianImg)
コード例 #60
0
def create_record(orig_picture, classes, size, per_num, TFRecordname, Output,
                  classes_num, save_path):
    Name = save_path + '/' + TFRecordname + ".tfrecords"
    writer = tf.python_io.TFRecordWriter(Name)
    # print("创建写者成功!")
    per_num = int(per_num)  # 这个类型错误找了好久
    size = int(size)
    classes_num = int(classes_num)
    for index, name in enumerate(classes):
        class_path = orig_picture + "/" + name + "/*.jpg"
        temp = 1
        for item in glob.glob(str(class_path)):
            # print("进入递归")
            if temp < int(per_num):
                try:
                    print("item: ", item)
                    img = cv2.imread(item, 0)  # open the image in grayscale mode
                    img = cv2.medianBlur(img, 7)  # blur the image to reduce noise
                    th1 = cv2.adaptiveThreshold(img, 255,
                                                cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                cv2.THRESH_BINARY_INV, 11,
                                                2)  # black background, white object
                    th2 = cv2.adaptiveThreshold(img, 255,
                                                cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                cv2.THRESH_BINARY, 11,
                                                2)  # white background, black object
                    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,
                                                       (25, 25))
                    closed = cv2.morphologyEx(th1, cv2.MORPH_CLOSE, kernel)
                    # img = img.resize((SIZE, SIZE))   # set the target image size
                    # OpenCV 3.x returns (image, contours, hierarchy) here
                    image, cnts, hierarchy = cv2.findContours(
                        closed.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
                    c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
                    rect = cv2.minAreaRect(c)
                    box = np.int0(cv2.boxPoints(rect))
                    cv2.drawContours(img, [box], -1, (0, 255, 0), 3)
                    # corners of the minimum-area rectangle
                    Xs = [i[0] for i in box]
                    Ys = [i[1] for i in box]
                    x1 = min(Xs)
                    x2 = max(Xs)
                    y1 = min(Ys)
                    y2 = max(Ys)
                    if x1 < 0:
                        x1 = 0
                    if x2 < 0:
                        x2 = 0
                    if y1 < 0:
                        y1 = 0
                    if y2 < 0:
                        y2 = 0
                    height = y2 - y1
                    width = x2 - x1
                    cropImg = th2[y1:y1 + height, x1:x1 + width]  # crop
                    finish = cv2.resize(cropImg, (size, size))  # rescale the crop to the target size
                    img_raw = finish.tobytes()  # convert the image to raw bytes (numpy method)
                    print("group:", index, "image no.", temp)
                    no2 = "group: " + str(index) + ", image no. " + str(temp)
                    Output.setLabelText(no2)
                    example = tf.train.Example(features=tf.train.Features(
                        feature={
                            "label":
                            tf.train.Feature(int64_list=tf.train.Int64List(
                                value=[index])),
                            'img_raw':
                            tf.train.Feature(bytes_list=tf.train.BytesList(
                                value=[img_raw]))
                        }))
                    temp = temp + 1
                    writer.write(example.SerializeToString())
                    Output.setPercent((temp / (per_num * classes_num)) * 100)
                    QApplication.processEvents()  # refresh the GUI in real time
                except Exception as e:
                    print(e)
            else:
                break
    writer.close()
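A minimal read-back sketch for spot-checking the records written above, staying within the same TF 1.x API family as tf.python_io.TFRecordWriter (the file name is hypothetical):

import tensorflow as tf

# iterate over the serialized records and decode label and raw image bytes
for record in tf.python_io.tf_record_iterator("train.tfrecords"):
    example = tf.train.Example()
    example.ParseFromString(record)
    label = example.features.feature["label"].int64_list.value[0]
    img_raw = example.features.feature["img_raw"].bytes_list.value[0]
    print(label, len(img_raw))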