def main():
    lowThreshold = 100
    higThreshold = 12  
    max_lowThreshold = 300 
    max_higThreshold = 200
    mode = 1
    contour_i = 10
    hot_x_100 = 5000
    hot_y_100 = 5000
    hot_r_100 = 40
    csv_filepath_large_data = 'pyNVscan_AT_CV_V3.1.csv'
    
    cv2.namedWindow('mpl_Hough',cv2.WINDOW_NORMAL) 
    cv2.namedWindow('param',cv2.WINDOW_NORMAL) 
  
    cv2.createTrackbar('Low threshold','param',100, max_lowThreshold,nothing)  
    cv2.createTrackbar('Hig threshold','param',12, max_higThreshold,nothing) 
#   cv2.createTrackbar('Matplotlib Mode','param',0, 2, nothing)
#   cv2.createTrackbar('Contour_i','param',10, 100, nothing)
    cv2.createTrackbar('Hot_X_100','param',5000, 10000, nothing)
    cv2.createTrackbar('Hot_Y_100','param',5000, 10000, nothing)
    cv2.createTrackbar('Hot_R_100','param',40, 10000, nothing)
    
    with open(csv_filepath_large_data,"rb") as f: 
        my_matrix = np.loadtxt(f, delimiter=",", skiprows=0)
        
    csv_head_large = creat_CSV_Head_File(my_matrix)

    png_ruler_large = csv_to_PNG(my_matrix, csv_head_large, mode, contour_i, png_io)
    
    img = cv2.imdecode(np.frombuffer(png_io.getvalue(), dtype=np.uint8), 1)    # decode the in-memory binary image data
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    gaussian_gray = cv2.GaussianBlur(gray,(3,3),0)
    
    pzt_x, pzt_y, pzt_r = gui_CV_Number(hot_x_100, hot_y_100, hot_r_100)
    
    png_x, png_y = pztXY_to_pngXY(csv_head_large, gray, pzt_x, pzt_y)

    flag_hough, png_x1, png_y1, png_r1 = mpl_Hough(csv_head_large, img, gaussian_gray, png_x, png_y, pzt_r, png_ruler_large, lowThreshold, higThreshold)  # initialization  
    
    while(1):
        k=cv2.waitKey(20)&0xFF
        if k==27:
            break

        lowThreshold = cv2.getTrackbarPos('Low threshold','param')
        higThreshold = cv2.getTrackbarPos('Hig threshold','param')
#       mode = cv2.getTrackbarPos('Matplotlib Mode','param')
#       contour_i = cv2.getTrackbarPos('Contour_i','param')
        hot_x_100 = cv2.getTrackbarPos('Hot_X_100','param')
        hot_y_100 = cv2.getTrackbarPos('Hot_Y_100','param')
        hot_r_100 = cv2.getTrackbarPos('Hot_R_100','param')

        pzt_x, pzt_y, pzt_r = gui_CV_Number(hot_x_100, hot_y_100, hot_r_100)
    
        png_x, png_y = pztXY_to_pngXY(csv_head_large, gray, pzt_x, pzt_y)

        flag_hough, png_x1, png_y1, png_r1 = mpl_Hough(csv_head_large,img, gaussian_gray, png_x, png_y, pzt_r, png_ruler_large, lowThreshold, higThreshold)
        
    cv2.destroyAllWindows()  
def main():

    cap = cv2.VideoCapture(1)
    if not cap.isOpened():
        print("Capture could not be opened successfully.") 

    while True:
        
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        _, img = cap.read()

        alpha = cv2.getTrackbarPos('Contrast', 'image')
        #Sliders can't be lower than 0, so starting at 50, then subtracting
        beta = cv2.getTrackbarPos('Brightness', 'image') - 50
        print(beta)

        toggle = cv2.getTrackbarPos(switch, 'image')
        segmented = False if toggle == 0 else True
        
        #trans_img = cv2.add(mul_img, b_array)
        trans_img = (alpha * img)       
        #trans_img = np.where(trans_img + beta >= 0, trans_img + beta, 0)
        

        if segmented:
            gray = cv2.cvtColor(trans_img, cv2.COLOR_BGR2GRAY)
            _, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
            cv2.imshow('image', binary)
        else:
            cv2.imshow('image', trans_img)
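
The loop above reads its sliders from an 'image' window created outside this excerpt; a minimal sketch of that setup, assuming the window name, ranges, and switch label (the Brightness slider starts at 50 so that 0..100 maps to a -50..+50 shift, as the comment explains), with cv2.convertScaleAbs shown as a clipped alternative to the bare multiplication:

import cv2
import numpy as np

def nothing(x):
    pass

switch = '0 : OFF \n1 : ON'   # assumed label for the segmentation toggle
cv2.namedWindow('image')
cv2.createTrackbar('Contrast', 'image', 1, 10, nothing)      # alpha (gain)
cv2.createTrackbar('Brightness', 'image', 50, 100, nothing)  # 50 = no shift
cv2.createTrackbar(switch, 'image', 0, 1, nothing)

# inside the loop, a saturating version of the contrast/brightness transform:
# trans_img = cv2.convertScaleAbs(img, alpha=alpha, beta=beta)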
Example #3
def goLiveT():
	cap = cv2.VideoCapture(0)
	cv2.namedWindow('image')
	# create trackbars for color change
	cv2.createTrackbar('Thres','image',0,255,nothing)
	# create switch for ON/OFF functionality
	switch = '0 : OFF \n1 : ON'
	cv2.createTrackbar(switch, 'image',0,1,nothing)
	while (1):

		_, imgOriginal = cap.read()
		cv2.imshow('imgOriginal',imgOriginal)
		filteredImage = rb.clearImage(imgOriginal)

		# get current positions of four trackbars
		binValue = cv2.getTrackbarPos('Thres','image')
		s = cv2.getTrackbarPos(switch,'image')

		k = cv2.waitKey(1) & 0xFF
		if k == 27:
			break

		if s == 0:
			pass 
		else:
			thresImage = rb.doThresHold(filteredImage,binValue)
			cv2.imshow('img', thresImage)

	cv2.destroyAllWindows()
Example #4
def get_text_th(img):
    cv2.namedWindow('result')
    cv2.createTrackbar('d1','result',1,255, nothing)
    cv2.createTrackbar('S1','result',1,15, nothing)
    cv2.createTrackbar('d2','result',1,15, nothing)
    cv2.createTrackbar('S2','result',1,255, nothing)
    img = cv2.blur(img, (3, 3))

    while (1):
        d2 = cv2.getTrackbarPos('d2','result')
        S2 = cv2.getTrackbarPos('S2','result')
        d1 = cv2.getTrackbarPos('d1','result')
        S1 = cv2.getTrackbarPos('S1','result')

        lapl = cv2.Laplacian(img, cv2.CV_32F)
        bilat = cv2.bilateralFilter(lapl, 6, 75, 75)
        cv2.imshow('result', bilat)

        ret, bth = cv2.threshold(bilat, 0.9, 1, cv2.THRESH_BINARY_INV)
        bth = np.mat(bth * 255, np.uint8)

        merge = cv2.bitwise_and(img, bth)
        cv2.imshow('m', merge)

        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break
    def get_range(self):
        if self.switch_enabled:
            toggle_lower_upper = cv2.getTrackbarPos(MultiTrackbarWindow.switch, self.window_name)
        else:
            toggle_lower_upper = 0

        if toggle_lower_upper == 0:
            if self.current_mode == 1:
                for trackbar in self.trackbars:
                    cv2.setTrackbarPos(trackbar["name"], trackbar["window"], trackbar["current lower"])
                self.current_mode = 0
            for trackbar in self.trackbars:
                trackbar["current lower"] = cv2.getTrackbarPos(trackbar["name"], trackbar["window"])
        else:
            if self.current_mode == 0:
                for trackbar in self.trackbars:
                    cv2.setTrackbarPos(trackbar["name"], trackbar["window"], trackbar["current upper"])
                self.current_mode = 1
            for trackbar in self.trackbars:
                trackbar["current upper"] = cv2.getTrackbarPos(trackbar["name"], trackbar["window"])
        if self.returns_unzipped_results:
            lower = (trackbar["current lower"] for trackbar in self.trackbars)
            upper = (trackbar["current upper"] for trackbar in self.trackbars)
            if not self.returns_generators:
                lower = tuple(lower)
                upper = tuple(upper)
            result = (lower, upper)
        else:
            result = ((trackbar["current lower"], trackbar["current upper"]) for trackbar in self.trackbars)
            if not self.returns_generators:
                result = tuple(result)

        return result
Example #6
def trackBar():
    # Create a black image, a window
    img = np.zeros((300,512,3), np.uint8)
    cv2.namedWindow('Image')

    # create trackbars for color change
    cv2.createTrackbar('R', 'Image', 0, 255, nothing)
    cv2.createTrackbar('G', 'Image', 0, 255, nothing)
    cv2.createTrackbar('B', 'Image', 0, 255, nothing)

    # create switch for ON/OFF functionality
    switch = '0 : OFF \n1 : ON'
    cv2.createTrackbar(switch, 'Image', 0, 1, nothing)

    while(1):
        cv2.imshow('Image', img)
        k = cv2.waitKey(1) 
        if k == 27:  #27 is the code for ESC key
            break

        # get current positions of four trackbars
        r = cv2.getTrackbarPos('R', 'Image')
        g = cv2.getTrackbarPos('G', 'Image')
        b = cv2.getTrackbarPos('B', 'Image')
        s = cv2.getTrackbarPos(switch, 'Image')

        if s == 0:
            img[:] = 0
        else:
            img[:] = [b,g,r]

    cv2.destroyAllWindows()
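
Almost every snippet on this page passes a callback named `nothing` to cv2.createTrackbar; a minimal sketch of that no-op callback and the imports the example above assumes:

import cv2
import numpy as np

def nothing(x):
    # required trackbar callback; positions are polled later with
    # cv2.getTrackbarPos, so there is nothing to do here
    pass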
Example #7
 def update():
     sigma = cv2.getTrackbarPos("sigma", "control") * 2 + 1
     str_sigma = cv2.getTrackbarPos("str_sigma", "control") * 2 + 1
     blend = cv2.getTrackbarPos("blend", "control") / 10.0
     print("sigma: %d  str_sigma: %d  blend_coef: %f" % (sigma, str_sigma, blend))
     dst = coherence_filter(src, sigma=sigma, str_sigma=str_sigma, blend=blend)
     cv2.imshow("dst", dst)
    def checkSettings(self):

        if self.showBackImage == False:
            return

        self.PERFECTION = cv2.getTrackbarPos('perfection','backImage')
        self.THRESHOLD = cv2.getTrackbarPos('back_threshold', 'backImage')
 def update():
     sigma = cv2.getTrackbarPos('sigma', 'control')*2+1
     str_sigma = cv2.getTrackbarPos('str_sigma', 'control')*2+1
     blend = cv2.getTrackbarPos('blend', 'control') / 10.0
     print('sigma: %d  str_sigma: %d  blend_coef: %f' % (sigma, str_sigma, blend))
     dst = coherence_filter(src, sigma=sigma, str_sigma = str_sigma, blend = blend)
     cv2.imshow('dst', dst)
Example #10
def imageIdentify(Box):
    global config
    try: 
        #Gets and processes the given box selection 
        #Get box size from its slider
        Selection_Frame = Box.name    #Selection Frame and Box share Name
        Box.size        = cv2.getTrackbarPos('Size',   Selection_Frame) 
        Box.threshold   = cv2.getTrackbarPos('Threshold', Selection_Frame)  

        #Set the min box size
        if Box.size <10: Box.size = 10

        #get and process the selection
        Box.selection = getSelection(Box.location[0], Box.location[1], Box.size)
        Box.selection = preprocessImage(Box.selection, Selection_Frame, Box.threshold)
        
        cv2.imshow(Selection_Frame, Box.selection)    #show processed image in selection window
        cv2.resizeWindow(Selection_Frame, 300, 300)   #keep the window a set size so the trackbars stay out of view

        #convert image to number
        ValueList = getValueList(Box.segCoordinates, Box.selection)
        Value     = convertToInt(ValueList)

        return Value
    except Exception:
        print("Selection Error: Out of Bounds")
        return None
Example #11
def hough_adjust(mat,img0):
    img=np.copy(img0)
    lines = cv2.HoughLinesP(mat,1,np.pi/180,200,minLineLength=100,maxLineGap=10)
    print(lines)
    if lines is not None:
        for x1,y1,x2,y2 in lines[0]:
            cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
    
    cv2.namedWindow('lines',cv2.WINDOW_NORMAL)
    cv2.createTrackbar('Threshhold','lines',1,1000,nothing)
    cv2.createTrackbar('MinLL','lines',1,1000,nothing)
    cv2.createTrackbar('MinLG','lines',1,1000,nothing)
    cv2.imshow('lines',img)
    while(1):
        cv2.imshow('lines',img)
        img = np.copy(img0)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break
            
            # get current positions of four trackbars
        
        Threshhold = cv2.getTrackbarPos('Threshhold','lines')
        MinLL = cv2.getTrackbarPos('MinLL','lines')
        MinLG = cv2.getTrackbarPos('MinLG','lines')
        
        lines = cv2.HoughLinesP(mat,1,np.pi/180,Threshhold,minLineLength=MinLL,maxLineGap=MinLG)
        print(lines)
        if lines is not None:
            for x1,y1,x2,y2 in lines[0]:
                cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
                
    cv2.destroyAllWindows()

    return lines, Threshhold, MinLL, MinLG
Example #12
 def on_trackbar_changed(x):
     lowerb = tuple(
         cv2.getTrackbarPos(ch + '_min', winname) for ch in 'HLS')
     upperb = tuple(
         cv2.getTrackbarPos(ch + '_max', winname) for ch in 'HLS')
     out = img_util.apply_mask_hls(img_720p, lowerb, upperb)
     cv2.imshow(winname, out)
Example #13
def get_hls_range(img, winname=None):
    if winname is None:
        winname = 'Choose HLS bounds'

    h, w, _ = img.shape
    f = 720.0 / h
    img_720p = cv2.resize(img, dsize=None, fx=f, fy=f)

    def on_trackbar_changed(x):
        lowerb = tuple(
            cv2.getTrackbarPos(ch + '_min', winname) for ch in 'HLS')
        upperb = tuple(
            cv2.getTrackbarPos(ch + '_max', winname) for ch in 'HLS')
        out = img_util.apply_mask_hls(img_720p, lowerb, upperb)
        cv2.imshow(winname, out)

    cv2.imshow(winname, img_720p)
    for ch in 'HLS':
        cv2.createTrackbar(ch + '_min', winname, 0, 255, on_trackbar_changed)
        cv2.createTrackbar(ch + '_max', winname, 255, 255, on_trackbar_changed)

    while True:
        cv2.waitKey()
        lowerb = tuple(
            cv2.getTrackbarPos(ch + '_min', winname) for ch in 'HLS')
        upperb = tuple(
            cv2.getTrackbarPos(ch + '_max', winname) for ch in 'HLS')
        cv2.destroyWindow(winname)
        return lowerb, upperb
Example #14
	def _draw_board(self, dst):
		hsvdst = cv2.cvtColor(dst,cv2.COLOR_BGR2HSV)
		step = CHESS_SIZE/8
		black_val = cv2.getTrackbarPos('black', 'warp')
		white_val = cv2.getTrackbarPos('white', 'warp')
		yc = cv2.getTrackbarPos('ycutoff', 'warp')
		font = cv2.FONT_HERSHEY_SIMPLEX
		#dst = hsvdst[...,2]
		col = dst
		dst = cv2.cvtColor(dst,cv2.COLOR_BGR2GRAY)
		rv, bl_dst = cv2.threshold(dst, black_val, 255, cv2.THRESH_BINARY)
		rv, wt_dst = cv2.threshold(dst, white_val, 255, cv2.THRESH_BINARY)
		for i in range(8):
			for j in range(8):
				y = self.BORDER + step*i
				x = self.BORDER + step*j
				bl_cnt = np.sum(bl_dst[y-yc:y-yc + step,x:x+step] == 0)
				wt_cnt = np.sum(wt_dst[y-yc:y+step-yc,x:x+step] == 255)
				col[y-yc:y-yc+step,x:x+step,2] = np.zeros((step, step))
				s = str(bl_cnt) + "/" + str(wt_cnt)
				if bl_cnt > 500:
					cv2.circle(col, (x+step/2, y+step/2), 25, BLACK, -1)
				elif wt_cnt >= 75:
					cv2.circle(col, (x+step/2, y+step/2), 25, WHITE, -1)
				cv2.putText(col, s ,(x, y+step/2), font, 0.5,BLUEGREEN,2,cv2.LINE_AA)
Example #15
    def get_mask(self, image):
        """
        Returns the mask of an image. We use the floodFill algorithm, which
        compares 4-connected neighboring pixels and, based on the specified
        threshold, fills a mask image (two pixels bigger in every direction)
        with white; leftover noise is then removed by running dilation.

        Args:
            image(numpy.ndarray): Preprocessed image

        Returns:
            Mask of given image
        """
        h, w = image.shape[:2]
        mask = np.zeros((h+2, w+2), np.uint8)
        connectivity = 4
        mask[:] = 0
        if self.debug is True:
            self.lo = cv.getTrackbarPos('lo', 'result')
            self.hi = cv.getTrackbarPos('hi', 'result')
        flags = connectivity
        flags |= cv.FLOODFILL_MASK_ONLY
        flags |= 255 << 8
        self.seed = self.get_seed(image)
        cv.floodFill(image, mask, self.seed, (255, 255, 255), (self.lo,)*3,
                     (self.hi,)*3, flags)
        kernel = np.ones((1, 1), np.uint8)
        mask = cv.dilate(mask, kernel, iterations=4)
        return mask
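
A sketch of how a mask produced by get_mask might be consumed; `Detector` and the image path stand in for the class above and its input (both are assumptions), and the extra one-pixel border that floodFill requires is cropped before applying the mask:

import cv2 as cv

detector = Detector(debug=False)                      # hypothetical instance of the class above
image = cv.imread('sample.png')                       # assumed preprocessed input frame
mask = detector.get_mask(image)                       # (h+2, w+2) floodFill mask
mask = mask[1:-1, 1:-1]                               # crop the border floodFill needs
foreground = cv.bitwise_and(image, image, mask=mask)  # keep only the flooded region
cv.imshow('result', foreground)
cv.waitKey(0)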
    def getParams(self):
        """Updates simple blob detector parameters with values on set on the trackbar"""
         # Change thresholds                                                   
        self.params.minThreshold = cv2.getTrackbarPos('minThresh','bars')
        self.params.maxThreshold = cv2.getTrackbarPos('maxThresh','bars')
        self.params.thresholdStep = cv2.getTrackbarPos('step','bars')

        # Filter by Area.       
        self.params.filterByArea = True
        self.params.minArea = cv2.getTrackbarPos('minArea','bars')
        self.params.maxArea = cv2.getTrackbarPos('maxArea','bars')

        # Circularity
        self.params.filterByCircularity = True
        self.params.minCircularity = cv2.getTrackbarPos('minCircularity','bars')/100.
        self.params.maxCircularity = cv2.getTrackbarPos('maxCircularity','bars')/100.

        # Convexity
        self.params.filterByConvexity = True
        self.params.minConvexity = cv2.getTrackbarPos('minConvexity','bars')/100.
        self.params.maxConvexity = cv2.getTrackbarPos('maxConvexity','bars')/100.

        # Inertia
        self.params.filterByInertia = False
        #print self.params.maxInertiaRatio
        #self.params.minInertiaRatio = cv2.getTrackbarPos('minIertia','bars')/100.
        #self.params.maxInertiaRatio = cv2.getTrackbarPos('maxIertia','bars')/100.

        # Distance
        self.params.minDistBetweenBlobs = cv2.getTrackbarPos('minDistance','bars')
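
A sketch of how parameters tuned by getParams are normally handed to OpenCV's blob detector; the input image and window name are assumptions, and cv2.SimpleBlobDetector_create is the OpenCV 3+ factory function:

import cv2
import numpy as np

params = cv2.SimpleBlobDetector_Params()
# ...fill in params from the trackbars, as getParams() does above...
detector = cv2.SimpleBlobDetector_create(params)

gray = cv2.imread('blobs.png', cv2.IMREAD_GRAYSCALE)   # assumed test image
keypoints = detector.detect(gray)
out = cv2.drawKeypoints(gray, keypoints, np.array([]), (0, 0, 255),
                        cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('blobs', out)
cv2.waitKey(0)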
def flooding(img):
    # modifies copy_isolated_butt
    global copy_isolated_butt, new_flood
    # if new_flood is True, rebuild the image from img_contours (discarding the previous
    # floodFills) and apply the baseline floodFills to the fresh copy; otherwise apply
    # the floodFills directly to img
    if new_flood:
        img = img_contours.copy()
        new_flood = False
        # floodFill on the img underside
        cv2.floodFill(img, mask, (128,copy_isolated_butt.shape[0]-5), (255, 255, 255), (3,)*3, (60,)*3, flags = 4)
        cv2.floodFill(img, mask, (11,copy_isolated_butt.shape[0]-6), (255, 255, 255), (2,)*3, (100,)*3) 
        cv2.floodFill(img, mask, (copy_isolated_butt.shape[1]-1,copy_isolated_butt.shape[0]-1), (255, 255, 255), (2,)*3, (70,)*3)
        # floodFill on the left side of img
        cv2.floodFill(img, mask, (1,10), (255, 255, 255), (2,)*3, (60,)*3, flags=4)
        cv2.floodFill(img, mask, (1,copy_isolated_butt.shape[0]-1), (255, 255, 255), (2,)*3, (60,)*3, flags=4)
        #cv2.floodFill(img, mask, (15,copy_isolated_butt.shape[0]-38), (255, 255, 255), (2,)*3, (60,)*3, flags=4)
        # floodFill on the upper right corner of img
        cv2.floodFill(img, mask, (copy_isolated_butt.shape[1]-3,3), (255,255,255), (2,)*3, (60,)*3)
              
    flags = connectivity
    if fixed_range:
        flags |= cv2.FLOODFILL_FIXED_RANGE
    if seed_pt is not None:
        lo = cv2.getTrackbarPos('floodfill_lo','config')
        hi = cv2.getTrackbarPos('floodfill_hi','config')
        cv2.floodFill(img, None, seed_pt, (255, 255, 255), (lo,)*3, (hi,)*3)
    cv2.imshow('floodfill',img)
    # update copy_isolated_img with new changes
    copy_isolated_butt = img.copy()
    return img
        def update(dummy=None):       
            global it
            b1 = cv2.getTrackbarPos('blur-radius1', 'result2') / 10.0
            c1 = cv2.getTrackbarPos('contour1', 'result2')
            t1 = modifyImg(warped, b1, c1, args.blur_contour, args.eksize)
            if c1 > 0: # colorize contour
                green = t1[:,:,1]
                green[ green != 255 ] = 139
                blue = t1[:,:,2]
                blue[ blue != 255 ] = 255

            b2 = cv2.getTrackbarPos('blur-radius2', 'result2') / 10.0
            c2 = cv2.getTrackbarPos('contour2', 'result2')
            t2 = modifyImg(im2, b2, c2, args.blur_contour, args.eksize)
            if c2 > 0: # colorize contour 
                red = t2[:,:,0]
                red[ red != 255 ] = 200
                green = t2[:,:,1]
                green[ green != 255 ] = 137
            
            # blend images
            bf = cv2.getTrackbarPos('blend', 'result2') / 100.0        
            dst = cv2.addWeighted(t1, bf, t2, 1.0 - bf, 0)
            cv2.imshow("result", dst)
            if args.save_result:
                cv2.imwrite('result_{}.png'.format(it), dst)
                it += 1
Example #19
def calibrateColor(color, def_range):
	
	global kernel
	name = 'Calibrate '+ color
	cv2.namedWindow(name)
	cv2.createTrackbar('Hue', name, def_range[0][0]+20, 180, nothing)
	cv2.createTrackbar('Sat', name, def_range[0][1], 255, nothing)
	cv2.createTrackbar('Val', name, def_range[0][2], 255, nothing)
	while(1):
		ret , frameinv = cap.read()
		frame=cv2.flip(frameinv ,1)

		hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

		hue = cv2.getTrackbarPos('Hue', name)
		sat = cv2.getTrackbarPos('Sat', name)
		val = cv2.getTrackbarPos('Val', name)

		lower = np.array([hue-20,sat,val])
		upper = np.array([hue+20,255,255])

		mask = cv2.inRange(hsv, lower, upper)
		eroded = cv2.erode( mask, kernel, iterations=1)
		dilated = cv2.dilate( eroded, kernel, iterations=1)

		cv2.imshow(name, dilated)

		k = cv2.waitKey(5) & 0xFF
		if k == ord(' '):
			cv2.destroyWindow(name)
			return np.array([[hue-20,sat,val],[hue+20,255,255]])
Example #20
    def __init__(self, calibrated_pair, image_pair):
        """Initialize tuner with a ``CalibratedPair`` and tune given pair."""
        #: Calibrated stereo pair to find Stereo BM settings for
        self.calibrated_pair = calibrated_pair
        cv2.namedWindow(self.window_name)
        cv2.createTrackbar("cam_preset", self.window_name,
                           self.calibrated_pair.block_matcher.stereo_bm_preset, 3,
                           self.set_bm_preset)
        cv2.createTrackbar("ndis", self.window_name,
                           self.calibrated_pair.block_matcher.search_range, 160,
                           self.set_search_range)
        cv2.createTrackbar("winsize", self.window_name,
                           self.calibrated_pair.block_matcher.window_size, 21,
                           self.set_window_size)
        #: (left, right) image pair to find disparity between
        self.pair = image_pair
        #self.tune_pair(image_pair)
        while True:
            if cv2.waitKey(1) & 0xFF == 27:
                break
            print(cv2.getTrackbarPos('ndis', "Stereo BM Tuner"))
            self.set_bm_preset(cv2.getTrackbarPos('cam_preset', self.window_name))
            self.set_search_range(cv2.getTrackbarPos('ndis', self.window_name))
            self.set_window_size(cv2.getTrackbarPos('winsize', self.window_name))
            self.update_disparity_map()
def run(capture):
    """ Update display based on user input """
    current_frame = get_new_frame(capture)

    while capture.isOpened():
        if cv2.getTrackbarPos(tbar_play_video_name, win_default_name):
            try:
                current_frame = get_new_frame(capture)
            except StopIteration:
                print("End of clip")
                break

        #This IS NOT HSV. This is BGR
        (hue_frame, sat_frame, val_frame) = cv2.split(current_frame)

        if cv2.getTrackbarPos(tbar_invert_name, win_hue_name):
            hue_frame = cv2.bitwise_not(hue_frame)

        if cv2.getTrackbarPos(tbar_invert_name, win_sat_name):
            sat_frame = cv2.bitwise_not(sat_frame)

        if cv2.getTrackbarPos(tbar_invert_name, win_val_name):
            val_frame = cv2.bitwise_not(val_frame)

        cv2.imshow(win_default_name, current_frame)
        cv2.imshow(win_hue_name, hue_frame)
        cv2.imshow(win_sat_name, sat_frame)
        cv2.imshow(win_val_name, val_frame)

        cv2.waitKey(40)
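
As the comment in run() notes, splitting current_frame directly yields B, G, R channels, not hue/saturation/value; a minimal sketch of the conversion an actual HSV split would need (current_frame stands for any BGR frame):

hsv = cv2.cvtColor(current_frame, cv2.COLOR_BGR2HSV)
hue_frame, sat_frame, val_frame = cv2.split(hsv)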
def draw_circle(event, x, y, flags, param):
    r = cv2.getTrackbarPos('R', 'image')
    g = cv2.getTrackbarPos('G', 'image')
    b = cv2.getTrackbarPos('B', 'image')
    color = (b, g, r)

    global ix, iy, drawing, mode
    # when the left button is pressed, record the starting coordinates
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    # draw while the left button is held down and the mouse moves; event reports the motion, flags whether the button is pressed
    elif event == cv2.EVENT_MOUSEMOVE and flags == cv2.EVENT_FLAG_LBUTTON:
        if drawing is True:
            if mode is True:
                cv2.rectangle(img, (ix, iy), (x, y), color, -1)
            else:
                # draw small filled circles; strung together they form a line, and 3 is the stroke thickness
                cv2.circle(img, (x, y), 3, color, -1)
                # the commented-out code below would use the start point as the centre and the start-to-end distance as the radius
                # r=int(np.sqrt((x-ix)**2+(y-iy)**2))
                # cv2.circle(img,(x,y),r,(0,0,255),-1)

    # stop drawing when the mouse button is released
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
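
draw_circle only runs after it is registered as a mouse callback; a minimal sketch of the setup this snippet assumes (the globals, the R/G/B trackbars on the 'image' window, and the display loop are assumptions consistent with the code above):

import cv2
import numpy as np

drawing = False   # True while the left button is held down
mode = True       # True draws rectangles, False draws freehand strokes
ix, iy = -1, -1
img = np.zeros((512, 512, 3), np.uint8)

cv2.namedWindow('image')
for name in ('R', 'G', 'B'):
    cv2.createTrackbar(name, 'image', 0, 255, lambda x: None)
cv2.setMouseCallback('image', draw_circle)

while True:
    cv2.imshow('image', img)
    if cv2.waitKey(1) & 0xFF == 27:   # ESC exits
        break
cv2.destroyAllWindows()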
Example #23
def FindBall(im2):
    global FilterWindowName
    hsv = cv2.cvtColor(im2, cv2.COLOR_RGB2HSV)
    [h, s, v] = cv2.split(hsv)
    
    #---------------------
    high_bnd = cv2.getTrackbarPos("Hue Max", FilterWindowName+"Hue")
    low_bnd = cv2.getTrackbarPos("Hue Min", FilterWindowName+"Hue")
    
    ret, hi1 = cv2.threshold(h, high_bnd, 255, cv2.THRESH_BINARY_INV)
    ret, hi2 = cv2.threshold(h, low_bnd, 255, cv2.THRESH_BINARY)
    
    hi = cv2.bitwise_and(hi1, hi2)
    cv2.imshow(FilterWindowName+"Hue", hi);
    #---------------------
    high_bnd = cv2.getTrackbarPos("Value Max", FilterWindowName+"Value")
    low_bnd = cv2.getTrackbarPos("Value Min", FilterWindowName+"Value")
    
    ret, vi1 = cv2.threshold(v, high_bnd, 255, cv2.THRESH_BINARY_INV)
    ret, vi2 = cv2.threshold(v, low_bnd, 255, cv2.THRESH_BINARY)
    
    vi = cv2.bitwise_and(vi1, vi2)
    cv2.imshow(FilterWindowName+"Value", vi);
    #---------------------
    out = cv2.bitwise_and(vi, hi)

    
    return out
Example #24
def depth_func(d, k):
  global do_depth_edge, do_rgb_edge, do_blur

  #sys.stdout.write('[%s]' % k)
  #sys.stdout.flush()

  if k == ord('e'):
    print "swap do_depth_edge"
    do_depth_edge = not do_depth_edge
  elif k == ord('f'):
    print "swap do_rgb_edge"
    do_rgb_edge = not do_rgb_edge
  elif k == ord('p'):
    pickle({'rgb': rgb_image, 'depth': d}, '/tmp/kinect.pickle')
    print "pickled to /tmp/kinect.pickle"
  elif k == ord('b'):
    do_blur = not do_blur
  elif k != '':
    print "key: %s" % k

  d = clip(d)
  if do_blur:
    d = cv2.GaussianBlur(d, (5,5), 0)
  #d = np.clip(d, 130, 140, d)
  if do_depth_edge:
    e = cv2.Canny(d, cv2.getTrackbarPos('minVal', 'Depth'),
                     cv2.getTrackbarPos('maxVal', 'Depth'))
    return e
    return where(e == 0, [d, e])
  return d
def process_frame(frame):
    """ Process frame based on user input """
    channel = cv2.getTrackbarPos(tbar_channel_select_name, win_debug_name)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    frame = get_channel(frame, channel)

    block_size = cv2.getTrackbarPos(tbar_block_size_name, win_debug_name)
    threshold = cv2.getTrackbarPos(tbar_thresh_name, win_debug_name)

    if not block_size % 2 == 1:
        block_size += 1
        cv2.setTrackbarPos(tbar_block_size_name, win_debug_name, block_size)

    if block_size <= 1:
        block_size = 3
        cv2.setTrackbarPos(tbar_block_size_name, win_debug_name, block_size)

    adaptive = cv2.adaptiveThreshold(frame, 255,
                                     cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY_INV,
                                     block_size,
                                     threshold)

    if draw_contours:
        cframe = np.zeros((frame.shape[0], frame.shape[1], 3), np.uint8)
        contours, hierarchy = cv2.findContours(adaptive,
                                               cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)

        cv2.drawContours(cframe, contours, -1, (255, 255, 255), 3)
        return cframe
    else:
        return adaptive
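
process_frame expects a debug window with three trackbars and a couple of module-level names; a minimal sketch of that setup and a driver loop, where the window/trackbar labels and the get_channel helper are assumptions consistent with the calls above:

import cv2
import numpy as np

win_debug_name = 'debug'
tbar_channel_select_name = 'Channel'
tbar_block_size_name = 'Block size'
tbar_thresh_name = 'Threshold'
draw_contours = True

def get_channel(frame, channel):
    # hypothetical stand-in for the helper used above: pick one HSV channel
    return cv2.split(frame)[min(channel, 2)]

cv2.namedWindow(win_debug_name)
cv2.createTrackbar(tbar_channel_select_name, win_debug_name, 0, 2, lambda x: None)
cv2.createTrackbar(tbar_block_size_name, win_debug_name, 11, 101, lambda x: None)
cv2.createTrackbar(tbar_thresh_name, win_debug_name, 2, 50, lambda x: None)

cap = cv2.VideoCapture(0)
while cv2.waitKey(1) & 0xFF != ord('q'):
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow(win_debug_name, process_frame(frame))
cap.release()
cv2.destroyAllWindows()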
Example #26
    def update(dummy=None):
        #print "dummy", dummy
        
        if logmode:
            ii = img.logged
        else:
            ii = img.norm
            
        sz=5
        scalefact = cv2.getTrackbarPos('scale', 'SelectROI')
        iters = cv2.getTrackbarPos('erode iters', 'SelectROI')
        bright = cv2.getTrackbarPos('bright', 'SelectROI')

        rszimg = cv2.resize(ii, (0,0), fx=1.0/scalefact, fy=1.0/scalefact)

        rszimg = rszimg + rszimg * (bright/10.0)

        st = cv2.getStructuringElement(cv2.MORPH_DILATE, (sz, sz))
        res = cv2.morphologyEx(rszimg, cv2.MORPH_DILATE, st, iterations=iters)

        #print res
        #print pt1
        #print pt2
        
        cv2.rectangle(res, pt1, pt2, color=(1.0,1.0,1.0), thickness=1)
        
        cv2.imshow('SelectROI', res)
        
        img.roi = ii[pt1[1]*scalefact:pt2[1]*scalefact, pt1[0]*scalefact:pt2[0]*scalefact]
        cv2.imshow('SelectedROI', img.roi)
def main():
    # create a black image
    img = np.zeros((300, 512, 3), np.uint8)
    cv2.namedWindow('image')

    cv2.createTrackbar('R', 'image', 0, 255, nothing)
    cv2.createTrackbar('G', 'image', 0, 255, nothing)
    cv2.createTrackbar('B', 'image', 0, 255, nothing)

    switch = '0:OFF\n1:ON'
    cv2.createTrackbar(switch, 'image', 0, 1, nothing)

    while (1):
        cv2.imshow('image', img)
        k = cv2.waitKey(1)
        if k == ord('q'):  # press q to quit
            break

        r = cv2.getTrackbarPos('R', 'image')
        g = cv2.getTrackbarPos('G', 'image')
        b = cv2.getTrackbarPos('B', 'image')
        s = cv2.getTrackbarPos(switch, 'image')

        if s == 0:
            img[:] = 0
        else:
            img[:] = [b, g, r]
    cv2.destroyAllWindows()
Example #28
def simple_one():
    def nothing(x):
        print(x)

    # Create a black image, a window
    img = numpy.zeros((300,512,3), numpy.uint8)
    cv2.namedWindow('image')

    # create trackbars for color change
    cv2.createTrackbar('R','image',0,255,nothing)
    cv2.createTrackbar('G','image',0,255,nothing)
    cv2.createTrackbar('B','image',0,255,nothing)

    # create switch for ON/OFF functionality
    switch = 'ON / OFF'
    cv2.createTrackbar(switch, 'image',0,1,nothing)

    while(1):
        cv2.imshow('image',img)
        k = cv2.waitKey(1) & 0xFF
        # escape key
        if k == 27:
            break

        # get current positions of four trackbars
        r = cv2.getTrackbarPos('R','image')
        g = cv2.getTrackbarPos('G','image')
        b = cv2.getTrackbarPos('B','image')
        s = cv2.getTrackbarPos(switch,'image')

        if s == 1:
            img[:] = 0
        else:
            img[:] = [b,g,r]
Example #29
    def update(dummy=None):

        c0=0.0
        c1 = cv2.getTrackbarPos('blue', 'AdjustColor') /1024.
        c2 = cv2.getTrackbarPos('green', 'AdjustColor') /1024.
        c3 = cv2.getTrackbarPos('red', 'AdjustColor') /1024.
        c4=1024.0/1024.
        
    #def updatePic():
        #r = np.array([colorfunctR(c0,c1,c2,c3,c4,d) for d in img.roi.flat]).reshape(img.roi.shape)
        #g = np.array([colorfunctG(c0,c1,c2,c3,c4,d) for d in img.roi.flat]).reshape(img.roi.shape)
        #b = np.array([colorfunctB(c0,c1,c2,c3,c4,d) for d in img.roi.flat]).reshape(img.roi.shape)
        
        #print c0, c1, c2, c3, c4
        r, g, b = colorfunctArr(c0,c1,c2,c3,c4,img.roi)        
        
        #print np.shape(img.roi)
        #print np.shape(r)
        #print r
    
        #print img.roi
        #print np.shape(img.roi)
        #print type(img.roi)
        #print img.roi.dtype
        #print np.max(img.roi), np.min(img.roi)      
        
        color = cv2.merge([b,g,r, np.array(img.roi, dtype=np.float64)])*255
        color = np.array(color, dtype=np.uint8)
        #print color
        #print np.shape(color)
        #print type(color)
        #print color.dtype
        #print np.max(color), np.min(color)
        cv2.imshow('AdjustColor', color)
        img.color = color   
Example #30
def onBlockSize(pos):
    if pos % 2 != 0:
        thres = cv2.getTrackbarPos("threshold", "image")
        max_value = cv2.getTrackbarPos("maxValue", "image")
        cte = cv2.getTrackbarPos("cte", "image")
        if thres == 0:
            img2 = cv2.adaptiveThreshold(img, max_value, cv2.ADAPTIVE_THRESH_MEAN_C,
                                         cv2.THRESH_BINARY, pos, cte)
        else:
            img2 = cv2.adaptiveThreshold(img, max_value, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                         cv2.THRESH_BINARY, pos, cte)
        cv2.imshow("image", img2)
cv2.resizeWindow("HSV",640,240)
#cv2.namedWindow("HSV",cv2.WINDOW_NORMAL)
cv2.createTrackbar("HUE Min","HSV",0,179,empty)
cv2.createTrackbar("HUE Max","HSV",179,179,empty)
cv2.createTrackbar("SAT Min","HSV",0,255,empty)
cv2.createTrackbar("SAT Max","HSV",255,255,empty)
cv2.createTrackbar("Value Min","HSV",0,255,empty)
cv2.createTrackbar("Value Max","HSV",0,255,empty)



while True:
    _,img= cap.read()
    imgHsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)

    h_min = cv2.getTrackbarPos("HUE Min","HSV")
    h_max = cv2.getTrackbarPos("HUE Max","HSV")
    s_min = cv2.getTrackbarPos("SAT Min","HSV")
    s_max = cv2.getTrackbarPos("SAT Max","HSV")
    v_min = cv2.getTrackbarPos("Value Min","HSV")
    v_max = cv2.getTrackbarPos("Value Max","HSV")
    #print(h_min)


    lower = np.array([ h_min ,s_min , v_min])
    upper = np.array([ h_max, s_max , v_max])
    mask = cv2.inRange(imgHsv,lower,upper)
    result = cv2.bitwise_and(img,img,mask=mask)
    mask=cv2.cvtColor(mask,cv2.COLOR_GRAY2BGR)
    imgGen = cv2.dilate(result,kernel,iterations=4)
    imgDar = cv2.erode(result,kernel,iterations=4)
Example #32
cv2.createTrackbar('alpha', 'blend', 0, 10, nothing)

# FLIR Camera
cap1 = cv2.VideoCapture(0)
cap1.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
cap1.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)

# Raspberry Pi Camera
cap2 = cv2.VideoCapture(1)
cap2.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
cap2.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
#cap2.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH,  640)
#cap2.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

while True:
    alpha = cv2.getTrackbarPos('alpha', 'blend')
    ret, img1 = cap1.read()
    ret, img2 = cap2.read()

    rows, cols = img1.shape[:2]
    resize = cv2.resize(img1, (4 * cols, 4 * rows),
                        interpolation=cv2.INTER_CUBIC)
    #resize = cv2.resize(img1, (8*cols, 8*rows), interpolation = cv2.INTER_CUBIC)    # 640x480

    cv2.resizeWindow('blend', 320, 240)
    #cv2.resizeWindow('blend', 640, 480)    # 640x480
    cam_alpha = float(alpha) / 10
    flir_alpha = float(10 - alpha) / 10
    dst = cv2.addWeighted(img2, cam_alpha, resize, flir_alpha, 0)
    cv2.imshow("blend", dst)
Example #33
    def run(self):
        if self.debug:
            cv.namedWindow('image')
            cv.moveWindow('image', 100, 100)

            cv.createTrackbar('p_lower', 'image', 0, 50, self.nothing)
            cv.createTrackbar('p_upper', 'image', 50, 100, self.nothing)

            cv.createTrackbar('th_p_lower', 'image', 0, 255, self.nothing)
            cv.createTrackbar('th_p_upper', 'image', 0, 255, self.nothing)

            cv.setTrackbarPos('p_lower', 'image', self.p_lower_nth_default)
            cv.setTrackbarPos('p_upper', 'image', self.p_upper_nth_default)
            cv.setTrackbarPos('th_p_lower', 'image', self.p_lower_default)
            cv.setTrackbarPos('th_p_upper', 'image', self.p_upper_default)

        p_upper = self.p_upper_nth_default
        p_lower = self.p_lower_nth_default

        th_p_lower = self.p_lower_default
        th_p_upper = self.p_upper_default
        previous_ev = self.exposure_default

        while not rospy.is_shutdown():
            if self.image is None:
                continue
            pre_process_bgr = self.pre_processing(self.image)
            gray = cv.cvtColor(pre_process_bgr, cv.COLOR_BGR2GRAY)

            current_ev = self.get_exposure()

            if current_ev is None:
                ev = previous_ev
            else:
                ev = current_ev

            if self.debug:
                p_lower = cv.getTrackbarPos('p_lower', 'image')
                p_upper = cv.getTrackbarPos('p_upper', 'image')

                th_p_lower = cv.getTrackbarPos('th_p_lower', 'image')
                th_p_upper = cv.getTrackbarPos('th_p_upper', 'image')

                cv.imshow('gray', gray)
                cv.imshow('image', self.image)
                cv.imshow('pre_process_bgr', pre_process_bgr)

                k = cv.waitKey(1) & 0xff
                if k == ord('q'):
                    break
                histr = cv.calcHist([gray], [0], None, [256], [0, 256])
                plt.plot(histr, color='blue')

                plt.pause(0.00001)
                plt.clf()

            current_p_lower = self.stat.get_percentile(gray, p_lower)
            current_p_upper = self.stat.get_percentile(gray, p_upper)

            print('==' * 20)
            print(th_p_lower, th_p_upper, ev)
            print(current_p_lower, current_p_upper, ev)

            if current_p_lower < th_p_lower:
                self.set_param(ev + 0.04)
            if current_p_upper > th_p_upper:
                self.set_param(ev - 0.04)

            previous_ev = current_ev

        if self.debug:
            plt.close()
            cv.destroyAllWindows()
Example #34
        approx = cv2.approxPolyDP(contour, 0.02 * perimeter, True)

        x, y, w, h = cv2.boundingRect(approx)

        cv2.rectangle(imageContours, (x, y), (x + w, y + h), (0, 255, 0), 1)

        cv2.putText(imageContours, 'Area: ' + str(area), (x, y + h + 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 1)
        cv2.putText(imageContours, 'Perimeter: ' + str(perimeter),
                    (x, y + h + 40), cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                    (0, 255, 0), 1)
        cv2.putText(imageContours, 'Circularity: ' + str(circularity),
                    (x, y + h + 60), cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                    (0, 255, 0), 1)


image = cv2.imread('hex.png')
imgContour = image.copy()
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

threshold1 = cv2.getTrackbarPos('Threshold1', 'Parameters')
threshold2 = cv2.getTrackbarPos('Threshold2', 'Parameters')

image = cv2.Canny(image, threshold1, threshold2)

getContours(image, imgContour)

cv2.imshow('imgContour', imgContour)

cv2.waitKey()
cv2.destroyAllWindows()
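
The Canny thresholds above are read from a 'Parameters' window that this excerpt never creates; a minimal sketch of the assumed trackbar setup (ranges and defaults are guesses):

import cv2

cv2.namedWindow('Parameters')
cv2.createTrackbar('Threshold1', 'Parameters', 100, 255, lambda x: None)
cv2.createTrackbar('Threshold2', 'Parameters', 200, 255, lambda x: None)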
# -- trackbar L1
cv.createTrackbar('L1', "Imagem_final", 200, image.shape[0], nothing)

# -- trackbar L2
cv.createTrackbar('L2', "Imagem_final", 400, image.shape[0], nothing)

# -- trackbar d
cv.createTrackbar('d', "Imagem_final", 10, 100, nothing)

# -- Create the weighting array
x = np.arange(-image.shape[0]/2, (image.shape[0]/2), dtype=np.float32)

while(1):
    # -- Read the trackbar values
    l1 = cv.getTrackbarPos('L1', "Imagem_final") - image.shape[0]/2
    l2 = cv.getTrackbarPos('L2', "Imagem_final") - image.shape[0]/2
    d = cv.getTrackbarPos('d', "Imagem_final")

    alfa = (np.tanh((x - l1) / d) - np.tanh((x - l2) / d)) / 2

    # -- Build the alpha and inverted-alpha weight images
    alfa_image = np.float32(image.copy())

    for i in range(0, image.shape[0]):
        alfa_image[i, :] = alfa[i]

    inv_alfa_image = 1 - alfa_image

    # -- Blur the original image
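    # (continuation sketch: the rest of this example is cut off here, so the blur and
    # blend below are assumptions consistent with the alpha weights computed above)
    blurred = np.float32(cv.GaussianBlur(image, (11, 11), 0))
    blended = alfa_image * np.float32(image) + inv_alfa_image * blurred
    cv.imshow("Imagem_final", np.uint8(blended))
    if cv.waitKey(1) & 0xFF == 27:
        break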
Example #36
    curr_max = mimax['MAX']

    if first:
        my_min_global = 326 + img_min
        my_max_global = 580 + img_min
        first = False
    if last_i != i:
        #print(i, filenames[i])
        cv2.destroyWindow('options')
        cv2.namedWindow('options')
        cv2.createTrackbar('min', 'options', 0, img_max - img_min, nothing)
        cv2.createTrackbar('max', 'options', img_max - img_min,
                           img_max - img_min, nothing)
        cv2.setTrackbarPos('min', 'options', my_min_global - img_min)
        cv2.setTrackbarPos('max', 'options', my_max_global - img_min)
    my_min = cv2.getTrackbarPos('min', 'options')
    my_max = cv2.getTrackbarPos('max', 'options')
    my_min_global = curr_min
    my_max_global = curr_max

    # for i in range(img_min,img_max,0.01*(img-max-img_min)):
    # 	my_min=i
    # 	my_max=i
    #print(my_min_global, my_max_global)

    img = np.maximum(img, my_min + img_min)
    img = np.minimum(img, my_max + img_min)
    img = (img - np.min(img)) / (np.max(img) - np.min(img))

    # img=anisodiff(img)
    def find_hsv_values(self):
        # optional argument for trackbars

        # named items for easy reference
        barsWindow = 'Bars'
        hl = 'H Low'
        hh = 'H High'
        sl = 'S Low'
        sh = 'S High'
        vl = 'V Low'
        vh = 'V High'

        # set up for video capture on camera 0

        # create window for the slidebars
        cv2.namedWindow(barsWindow, flags=cv2.WINDOW_AUTOSIZE)

        # create the sliders
        cv2.createTrackbar(hl, barsWindow, 0, 179, nothing)
        cv2.createTrackbar(hh, barsWindow, 0, 179, nothing)
        cv2.createTrackbar(sl, barsWindow, 0, 255, nothing)
        cv2.createTrackbar(sh, barsWindow, 0, 255, nothing)
        cv2.createTrackbar(vl, barsWindow, 0, 255, nothing)
        cv2.createTrackbar(vh, barsWindow, 0, 255, nothing)

        # set initial values for sliders
        cv2.setTrackbarPos(hl, barsWindow, 0)
        cv2.setTrackbarPos(hh, barsWindow, 179)
        cv2.setTrackbarPos(sl, barsWindow, 0)
        cv2.setTrackbarPos(sh, barsWindow, 255)
        cv2.setTrackbarPos(vl, barsWindow, 0)
        cv2.setTrackbarPos(vh, barsWindow, 255)

        while (True):
            frame = self.cv_image
            frame = cv2.GaussianBlur(frame, (5, 5), 0)

            # convert to HSV from BGR
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

            # read trackbar positions for all
            hul = cv2.getTrackbarPos(hl, barsWindow)
            huh = cv2.getTrackbarPos(hh, barsWindow)
            sal = cv2.getTrackbarPos(sl, barsWindow)
            sah = cv2.getTrackbarPos(sh, barsWindow)
            val = cv2.getTrackbarPos(vl, barsWindow)
            vah = cv2.getTrackbarPos(vh, barsWindow)

            # make array for final values
            HSVLOW = np.array([hul, sal, val])
            HSVHIGH = np.array([huh, sah, vah])

            # apply the range on a mask
            mask = cv2.inRange(hsv, HSVLOW, HSVHIGH)
            maskedFrame = cv2.bitwise_and(frame, frame, mask=mask)

            # display the camera and masked images
            cv2.imshow('Masked', maskedFrame)
            cv2.imshow('Camera', frame)

            # check for q to quit program with 5ms delay
            if cv2.waitKey(5) & 0xFF == ord('q'):
                break

        # clean up our resources
        cv2.destroyAllWindows()
Example #38
def nothing(x):
    pass


cap = cv2.VideoCapture(0)

face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

cv2.namedWindow("Frame")
cv2.createTrackbar("Neighbours", "Frame", 5, 20, nothing)

while True:
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    neighbours = cv2.getTrackbarPos("Neighbours", "Frame")

    faces = face_cascade.detectMultiScale(gray, 1.3, neighbours)
    for rect in faces:
        (x, y, w, h) = rect
        frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow("Frame", frame)

    key = cv2.waitKey(1)
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()
Example #39
cv2.createTrackbar('r_e', 'panel2', 400, 655, fun)
cv2.createTrackbar('c_s', 'panel2', 0, 655, fun)
cv2.createTrackbar('c_e', 'panel2', 400, 655, fun)
cap = cv2.VideoCapture(0)
mov = cv2.VideoCapture(
    'C:\\Users\\tusha\\Desktop\\Predestination (2014) - Polygon Movies.mp4')
st = 1
while True:
    _, img = cap.read()
    _, back = mov.read()
    if st == 1:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('C:\\Users\\tusha\\Desktop\\video.avi', fourcc,
                              20.0, (img.shape[1], img.shape[0]))
    st = 9999
    rs = cv2.getTrackbarPos('r_s', 'panel2')
    re = cv2.getTrackbarPos('r_e', 'panel2')
    cs = cv2.getTrackbarPos('c_s', 'panel2')
    ce = cv2.getTrackbarPos('c_e', 'panel2')
    img = img[rs:re, cs:ce]
    new_back = back[rs:re, cs:ce]
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    l_h = cv2.getTrackbarPos('l_h', 'panel')
    l_s = cv2.getTrackbarPos('l_s', 'panel')
    l_v = cv2.getTrackbarPos('l_v', 'panel')
    h_h = cv2.getTrackbarPos('h_h', 'panel')
    h_s = cv2.getTrackbarPos('h_s', 'panel')
    h_v = cv2.getTrackbarPos('h_v', 'panel')

    lower = (l_h, l_s, l_v)
    upper = (h_h, h_s, h_v)
Example #40
def calibrate():

    #cam = cv2.VideoCapture(1)

    global imCalRGB
    global new_image
    global image_proc_img
    global imCalRGBorig
    global intersectp
    global center_dartboard
    global points

    #imCalRGB = cv2.imread("/Users/Hannes/Desktop/Darts/Dartboard_2.png")
    #imCalRGB = cv2.imread("frame1.jpg")
    #success,imCalRGB = cam.read() #cam
    cv2.imwrite("frame1.jpg", imCalRGB)  # save calibration frame

    global calibrationComplete
    calibrationComplete = False

    while not calibrationComplete:
        #Read calibration file, if exists
        if os.path.isfile("calibrationData.pkl"):
            try:
                # ToDo: adapt system to automatic calibration data
                #start a fresh set of points
                points = []

                calFile = open('calibrationData.pkl', 'rb')
                calData = CalibrationData()
                calData = pickle.load(calFile)
                #load the data into the global variables
                transformation_matrix = calData.transformationMatrix
                center_dartboard = calData.center_dartboard
                ring_radius = []
                ring_radius.append(calData.ring_radius[0])
                ring_radius.append(calData.ring_radius[1])
                ring_radius.append(calData.ring_radius[2])
                ring_radius.append(calData.ring_radius[3])
                ring_radius.append(calData.ring_radius[4])
                ring_radius.append(
                    calData.ring_radius[5])  #append the 6 ring radii
                #close the file once we are done reading the data
                calFile.close()

                #copy image for old calibration data
                new_image = imCalRGB.copy()

                #now draw them out:
                height, width = imCalRGB.shape[:2]

                # get a fresh new image
                new_image = imCalRGB.copy()

                heightnew, widthnew = imCalRGB.shape[:2]

                new_image = cv2.warpPerspective(imCalRGBorig,
                                                transformation_matrix,
                                                (800, 800))
                # cv.WarpPerspective(imCalRGB,new_image,mapping)
                cv2.imshow(winName4, new_image)

                cv2.circle(
                    new_image,
                    (int(center_dartboard[0]), int(center_dartboard[1])),
                    ring_radius[0], (0, 255, 0), 1)  # outside double
                cv2.circle(
                    new_image,
                    (int(center_dartboard[0]), int(center_dartboard[1])),
                    ring_radius[1], (0, 255, 0), 1)  # inside double
                cv2.circle(
                    new_image,
                    (int(center_dartboard[0]), int(center_dartboard[1])),
                    ring_radius[2], (0, 255, 0), 1)  # outside treble
                cv2.circle(
                    new_image,
                    (int(center_dartboard[0]), int(center_dartboard[1])),
                    ring_radius[3], (0, 255, 0), 1)  # inside treble
                cv2.circle(
                    new_image,
                    (int(center_dartboard[0]), int(center_dartboard[1])),
                    ring_radius[4], (0, 255, 0), 1)  # 25
                cv2.circle(
                    new_image,
                    (int(center_dartboard[0]), int(center_dartboard[1])),
                    ring_radius[5], (0, 255, 0), 1)  # Bulls eye

                # 20 sectors...
                sectorangle = 2 * math.pi / 20
                i = 0
                while (i < 20):
                    cv2.line(
                        new_image,
                        (int(center_dartboard[0]), int(center_dartboard[1])),
                        (int(center_dartboard[0] +
                             170 * 2 * math.cos((0.5 + i) * sectorangle)),
                         int(center_dartboard[1] +
                             170 * 2 * math.sin((0.5 + i) * sectorangle))),
                        (0, 255, 0), 1)
                    i = i + 1

                cv2.imshow(winName4, new_image)

                test = cv2.waitKey(0)
                if test == 13:
                    cv2.destroyAllWindows()
                    #we are good with the previous calibration data
                    calibrationComplete = True
                else:
                    cv2.destroyAllWindows()
                    calibrationComplete = True
                    #delete the calibration file and start over
                    os.remove("calibrationData.pkl")

            #corrupted file
            except EOFError as err:
                print(err)

        else:
            # ToDo: remove manual calibration and adapt system to automatic calibration data
            # create new image for imageprocessing
            # image_proc_img = new_image.copy()
            image_proc_img = imCalRGB.copy()
            # call image processing function
            imagproccalib()

            height, width = imCalRGB.shape[:2]

            new_center = (400, 400)

            # raw_loc_mat = np.zeros((height, width))
            if DEBUG:
                #cv2.namedWindow('image')
                cv2.namedWindow('image', cv2.WINDOW_NORMAL)
                # create trackbars for color change
                cv2.createTrackbar('cx', 'image', 0, 20, nothing)
                cv2.createTrackbar('cy', 'image', 0, 20, nothing)

                cv2.createTrackbar('tx1', 'image', 0, 20, nothing)
                cv2.createTrackbar('ty1', 'image', 0, 20, nothing)

                cv2.createTrackbar('tx2', 'image', 0, 20, nothing)
                cv2.createTrackbar('ty2', 'image', 0, 20, nothing)

                cv2.createTrackbar('tx3', 'image', 0, 20, nothing)
                cv2.createTrackbar('ty3', 'image', 0, 20, nothing)

                cv2.createTrackbar('tx4', 'image', 0, 20, nothing)
                cv2.createTrackbar('ty4', 'image', 0, 20, nothing)

                cv2.setTrackbarPos('cx', 'image', 10)
                cv2.setTrackbarPos('cy', 'image', 10)

                cv2.setTrackbarPos('tx1', 'image', 10)
                cv2.setTrackbarPos('ty1', 'image', 10)

                cv2.setTrackbarPos('tx2', 'image', 10)
                cv2.setTrackbarPos('ty2', 'image', 10)

                cv2.setTrackbarPos('tx3', 'image', 10)
                cv2.setTrackbarPos('ty3', 'image', 10)

                cv2.setTrackbarPos('tx4', 'image', 10)
                cv2.setTrackbarPos('ty4', 'image', 10)

                # create switch for ON/OFF functionality
                switch = '0 : OFF \n1 : ON'
                cv2.createTrackbar(switch, 'image', 0, 1, nothing)

                while (1):
                    cv2.imshow('image', new_image)
                    k = cv2.waitKey(1) & 0xFF
                    if k == 27:
                        break

                    # get current positions of four trackbars
                    cx = cv2.getTrackbarPos('cx', 'image') - 10
                    cy = cv2.getTrackbarPos('cy', 'image') - 10
                    tx1 = cv2.getTrackbarPos('tx1', 'image') - 10
                    ty1 = cv2.getTrackbarPos('ty1', 'image') - 10
                    tx2 = cv2.getTrackbarPos('tx2', 'image') - 10
                    ty2 = cv2.getTrackbarPos('ty2', 'image') - 10
                    tx3 = cv2.getTrackbarPos('tx3', 'image') - 10
                    ty3 = cv2.getTrackbarPos('ty3', 'image') - 10
                    tx4 = cv2.getTrackbarPos('tx4', 'image') - 10
                    ty4 = cv2.getTrackbarPos('ty4', 'image') - 10
                    s = cv2.getTrackbarPos(switch, 'image')

                    if s == 0:
                        new_image[:] = 0
                    else:
                        # transform the image to form a perfect circle
                        transformation_matrix = transformation(
                            new_center, tx1, ty1, tx2, ty2, tx3, ty3, tx4, ty4)

            else:
                transformation_matrix = transformation(new_center, 3, -1, 4,
                                                       -3, 0, 0, 1, 5)

            cv2.destroyAllWindows()

            print "The dartboard image has now been normalized."
            print ""

            cv2.imshow(winName4, new_image)
            cv2.setMouseCallback(winName4, on_mouse_new)
            test = cv2.waitKey(0)
            if test == 13:
                cv2.destroyWindow(winName4)
                cv2.destroyAllWindows()

            ## sectors are sometimes different -> make accessible
            ring_radius = [7 * 2, 16 * 2, 97 * 2, 107 * 2, 160 * 2, 170 * 2]

            # time.sleep(5)
            # cv2.destroyWindow(winName)
            #save valuable calibration data into a structure
            calData = CalibrationData()
            calData.transformationMatrix = transformation_matrix
            calData.center_dartboard = new_center
            calData.ring_radius = ring_radius

            #write the calibration data to a file
            calFile = open("calibrationData.pkl", "wb")
            pickle.dump(calData, calFile, 0)
            calFile.close()

            calibrationComplete = True

    cv2.destroyAllWindows()
#create trackbar, called H, in window image, w/ range (0,255), pass nothing
#create sample color boxes
tL=(10,10)
bR=(wd//2-10,ht-10)

tL2=(wd//2+10,10)
bR2=(wd-10,ht-10)


while(1):

	#controls setup / plotting
	cv2.imshow('image',canvas)
		#track set 1 - lower
	h_L = cv2.getTrackbarPos('H lower','image')
	s_L = cv2.getTrackbarPos('S lower','image')
	v_L = cv2.getTrackbarPos('V lower','image')    
	hsvL=np.uint8([[[h_L,s_L,v_L]]])   #trans to BGR
	bgrL=cv2.cvtColor(hsvL,cv2.COLOR_HSV2BGR)
	b_L=int(bgrL[0][0][0])
	g_L=int(bgrL[0][0][1])
	r_L=int(bgrL[0][0][2])
		#track set 1 - upper
	h_U = cv2.getTrackbarPos('H upper','image')
	s_U = cv2.getTrackbarPos('S upper','image')
	v_U = cv2.getTrackbarPos('V upper','image')    
	hsvU=np.uint8([[[h_U,s_U,v_U]]])   #trans to BGR
	bgrU=cv2.cvtColor(hsvU,cv2.COLOR_HSV2BGR)
	b_U=int(bgrU[0][0][0])
	g_U=int(bgrU[0][0][1])
Example #42
cv2.namedWindow('image')
cap = cv2.VideoCapture(0)
ret = cap.set(3, 320)
ret = cap.set(4, 240)
lines = [[[1, 2, 100, 200]]]
cv2.createTrackbar('R', 'image', 427, 1000, nothing)
cv2.createTrackbar('G', 'image', 65, 1000, nothing)
cv2.createTrackbar('B', 'image', 20, 50, nothing)
cv2.createTrackbar('T', 'image', 17, 50, nothing)
cv2.createTrackbar('U', 'image', 2, 10, nothing)
while (1):
    minLineLength = 100
    maxLineGap = 10

    r = cv2.getTrackbarPos('R', 'image')
    g = cv2.getTrackbarPos('G', 'image')
    b = cv2.getTrackbarPos('B', 'image')
    t = cv2.getTrackbarPos('T', 'image')
    u = cv2.getTrackbarPos('U', 'image')

    # Take each frame
    _, frame = cap.read()
    #    frame = cv2.imread('gradient2.jpg')
    # Convert BGR to HSV
    #    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #    th3 = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
    gray = cv2.GaussianBlur(gray, (3, 3), 0)
    equ = cv2.equalizeHist(gray)
Example #43
    # black_image1 = cv2.dilate(black_image1,Kernel_morp_use,iterations = iterations)
    cv2.imshow("image_Kuy", black_image1)
    # print(hie)
    add_point_to_list(hie.tolist()[0])
    # print(list_contours_box_symbol)
    # print("Kuy")
    print(len(contour1))

    contour_use = []
    list_contours_path = []
    list_contours_box_symbol = []
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break
    Kernel_blur = (
        (cv2.getTrackbarPos("Kernel_blur", "image_blur") + 1) * 2) - 1

    Canny_Thres_1 = cv2.getTrackbarPos("Canny_Thres_1", "image_edge")
    Canny_Thres_2 = cv2.getTrackbarPos("Canny_Thres_2", "image_edge")

    Kernel_morp = (
        (cv2.getTrackbarPos("Kernel_morp", "image_morp") + 1) * 2) - 1
    Mode_morp = cv2.getTrackbarPos("Mode_morp", "image_morp")
    iterations = cv2.getTrackbarPos("iterations", "image_morp")

    minArea = cv2.getTrackbarPos("minArea", "image_contour")
    maxArea = cv2.getTrackbarPos("maxArea", "image_contour")

    data = {}
    data['Parameter'] = []
    data['Parameter'].append({
def nothing(x):
	pass
desktop = (1920, 1080) #max resolution
orig = cv2.imread('blackmarble_8mb.jpg') #blackmarble_8mb.jpg
#resized = orig.copy()
resized = cv2.resize(orig, (desktop[0],
							desktop[1]))
cv2.namedWindow('slider') #Make the trackbar used for HSV masking 
cv2.createTrackbar('hue_up','slider',180,180,nothing)
cv2.createTrackbar('hue_low','slider',0,180,nothing)
cv2.createTrackbar('sat_up','slider',255,255,nothing)
cv2.createTrackbar('sat_low','slider',0,255,nothing)
cv2.createTrackbar('bright_up','slider',255,255,nothing)
cv2.createTrackbar('bright_low','slider',0,255,nothing)

while True: 
	hue_low = cv2.getTrackbarPos('hue_low', 'slider') 	
	hue_up = cv2.getTrackbarPos('hue_up', 'slider')
	sat_low = cv2.getTrackbarPos('sat_low', 'slider') 	
	sat_up = cv2.getTrackbarPos('sat_up', 'slider')
	bright_low = cv2.getTrackbarPos('bright_low', 'slider') 	
	bright_up = cv2.getTrackbarPos('bright_up', 'slider')
	lower = np.array([hue_low, sat_low, bright_low])
	upper = np.array([hue_up, sat_up, bright_up])

	mask = cv2.cvtColor(resized, cv2.COLOR_BGR2HSV) #convert to hsv
	mask = cv2.inRange(mask, lower, upper) #threshold 
	overlay = cv2.bitwise_and(resized, resized, mask=mask) #replace dark pixels
	#hsv = cv2.bitwise_and(hsv, hsv, mask=mask) 
	
	cv2.imshow('black_marble', overlay)
	cv2.imshow('mask', mask)
Example #45
    # 1.fast Fourier transform
    rows, cols = img_gray.shape[:2]
    img_fft = stdFftImage(img_gray, rows, cols)
    amplitude, _ = graySpectrum(img_fft)
    minValue, maxValue, minLoc, maxLoc = cv2.minMaxLoc(
        amplitude
    )  # The maximum value of the spectrum after centralization is at the center of the image

    cv2.namedWindow("tracks")
    max_radius = np.sqrt(pow(rows, 2) + pow(cols, 2))
    cv2.createTrackbar("Radius", "tracks", 0, int(max_radius), nothing)
    cv2.createTrackbar("Filter type", "tracks", 0, 2, nothing)

    while True:
        # 2.Construction of high pass filter
        radius = cv2.getTrackbarPos("Radius", "tracks")
        lpType = cv2.getTrackbarPos("Filter type", "tracks")
        nrows, ncols = img_fft.shape[:2]
        # x, y = int(ncols / 2), int(nrows / 2)  # Notice here are the coordinates
        # ilpFilter = createHPFilter(img_fft.shape, (x, y), radius, lpType)
        ilpFilter = createHPFilter(img_fft.shape, maxLoc, radius, lpType)

        # 3.High pass filter
        img_filter = ilpFilter * img_fft

        _, gray_spectrum = graySpectrum(
            img_filter)  # Observe the change of the filter

        # 4. Inverse Fourier transform: take the real part, crop, and decentralize
        img_ift = cv2.dft(img_filter,
                          flags=cv2.DFT_INVERSE + cv2.DFT_REAL_OUTPUT +
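        # (The dft call above is cut off by the excerpt; the usual completion
        #  is "+ cv2.DFT_SCALE)", followed by multiplying by (-1)**(r + c)
        #  again to undo the centering before displaying the result.)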
Exemple #46
            ord('a'): 'prev_frame',
            ord('A'): 'prev_frame',
            ord('d'): 'next_frame',
            ord('D'): 'next_frame',
            ord('q'): 'slow',
            ord('Q'): 'slow',
            ord('e'): 'fast',
            ord('E'): 'fast',
            ord('c'): 'snap',
            ord('C'): 'snap',
            -1: status,
            27: 'exit'
        }[cv2.waitKey(10)]
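        # cv2.waitKey returns -1 when no key is pressed, so the -1 entry keeps
        # the current status unchanged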

        if status == 'play':
            frame_rate = cv2.getTrackbarPos('F', 'image')
            sleep(max(0.0, 0.1 - frame_rate / 1000.0))
            i += 1
            cv2.setTrackbarPos('S', 'image', i)
            continue
        if status == 'stay':
            i = cv2.getTrackbarPos('S', 'image')
        if status == 'exit':
            break
        if status == 'prev_frame':
            i -= 1
            cv2.setTrackbarPos('S', 'image', i)
            status = 'stay'
        if status == 'next_frame':
            i += 1
            cv2.setTrackbarPos('S', 'image', i)
}

while True:

    ret, frame = cap.read()
    #frame = cv2.GaussianBlur(frame, (5,5), 3)
    # drawing the corner points of the frame
    for i in corners:
        cv2.circle(frame, corners[i], 5, (0, 0, 255), -1)

    # convert the image to HSV to allow InRange to work on it
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # read the trackbar values set by manual adjustment
    lh = cv2.getTrackbarPos("L-H", "Trackbars")
    ls = cv2.getTrackbarPos("L-S", "Trackbars")
    lv = cv2.getTrackbarPos("L-V", "Trackbars")
    uh = cv2.getTrackbarPos("U-H", "Trackbars")
    us = cv2.getTrackbarPos("U-S", "Trackbars")
    uv = cv2.getTrackbarPos("U-V", "Trackbars")

    # Thresholding the image to obtain the mask
    mask = cv2.inRange(hsv, np.array([lh, ls, lv]), np.array([uh, us, uv]))
    #cv2.imshow('Mask', mask)

    # performing bitwise AND operation to layer mask over frame
    #final = cv2.bitwise_and(frame, frame, mask = mask)
    #cv2.imshow('Final', final)

    # utilise the mask to get contours and plot them on frame
Exemple #48
    waitTime = 330
else:
    img = cv2.imread(sys.argv[1])
    output = img
    waitTime = 33

while (1):

    if useCamera:
        # Load a static test image instead of grabbing a camera frame
        img = cv2.imread('C:/Users/hung phung/Desktop/Captureeeeeee.png')
        img = imutils.resize(img, width=600)
        output = img

    # get current positions of all trackbars
    hMin = cv2.getTrackbarPos('HMin', 'image')
    sMin = cv2.getTrackbarPos('SMin', 'image')
    vMin = cv2.getTrackbarPos('VMin', 'image')

    hMax = cv2.getTrackbarPos('HMax', 'image')
    sMax = cv2.getTrackbarPos('SMax', 'image')
    vMax = cv2.getTrackbarPos('VMax', 'image')

    # Set minimum and max HSV values to display
    lower = np.array([hMin, sMin, vMin])
    upper = np.array([hMax, sMax, vMax])

    # Create HSV Image and threshold into a range.
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower, upper)
    output = cv2.bitwise_and(img, img, mask=mask)
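    # The excerpt stops before the display step; a typical continuation,
    # mirroring the pattern used by the other examples here:
    cv2.imshow('image', output)
    if cv2.waitKey(waitTime) & 0xFF == ord('q'):
        break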
# Create a tracking window so the user can tune the color range and extract it easily
cv2.namedWindow("Tracking")
cv2.createTrackbar("LH", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LS", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LV", "Tracking", 0, 255, nothing)
cv2.createTrackbar("UH", "Tracking", 255, 255, nothing)
cv2.createTrackbar("US", "Tracking", 255, 255, nothing)
cv2.createTrackbar("UV", "Tracking", 255, 255, nothing)

while True:
    #frame = cv2.imread('./img/smarties.png')
    _, frame = cap.read()

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # convert the BGR image to HSV

    l_h = cv2.getTrackbarPos("LH", "Tracking")
    l_s = cv2.getTrackbarPos("LS", "Tracking")
    l_v = cv2.getTrackbarPos("LV", "Tracking")

    u_h = cv2.getTrackbarPos("UH", "Tracking")
    u_s = cv2.getTrackbarPos("US", "Tracking")
    u_v = cv2.getTrackbarPos("UV", "Tracking")

    l_b = np.array([l_h,l_s,l_v])
    u_b = np.array([u_h,u_s,u_v])

    mask = cv2.inRange(hsv, l_b, u_b) # keep only the selected color range in the HSV image

    res = cv2.bitwise_and(frame, frame, mask=mask) # keep only the masked region of the frame

    cv2.imshow("frame", frame)
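    # The excerpt ends here; showing the mask and the masked result plus an
    # Esc check would complete the loop (a sketch, not part of the original):
    cv2.imshow("mask", mask)
    cv2.imshow("res", res)
    if cv2.waitKey(1) & 0xFF == 27:
        break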
Exemple #50
    pass


cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.resizeWindow('image', 500,300)

# Create trackbars for color change
cv2.createTrackbar('Hue', 'image', 0, 180, nothing)

cap = cv2.VideoCapture(0)
kernel = np.ones((5, 5), np.uint8)
while (True):
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break
    hue = cv2.getTrackbarPos('Hue', 'image')
    # Capture frame-by-frame
    ret, frame = cap.read()
    mask = color_pick(hue, frame)
    # cv2.imshow('mask', mask)
    opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    # cv2.imshow('opening', opening)
    image, contours, hierarchy = cv2.findContours(opening, cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_NONE)

    contours.sort(key=cv2.contourArea, reverse=True)
    print(len(contours))
    cv2.putText(frame, 'count =' + str(len(contours)), (0, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 1, cv2.LINE_AA)
    if len(contours) > 0:
        for i in range(len(contours)):
Exemple #51
import numpy as np
import cv2


# Callback function for the slider; it does nothing
# (OpenCV passes the trackbar position as an argument)
def do_nothing(x):
    return


img = cv2.imread(r"Video_1\openCV\Chrysanthemum.jpg")
cv2.imshow("Original", img)

cv2.namedWindow("Processed Image")
cv2.createTrackbar("Brightness", "Processed Image", 0, 255, do_nothing)

# Processing loop: polls the slider value every 30 ms
while True:
    brightness = cv2.getTrackbarPos("Brightness", "Processed Image")
    processedImage = cv2.add(img, (brightness, brightness, brightness, 0))
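    # cv2.add performs saturating addition (values clip at 255) instead of the
    # wrap-around you would get from plain NumPy "+" on uint8 images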
    cv2.imshow("Processed Image", processedImage)

    # Exit on any key press
    if cv2.waitKey(30) != -1:
        break

cv2.destroyAllWindows()
Exemple #52
cv2.createTrackbar('VLo','frame1',123,255,nothing)
# upper
cv2.createTrackbar('HUp','frame1',52,179,nothing)
cv2.createTrackbar('SUp','frame1',197,255,nothing)
cv2.createTrackbar('VUp','frame1',255,255,nothing)

cv2.createTrackbar('areaTrackbar','frame1',10000,50000,nothing)

while(1):

	# Take each frame
	_, frame = cap.read()
	frame = frame[0:340, 60:610]

	# get current positions of four trackbars
	hLo = cv2.getTrackbarPos('HLo','frame1')
	sLo = cv2.getTrackbarPos('SLo','frame1')
	vLo = cv2.getTrackbarPos('VLo','frame1')
	hUp = cv2.getTrackbarPos('HUp','frame1')
	sUp = cv2.getTrackbarPos('SUp','frame1')
	vUp = cv2.getTrackbarPos('VUp','frame1')

	areaTrackbar = cv2.getTrackbarPos('areaTrackbar','frame1')



	hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

	# define range of interested (tuned to pink) color in HSV
	lower = np.array([hLo,sLo,vLo])
	upper = np.array([hUp,sUp,vUp])
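	# The excerpt cuts off here; areaTrackbar above is read but never used in
	# the visible part. A sketch of how it would typically gate the detection
	# (assumption: keep only blobs larger than the slider value):
	mask = cv2.inRange(hsv, lower, upper)
	# [-2] picks the contour list under both the OpenCV 3 and 4 return conventions
	contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
	for c in contours:
		if cv2.contourArea(c) > areaTrackbar:
			x, y, w, h = cv2.boundingRect(c)
			cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
	cv2.imshow('frame1', frame)
	if cv2.waitKey(1) & 0xFF == 27:
		break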
Exemple #53
cv2.setTrackbarPos(sl, Window, 0)
cv2.setTrackbarPos(sh, Window, 255)
cv2.setTrackbarPos(vl, Window, 0)
cv2.setTrackbarPos(vh, Window, 255)

cap = cv2.VideoCapture(2)

while cap.isOpened():
    ret, frame = cap.read()
    # cv2.imshow('Original', frame)

    # frame = cv2.GaussianBlur(frame, (5,5), 0)
    frame = cv2.medianBlur(frame, 3)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    hul = cv2.getTrackbarPos(hl, Window)
    huh = cv2.getTrackbarPos(hh, Window)
    sal = cv2.getTrackbarPos(sl, Window)
    sah = cv2.getTrackbarPos(sh, Window)
    val = cv2.getTrackbarPos(vl, Window)
    vah = cv2.getTrackbarPos(vh, Window)

    # HSVLOW = np.array([hul, sal, val])
    # HSVHIGH = np.array([huh, sah, vah])

    # Laptop webcam
    # HSVLOW = np.array([144, 135, 0])
    # HSVHIGH = np.array([179, 255, 255])

    # Logitech webcam
    # HSVLOW = np.array([0, 108, 0])
            rects = find_faces(img2, face_model)

            for rect in rects:
                shape = detect_marks(img2, landmark_model, rect)
                mask = np.zeros(img2.shape[:2], dtype=np.uint8)
                mask, end_points_left = eye_on_mask(mask, left, shape)
                mask, end_points_right = eye_on_mask(mask, right, shape)
                mask = cv2.dilate(mask, kernel, 5)

                eyes = cv2.bitwise_and(img2, img2, mask=mask)
                mask = (eyes == [0, 0, 0]).all(axis=2)
                eyes[mask] = [255, 255, 255]
                mid = (shape[42][0] + shape[39][0]) // 2
                eyes_gray = cv2.cvtColor(eyes, cv2.COLOR_BGR2GRAY)
                threshold = cv2.getTrackbarPos('threshold', 'image')
                _, thresh = cv2.threshold(eyes_gray, threshold, 255,
                                          cv2.THRESH_BINARY)
                thresh = process_thresh(thresh)

                eyeball_pos_left = contouring(thresh[:, 0:mid], mid, img2,
                                              end_points_left)
                eyeball_pos_right = contouring(thresh[:, mid:], mid, img2,
                                               end_points_right, True)
                print_eye_pos(img2, eyeball_pos_left, eyeball_pos_right)

            if ang1 >= 48:
                print('Head down')
                # cv2.putText(img2, 'Head down', (30, 30), font, 2, (255, 255, 128), 3)
                speak("Head Down")
                j = int(j)
Exemple #55
import sys
import numpy as np
import cv2


def onBlend(x):
    pass


linux = cv2.imread('LinuxLogo.jpg', 1)
windows = cv2.imread('WindowsLogo.jpg', 1)

cv2.namedWindow('image')
cv2.createTrackbar('blend', 'image', 0, 100, onBlend)

while (1):
    alpha = cv2.getTrackbarPos('blend', 'image')
    dst = cv2.addWeighted(linux, alpha / 100.0, windows, 1.0 - (alpha / 100.0),
                          0)
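    # addWeighted computes dst = linux * w + windows * (1 - w), so the slider
    # cross-fades between the two logos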
    cv2.imshow('image', dst)

    if cv2.waitKey(27) & 0xFF == 27:
        break

cv2.destroyAllWindows()

def gec(x):
    pass


kamera = cv2.VideoCapture(0)
cv2.namedWindow('Canny', cv2.WINDOW_AUTOSIZE)

cv2.createTrackbar("Lower", "Canny", 20, 255, gec)
cv2.createTrackbar("Upper", "Canny", 25, 255, gec)

cv2.startWindowThread()

while (True):
    lower = cv2.getTrackbarPos("Lower", "Canny")
    upper = cv2.getTrackbarPos("Upper", "Canny")

    ret, goruntu = kamera.read()

    goruntu = cv2.cvtColor(goruntu, cv2.COLOR_BGR2GRAY)

    canny = cv2.Canny(goruntu, lower, upper, L2gradient=True)

    cv2.imshow('Canny', canny)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

kamera.release()
cv2.destroyAllWindows()
# In an infinite loop we repeatedly send requests
# according to the position of the ball in the frame
while True:
    # Reading image from capture
    _,image=capture.read()
    
    # Doing gaussian blur 
    image=cv2.GaussianBlur(image,(21,21),0)
    # (21,21) is the size of the gaussian kernel

    # Converting from BGR to LAB 
    lab_image=cv2.cvtColor(image,cv2.COLOR_BGR2LAB)

    # Masking the image to get only the ball
    # Getting lower value
    lower = np.asarray([cv2.getTrackbarPos('Low_L', 'image'),
                        cv2.getTrackbarPos('Low_A', 'image'),
                        cv2.getTrackbarPos('Low_B', 'image')], dtype="uint8")
    upper=np.asarray([255,255,255],dtype="uint8")
    masked_image=cv2.inRange(lab_image,lower,upper)

    # Doing bitwise and to get only the image of the ball
    #output_image=cv2.bitwise_and(image,image,mask=masked_image)

    # Finding contours in the output image
    #gray_image=cv2.cvtColor(output_image,cv2.COLOR_BGR2GRAY)
    contours, hierarchy = cv2.findContours(masked_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    if len(contours)==0:
        requests.get(url=nodemcu_url+'halt')
        
    # Drawing all contours
    cv2.drawContours(image,contours,-1,(0,255,0),2)
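    # The excerpt ends before the steering logic described in the comment at
    # the top of this example. One way to sketch it: take the largest contour,
    # find its centroid via image moments, and request a turn based on which
    # third of the frame it falls in. The 'left'/'right'/'forward' endpoint
    # names are hypothetical; only 'halt' appears in the original code.
    if len(contours) > 0:
        c = max(contours, key=cv2.contourArea)
        M = cv2.moments(c)
        if M['m00'] > 0:
            cx = int(M['m10'] / M['m00'])
            third = image.shape[1] // 3
            if cx < third:
                requests.get(url=nodemcu_url + 'left')
            elif cx > 2 * third:
                requests.get(url=nodemcu_url + 'right')
            else:
                requests.get(url=nodemcu_url + 'forward')
    cv2.imshow('image', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break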
cv2.namedWindow("image")
cv2.createTrackbar("LH","image",0,255,callback)
cv2.createTrackbar("LS","image",0,255,callback)
cv2.createTrackbar("LV","image",0,255,callback)
cv2.createTrackbar("UH","image",255,255,callback)
cv2.createTrackbar("US","image",255,255,callback)
cv2.createTrackbar("UV","image",255,255,callback)
img=cv2.imread("smarties.png")
while(cap.isOpened()):
    ret,frame=cap.read()
    frame1= cv2.flip(frame,1) #Lateral Inversion
    if (ret==True):
        img=cv2.imread("computer_vision.jpg")
        img=cv2.resize(img,(640,480))
  #  hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
        lh=cv2.getTrackbarPos("LH","image")
        ls=cv2.getTrackbarPos("LS","image")
        lv=cv2.getTrackbarPos("LV","image")
        uh=cv2.getTrackbarPos("UH","image")
        us=cv2.getTrackbarPos("US","image")
        uv=cv2.getTrackbarPos("UV","image")
        lb=np.array([lh,ls,lv])
        ub=np.array([uh,us,uv])
        mask=cv2.inRange(frame1,lb,ub)
        res=cv2.bitwise_and(frame1,frame1,mask=mask)
        point=np.where(res==0)
        frame[point]=img[point]
        cv2.imshow("img",img)
        cv2.imshow("res",res)
        cv2.imshow("mask",mask)
        cv2.imshow("frame",frame1)
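        # The excerpt stops here; without a waitKey call the imshow windows
        # never refresh. A minimal continuation:
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break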
                    cnt += 1
                    cv2.circle(drawing, far, 8, [211, 84, 0], -1)
            return True, cnt
    return False, 0


# Camera
camera = cv2.VideoCapture(0)
camera.set(10, 200)  # property 10 = cv2.CAP_PROP_BRIGHTNESS
cv2.namedWindow('trackbar')
cv2.createTrackbar('trh1', 'trackbar', threshold, 100, printThreshold)


while camera.isOpened():
    ret, frame = camera.read()
    threshold = cv2.getTrackbarPos('trh1', 'trackbar')
    frame = cv2.bilateralFilter(frame, 5, 50, 100)  # smoothing filter
    frame = cv2.flip(frame, 1)  # flip the frame horizontally
    cv2.rectangle(frame, (int(cap_region_x_begin * frame.shape[1]), 0),
                 (frame.shape[1], int(cap_region_y_end * frame.shape[0])), (255, 0, 0), 2)
    cv2.imshow('original', frame)

    #  Main operation
    if isBgCaptured == 1:  # this part won't run until the background is captured
        img = removeBG(frame)
        img = img[0:int(cap_region_y_end * frame.shape[0]),
                    int(cap_region_x_begin * frame.shape[1]):frame.shape[1]]  # clip the ROI
        cv2.imshow('mask', img)

        # convert the image into binary image
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
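        # (Cut off by the excerpt. The usual next steps in this kind of
        #  hand-gesture demo: blur, cv2.threshold with the 'trh1' slider value,
        #  cv2.findContours, and then the convexity-defect counting shown above.)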
Exemple #60
    cv2.namedWindow("img_hsv", cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow("img_bin", cv2.WINDOW_AUTOSIZE)

    # create the trackbars for img_bin window
    cv2.createTrackbar("threshold", "img_bin", 155, 255, onThresholdChange)
    cv2.createTrackbar("invert", "img_bin", 0, 1, onInvertChange)

    while 1:
        s, img_raw = cam.read()
        if s:
            img_gray = cv2.cvtColor(img_raw, cv2.COLOR_BGR2GRAY)
            cv2.rectangle(img_raw, (10, 10), (200, 200), (0, 255, 0))

            img_hsv = cv2.cvtColor(img_raw, cv2.COLOR_BGR2HSV)

            threshold = cv2.getTrackbarPos("threshold", "img_bin")
            invert = cv2.getTrackbarPos("invert", "img_bin")

            if invert:
                s, img_bin = cv2.threshold(img_gray, threshold, 255,
                                           cv2.THRESH_BINARY_INV)
            else:
                s, img_bin = cv2.threshold(img_gray, threshold, 255,
                                           cv2.THRESH_BINARY)

            cv2.imshow("img_raw", img_raw)
            cv2.imshow("img_gray", img_gray)
            cv2.imshow("img_hsv", img_hsv)
            if s:
                cv2.imshow("img_bin", img_bin)
            else: