Example #1
 def visualize(self):
     cv2.namedWindow('Incisor Segmentation', cv2.WINDOW_NORMAL)
     i = np.asarray(self.image[:,:]).copy()
     for pIdx, p in enumerate(self.shape.points):
         cv2.circle(i, (int(p.x.real), int(p.y.real)), 2, (255,0,0), -1)
     cv2.imshow('Incisor Segmentation', i)
     cv2.waitKey(0)
Example #2
def colour_picker(colourSTR ="(unspecified)", colourGrabWindowSize = 5, colourHueWindowSize = 40, colourSaturationWindowSize= 40, colourValueWindowSize =40):
    global cam
    global hsv

    print "Right click the ",colourSTR," blob. Hit escape when done."
    cv2.namedWindow("image") 
    cv2.setMouseCallback("image", mouseCallBack, param=None)
    try:
        while True:
            #Get image from webcam and convert to greyscale
            ret, img = cam.read()
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            
            
            cv2.imshow("image", img)        
            
            cMinY = max(0, rightButtonY - colourGrabWindowSize)
            cMaxY = min(len(hsv) - 1, rightButtonY + colourGrabWindowSize)
            cMinX = max(0, rightButtonX - colourGrabWindowSize)
            cMaxX = min(len(hsv[0]) - 1, rightButtonX + colourGrabWindowSize)          
            
            # average the HSV components over the sampling window
            cHue = int(npy.mean(hsv[cMinY:cMaxY, cMinX:cMaxX, 0]))
            cSaturation = int(npy.mean(hsv[cMinY:cMaxY, cMinX:cMaxX, 1]))
            cValue = int(npy.mean(hsv[cMinY:cMaxY, cMinX:cMaxX, 2]))
            
            #Sleep infinite loop for ~10ms
            #Exit if user presses <Esc>
            if cv2.waitKey(10) == 27:
                break
    
    finally:
        cv2.destroyWindow("image")
        return npy.array([cHue, cSaturation, cValue])
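
The snippet relies on a module-level mouseCallBack plus the globals rightButtonX/rightButtonY, none of which are shown. A minimal sketch of what they might look like (the names come from the snippet; the body is an assumption):

rightButtonX, rightButtonY = 0, 0

def mouseCallBack(event, x, y, flags, param):
    # remember the most recent right-click position for colour sampling
    global rightButtonX, rightButtonY
    if event == cv2.EVENT_RBUTTONDOWN:
        rightButtonX, rightButtonY = x, y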
Example #3
    def start(self):
        # in case the cv library is not available must return
        # immediately in order to avoid any problems (required)
        if not cv2: return

        # retrieves the reference to the first video device
        # present in the current system, this is going to be
        # used for the capture of the image and delta calculus
        self.camera = cv2.VideoCapture(0)

        # creates both windows that are going to be used in the
        # display of the current results,
        cv2.namedWindow(self.win_image, cv2.WINDOW_AUTOSIZE)
        cv2.namedWindow(self.win_delta, cv2.WINDOW_AUTOSIZE)

        # sets the initial previous image as an invalid image as
        # there's no initial image when the loop starts
        self.previous = None

        # iterates continuously for the running of the main loop
        # of the current program (this is the normal behavior)
        while True:
            result = self.tick()
            if not result: break
            key = cv2.waitKey(10)
            if key == 27: break

        # destroys the currently displayed windows on the screen
        # so that they can no longer be used in the current screen
        cv2.destroyWindow(self.win_image)
        cv2.destroyWindow(self.win_delta)
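
self.tick() is defined elsewhere in the class. A plausible sketch, assuming it grabs a frame, shows it in win_image, and shows the per-pixel delta against the previous frame in win_delta (only the window and attribute names come from the snippet):

    def tick(self):
        # grab a frame; signal the main loop to stop if the capture failed
        result, image = self.camera.read()
        if not result: return False

        # show the delta against the previous frame, when one exists
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        if self.previous is not None:
            delta = cv2.absdiff(gray, self.previous)
            cv2.imshow(self.win_delta, delta)
        self.previous = gray

        cv2.imshow(self.win_image, image)
        return True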
Example #4
    def __init__(self):
        """Initialize"""
        """set the number of torpedo's that should be on the board"""
        self.numTopedos = 5
        """set the number of mines"""
        self.numMines = 3
        """Initialize PyGame"""
        pygame.init()
        """Set the window Size"""
        infoObject = pygame.display.Info()
        self.width = infoObject.current_w
        self.height = infoObject.current_h
        """self.width = 1600
        self.height = 900"""

        """Create the Screen"""
        self.screen = pygame.display.set_mode((self.width, self.height))#, pygame.FULLSCREEN
        
        #camera rectangle values
        self.rectangleReady= False
        self.corner1Selected= False
        self.rectangle= None
        self.cap = cv2.VideoCapture(0)
        cv2.namedWindow('frame',1)
        cv2.setMouseCallback('frame', self.onmouse)
Example #5
	def run(self):
		runFlag = True
		cv2.namedWindow("TurtleCam 9000", 1)
		while(runFlag):
			image, timesImageServed = self.robot.getImage()
			with self.lock:
				if timesImageServed > 30:
					if self.stalled == False:
						print "Camera Stalled!"
					self.stalled = True
				else:
					self.stalled = False


			frame = self.mcs.update(image.copy())
			cv2.imshow("TurtleCam 9000", frame)

			code = chr(cv2.waitKey(10) & 255)

			if code == 't':
				cv2.imwrite("/home/macalester/catkin_ws/src/speedy_nav/res/captures/cap-" + str(datetime.now()) + ".jpg", image)
				print "Image saved!"
			if code == 'q':
				break

			with self.lock:
				runFlag = self.runFlag
Example #6
File: agcv.py Project: trevstanhope/agcv
 def flash_window(self, img, title=''):
     cv2.namedWindow(title, cv2.WINDOW_NORMAL)
     if self.config['FULLSCREEN']: cv2.setWindowProperty(title, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
     cv2.imshow(title, img)
     if cv2.waitKey(5) == -1:
         # no key pressed; give the GUI a moment to render
         time.sleep(0.05)
Example #7
File: CannyStill.py Project: jhli973/spark
def main():
    imgOriginal = cv2.imread(r'C:\Users\dbsnail\ImageFolder\images.jpg')               # open image

    if imgOriginal is None:                             # if image was not read successfully
        print "error: image not read from file \n\n"        # print error message to std out
        os.system("pause")                                  # pause so user can see error message
        return                                              # and exit function (which exits program)
    
    imgGrayscale = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2GRAY)        # convert to grayscale

    imgBlurred = cv2.GaussianBlur(imgGrayscale, (5, 5), 0)              # blur
    
    imgCanny = cv2.Canny(imgBlurred, 100, 200)                          # get Canny edges

    cv2.namedWindow("imgOriginal", cv2.WINDOW_AUTOSIZE)        # create windows, use WINDOW_AUTOSIZE for a fixed window size
    cv2.namedWindow("imgCanny", cv2.WINDOW_AUTOSIZE)           # or use WINDOW_NORMAL to allow window resizing

    cv2.imshow("imgOriginal", imgOriginal)         # show windows
    cv2.imshow("imgCanny", imgCanny)

    cv2.waitKey()                               # hold windows open until user presses a key

    cv2.destroyAllWindows()                     # remove windows from memory

    return
Example #8
 def display( self ):
     '''Display the image (analyze must be called first)'''
     if self.test_image is None:
         raise Exception('The image is not tested')
     cv2.namedWindow("Image")   
     cv2.imshow("Image", self.test_image)   
     cv2.waitKey (0)  
Example #9
def camactivate ():

	with picamera.PiCamera() as camera:
		camera.resolution = (512,512)
		time.sleep(2)
		camera.capture('im1.jpg')
		time.sleep(2)
		camera.capture('im2.jpg')
		time.sleep(2)
		camera.capture('im3.jpg')
		time.sleep(2)
		camera.capture('im4.jpg')

	im1=cv2.imread('im1.jpg',1)
	im2=cv2.imread('im2.jpg',1)
	im3=cv2.imread('im3.jpg',1)
	im4=cv2.imread('im4.jpg',1)

	cv2.putText(im1,'Cam1',(10,20),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
	cv2.putText(im2,'Cam2',(10,20),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
	cv2.putText(im3,'Cam3',(10,20),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
	cv2.putText(im4,'Cam4',(10,20),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)


	cv2.namedWindow('Concatenated Images',cv2.WINDOW_NORMAL)
	concat=np.zeros((1024,1024,3),np.uint8)
	concat[0:512,0:512,:]=im1
	concat[0:512,512:1024,:]=im2
	concat[512:1024,0:512,:]=im3
	concat[512:1024,512:1024,:]=im4

	cv2.imshow('Concatenated Images',concat)
	cv2.imwrite('concat.jpg',concat)
	cv2.waitKey(0)
Example #10
def position_interpolator(background):
    global positions
    if not isfile(POSITIONS_DUMP_FILENAME):
        def callback(event, x, y, flags, parameters):
            if event == cv2.EVENT_RBUTTONDOWN:
                positions.append(Coordinate(x, y))
    
        cv2.namedWindow("Interpolator")
        cv2.setMouseCallback("Interpolator", callback)

        while True: 
            cv2.imshow("Interpolator", background.array)
            if cv2.waitKey() & 0xFF == 27:
                break
        cv2.destroyWindow("Interpolator")
        with open(POSITIONS_DUMP_FILENAME, "w") as positions_dump_file:
            pickle.dump(positions, positions_dump_file) 
    else:
        with open(POSITIONS_DUMP_FILENAME, "r") as positions_dump_file:
            positions = pickle.load(positions_dump_file)
        
    
    t = map(lambda i: i * STEP, range(len(positions)))
    x = map(lambda p: p.x, positions)
    y = map(lambda p: p.y, positions)



    f_x = interpolate.interp1d(t, x, kind = "quadratic")
    f_y = interpolate.interp1d(t, y, kind = "quadratic")
    
    return PositionInterpolator(f_x, f_y)
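
Coordinate, PositionInterpolator, STEP and POSITIONS_DUMP_FILENAME come from the surrounding module. A minimal sketch of plausible definitions (an assumption, not the project's actual code):

from collections import namedtuple

STEP = 1.0                                  # seconds between clicked positions
POSITIONS_DUMP_FILENAME = "positions.dump"  # hypothetical path
Coordinate = namedtuple("Coordinate", ["x", "y"])

class PositionInterpolator(object):
    # wraps the two scipy interpolators and evaluates them at time t
    def __init__(self, f_x, f_y):
        self.f_x, self.f_y = f_x, f_y

    def position(self, t):
        return Coordinate(float(self.f_x(t)), float(self.f_y(t)))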
Example #11
File: objdet.py Project: adrikayak/Duckter
    def __init__(self, src, threshold = 25, doRecord=True, showWindows=True):
        self.doRecord = doRecord
        self.show = showWindows
        self.frame = None
        
        self.cap = video.create_capture(src)
        self.cap.set(3,1280)
        self.cap.set(4,2316)
        self.ret, self.frame = self.cap.read() #Take a frame to init recorder
        self.frame_rate = self.cap.get(5)
        print self.frame_rate
        # numpy arrays are indexed (height, width), so swap the capture dimensions
        self.gray_frame = np.zeros((int(self.cap.get(4)), int(self.cap.get(3)), 1), np.uint8)
        self.average_frame = np.zeros((int(self.cap.get(4)), int(self.cap.get(3)), 3), np.float32)
        self.absdiff_frame = None
        self.previous_frame = None
        
        self.surface = self.cap.get(3) * self.cap.get(4)
        self.currentsurface = 0
        self.currentcontours = None
        self.threshold = threshold
        self.isRecording = False

        self.tracks = []
        self.tracks_dist = []
        self.track_len = 3
        self.frame_idx = 0
        self.detect_interval = 5
        
        # self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8) #Creates a font

        self.trigger_time = 0
        if showWindows:
            cv2.namedWindow("Image", cv2.WINDOW_AUTOSIZE)
Example #12
def get_polyline(image,window_name):
    cv2.namedWindow(window_name)
    class GetPoly:
        xys = []        
        done = False
        def callback(self,event, x, y, flags, param):
            if self.done == True:
                pass
            elif event == cv2.EVENT_LBUTTONDOWN:
                self.xys.append((x,y))
            elif event == cv2.EVENT_MBUTTONDOWN:
                self.done = True
    gp = GetPoly()
    cv2.setMouseCallback(window_name,gp.callback)
    print "press middle mouse button or 'c' key to complete the polygon"
    while not gp.done:
        im_copy = image.copy()
        for (x,y) in gp.xys:
            cv2.circle(im_copy,(x,y),2,(0,255,0))
        if len(gp.xys) > 1 and not gp.done:
            cv2.polylines(im_copy,[np.array(gp.xys).astype('int32')],False,(0,255,0),1)
        cv2.imshow(window_name,im_copy)
        key = cv2.waitKey(50)
        if key == ord('c'): gp.done = True
    #cv2.destroyWindow(window_name)
    return gp.xys
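
A short usage sketch for get_polyline (the image path is hypothetical):

img = cv2.imread("scene.png")  # hypothetical input image
polygon = get_polyline(img, "polygon picker")
print polygon  # list of (x, y) vertices clicked by the user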
Example #13
    def __init__(self, window_name):
        self.handlers = dict()
        self.window_name = window_name

        cv2.namedWindow(self.window_name)

        cv2.setMouseCallback(self.window_name, self.central_handler)
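
self.central_handler is not part of the fragment. A sketch under the assumption that self.handlers maps OpenCV mouse-event codes to callables:

    def central_handler(self, event, x, y, flags, param):
        # dispatch the raw mouse event to a registered handler, if any
        handler = self.handlers.get(event)
        if handler is not None:
            handler(x, y, flags)

    def add_handler(self, event, handler):
        # hypothetical helper, e.g. add_handler(cv2.EVENT_LBUTTONDOWN, on_click)
        self.handlers[event] = handler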
Example #14
def test_color_block_finder_01():
    '''
    Color block detection test case 1: read an image file and detect blocks
    '''
    # path to the image
    img_path = "demo-pic.png"
    # lower HSV threshold boundary
    lowerb = (96, 210, 85)
    # upper HSV threshold boundary
    upperb = (114, 255, 231)

    # read the source image (BGR)
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    # check that the image was read successfully
    if img is None:
        print("Error: please check the image file path")
        exit(1)

    # detect the color blocks and get their bounding rectangles
    rects = color_block_finder(img, lowerb, upperb)
    # draw the rectangles of the detected blocks
    canvas = draw_color_block_rect(img, rects)
    # show the final result in a HighGUI window
    cv2.namedWindow('result', flags=cv2.WINDOW_NORMAL | cv2.WINDOW_FREERATIO)
    cv2.imshow('result', canvas)

    # wait for any key press
    cv2.waitKey(0)
    # close all windows
    cv2.destroyAllWindows()
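
color_block_finder and draw_color_block_rect are project helpers that are not shown. A minimal sketch of how they might work, built on cv2.inRange and cv2.findContours (an assumption, not the original implementation):

def color_block_finder(img, lowerb, upperb, min_area=100):
    # threshold in HSV space and return bounding rectangles of the blobs
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lowerb, upperb)
    # [-2] picks the contour list across OpenCV 2/3/4 return conventions
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    return [cv2.boundingRect(c) for c in contours
            if cv2.contourArea(c) >= min_area]

def draw_color_block_rect(img, rects, color=(0, 255, 0)):
    canvas = img.copy()
    for (x, y, w, h) in rects:
        cv2.rectangle(canvas, (x, y), (x + w, y + h), color, 2)
    return canvas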
Example #15
    def run(self):
        rate = rospy.Rate(10)
        done = False
        cv2.namedWindow("kinect_view")
        cv2.setMouseCallback("kinect_view", self.mouse_call)

        while (not rospy.is_shutdown() and not done):

            if self.image is None:
                continue

            image = np.copy(self.image)
            state = self.states[self.state].replace('_', ' ')
            cv2.putText(image, 'Click the {}'.format(self.target_object), (10, self.image.shape[0] - 100), self.font, 1, (255, 100, 80), 2)
            self.draw_corners(image)

            if self.is_done:
                cv2.polylines(image, np.int32([self.corners]), True, (0, 255, 0), 6)
                done = True
                print 'DONE'

            cv2.imshow("kinect_view", image)

            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
            rate.sleep()

            if done:
                cv2.destroyWindow("kinect_view")
Example #16
    def __init__(self):

        cv2.namedWindow("detected circles", 1)
        cv2.startWindowThread()
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/turtlebot_2/camera/rgb/image_raw",
                                          Image, self.callback)
Example #17
def display_loop(framequeue, quick_catchup, quick_catchup_pixels):
  # Open a window in which to display the images
  display_window_name = "slowjector"
  cv2.namedWindow(display_window_name, cv2.WINDOW_NORMAL)
  last_delta_count = 0
  listq = deque()
  while True:
    sleep(0.001) # Small amount of sleeping for thread-switching
    data = framequeue.get()
    # Source thread will put None if it receives Ctrl-C; if this happens, exit
    # the loop and shut off the display.
    if data is None:
      break

    # Frames are pushed onto a queue (FIFO)
    listq.append(data)
    data = listq.popleft()

    # Otherwise, it puts a tuple (delta_count, image)
    delta_count, image = data

    # Draw the image
    cv2.imshow(display_window_name, image)

    # Optionally, catch up to the live feed after seeing some motion stop by
    # popping all images off of the queue.
    if (quick_catchup and
        delta_count <= quick_catchup_pixels and
        last_delta_count > quick_catchup_pixels):
      listq.clear()
    last_delta_count = delta_count

  # Clean up by closing the window used to display images.
  cv2.destroyWindow(display_window_name)
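
framequeue is filled by a separate capture thread; per the comments above, each item is either None or a (delta_count, image) tuple. A sketch of such a producer, assuming delta_count counts pixels that changed since the last frame:

def capture_loop(framequeue):
  cap = cv2.VideoCapture(0)
  previous = None
  try:
    while True:
      ret, image = cap.read()
      if not ret:
        break
      gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
      delta_count = 0
      if previous is not None:
        # count pixels that changed noticeably since the last frame
        delta = cv2.absdiff(gray, previous)
        delta_count = cv2.countNonZero(
            cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1])
      previous = gray
      framequeue.put((delta_count, image))
  finally:
    framequeue.put(None)  # display_loop exits on None
    cap.release()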
Example #18
def goLiveT():
	cap = cv2.VideoCapture(0)
	cv2.namedWindow('image')
	# create trackbars for color change
	cv2.createTrackbar('Thres','image',0,255,nothing)
	# create switch for ON/OFF functionality
	switch = '0 : OFF \n1 : ON'
	cv2.createTrackbar(switch, 'image',0,1,nothing)
	while (1):

		_, imgOriginal = cap.read()
		cv2.imshow('imgOriginal',imgOriginal)
		filteredImage = rb.clearImage(imgOriginal)

		# get current positions of four trackbars
		binValue = cv2.getTrackbarPos('Thres','image')
		s = cv2.getTrackbarPos(switch,'image')

		k = cv2.waitKey(1) & 0xFF
		if k == 27:
			break

		if s == 0:
			pass 
		else:
			thresImage = rb.doThresHold(filteredImage,binValue)
			cv2.imshow('img', thresImage)

	cv2.destroyAllWindows()
Example #19
File: input.py Project: mkovacs/clavius
 def __enter__(self):
     print "entering PrintSink"
     print "  starting cv2 window thread"
     cv2.startWindowThread()
     print '  creating cv2 window "input"'
     cv2.namedWindow("input")
     return self
Example #20
    def __init__(self):
        # self.image_pub = rospy.Publisher("/image_topic_2",Image ,queue_size=1)

        cv2.namedWindow("Image window", 1)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/kinect2/sd/image_depth_rect", Image, self.callback)
        self.camera_sub = rospy.Subscriber("/kinect2/sd/camera_info", CameraInfo, self.callback)
Example #21
def imageShow(image, title="image", norm=True, wait=1):
    '''
    Display an image in a resizable cv window.  If the image is a numpy
    array, it will be converted to a cv image before display with an
    optional normalization of the image data to the range [0 ... 255].
    Numpy arrays with 3 channels get an RGB to BGR color swap.
    
    This function returns the value from cv2.waitKey(wait); this ensures
    the image is displayed and provides the option of capturing a keystroke
    if one is pressed in the allotted time.  If the wait parameter is
    None, cv2.waitKey() is not called.
    '''
    
    if type(image) == numpy.ndarray:
        # normalize the image data
        if norm:
            image = normalize(image)
        
        # color swap RGB to BGR
        if image.ndim == 3 and image.shape[2] == 3:
            image = image[..., ::-1]
    else:
        # we actually need to go back to numpy for this
        image = cv2array(image)
            
    cv2.namedWindow(title, cv2.WINDOW_NORMAL)
    cv2.imshow(title, image.astype(numpy.uint8))
    
    return cv2.waitKey(wait) if wait is not None else None
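
A short usage sketch (the file name is hypothetical; note the docstring says 3-channel numpy input is treated as RGB, while cv2.imread returns BGR):

frame = cv2.imread("sample.png")[..., ::-1]  # BGR -> RGB for imageShow
key = imageShow(frame, title="preview", norm=False, wait=0)
if key == 27:  # <Esc> was pressed while the image was displayed
    cv2.destroyWindow("preview")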
Example #22
    def init_window(self):
        cv2.namedWindow(self.window_name)
        max_i, max_j = 0, 0
        if len(self.settings.window_panes) == 0:
            raise ImproperlyConfigured('settings.window_panes is empty.')
        self.panes = OrderedDict()
        for pane_name, pane_dimensions in self.settings.window_panes:
            if len(pane_dimensions) != 4:
                raise ImproperlyConfigured('pane dimensions should be a tuple of length 4, but it is "%s"' % repr(pane_dimensions))
            i_begin, j_begin, i_size, j_size = pane_dimensions
            max_i = max(max_i, i_begin + i_size)
            max_j = max(max_j, j_begin + j_size)
            if pane_name in self.panes:
                raise Exception('Duplicate pane name in settings: %s' % pane_name)
            self.panes[pane_name] = Pane(i_begin, j_begin, i_size, j_size)
        self.buffer_height = max_i
        self.buffer_width = max_j

        self.window_buffer = np.tile(np.array(np.array(self.settings.window_background) * 255, 'uint8'),
                                     (max_i,max_j,1))
        #print 'BUFFER IS:', self.window_buffer.shape, self.window_buffer.min(), self.window_buffer.max()

        for _,pane in self.panes.iteritems():
            pane.data = self.window_buffer[pane.i_begin:pane.i_end, pane.j_begin:pane.j_end]

        # Allocate help pane
        for ll in self.settings.help_pane_loc:
            assert ll >= 0 and ll <= 1, 'help_pane_loc values should be in [0,1]'
        self.help_pane = Pane(int(self.settings.help_pane_loc[0]*max_i),
                              int(self.settings.help_pane_loc[1]*max_j),
                              int(self.settings.help_pane_loc[2]*max_i),
                              int(self.settings.help_pane_loc[3]*max_j))
        self.help_buffer = self.window_buffer.copy() # For rendering help mode
        self.help_pane.data = self.help_buffer[self.help_pane.i_begin:self.help_pane.i_end, self.help_pane.j_begin:self.help_pane.j_end]
Example #23
def main():
    cv2.namedWindow("setting", cv2.CV_WINDOW_AUTOSIZE)
    cv2.namedWindow("Red", cv2.CV_WINDOW_AUTOSIZE)
    #create a cam that pulls from our cam source
    video = cv2.VideoCapture(0)
    while True:
        #grab a frame
        _, frame = video.read()
        #generate the threshold images
        blueThresh = getThresholdImage(frame, [90,120, 50], [150, 255, 255])
        redThresh = getThresholdImage(frame, [120,100, 60], [255, 255, 255])
        orangeThresh = getThresholdImage(frame, [7,110, 60], [30, 255, 255])
        pinkThresh = getThresholdImage(frame, [110,30, 140], [255, 85, 255])
        
        #generate the contour detection for every color
        contourDetect(blueThresh, frame, (255, 0, 0))
        contourDetect(redThresh, frame, (0, 0, 255))
        contourDetect(orangeThresh, frame, (100, 150, 255))
        contourDetect(pinkThresh, frame, (150, 50, 255))
        
        cv2.imshow("setting", frame)
        cv2.imshow("Blue", blueThresh)
        cv2.imshow("Red", redThresh)
        key = cv2.waitKey(1)
        if key & 0xFF == 27:  # <Esc>
            break
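
getThresholdImage and contourDetect are not shown. A minimal sketch consistent with how they are called above (HSV bounds in, binary mask out; contours drawn onto the frame in the given colour), assuming numpy is imported as np:

def getThresholdImage(frame, lower, upper):
    # threshold the frame in HSV space between the two bounds
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv, np.array(lower), np.array(upper))

def contourDetect(thresh, frame, color):
    # outline every detected blob directly on the display frame
    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    cv2.drawContours(frame, contours, -1, color, 2)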
Example #24
def loop(processimg):
	if not use_webcam:
		ctx = freenect.init()
		dev = freenect.open_device(ctx, 0)
		freenect.set_tilt_degs(dev, 10)
		freenect.close_device(dev)
		
	
	cv2.namedWindow('processing')
	for k, v in params.iteritems():
		cv2.createTrackbar(k, 'processing', v, 255, onchange(k))
	
	runonce = True
	while runonce:
		#runonce = False
		if imgpath != "":
			img = cv2.imread(imgpath)
		else:
			img = getimg()

		cv2.imshow('processing', cv2.resize(processimg(img), (width, height)))
		char = cv2.waitKey(10)
		if char == 27:
			break
		elif char == ord('p'):
			for k, v in params.iteritems():
				print "%s: %d" % (k, v)
		#freenect.set_tilt_degs(dev, pid)
	cv2.destroyAllWindows()
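
params, onchange, getimg, width and height live elsewhere in the module. A sketch of the trackbar plumbing, assuming params is a dict of tunable integers (the keys are hypothetical):

width, height = 640, 480
params = {'blur': 5, 'canny_lo': 50, 'canny_hi': 150}

def onchange(key):
    # build a per-trackbar callback that writes the slider value back into params
    def callback(value):
        params[key] = value
    return callback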
Example #25
 def __showOpenCV(self, image):
     cv2.namedWindow("test", cv2.WND_PROP_FULLSCREEN)
     cv2.setWindowProperty("test", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
     bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # I think this is being converted both ways ...
     cv2.imshow("test", bgr)
     cv2.waitKey(0)  # the image will not display without this call
     cv2.destroyAllWindows()
Example #26
  def prepare_face_data(self, face_label):

    height = 480
    width = 640
    rate = rospy.Rate(10)
    facecount = 0
    face_images = []
    face_labels = []
    cv2.namedWindow("Training Set Preparation", 1)

    while (not rospy.is_shutdown()) and (facecount < TRAIN_SIZE+TEST_SIZE):
      if(self.is_image_present):
        im = self.image
      else:
        im = np.ones((height,width,3), np.uint8)
      (rows,cols,channels) = im.shape
      im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
      faces = face_cascade.detectMultiScale(im_gray, 1.3, 5)
      if (len(faces) == 1):
        for (x,y,w,h) in faces:
          if ((w>50) and (h>50)):
            cv2.rectangle(im,(x,y),(x+w,y+h),(255,0,0),2)
            face_gray = np.array(im_gray[y:y+h, x:x+w], 'uint8')
            face_sized = cv2.resize(face_gray, (200, 200))
            face_images.append(face_sized)
            face_labels.append(face_label)
            facecount += 1
      cv2.imshow("Training Set Preparation", im)
      cv2.waitKey(3)
      rate.sleep()

    cv2.destroyWindow("Training Set Preparation")
    return face_images, face_labels
Example #27
  def recognize_faces(self, names):

    height = 480
    width = 640
    rate = rospy.Rate(10)
    cv2.namedWindow("Face Recognizer", 1)

    while not rospy.is_shutdown():
      if(self.is_image_present):
        im = self.image
      else:
        im = np.ones((height,width,3), np.uint8)
      (rows,cols,channels) = im.shape
      im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
      faces = face_cascade.detectMultiScale(im_gray, 1.3, 5)
      box = all_faces()
      for (x,y,w,h) in faces:
        if ((w>50) and (h>50)):
          cv2.rectangle(im,(x,y),(x+w,y+h),(255,0,0),2)
          face_gray = np.array(im_gray[y:y+h, x:x+w], 'uint8')
          face_sized = cv2.resize(face_gray, (200, 200))
          face_predicted, conf = recognizer_lb.predict(face_sized)
          print 'This is ', names[face_predicted],
          print '| Confidence: ', conf 
          box.x.append(x)       
          box.y.append(y)       
          box.w.append(w)       
          box.h.append(h)       
          box.s.append(names[face_predicted])
      if(len(box.s) > 0):
        self.f_publ.publish(box)

      cv2.imshow("Face Recognizer", im)
      cv2.waitKey(3)
      rate.sleep()
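
face_cascade and recognizer_lb are created elsewhere. A sketch of how they might be set up and trained on the output of prepare_face_data (cv2.face needs opencv-contrib; the node instance is hypothetical):

face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

recognizer_lb = cv2.face.LBPHFaceRecognizer_create()
images, labels = node.prepare_face_data(0)  # hypothetical node instance
recognizer_lb.train(images, np.array(labels))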
Example #28
 def get_cams(self, image):
     cv2.namedWindow("Screw")
     cv2.startWindowThread()
     cv2.setMouseCallback("Screw", self._get_screw)
     self.screw_location = Point(0,0)
     while True:
         img = copy.copy(image)
         cv2.circle(img, self.screw_location.to_image(), 3, (0,0,255), 3)
         cv2.imshow('Screw', img)
         k = cv2.waitKey(33)
         if k==10:
             # enter pressed
             break
         elif k==-1:
             pass
         else:
             print k
             print 'Press Enter to continue..'
     cv2.destroyWindow('Screw')
     arc = self.screw_location - self.board_center
     cam_angle = arc.angle()
     self.cam_angle = cam_angle
     if len(self.base_cams) != 0:
         self.cams = self.rotate(self.base_cams, cam_angle)
         # self.cams = [c for c in self.cams if c.y <= 0]
         self.locate_cams(image)
Example #29
def show_codewords(code_dict, wtype, wshape):
    line = concatinate_all_imgs(code_dict, wtype, wshape)
    
    cv2.namedWindow("str(i)",2)
    cv2.resizeWindow("str(i)", 800, 600)
    cv2.imshow("str(i)", line)
    cv2.waitKey()
Example #30
def main():
    img = None
    main_win = Windows_handler.WinHandler(title='Nox',class_name='Qt5QWindowIcon')
    main_box = main_win.get_bbox()
    px_handler = Pixel_handler.PixelSearch(win_handler=main_win)
    mouse = Mouse_handler.MouseMovement(window_handler=main_win)
    main_win.init_window()
    cv2.namedWindow('image')
    cv2.namedWindow('config')

    # create trackbars for color change
    cv2.createTrackbar('R', 'config', 0, 255, nothing)
    cv2.createTrackbar('G', 'config', 0, 255, nothing)
    cv2.createTrackbar('B', 'config', 0, 255, nothing)
    cv2.createTrackbar('Offset', 'config', 0, 255, nothing)


    while True:

        img = px_handler.grab_window(bbox=main_box)
        img = px_handler.img_to_numpy(img,compound=False)
        img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)

        cv2.imshow('image',img)
        cv2.setMouseCallback('image', mouse_event, param=img)


        k = cv2.waitKey(1)
        if k == ord('q'):  # press 'q' to exit
            cv2.destroyAllWindows()
            quit(0)
Example #31
def choosevid(): 
    global chemin
    chemin = askopenfilename()
    print(chemin)
    cheminsplit = chemin.split('/')
    global name
    name = cheminsplit.pop(len(cheminsplit)-1)
    command = ['ffmpeg.exe', '-i', chemin, '-f', 'image2pipe', '-pix_fmt','rgb24','-vcodec','rawvideo','-']
    vid = sp.Popen(command, stdout = sp.PIPE, bufsize=10**8)
    image = vid.stdout.read(1080*1920*3) # read the raw bytes of one full frame
    image = np.frombuffer(image, dtype='uint8').copy() # interpret the bytes as 8-bit ints (copy so the array is writable)
    image = image.reshape((1080,1920,3))
    global img
    
    tkinter.messagebox.showinfo("Instruction","Select two points by double-clicking first the bottom left then the top right of the flag, then press Escape")
    
    class CoordinateStore:
        def __init__(self):
            self.points = []
    
        def select_point(self,event,x,y,flags,param):
                if event == cv2.EVENT_LBUTTONDBLCLK:
                    cv2.circle(image,(x,y),3,(255,0,0),-1)
                    self.points.append((x,y))
                    
    coordinateStore1 = CoordinateStore()    
    cv2.namedWindow('image')
    cv2.setMouseCallback('image',coordinateStore1.select_point)
    image=cv2.resize(image,None,fx=0.5, fy=0.5, interpolation = cv2.INTER_CUBIC)
    
    while(1):
        cv2.imshow('image',image)
        k = cv2.waitKey(20) & 0xFF
        if k == 27:
            break
    cv2.destroyAllWindows()
    
    coord = coordinateStore1.points
    print(coord)
    global xmin,xmax,ymin,ymax

    xmin = coord[1][1]
    xmax = coord[0][1]
    ymin = coord[0][0]
    ymax = coord[1][0]

    bouton_selec.pack_forget()
    image = image[xmin:xmax,ymin:ymax] 
    img = Image.fromarray(image)
    img = ImageTk.PhotoImage(img)
    
    global canvas_img, Frame1
    global canvas_pourc
    global canvas_temps
    global canvas_vit
    global text_vit
    global text_pourc
    global text_temps
    
    
    Frame1 = Frame(fenetre, borderwidth=2, relief=GROOVE)
    Frame1.pack(side="top", padx=5, pady=5)
    
    canvas_img = Canvas(Frame1, width=ymax-ymin, height=xmax-xmin, bg='ivory')
    canvas_img.pack(side="left", fill="both", expand=True)
    
    canvas_pourc = Canvas(Frame1, width=140, height=(ymax-ymin)//3, bg='RoyalBlue3')#(xmax-xmin)/2
    canvas_pourc.pack(side="top", fill="both", expand=True)
    
    canvas_vit = Canvas(Frame1, width=140, height=(ymax-ymin)//3, bg='red3')
    canvas_vit.pack(side="bottom", fill="both", expand=True)
    
    canvas_temps = Canvas(Frame1, width=140, height=(ymax-ymin)//3, bg='gray99')
    canvas_temps.pack(side="bottom", fill="both", expand=True)
    
    
    text_pourc = canvas_pourc.create_text(70, 50,font=("Purisa", 30, "bold"))
    text_temps = canvas_temps.create_text(70, 50,font=("Purisa", 28, "bold"))
    text_vit = canvas_vit.create_text(70, 50,font=("Purisa", 20, "bold"))
    #canvas = Canvas(fenetre, width=ymax-ymin, height=xmax-xmin)
    #canvas_img = Canvas(fenetre, width=ymax-ymin, height=xmax-xmin, bg='ivory')
    canvas_img.create_image(0, 0, anchor=NW, image=img)
    canvas_img.pack()
    
    canvas_pourc.itemconfigure(text_pourc, text="0.0%" )
    canvas_temps.itemconfigure(text_temps, text="-----")
    canvas_vit.itemconfigure(text_vit, text="0 m/s")
    
    Frame2 = Frame(fenetre, borderwidth=2, relief=GROOVE)
    Frame2.pack(side="bottom", padx=1, pady=1)
#    canvas.pack(side="left", fill="both", expand=True)
    bouton_gomme1 = Button(Frame2, text = "Gomme" ,command = boutongomme, background = "#C8C8C8",font=("Purisa", 12, "bold"))
    bouton_gomme1.pack(side="left", fill="both", expand=True)

#    bouton_right = Button(Frame2, text = "→" ,command = Buttonright, background = "#C8C8C8",font=("Purisa", 12, "bold"))
#    bouton_right.pack(side="left", fill="both", expand=True)
#
#    bouton_top = Button(Frame2, text = "↑" ,command = Buttonup, background = "#C8C8C8",font=("Purisa", 12, "bold"))
#    bouton_top.pack(side="left", fill="both", expand=True)
#
#    bouton_bottom = Button(Frame2, text = "↓" ,command = Buttondown, background = "#C8C8C8", font=("Purisa", 12, "bold"))
#    bouton_bottom.pack(side="left", fill="both", expand=True)
    
    bouton_traitement = Button(Frame2, text = "Commencer traitement",command = Traitement)
    bouton_traitement.pack()
Example #32
def camera_work():
    global root, video_show2, socket2, video_show2_global, image, started_flag, flag_inet_work, socket_2_connected
    ic_v = InetConnection.InetConnect(sc.gethostname() + "_v", "client")
    ic_v.connect()
    image = np.zeros((480, 640, 3), np.uint8)
    time_frame = time.time()
    frames = 0
    fps = 0
    frames_time = time.time()

    while 1:
        # try:
        # print("s",started_flag)
        # print("video status", video_show2_global, video_show2)
        if video_show2_global == 1:
            if video_show2 == 1:  # and started_flag == 1:
                # print("vid1", flag_inet_work)
                if flag_inet_work == True:
                    ic_v.send_and_wait_answer(robot_adres_inet, "p")
                    while 1:
                        j_mesg, jpg_bytes = ic_v.take_answer_bytes()
                        if len(jpg_bytes) > 1:
                            try:
                                A = np.frombuffer(jpg_bytes,
                                                  dtype=j_mesg['dtype'])
                                # arrayname = md['arrayname']sccv2.waitKey(1)

                                # image = A.reshape(j_mesg['shape'])
                                image = A.reshape(j_mesg['shape'])
                                image = cv2.imdecode(image, 1)
                                time_frame = time.time()
                                frames += 1

                            except:
                                pass

                        else:
                            # time.sleep(0.01)
                            break
                            # continue
                else:

                    try:
                        socket2.send_string("1", zmq.NOBLOCK)  # zmq.NOBLOCK
                    except:
                        # print("error", e)
                        pass
                    md = ""
                    t = time.time()
                    while 1:
                        try:
                            md = socket2.recv_json(zmq.NOBLOCK)
                        except:
                            pass
                        if md != "":
                            break
                        if time.time() - t > 1:
                            # print("break video")
                            break

                    if md != "" and video_show2 == 1:
                        msg = 0
                        t = time.time()
                        while 1:
                            try:
                                msg = socket2.recv(zmq.NOBLOCK)
                            except:
                                pass
                                # print("error", e)
                            if msg != 0:
                                break
                            if time.time() - t > 1:
                                # print("break video")
                                break

                        try:

                            A = np.frombuffer(msg, dtype=md['dtype'])
                            # arrayname = md['arrayname']sccv2.waitKey(1)
                            image = A.reshape(md['shape'])
                            image = cv2.imdecode(image, 1)
                            time_frame = time.time()
                            # print("frame", md['shape'])
                            # cv2.imshow("Robot frame", image)
                            # cv2.waitKey(1)
                            frames += 1

                        except:
                            pass

                # draw the frame
                if time.time() - time_frame > 2:

                    cv2.putText(image, "video lost",
                                (10, int(image.shape[0] - 10)),
                                cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                                (255, 255, 255))
                    for i in range(int(time.time() - time_frame)):
                        cv2.putText(image, ".",
                                    (140 + (i * 10), int(image.shape[0] - 10)),
                                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                                    (255, 255, 255))

                    # video auto-reconnect
                    if time.time() - time_frame > 5:
                        # print("reconnect video")
                        if flag_inet_work == True:
                            ic_v.disconnect()

                        else:
                            if socket_2_connected:
                                socket2.close()

                        time_frame = time.time()
                        video_show2 = 0

                        continue

                if frames_time < time.time():
                    fps = frames
                    # print("fps:",fps)
                    frames_time = int(time.time()) + 1
                    # print(frames_time)
                    frames = 0
                if fps_show == 1:
                    cv2.putText(
                        image, "fps:" + str(fps),
                        (int(image.shape[1] - 80), int(image.shape[0] - 10)),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 255))
                cv2.imshow("Robot frame", image)
                cv2.waitKey(1)
                continue

            if video_show2 == 0:

                if flag_inet_work == True:
                    video_show2 = 1
                    ic_v.connect()
                    continue
                else:
                    # print("Connecting to soft...", robot_adres)
                    cv2.destroyAllWindows()
                    for i in range(1, 5):
                        cv2.waitKey(1)
                    context = zmq.Context()
                    socket2 = context.socket(zmq.REQ)
                    socket2.connect("tcp://" + robot_adres + ":5555")
                    socket_2_connected = True
                    # print("connect ok")
                    # context = zmq.Context()
                    # socket2 = context.socket(zmq.REQ)
                    # socket2.setsockopt(zmq.LINGER, 0)
                    # socket2.connect("tcp://" + robot_adres + ":5555")
                    # socket2.send_string("1")  # send can block on other socket types, so keep track
                    # # use poll for timeouts:
                    # poller = zmq.Poller()
                    # poller.register(socket, zmq.POLLIN)
                    # if poller.poll(1 * 1000):  # 10s timeout in milliseconds
                    #     #msg = socket2.recv_json()
                    #     pass
                    # else:
                    #     print("Timeout processing auth request")

                    # these are not necessary, but still good practice:
                    pass

                image = np.zeros((480, 640, 3), np.uint8)
                cv2.putText(image, "Connect to robot...", (180, 240),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
                time_frame = time.time()
                video_show2 = 1
                cv2.namedWindow("Robot frame")
                cv2.startWindowThread()
                # print("connected")

                continue
            if video_show2 == -1:
                # print("vid-1")
                # print("close socket2")

                cv2.destroyAllWindows()
                for i in range(1, 5):
                    cv2.waitKey(1)

                if flag_inet_work == True:
                    video_show2 = 3
                    continue

                if socket_2_connected:
                    socket2.close()
                    socket_2_connected = False

                time.sleep(0.1)
                video_show2 = 3
                ic_v.disconnect()
                time.sleep(0.05)
                # print("video_show2", video_show2 )

                continue
            if video_show2 == 3:
                # print("vid3")
                # cv2.imshow("Robot frame", image)
                # cv2.destroyWindow("Robot frame")
                cv2.destroyAllWindows()
                for i in range(1, 5):
                    cv2.waitKey(1)

                time.sleep(0.05)
                continue
                # print("vid??", video_show2, "started_flag==", started_flag)

        else:

            cv2.destroyAllWindows()
            cv2.waitKey(1)
            video_show2 = 3
            time.sleep(0.1)
Example #33
import sys
import cv2
# main
if __name__ == "__main__":
    if len(sys.argv) > 1:
        # step 1: read the image as grayscale
        I = cv2.imread(sys.argv[1], cv2.IMREAD_GRAYSCALE)
    else:
        print "Usage: python morphologyEx.py imageFile"
        sys.exit(1)
    # show the original image
    cv2.imshow("I", I)
    # structuring-element radius and iteration count
    r, i = 1, 1
    MAX_R, MAX_I = 20, 20
    # window that displays the result of the morphological processing
    cv2.namedWindow("morphology", 1)

    def nothing(*arg):
        pass

    # trackbar for the structuring-element radius
    cv2.createTrackbar("r", "morphology", r, MAX_R, nothing)
    # trackbar for the iteration count
    cv2.createTrackbar("i", "morphology", i, MAX_I, nothing)
    while True:
        # current values of r and i
        r = cv2.getTrackbarPos('r', 'morphology')
        i = cv2.getTrackbarPos('i', 'morphology')
        # build the structuring element
        s = cv2.getStructuringElement(cv2.MORPH_RECT, (2 * r + 1, 2 * r + 1))
        # the original snippet is truncated here; a plausible completion
        # (the exact operation is an assumption -- opening is shown):
        d = cv2.morphologyEx(I, cv2.MORPH_OPEN, s, iterations=i)
        cv2.imshow("morphology", d)
        if cv2.waitKey(30) == 27:
            break
Example #34
class Cam():
  points_graph = np.array([0,0,0]).reshape((3,1))
  counter = 0
  def nothing():
    pass
  def quatToRot(self, q):
      sqw = q[3]*q[3]
      sqx = q[0]*q[0]
      sqy = q[1]*q[1]
      sqz = q[2]*q[2]

      # invs (inverse square length) is only required if quaternion is not already normalised
      invs = 1 / (sqx + sqy + sqz + sqw)
      m00 = ( sqx - sqy - sqz + sqw)*invs # since sqw + sqx + sqy + sqz =1/invs*invs
      m11 = (-sqx + sqy - sqz + sqw)*invs
      m22 = (-sqx - sqy + sqz + sqw)*invs
      
      tmp1 = q[0]*q[1]
      tmp2 = q[2]*q[3]
      m10 = 2.0 * (tmp1 + tmp2)*invs
      m01 = 2.0 * (tmp1 - tmp2)*invs
      
      tmp1 = q[0]*q[2]
      tmp2 = q[1]*q[3]
      m20 = 2.0 * (tmp1 - tmp2)*invs 
      m02 = 2.0 * (tmp1 + tmp2)*invs 
      tmp1 = q[1]*q[2]
      tmp2 = q[0]*q[3]
      m21 = 2.0 * (tmp1 + tmp2)*invs
      m12 = 2.0 * (tmp1 - tmp2)*invs  
      R = np.matrix([[m00, m01, m02],
               [m10, m11, m12],
              [m20,m21,m22]])
      return R

  def publish_3d_coords(self, left_max_x, left_max_y, right_max_x, right_max_y, imageSize):
          #monocular calibration 1 = left 2 = right
    CMatr1 = np.matrix([[2531.915668, 0.000000, 615.773452],  
    [0.000000, 2594.436434, 344.505755],
    [0.000000, 0.000000, 1.000000]]).astype(np.float)

    pp = pprint.PrettyPrinter(indent=4)

    # print("CMatr1", CMatr1)
    #left distortion parameters: 1.281681 -15.773048 -0.010428 0.012822 0.000000

    CMatr2 = np.matrix([[1539.714285, 0.000000, 837.703760],
    [0.000000, 1506.265655, 391.687374],
    [0.000000, 0.000000, 1.000000]]).astype(np.float)

    # print("CMatr2", CMatr2)

    projPoints1 = np.array([[left_max_x],[left_max_y]]).astype(np.float)

    projPoints2 = np.array([[right_max_x],[right_max_y]]).astype(np.float)

    distort_left = np.array([1.281681, -15.773048, -0.010428, 0.012822, 0.000000]).astype(np.float)
    distort_right = np.array([0.091411, -0.461269, 0.021006, 0.040117, 0.000000]).astype(np.float)
    # print("distort_left", distort_left)
    # print("distort_right", distort_right)

    RMat1 = self.quatToRot(np.array([-0.029, 0.992, -0.110, 0.053]))
    RMat2 = self.quatToRot(np.array([-0.019, 0.997, -0.038, -0.071]))



    RFinal = np.matmul(np.linalg.inv(RMat1),RMat2)

    T_left = np.array([-0.268, 0.516, 3.283]).astype(np.float) #--------------------CHANGE

    T_right = np.array([0.018, -0.184, 1.255]).astype(np.float) #--------------------CHANGE

    T_final = T_right - T_left

    #print("RFinal", RFinal)
    #print("T_final", T_final)
    #print(imageSize)

    R1,R2,P1,P2,Q, a,b = cv2.stereoRectify(CMatr1, distort_left, CMatr2, distort_right, (1280,720), RFinal, T_final,  alpha=-1)
    

    #print("R1",R1)
    #print("R2",R2)CMatr1
    #print("P1",P1)
    #print("P2",P2)

    #pnt1 = cv2.undistortPoints(projPoints1, CMatr1, distort_left, R=RMat1, P=P1)
    #pnt2 = cv2.undistortPoints(projPoints2, CMatr2, distort_right, R=RMat2, P=P2)

    #print("left:",projPoints1)
    #print("right:", projPoints2)

    P1_stereo = np.array([[4890.538324810042, 0.0, -1734.3179817199707, 0.0],[ 0.0, 4890.538324810042, 398.04181480407715, 0.0],[ 0.0, 0.0, 1.0, 0.0]])
    P2_stereo = np.array([[4890.538324810042, 0.0, -1734.3179817199707, 8092.200252104331],[ 0.0, 4890.538324810042, 398.04181480407715, 0.0],[ 0.0, 0.0, 1.0, 0.0]])


    points4D = cv2.triangulatePoints(P1_stereo, P2_stereo, projPoints1, projPoints2)
    #points3D = cv2.convertPointsFromHomogeneous(points4D)
    #print(points4D)

    #Converts 4D to 3D by [x,y,z,w] -> [x/w, y/w, z/w]
    
    points3D = np.array([  points4D[0]/points4D[3], points4D[1]/points4D[3], points4D[2]/points4D[3] ])




  def run(self):
    points = []
    global hue
    global sat
    global val
   
    cap0 = cv2.VideoCapture(0)
    cap1 = cv2.VideoCapture(1)

    while True:
      try:
        _,frame0 = cap0.read()
        _,frame1 = cap1.read()

        s = cv2.getTrackbarPos('Max Sat',objeto_string)
        if s == 0:
          s = global_max_sat

        lower = np.array([global_hue,global_sat,global_val])
        upper = np.array([global_max_hue,s,global_max_value])

        frame0_hsv = cv2.cvtColor(frame0,cv2.COLOR_BGR2HSV)
        frame0_hsv2 = frame0_hsv.copy()
        frame0_thresh = cv2.inRange(frame0_hsv,lower, upper)
        frame0_thresh = cv2.medianBlur(frame0_thresh,7)
        frame0_thresh2 = frame0_thresh.copy()

        frame0_inverted_image = cv2.bitwise_not(frame0_thresh2)
        frame0_kernel = np.ones((3,3),np.uint8)
        frame0_erosion = cv2.erode(frame0_inverted_image,frame0_kernel,iterations = 7)
        frame0_dilation = cv2.dilate(frame0_erosion,frame0_kernel,iterations = 15)

        frame0_edged = cv2.Canny(frame0_dilation, 30, 200)         
        _,frame0_contours,_ = cv2.findContours(frame0_edged,  
                          cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) 
  		
        maxArea = 0
        max_x = None
        max_y = None
        max_w = 0
        max_h = 0
        for c in frame0_contours:
          frame0_x,frame0_y,frame0_w,frame0_h = cv2.boundingRect(c)
          area = frame0_w*frame0_h
          if area > maxArea:
            maxArea = area
            max_x = frame0_x
            max_y = frame0_y
            max_w = frame0_w
            max_h = frame0_h

        left_max_x = max_x
        left_max_y = max_y

        if left_max_x is None or left_max_y is None:
          print("No ball") #TODO: Fix this to publish NaN or equivalent
          continue
        
        cv2.rectangle(frame0_dilation, (max_x, max_y), (max_x+max_w, max_y+max_h), (255, 0, 255), 2)
        cv2.imshow("camera 1", frame0_dilation)
        

        frame1_hsv = cv2.cvtColor(frame1,cv2.COLOR_BGR2HSV)
        frame1_hsv2 = frame1_hsv.copy()
        frame1_thresh = cv2.inRange(frame1_hsv,lower, upper)
        frame1_thresh = cv2.medianBlur(frame1_thresh,7)
        frame1_thresh2 = frame1_thresh.copy()

        frame1_inverted_image = cv2.bitwise_not(frame1_thresh2)
        frame1_kernel = np.ones((3,3),np.uint8)
        frame1_erosion = cv2.erode(frame1_inverted_image,frame1_kernel,iterations = 7)
        frame1_dilation = cv2.dilate(frame1_erosion,frame1_kernel,iterations = 15)

        frame1_edged = cv2.Canny(frame1_dilation, 30, 200)         
        _, frame1_contours,_ = cv2.findContours(frame1_edged,  
                          cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) 
  
        
        maxArea = 0
        max_x = None
        max_y = None
        max_w = 0
        max_h = 0
        for c in frame1_contours:
          frame1_x,frame1_y,frame1_w,frame1_h = cv2.boundingRect(c)
          area = frame1_w*frame1_h
          if area > maxArea:
            maxArea = area
            max_x = frame1_x
            max_y = frame1_y
            max_w = frame1_w
            max_h = frame1_h

        right_max_x = max_x
        right_max_y = max_y
        
        if right_max_x is None or right_max_y is None:
          print("No ball") #TODO: Fix this to publish NaN or equivalent
          continue

        cv2.rectangle(frame1_dilation, (max_x, max_y), (max_x+max_w, max_y+max_h), (255, 0, 255), 2)
        cv2.imshow("camera 2", frame1_dilation)

        
        self.publish_3d_coords(left_max_x, left_max_y, right_max_x, right_max_y, frame1_dilation.shape[::-1])


        if cv2.waitKey(1) & 0xFF == 27:  # <Esc>
          exit(0)

      except ThreadError:
        self.thread_cancelled = True

    #plot the 3D points

  cv2.namedWindow(objeto_string)

  cv2.createTrackbar('Max Sat', objeto_string, global_sat, 255, nothing)
Example #35
video_name = 'car_pov'
video_path = f"../../data/{video_name}.mp4"
cmap = 'gray'

# Get config for video
with open('./config.json') as json_file:
    data = json.load(json_file)[video_name]
    skip_frames = data['skip_frames']
    window_w = data['window_w']
    window_h = data['window_h']
    dense_params = data['flow_params']


# cv2.WINDOW_NORMAL makes the output window resizable
cv2.namedWindow('Resized Window', cv2.WINDOW_NORMAL)

# resize the window according to the screen resolution
# cv2.resizeWindow('Resized Window', 1400, 600) # drone1
cv2.resizeWindow('Resized Window', window_w, window_h) # car_pov

# Start
cap = cv2.VideoCapture(video_path)
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

# Sobel Filters
sobel_x = np.array([[1, 0, -1],
                    [2, 0, -2],
                    [1, 0, -1]]) # /8
sobel_y = np.array([[1, 2, 1],
                    [0, 0, 0],
                    [-1, -2, -1]]) # /8
Example #36
test_X = (test_X - mean) / std

test_y_pred = clf.predict(test_X)

accuracy = np.sum(y == y_pred) / n_histograms
print('Training accuracy: ' + str(accuracy))
val_accuracy = np.sum(val_y == val_y_pred) / n_val_histograms
print('Validation accuracy: ' + str(val_accuracy))
test_accuracy = np.sum(test_y == test_y_pred) / n_test_histograms
print('Test accuracy: ' + str(test_accuracy))

TEST = False

if TEST:
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('image', 1280, 720)

    for dir in os.listdir('val'):
        for file in os.listdir('val/' + dir):
            print(process.process('val/' + dir + '/' + file))

            if False:
                img = cv2.imread('val/' + dir + '/' + file)
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

                sift = cv2.xfeatures2d.SIFT_create()
                #print('val/' + dir + '/' + file)
                keypoints, des = sift.detectAndCompute(gray, None)
                # print(des)
Example #37
                                    # show NG picture
                                    ng_dir = os.path.join(image_dir, 'NG')
                                    target_names = os.path.join(
                                        ng_dir, '*' + pattern_check + '.jpg')
                                    file_names = glob.glob(target_names)
                                    for file_name in file_names:
                                        img = cv2.imread(file_name)
                                        img = cv2.resize(img, (800, 600))
                                        cv2.imshow(file_name, img)

                                    img_path = os.path.join(
                                        image_dir,
                                        series_num + pattern + '.tif')
                                    log_data = LogData(pattern_check)
                                    img = cv2.imread(img_path)
                                    cv2.namedWindow("image")
                                    separateRow = [
                                        0, 900, 1800, 2700, 3600, 4383
                                    ]
                                    for i in range(len(separateRow) - 1):
                                        cv2.setMouseCallback(
                                            "image",
                                            log_data.on_mouse_callback,
                                            (0, separateRow[i]))
                                        cv2.imshow(
                                            'image',
                                            img[separateRow[i]:separateRow[
                                                i + 1], :])
                                        while 1:
                                            key = cv2.waitKey(20)
                                            if key == ord('m'):
Example #38
File: Lesson05.py Project: soumnsdt/Python
import cv2 as cv
import numpy as np

src_img = cv.imread("G:/pic/meinv.jpg")
cv.namedWindow("input image", cv.WINDOW_AUTOSIZE)
cv.imshow("input image", src_img)

face = src_img[50:500, 100:480]
gray = cv.cvtColor(face, cv.COLOR_BGR2GRAY)
change_face = cv.cvtColor(gray, cv.COLOR_GRAY2BGR)
src_img[50:500, 100:480] = change_face
cv.imshow("face", src_img)
cv.waitKey(0)
cv.destroyAllWindows()



Example #39
cap = cv2.VideoCapture(1)

ret, mtx, dist, rvecs, tvecs = Camera_calibration()

# set dictionary size depending on the aruco marker selected
aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
parameters = aruco.DetectorParameters_create()
parameters.adaptiveThreshConstant = 10

ADAPTIVE_CONSTANT = None

manipulator = Manipulator("", "sa", "")
find_zero_flag = False

mid_zero = None
cv2.namedWindow("frame")
cv2.namedWindow("check")


def find_middle(points: Iterable):
    summ_x = 0
    summ_y = 0
    for p in points:
        summ_x += int(p[0])
        summ_y += int(p[1])
    return summ_x / len(points), summ_y / len(points)


# Find zero and constants
while not find_zero_flag:
    ret, frame = cap.read()
Example #40
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'OpenCV',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA)

cv2.imshow("Drawings", img)
cv2.waitKey(0)
cv2.destroyAllWindows()

#endregion

# region simple mouse event (drawing)

# mouse callback function (double click)
def draw_circle(event,x,y,flags,param):
    if event == cv2.EVENT_LBUTTONDBLCLK:
        cv2.circle(img,(x,y),20,(255,0,0),-1)
    elif event == cv2.EVENT_RBUTTONDOWN:
        cv2.circle(img,(x,y),20,(0,0,255),-1)

# Create a black image, a window and bind the function to window
img = np.zeros((512,512,3), np.uint8)
cv2.namedWindow('image')
cv2.setMouseCallback('image',draw_circle)

while(1):
    cv2.imshow('image',img)
    if cv2.waitKey(20) & 0xFF == 27: # <esc>
        break
cv2.destroyAllWindows()

# endregion
Example #41
import cv2
''' Open a camera '''
cap = cv2.VideoCapture(0)
cv2.namedWindow("Ergebnis", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Ergebnis", cv2.WND_PROP_FULLSCREEN,
                      cv2.WINDOW_FULLSCREEN)
''' Read, modify and display frames '''
while True:
    ret, frame = cap.read()
    frame = frame[0:50, 0:50]
    cv2.imshow('Ergebnis', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
''' Close the window after q was pressed '''
cap.release()
cv2.destroyAllWindows()
Example #42
emotion_model_path = 'models/_mini_XCEPTION.102-0.66.hdf5'

# hyper-parameters for bounding boxes shape
# loading models
face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
EMOTIONS = [
    "angry", "disgust", "scared", "happy", "sad", "surprised", "neutral"
]

#feelings_faces = []
#for index, emotion in enumerate(EMOTIONS):
# feelings_faces.append(cv2.imread('emojis/' + emotion + '.png', -1))

# starting video streaming
cv2.namedWindow('your_face')
camera = cv2.VideoCapture(0)
while True:
    frame = camera.read()[1]
    #reading the frame
    frame = imutils.resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_detection.detectMultiScale(gray,
                                            scaleFactor=1.1,
                                            minNeighbors=5,
                                            minSize=(30, 30),
                                            flags=cv2.CASCADE_SCALE_IMAGE)

    canvas = np.zeros((250, 300, 3), dtype="uint8")
    frameClone = frame.copy()
    if len(faces) > 0:
Example #43
while camera.IsGrabbing():
    grabResult = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)   

    if grabResult.GrabSucceeded():
        # Access the image data
        image = converter.Convert(grabResult)
        img = image.GetArray()
        img = cv2.resize(img, (512, 512))      
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        
        fft = np.fft.fft2(img)
        fftShift = np.fft.fftshift(fft)
        magnitude =  0.1 * np.log(np.abs(fftShift))
        
        cv2.namedWindow('title', cv2.WINDOW_NORMAL)
        cv2.imshow('title', magnitude)

        # videoWriter.write(magnitude)

        k = cv2.waitKey(1)           

        if k == 27:
            break
    grabResult.Release()   
    
# Releasing the resource    
camera.StopGrabbing()
# videoWriter.release()
cv2.destroyAllWindows()
Example #44
    camera       = Camera()
    frame        = camera.source_image.copy()
    frame_width  = frame.shape[0]
    frame_height = frame.shape[1]
else:
    cap          = cv2.VideoCapture(0)
    ret, frame   = cap.read()
    frame_width  = int(cap.get(3))
    frame_height = int(cap.get(4))

slope_deg       = 0
keyboard        = Pose2D()
show_keyb_frame = False
keyb_top_left, keyb_bottom_right = (-1, -1), (-1, -1)

cv2.namedWindow("frame")
cv2.setMouseCallback("frame", mouse_event)

def processing_keyboard(x1, y1, x2, y2, obj_id):
    global slope_deg, send_point, keyb_frame
    global keyb_top_left, keyb_bottom_right

    box_h = int(((y2 - y1) / unpad_h) * img.shape[0])
    box_w = int(((x2 - x1) / unpad_w) * img.shape[1])
    y1    = int(((y1 - pad_y // 2) / unpad_h) * img.shape[0])
    x1    = int(((x1 - pad_x // 2) / unpad_w) * img.shape[1])
    color = colors[int(obj_id) % len(colors)]
    cls   = 'object'

    keyb_top_left     = (x1, y1)
    keyb_bottom_right = (x1+box_w, y1+box_h)
Example #45
image_names = [
    'C:\\Users\\kurnia\\Desktop\\Face_detection_images\\face2.jpg',
    'C:\\Users\\kurnia\\Desktop\\Face_detection_images\\198198_faces.jpg'
]
images = []
max_width = 0  # find the max width of all the images
total_height = 0  # the total height of the images (vertical stacking)

for name in image_names:
    # open all images and find their sizes
    images.append(cv2.imread(name))
    if images[-1].shape[1] > max_width:
        max_width = images[-1].shape[1]
    total_height += images[-1].shape[0]

# create a new array with a size large enough to contain all the images
final_image = np.zeros((total_height, max_width, 3), dtype=np.uint8)

current_y = 0  # keep track of where your current image was last placed in the y coordinate
for image in images:
    # add an image to the final array and increment the y coordinate
    final_image[current_y:current_y + image.shape[0], :image.shape[1], :] = image
    current_y += image.shape[0]

cv2.namedWindow('image', cv2.WINDOW_AUTOSIZE)

cv2.imshow('image', final_image)
cv2.waitKey(0)  # without a waitKey the window never repaints

cv2.imwrite('fin.PNG', final_image)
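Since every row of final_image is already zero-filled, an equivalent (hedged) sketch pads each image to max_width and stacks once with np.vstack:

padded = [cv2.copyMakeBorder(im, 0, 0, 0, max_width - im.shape[1],
                             cv2.BORDER_CONSTANT, value=0)   # pad right edge with black
          for im in images]
final_image = np.vstack(padded)                              # vertical concatenation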
Example #46
    'fly': ObjectStats(),
}

count = 0
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Watershed blob detection
    # see https://www.pyimagesearch.com/2015/11/02/watershed-opencv/
    # ----------------------------------------------------------------------------------------
    if method == 'test':
        if count == 0:
            cv2.namedWindow('frame_orig', cv2.WINDOW_NORMAL)
            cv2.namedWindow('frame_thresh', cv2.WINDOW_NORMAL)
            #cv2.namedWindow('frame_labels', cv2.WINDOW_NORMAL)

        thresh_man = 35
        #thresh_otsu = skimage.filters.threshold_otsu(frame_gray)
        #thresh_yen = skimage.filters.threshold_yen(frame_gray)
        #thresh_li = skimage.filters.threshold_li(frame_gray)
        #thresh_niblack = skimage.filters.threshold_niblack(frame_gray,window_size=11,k=-0.5)
        #thresh_tri = skimage.filters.threshold_triangle(frame_gray)
        #thresh_iso = skimage.filters.threshold_isodata(frame_gray)

        #frame_mask = frame_gray > thresh_otsu
        #frame_mask = frame_gray > thresh_yen
        #frame_mask = frame_gray > thresh_li
        #frame_mask = frame_gray > thresh_niblack
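The commented-out lines compare skimage's automatic thresholds; OpenCV's built-in Otsu variant does the same job in a single call - a sketch that would drop in here:

        # Otsu ignores the passed value (0) and derives the threshold itself
        thresh_otsu, frame_mask = cv2.threshold(frame_gray, 0, 255,
                                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)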
Example #47
import cv2
import numpy as np
import tensorflow as tf

m_new = tf.keras.models.load_model('Digit.h5')

img = np.ones((600,600),dtype='uint8')*255
# 255 - White , 0 - Black
img[100:500,100:500] = 0
windowName = 'Digits Project'
cv2.namedWindow(windowName)
def draw_type(event, x, y, flags, param):
    # event: mouse event type (left click, right click, move, ...)
    # x, y:  current cursor position in the window
    global state
    if event == cv2.EVENT_LBUTTONDOWN:
        state = True
        cv2.circle(img,(x,y),10,(255,255,255),-1)
    elif event == cv2.EVENT_MOUSEMOVE:
        if state == True:
            cv2.circle(img,(x,y),10,(255,255,255),-1)
    else:
        state = False

state = False  # True while the left mouse button is held down
cv2.setMouseCallback(windowName,draw_type)
while True:
    cv2.imshow(windowName,img)
    key = cv2.waitKey(1)  # wait 1 ms for a key press
    if key == ord('q'): 
        break
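    # -- hedged sketch, not in the original: query the loaded model on 'p' --
    # Digit.h5 is assumed to take 28x28 grayscale MNIST-style input
    elif key == ord('p'):
        digit = cv2.resize(img[100:500, 100:500], (28, 28))   # crop the canvas
        digit = digit.astype('float32') / 255.0               # scale to [0, 1]
        digit = digit.reshape(1, 28, 28, 1)                   # batch, h, w, channel
        print('prediction:', np.argmax(m_new.predict(digit)))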
Example #48
import numpy as np
import cv2 as cv
import os
from scipy.ndimage import zoom

inputpath = ".\\inputImages"
outputpath = ".\\downsample"

for root,folders,files in os.walk(inputpath):
    # loop the images
    for file in files:
        image = cv.imread(root+"\\"+file)

        height, width, channels = image.shape

        height = height / (width / 32)

        scaledImage = cv.resize(image,(32, int(height)),interpolation=cv.INTER_CUBIC)

        cv.namedWindow(file, cv.WINDOW_NORMAL)
        cv.imshow(file, image)
        cv.waitKey(1)  # give the GUI a tick; without it the window never paints

        # write out the downsampled image
        cv.imwrite(outputpath + "\\" + file, scaledImage)
        cv.destroyAllWindows()
Example #49
#Original code: https://stackoverflow.com/questions/22003441/streaming-m3u8-file-with-opencv
import cv2
import subprocess as sp
import numpy as np
VIDEO_URL = "https://videos3.earthcam.com/fecnetwork/9974.flv/chunklist_w917803974.m3u8"
cv2.namedWindow("Streetcam",cv2.WINDOW_NORMAL)
FFMPEG_BIN = "/anaconda3/bin/ffmpeg"
#command = [FFMPEG_BIN,"-i", VIDEO_URL,
#           "-loglevel", "quiet", # no text output
#           "-an",   # disable audio
#           "-f", "image2pipe",
#           "-pix_fmt", "bgr24",
#           "-vcodec", "rawvideo", "-"
#            ]
command = [ FFMPEG_BIN, "-i", VIDEO_URL,
        "-loglevel", "quiet",   # no text output
        "-an",                  # disable audio
        "-f", "rawvideo",
        "-vcodec", "rawvideo",
        "-s", "420x360",        # size of one frame
        "-pix_fmt", "rgb24",
        "-r", "24",             # frames per second
        "-" ]                   # write raw frames to stdout
pipe = sp.Popen(command, stdin=sp.PIPE, stdout=sp.PIPE)
while True:
    raw_image = pipe.stdout.read(420 * 360 * 3)  # one frame: width * height * 3 bytes
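    # -- hedged sketch, not in the original: decode the raw bytes into a frame --
    if len(raw_image) != 420 * 360 * 3:
        break                                        # stream ended or short read
    frame = np.frombuffer(raw_image, dtype='uint8').reshape((360, 420, 3))
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)   # the pipe delivers RGB
    cv2.imshow("Streetcam", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break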
Example #50
                far = tuple(res[f][0])
                a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
                b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
                c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
                angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))  # cosine theorem
                if angle <= math.pi / 2:  # angle less than 90 degree, treat as fingers
                    cnt += 1
                    cv2.circle(drawing, far, 8, [211, 84, 0], -1)
            return True, cnt
    return False, 0


# Camera
camera = cv2.VideoCapture(0)
camera.set(10,200)
cv2.namedWindow('trackbar')
cv2.createTrackbar('trh1', 'trackbar', threshold, 100, printThreshold)


while camera.isOpened():
    ret, frame = camera.read()
    threshold = cv2.getTrackbarPos('trh1', 'trackbar')
    frame = cv2.bilateralFilter(frame, 5, 50, 100)  # smoothing filter
    frame = cv2.flip(frame, 1)  # flip the frame horizontally
    cv2.rectangle(frame, (int(cap_region_x_begin * frame.shape[1]), 0),
                 (frame.shape[1], int(cap_region_y_end * frame.shape[0])), (255, 0, 0), 2)
    cv2.imshow('original', frame)

    #  Main operation
    if isBgCaptured == 1:  # this part won't run until the background is captured
        img = removeBG(frame)
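removeBG is not defined in this excerpt; in the hand-gesture demos this code resembles, it is typically a frozen MOG2 foreground mask plus a light erosion. A sketch under that assumption, where bgModel is presumed to be a cv2.createBackgroundSubtractorMOG2 instance built when the background was captured:

def removeBG(frame):
    fgmask = bgModel.apply(frame, learningRate=0)     # learningRate=0 keeps the model frozen
    kernel = np.ones((3, 3), np.uint8)
    fgmask = cv2.erode(fgmask, kernel, iterations=1)  # trim mask noise at the edges
    return cv2.bitwise_and(frame, frame, mask=fgmask)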
Example #51
    def __imshow__(self, img_mat, name='name'):
        # blocking debug helper: show img_mat and wait for any key
        cv.namedWindow(name, cv.WINDOW_AUTOSIZE)
        cv.imshow(name, img_mat)
        cv.waitKey(0)
        return
Example #52
]

# get start time
start = time.time()

# =============================================================================
# Pre-process the entire image (subsetting, shadow masking, color masking)
# =============================================================================

# subset the image?
if subset == 'y':
    while True:
        img = cv2.imread(im)
        win_name = "ROI Selector ('spacebar' to end)"
        cv2.startWindowThread()
        cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
        cv2.moveWindow(win_name, 0, 0)
        win_size = func.resizeWin(img, resize)  # compute once, reuse both dims
        cv2.resizeWindow(win_name, win_size[0], win_size[1])
        r = cv2.selectROI(win_name, img, False, False)
        if r[2] < 10 or r[3] < 10:
            print("\nBad ROI: {}\nThe image will not be subset, try again".
                  format(str(r)))
            cv2.destroyAllWindows()
        else:
            cv2.destroyAllWindows()
            break

# read the image
if subset == 'y':
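    # -- hedged sketch, the excerpt cuts off here: crop to the selected ROI --
    # cv2.selectROI returns (x, y, w, h)
    img = cv2.imread(im)
    img = img[int(r[1]):int(r[1] + r[3]),   # rows:    y .. y + h
              int(r[0]):int(r[0] + r[2])]   # columns: x .. x + w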
Example #53
    rob, coordvalid = vis.returnDynamicCoordinates(display=1)

    if DISPLAY:
        cv2.imshow('frame',img_cul)
        img_real = cv2.warpPerspective(img_cul, vis.trans, (500,500))
        obstacles = vis.getMap(downscale = False)
        if not isinstance(obstacles,bool):
            cv2.drawContours(img_real, obstacles, -1, (0,255,0), 3)
            for p in obstacles:
                for corner in p:
                    cv2.circle(img_real, tuple(corner.reshape(2)), 5, (255,255,0), thickness=1, lineType=8, shift=0)
        if coordvalid:
            pt1 = (int(rob[0]*5), int(rob[1]*5))
            pt2 = (int(rob[0]*5+math.cos(rob[2])*50), int(rob[1]*5+math.sin(rob[2])*50))

            cv2.line(img_real,pt1,pt2,(128,128,0),thickness=3)
            cv2.circle(img_real,pt1,10,(128,128,0),thickness = 4)


        cv2.imshow('map', img_real)
        cv2.namedWindow('internal map',cv2.WINDOW_NORMAL)
        cv2.resizeWindow('internal map', 500,500)
        cv2.imshow('internal map', cv2.cvtColor(vis.frame,cv2.COLOR_HSV2BGR))
    t1 = time.process_time()
    #print(t1-t0)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
        break 

cap.release()
cv2.destroyAllWindows()
Example #54
                      str(coord_col + i) + " - adding range")
                if i >= layerMax:
                    i = i_initial
                    j += 1
                else:
                    i += 1
                if j >= layerMax:
                    color = "U"
                    print("ERROR - color undefined")
                    print("------------------------")
                    break
    return color


# Creates a resizable window frame - video/images are loaded into it
cv2.namedWindow("Frame", cv2.WINDOW_NORMAL)
# The program also runs fine without this line


def main():
    """Stage 1.1: Obtain masks for each individual color in image"""
    # Captures frame-by-frame
    frame = cv2.imread("image_02.jpg", cv2.IMREAD_COLOR)

    # Gaussian filter is applied to captured image - remove noises
    image_gaussian = cv2.GaussianBlur(frame, (5, 5), 0)

    # Converts color-space from BGR to HSV
    frame_hsv = cv2.cvtColor(image_gaussian, cv2.COLOR_BGR2HSV)

    # In OpenCV, range is [179, 255, 255]
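    # -- hedged sketch, not in the original excerpt: hue spans 0-179 in OpenCV,
    #    so red wraps around the axis and typically needs two inRange masks --
    mask_lo = cv2.inRange(frame_hsv, np.array([0, 100, 100]),
                          np.array([10, 255, 255]))
    mask_hi = cv2.inRange(frame_hsv, np.array([170, 100, 100]),
                          np.array([179, 255, 255]))
    mask_red = cv2.bitwise_or(mask_lo, mask_hi)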
Example #55
import cv2 as cv
import numpy as np

backSub = cv.createBackgroundSubtractorMOG2()

capture = cv.VideoCapture(0)
cv.namedWindow("And", cv.WND_PROP_FULLSCREEN)
cv.setWindowProperty("And", cv.WND_PROP_FULLSCREEN, cv.WINDOW_FULLSCREEN)

while True:
    kernel = np.ones((3, 3), np.uint8)
    ret, frame = capture.read()
    if frame is None:
        break

    fgMask = backSub.apply(frame)

    cv.rectangle(frame, (10, 2), (100, 20), (255, 255, 255), -1)
    cv.putText(frame, str(capture.get(cv.CAP_PROP_POS_FRAMES)), (15, 15),
               cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

    # cv.imshow('Frame', frame)
    # cv.imshow('FG Mask', fgMask)
    res = cv.bitwise_and(frame, frame, mask=fgMask)

    # gray = cv.cvtColor(res, cv.COLOR_BGR2GRAY)

    # create a binary thresholded image
    _, binary = cv.threshold(fgMask, 225, 255, cv.THRESH_BINARY_INV)
    dilation = cv.dilate(binary, kernel, iterations=1)
    # cv.imshow('dilation', binary)
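    # -- hedged sketch, not in the original: box the moving blobs in the mask --
    # OpenCV 4 findContours returns (contours, hierarchy)
    contours, _ = cv.findContours(fgMask, cv.RETR_EXTERNAL,
                                  cv.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv.contourArea(c) > 500:                  # skip speckle
            x, y, w, h = cv.boundingRect(c)
            cv.rectangle(res, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv.imshow("And", res)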
Example #56
pipeline.start(config)

try:
    while True:

        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        if not depth_frame or not color_frame:
            continue

        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

        # Stack both images horizontally
        images = np.hstack((color_image, depth_colormap))

        # Show images
        cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('RealSense', images)
        cv2.waitKey(1)

finally:

    # Stop streaming
    pipeline.stop()
Example #57
# -*- coding: utf-8 -*-
import cv2
import math
import numpy as np

cv2.namedWindow("src")
cv2.namedWindow("dst")
cap = cv2.VideoCapture(0)

while True:
    ret, img_src = cap.read()

    # split into B, G, R planes and remerge them shuffled to recolor the frame
    img_bgr = cv2.split(img_src)
    img_dst = cv2.merge((img_bgr[2], img_bgr[0], img_bgr[1]))

    cv2.imshow("src", img_src)
    cv2.imshow("dst", img_dst)
    ch = cv2.waitKey(1)
    if ch == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
Example #58
showImg = True

cap = cv2.VideoCapture('1.mp4')

if not showImg:
    fourcc = cv2.VideoWriter_fourcc('X', '2', '6', '4')
    # fourcc = cv2.VideoWriter_fourcc('H','E','V','C')
    out = cv2.VideoWriter('detectionTest.mkv', fourcc, 20, (2560, 1440))
    # out = cv2.VideoWriter('detectionTest.mp4', -1, 20, (2560,1440))

wait = 100000
w = True

if showImg:
    # create all debug windows resizable (0 == cv2.WINDOW_NORMAL) at 600x600
    for name in ('frame', 'rawFg', 'fg', 'contours', 'roi', 'test'):
        cv2.namedWindow(name, 0)
        cv2.resizeWindow(name, 600, 600)

detector = Detector()
Example #59
# loading models
face_cascade = cv2.CascadeClassifier('./model/haarcascade_frontalface_default.xml')
emotion_classifier = load_model(emotion_model_path)
face_model = Model()
face_model.load()

# getting input model shapes for inference
emotion_target_size = (48,48)
face_target_size = (128,128)

# starting lists for calculating modes
emotion_window = []

# starting video streaming

cv2.namedWindow('window_frame')
# video_capture = cv2.VideoCapture(0)  # redundant second handle on device 0; cap below is the source

# Select video or webcam feed
cap = None
if USE_WEBCAM:
    cap = cv2.VideoCapture(0) # Webcam source
    # cap = cv2.flip(cap,-1)
else:
    cap = cv2.VideoCapture('./demo/dinner.mp4') # Video file source

while cap.isOpened():
    ret, bgr_image = cap.read()
    bgr_image = cv2.flip(bgr_image,1)
    #bgr_image = video_capture.read()[1]
Example #60
_, threshold_to_zero = cv2.threshold(img, 12, 255, cv2.THRESH_TOZERO)

cv2.imshow("Image", img)
cv2.imshow("th binary", threshold_binary)
cv2.imshow("th binary inv", threshold_binary_inv)
cv2.imshow("th trunc", threshold_trunc)
cv2.imshow("th to zero", threshold_to_zero)

cv2.waitKey(0)
cv2.destroyAllWindows()

Example #61
import cv2
import numpy as np


def nothing(x):
    pass  # trackbar callback; the loop below polls the value instead


# 'img' is assumed to be a grayscale image loaded before this excerpt
cv2.namedWindow("Image")
cv2.createTrackbar("Threshold value", "Image", 128, 255, nothing)

while True:
    value_threshold = cv2.getTrackbarPos("Threshold value", "Image")
    _, threshold_binary = cv2.threshold(img, value_threshold, 255, cv2.THRESH_BINARY)
    _, threshold_binary_inv = cv2.threshold(img, value_threshold, 255, cv2.THRESH_BINARY_INV)
    _, threshold_trunc = cv2.threshold(img, value_threshold, 255, cv2.THRESH_TRUNC)
    _, threshold_to_zero = cv2.threshold(img, value_threshold, 255, cv2.THRESH_TOZERO)
    _, threshold_to_zero_inv = cv2.threshold(img, value_threshold, 255, cv2.THRESH_TOZERO_INV)

    cv2.imshow("Image", img)
    cv2.imshow("th binary", threshold_binary)
    cv2.imshow("th binary inv", threshold_binary_inv)
    cv2.imshow("th trunc", threshold_trunc)
    cv2.imshow("th to zero", threshold_to_zero)