Example #1
def main():
   global preview

   parse_args(sys.argv[1:])
   cam = PiCamera()
   cam.rotation = -90
   cam.resolution = (width, height)
   cam.framerate = 60
   rawCapture = PiRGBArray(cam, size=(width, height))

   startTime = time.time()
   endTime = time.time() + capTime
   frames = 0

   for image in cam.capture_continuous(rawCapture, format="bgr", use_video_port=True):
      if image is None:
         print('capture failed')
         break
      frames += 1
      frame = image.array

      if preview: 
         cv2.imshow("frame", frame)
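         # waitKey returns -1 or a keycode; masking with 0xFF keeps only the low byte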
         key = cv2.waitKey(1) & 0xFF


      if time.time() > endTime:
         break
      rawCapture.truncate(0)

   print("Average Framerate for " + str(frames) +
         " frames was: " + str(float(frames) / capTime) + "fps")
   cv2.destroyAllWindows()
Example #2
    def startCapture(self):
        print("pressed start")
        self.capturing = True
        cap = self.c
        while (self.capturing):
            ret, frame = cap.read()
            if not ret:  # stop if the camera stops returning frames
                break

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            faces = self.faceCascade.detectMultiScale(
                gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE
                # flags=cv2.cv.CV_HAAR_SCALE_IMAGE
            )

            # Draw a rectangle around the faces
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # Display the resulting frame
            cv2.imshow('Video', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cv2.destroyAllWindows()
Example #3
def recordVideo( ):
    global outputfile
    global targetWidth
    global targetHeight
    global recordTime
    global cap
    global fps
    global sleepTime

    #fourcc = cap.get(cv2.CAP_PROP_FOURCC)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # note: OpenCV expects the frame size as (width, height); these names are
    # swapped here but stay consistent with the resize below
    videoWriter = cv2.VideoWriter(outputfile, fourcc, fps, (targetHeight, targetWidth))

    for frameCount in range(recordTime):
        if interrupt :
            break
        #print ("{}/{}".format(frameCount, totalFrames), end='\r')
        if DEBUG:
            sys.stdout.write("\r {0:.1f}/{1:.1f} recorded...".format(frameCount / fps, recordTime / fps))
        ret, frame = cap.read()
        smallFrame = cv2.resize(frame, (targetHeight,targetWidth))    

        videoWriter.write(smallFrame)

        time.sleep(sleepTime)
        #k = cv2.waitKey(40) & 0xff
        #if k == 27:
        #    break

    cap.release()
    videoWriter.release()
    cv2.destroyAllWindows()
    print("\nRecording completed. File saved at {}.".format(outputfile))
Example #4
def main():
    global countClicks, coordinates, copyimage

    cv2.resizeWindow(windowname, 700, 700)

    while (countClicks < 4):
        pressedKey = cv2.waitKey(1)
        cv2.imshow(windowname, image)

        if pressedKey & 0xFF == 27:  # Esc aborts point selection
            break

    pointone = np.float32(
        [[coordinates[0], coordinates[1]],
         [coordinates[2], coordinates[3]],
         [coordinates[4], coordinates[5]],
         [coordinates[6], coordinates[7]]])
    pointtwo = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
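    # the four source points (pointone) must pair one-to-one, in order, with the
    # destination points (pointtwo); getPerspectiveTransform solves the 3x3 homography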

    perspective = cv2.getPerspectiveTransform(pointone, pointtwo)
    output = cv2.warpPerspective(copyimage, perspective, (310, 310))

    cv2.imshow("Output Image", output)
    cv2.waitKey(0)

    cv2.destroyAllWindows()
Example #5
def image():
    # Load an color image in grayscale

    # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    # print img.shape
    for image in glob.glob("../extras/faceScrub/download/*/*.jpg"):
        start = time.time()
        img = cv2.imread(image)
        # res = cv2.resize(img, (227, 227), interpolation=cv2.INTER_CUBIC)

        FACE_DETECTOR_PATH = "../extras/haarcascade_frontalface_default.xml"

        # building the classifier once, outside the loop, would avoid reloading it per image
        detector = cv2.CascadeClassifier(FACE_DETECTOR_PATH)
        rects = detector.detectMultiScale(img, scaleFactor=1.4, minNeighbors=1,
                                          minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)

        # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # construct a list of bounding boxes from the detection
        # rects = [(int(x), int(y), int(x + w), int(y + h)) for (x, y, w, h) in rects]

        # update the data dictionary with the faces detected
        # data.update({"num_faces": len(rects), "faces": rects, "success": True})

        print("time", time.time() - start)
        for (x, y, w, h) in rects:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi_color = img[y:y + h, x:x + w]
            cv2.imshow('image', roi_color)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        cv2.imshow('image', img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #6
 def close(self, delay=0.5):
     """
     Function to shutdown application safely
     1. Close windows
     2. Disable controller
     3. Release capture interfaces 
     """
     self.log_msg('SYS', 'Shutting Down!')
     if self.controller is None:
         self.log_msg('WARN', 'Controller already off!')
     else:
         try:
             self.log_msg('CTRL', 'Closing Controller ...')
             self.controller.close() ## Disable controller
         except Exception as error:
             self.log_msg('CTRL', 'ERROR: %s' % str(error), important=True)
     for i in range(len(self.cameras)):
         if self.cameras[i] is None:
             self.log_msg('CAM', 'WARN: Camera %d already off!' % i)
         else:
             try:
                 self.log_msg('CAM', 'WARN: Closing Camera %d ...' % i)
                 self.cameras[i].release() ## Disable cameras
             except Exception as error:
                 self.log_msg('CAM', 'ERROR: %s' % str(error), important=True)
     if self.config['DISPLAY_ON']:
         cv2.destroyAllWindows() ## Close windows
Example #7
    def _timelapse_old(self, rate=1, duration=60):
        """
        build a time-lapse video with the given rate and duration
        """
        #img1 = cv2.imread("snapshot0.jpg")
        #height, width, layers = img1.shape

        fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        video  = cv2.VideoWriter()
        #if not video.open("timelapsed.avi", fourcc, rate, (640, 480), True):
        if not video.open("timelapsed.avi", fourcc, 1, (640, 480), True):
            dprint('failed to open videowriter')
            raise ExceptionCam('failed to open videowriter')

        for i in range(10):
            res=requests.get(self.url_snapshot+str(3), auth=(self.username, self.password), stream=True)
            if res.status_code == 200:
                #_img="snapshot.jpeg_" + time.strftime('%Y:%m:%d_%H:%M:%S')
                _img="snapshot{0}.jpg".format(i)
                with open(_img, "wb") as outfile:
                    res.raw.decode_content = True
                    shutil.copyfileobj(res.raw, outfile)

                img = cv2.imread(_img)
                video.write(img) 
                subprocess.call(["rm", "-rf", _img])

        video.release()
        cv2.destroyAllWindows()
Example #8
def goLiveT():
	cap = cv2.VideoCapture(0)
	cv2.namedWindow('image')
	# create trackbars for color change
	cv2.createTrackbar('Thres','image',0,255,nothing)
	# create switch for ON/OFF functionality
	switch = '0 : OFF \n1 : ON'
	cv2.createTrackbar(switch, 'image',0,1,nothing)
	while (1):

		_, imgOriginal = cap.read()
		cv2.imshow('imgOriginal',imgOriginal)
		filteredImage = rb.clearImage(imgOriginal)

		# get current positions of four trackbars
		binValue = cv2.getTrackbarPos('Thres','image')
		s = cv2.getTrackbarPos(switch,'image')

		k = cv2.waitKey(1) & 0xFF
		if k == 27:
			break

		if s == 1:
			thresImage = rb.doThresHold(filteredImage, binValue)
			cv2.imshow('img', thresImage)

	cv2.destroyAllWindows()
Example #9
def show_video(name):
    cap = cv2.VideoCapture(name)
    rotate = False
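    # 2x3 affine matrix: rotate 180 degrees about (480,270), the center of the 960x540 frame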
    M = cv2.getRotationMatrix2D((480,270), 180, 1.0)
    if not cap.isOpened():
      print("Error when reading video")
    else:
        while(True):
            try:
                # Capture frame-by-frame
                ret, frame = cap.read()
                frame = cv2.resize(frame, (960,540))
                if rotate:
                    frame = cv2.warpAffine(frame, M, (960, 540))
                cv2.putText(frame,'press the escape key when done',(20,20), cv2.FONT_HERSHEY_SIMPLEX, 1,(130,130,130),2)
                cv2.imshow(name,frame)
            except Exception:
                # read/resize failed (likely end of file); reopen to loop the video
                cap = cv2.VideoCapture(name)
                ret, frame = cap.read()
                frame = cv2.resize(frame, (960,540))
                cv2.imshow(name,frame)
            k = cv2.waitKey(20)
            if k == 27:
                break
            elif k == 114:  # 'r' key toggles rotation
                rotate = not rotate

    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    return rotate
Example #10
def detect_gaze_direction(video_capture,predictor):
	cam = cv2.VideoCapture(video_capture)
	cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
	cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
	video_capture = cam

	detector = dlib.get_frontal_face_detector()

	while True:
	    # Capture frame-by-frame
		ret, frame = video_capture.read()
		if ret:
			frame_color = frame
			frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			dets = detector(frame, 1)
			for k, d in enumerate(dets):
		        # Get the landmarks/parts for the face in box d.
				shape = predictor(frame, d)
		        # print(type(shape.part(1).x))
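				# dlib's 68-point model: landmarks 36-41 outline one eye, 42-47 the other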
				cv2.circle(frame_color,(shape.part(36).x,shape.part(36).y),2,(0,0,255))
				cv2.circle(frame_color,(shape.part(39).x,shape.part(39).y),2,(0,0,255))
				cv2.circle(frame_color,(shape.part(42).x,shape.part(42).y),2,(0,0,255))
				cv2.circle(frame_color,(shape.part(45).x,shape.part(45).y),2,(0,0,255))
				x1 = shape.part(36).x
				y1 = shape.part(37).y-2
				x2 = shape.part(39).x
				y2 = shape.part(40).y+2
				split = frame[y1:y2,x1:x2]
				split = process_eye(split)
				split = filter_eye(split)
				centre = cross_spread(split)
				frame[y1:y2,x1:x2]=split
				y1 = y1+2
				y2 = y2-2
				centre[1]=centre[1]-2
				# cv2.rectangle(frame_color,(x1,y1), (x2,y2), (0, 0, 255), 1)
				# cv2.circle(frame_color,(x1+centre[0],y1+centre[1]),2,(0,0,255))
				cv2.line(frame_color,(x1+centre[0],y1+centre[1]), (int((3*x1+4*centre[0]-x2)/2),int((3*y1+4*centre[1]-y2)/2)),(255,0,0))			
				x1 = shape.part(42).x
				y1 = shape.part(43).y-2
				x2 = shape.part(45).x
				y2 = shape.part(46).y+2
				split = frame[y1:y2,x1:x2]
				split = process_eye(split)
				split = filter_eye(split)
				centre = cross_spread(split)
				frame[y1:y2,x1:x2]=split
				y1 = y1+2
				y2 = y2-2
				centre[1]=centre[1]-2
				# cv2.rectangle(frame_color,(x1,y1), (x2,y2), (0, 0, 255), 1)
				# cv2.circle(frame_color,(x1+centre[0],y1+centre[1]),2,(0,0,255))
				cv2.line(frame_color,(x1+centre[0],y1+centre[1]), (int((3*x1+4*centre[0]-x2)/2),int((3*y1+4*centre[1]-y2)/2)),(255,0,0))
			# Display the resulting frame
			cv2.imshow('Video', frame_color)
			if cv2.waitKey(1) & 0xFF == ord('q'):
				break
	# Release video capture
	video_capture.release()
	cv2.destroyAllWindows()
Example #11
def maskcolor(path,_hsv_mask):
	if path == '0':
		path = 0  # index 0 opens the default camera
	cap = cv2.VideoCapture(path)

	while(cap.isOpened()):
		_, frame = cap.read()
		hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

		# Threshold the HSV image to get only blue colors
		mask = cv2.inRange(hsv, _hsv_mask[0], _hsv_mask[1])

		# Bitwise-AND mask and original image
		_res = cv2.bitwise_and(frame,frame, mask= mask)
		_gray = cv2.cvtColor(_res, cv2.COLOR_BGR2GRAY)
		_edge = cv2.adaptiveThreshold(_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
		# OpenCV 4.x returns (contours, hierarchy); OpenCV 3.x returns three values
		cnt, hchy = cv2.findContours(_edge, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
		print(hchy)
		cv2.drawContours(frame, cnt, -1, (0, 255, 0), 3)
		#x,y,w,h = cv2.boundingRect(np.vstack(cnt))
		#cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2)		
		#rect = cv2.minAreaRect(np.vstack(cnt))
		#box = cv2.cv.BoxPoints(rect)
		#box = np.int0(box)
		#cv2.drawContours(frame,[box],0,(0,0,255),2)
		cv2.imshow('a',frame)
		#cv2.imshow('b',_edge)
		#cv2.imshow('c',res)
		#plt.show()

		if cv2.waitKey(1) & 0xFF == ord('q'):
			break
	cap.release()
	cv2.destroyAllWindows()
Example #12
        def run(self, camera=False):
                frame = cv2.namedWindow(FRAME_NAME)

                # Set callback
                cv2.setMouseCallback(FRAME_NAME, self.draw)

                if camera:
                        cap = cv2.VideoCapture(0)
                        for i in range(10):
                                status, image = cap.read()
                else:
                        image = cv2.imread('00000001.jpg')

                self.image = cv2.undistort(image, CMATRIX, DIST, None, NCMATRIX)

                # Get various data about the image from the user
                self.get_pitch_outline()

                self.get_zone('Zone_0', 'draw LEFT Defender')
                self.get_zone('Zone_1', 'draw LEFT Attacker')
                self.get_zone('Zone_2', 'draw RIGHT Attacker')
                self.get_zone('Zone_3', 'draw RIGHT Defender')

                self.get_goal('Zone_0')
                self.get_goal('Zone_3')

                print('Press any key to finish.')
                cv2.waitKey(0)
                cv2.destroyAllWindows()

                # Write out the data
                # self.dump('calibrations/calibrate.json', self.data)
                tools.save_croppings(pitch=self.pitch, data=self.data)
Example #13
def show_roi(roi_list):
    for roi in roi_list:
        (r, g, b) = (roi[0], roi[1], roi[2])
        roi = cv2.merge((r, g, b))
        cv2.imshow("img", roi)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #14
def show_picture():
    #display image, requires user to press a button to continue
    imgFile = cv2.imread('temp.png')
    cv2.imshow('angle less than 10', imgFile)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return None
Example #15
def listener():
	# init_node must be called before any Subscriber is created
	rospy.init_node("listener", anonymous=True)
	rospy.Subscriber("/usb_cam/image_raw", Image, callback)

	rospy.spin()

	cv2.destroyAllWindows()
Example #16
File: img.py Project: ftyszyx/tools
    def getMultiTemplePos(self,srcPicPath,templePicPath):
        print("srcpath",srcPicPath,"temppath",templePicPath)
        img_src=cv2.imread(srcPicPath)
        img_src_gray=cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)
        srcw,srch=img_src_gray.shape[::-1]
        print("get pic:",srcw,srch)
        img_temple=cv2.imread(templePicPath)
        img_temple_gray=cv2.cvtColor(img_temple, cv2.COLOR_BGR2GRAY)
        templew,templeh=img_temple_gray.shape[::-1]
        res = cv2.matchTemplate(img_src_gray,img_temple_gray,cv2.TM_CCOEFF_NORMED) 
        # print("get temple",res)
        # cv2.imshow('src',img_src_gray)
        # cv2.imshow('temple',img_temple_gray)
        # cv2.waitKey(0)

        threshold = 0.7 
        loc = np.where( res >= threshold)
        print(loc)
        # zipres=zip(*loc[::-1])
        # print("zipres",zipres)
        # if len(zipres)==0:
        #     return False,None,None,None
        # else:
        #     return True,zipres[0],templew,templeh
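        # np.where returns (row, col) index arrays; loc[::-1] flips them into (x, y) points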
        for pt in zip(*loc[::-1]):
            cv2.rectangle(img_src, pt, (pt[0] + templew, pt[1] + templeh),(7,249,151), 2)   
        cv2.imshow('Detected',img_src)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #17
def close_and_exit():
    """
    Close all windows and exit safely

    """
    cv2.destroyAllWindows()
    sys.exit(0)
Example #18
def test_color_block_finder_01():
    '''
    Color block detection test case 1: read an image from a file and detect blocks
    '''
    # Image path
    img_path = "demo-pic.png"
    # Lower HSV color threshold boundary
    lowerb = (96, 210, 85)
    # Upper HSV color threshold boundary
    upperb = (114, 255, 231)

    # Read the source image (BGR)
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    # Check whether the image was read successfully
    if img is None:
        print("Error: please check the image file path")
        exit(1)

    # Detect the color blocks and get an array of bounding rectangles
    rects = color_block_finder(img, lowerb, upperb)
    # Draw rectangles around the detected blocks
    canvas = draw_color_block_rect(img, rects)
    # Show the final result in a HighGUI window
    cv2.namedWindow('result', flags=cv2.WINDOW_NORMAL | cv2.WINDOW_FREERATIO)
    cv2.imshow('result', canvas)

    # Wait for any key press
    cv2.waitKey(0)
    # Close all windows
    cv2.destroyAllWindows()
Example #19
def main():
    image=image_pro()
    game=game_body()
    game.gui_start()
    start,end,score=0,1,0
    speech.say("Welcome to swords")
    speech.say("press any key to begin")
    while end:
          start,end=game.start(),game.end()
          #start=game.start()
          #print end
          while start:
              #profile.run('main()')
              time1 = time.time()
              im,posX,posY=image.image_core()
              game.sword_center_draw(im,posX,posY)
              score=game.game_core(posX,posY)
              time2 = time.time()
              #print ((time2-time1)*1000.0),'sec'
              end=game.end()
              start=end
    # option for retry
    game.gui_end()
    speech.say("final score is "+str(score))
    cv2.destroyAllWindows()
    pygame.quit()
Example #20
def main():
    files = glob.glob("./scans/*.jpg")
    files += glob.glob("./scans/*.jpeg")
    for f in files:
        reset_stats()
        print("Processing: " + f.split("/")[-1])

        schedule = Schedule()
        schedule.load_data()
        if schedule.get_has_schedule():
            scan_image(f, schedule)

            print("Sheet ok? ", end="")
            while True:
                cv2.imshow("image", cv2.resize(img, (446, 578)))
                cv2.moveWindow("image", 0, 0)
                # user_in = raw_input()
                key = cv2.waitKey(-1)
                if key == ord("y"):
                    print("Sheet ok... Dumping data")
                    dump_stats()
                    os.remove(f)
                    break
                elif key == ord("n"):
                    print("Marking to redo")
                    #os.rename(f, "./scans/redo/" + f.split("/")[len(f.split("/")) - 1])
                    break
                elif key == ord("q"):
                    exit(0)
                else:
                    continue
            cv2.destroyAllWindows()
        else:
            print("Unable to load schedule... Aborting")
Example #21
def main():

    for fname in glob("left/*/*/ein/sceneModel/model.yml"):
        print(fname)

        f = open(fname) 

        lines = []
        # ignore the %YAML:1.0, because the python parser doesn't handle 1.0.
        f.readline() 

        for line in f:
            # for some reason the python parser doesn't like this line either.
            if "background_pose" in line:
                continue
            lines.append(line)
        data = "\n".join(lines)
        
        ymlobject = yaml.load(data)
        #print ymlobject
        scene = ymlobject["Scene"]
        observed_map = GaussianMap.fromYaml(scene["observed_map"])
        image = observed_map.toImage()
        cv2.imwrite("observed.png", image)
        cv2.imshow("observed map", image)

        print("observed map: ", observed_map.width, "x", observed_map.height)
        dimage = readMatFromYaml(scene["discrepancy_magnitude"])
        cv2.imshow("discrepancy magnitude", dimage)

        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #22
def main():
    cap = cv2.VideoCapture(0)
    disto = cycle(funcs)

    time = 0  # frame counter (note: shadows the time module inside this function)
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        if time % 3 == 0:       # chooses how long to keep each distortion
            distfunc = next(disto)
        frm = distfunc(frame)

        # Our operations on the frame come here
        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Display the resulting frame
        cv2.imshow('frame',frm)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        time += int(random.random() * 10)       #add a random element to the distortion

        if time / 50  > 1:
            disto = cycle(funcs)                ## reshuffle the effects so it won't repeat so much.
            time = 0

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Example #23
def main(argv):
    args = str(sys.argv[1])
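    # negative hitThreshold admits weaker SVM detections; scale sets the pyramid step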
    hogParams = {'hitThreshold': -.5, 'scale': 1.05}
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    video = cv2.VideoCapture(args)

    ret, frame = video.read()

    while(ret):

        cimg = np.copy(frame)
        people, w = hog.detectMultiScale(frame, **hogParams)
        filtered = []
        for ri, r in enumerate(people):
            for qi, q in enumerate(people):
                if ri != qi and inside(r, q):
                    print("break")
                    break
            else:
                # for/else: runs only when no enclosing detection was found
                filtered.append(r)
        # draw_detections(frame, people)
        draw_detections(cimg, filtered, 1)
        cv2.imshow('detected people', cimg)
        cv2.waitKey(2)

        ret, frame = video.read()

    cv2.destroyAllWindows()
    video.release()
Example #24
    def run(self):
        self.ratio = 100
        self.resize_im = array([])
        while True:
            ch = 0xFF & cv2.waitKey(50)
            if ch == 27:
                break

            if ch in [ord('s'), ord('S')]:
                if self.resize_im.any():
                    cv2.imwrite(str(self.filename), self.resize_im)
                    print('Image Saved.')
                    break

            if ch in [ord('a'), ord('A')]:
                newFilename = QFileDialog.getSaveFileName(self, 'Save File As...', os.getenv('HOME'),
                                                          "Images (*.png *.xpm *.jpg)")
                if newFilename:
                    if self.resize_im.any():
                        cv2.imwrite(str(newFilename), self.resize_im)
                        print('Image Saved as ' + newFilename)
                        break

            if ch in [ord('r'), ord('R')]:
                ratio, ok = QInputDialog.getText(self, 'Set Image Ratio', 'Type ratio value in % (ex: 80): ')
                if ok:
                    if int(ratio) > 0 and int(ratio) <= 100:
                        self.ratio = int(ratio)
                        self.resizeImage()
                    else:
                        print('Image scale must be between 1 and 100')

        cv2.destroyAllWindows()
Example #25
def main():
	# version 3.0.0
	# version 2.4.11
	print(cv2.__version__)

	imgPlate = cv2.imread('plate_judge.jpg',cv2.IMREAD_COLOR)

	PlateLocater.m_debug = False
	Result = PlateLocater.fuzzyLocate(imgPlate)

	print(type(Result))
	print('Number of candidate plates:', len(Result))
	print(Result[0].shape)

	platesJudge(Result)

	# imgGray = cv2.cvtColor(imgPlate,cv2.COLOR_BGR2GRAY)
	# cv2.imshow('src',imgGray)
	# imgEqulhist = cv2.equalizeHist(imgGray)
	# cv2.imshow('equal',imgEqulhist)
	cv2.waitKey(0)
	cv2.destroyAllWindows()


	# box = cv2.boxPoints(mr)  # if you are use opencv 3.0.0
	# box = cv2.cv.boxPoints(mr) # if your are using opencv 2.4.11

	# SVM reference
	# http://answers.opencv.org/question/5713/save-svm-in-python/
	#

	# Problems encountered
	# http://answers.opencv.org/question/55152/unable-to-find-knearest-and-svm-functions-in-cv2/

	return None
Example #26
def main():
    cv2.namedWindow("original", cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow("keyboard", cv2.WINDOW_AUTOSIZE)

    cam = cv2.VideoCapture(0)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, 600)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    cam.set(cv2.CAP_PROP_FPS, 24)
    ret, frame = cam.read()

    player = sound.SoundPlayer('./resources/s1.sf2')
    
    while True:
        ret, frame = cam.read()

        keyboard_image = KeyboardRecognizer(frame).get_keyboard()
        if keyboard_image is not None:
            notes = KeysRecognizer(keyboard_image, 'keyboard').get_pressed_keys()
            player.play_notes(notes)

        cv2.imshow('original', frame)
        
        if cv2.waitKey(1) == 27:  # Escape code
            break

    cv2.destroyAllWindows()
Example #27
def main(camera_index=0, scale_down_ratio=1, scale_up_ratio=None):
    if scale_up_ratio is None:
        scale_up_ratio = 1 / scale_down_ratio
    left_controller = MultiTrackbarWindow("Left Tracker", [{"name": "H"}, {"name": "S"}, {"name": "V"}])
    right_controller = MultiTrackbarWindow("Right Tracker", [{"name": "H"}, {"name": "S"}, {"name": "V"}])
    # Declare the interface through which the camera will be accessed
    camera = cv2.VideoCapture(camera_index)
    while True:
        # Grab the current frame
        able_to_retrieve_frame, frame = camera.read()
        # Get a boolean value determining whether a frame was successfully grabbed
        # from the camera and then the actual frame itself.
        if not able_to_retrieve_frame:
            print("Camera is not accessible. Is another application using it?")
            print("Check to make sure other versions of this program aren't running.")
            break

        # Resize the frame, blur it, and convert it to the HSV color space.
        resized = cv2.resize(frame, (0, 0), fx=scale_down_ratio, fy=scale_down_ratio)
        blurred = cv2.GaussianBlur(resized, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        # cv2.imshow("HSV", hsv)
        find_agent(hsv, frame, left_controller, right_controller, scale_up_ratio)
        # Get the value for the key we entered with '& 0xFF' for 64-bit systems
        key = cv2.waitKey(1) & 0xFF
        # Stop the program if the key was q/Q
        if key == ord('q') or key == ord('Q'):
            break

    # cleanup the camera and close any open windows
    camera.release()
    cv2.destroyAllWindows()
Example #28
def locate_thumbnail(thumbnail_filename, source_filename, display=False, save_visualization=False,
                     save_reconstruction=False, reconstruction_format="jpg"):
    thumbnail_basename, thumbnail_image = open_image(thumbnail_filename)
    source_basename, source_image = open_image(source_filename)

    logging.info("Attempting to locate %s within %s", thumbnail_filename, source_filename)
    kp_pairs = match_images(thumbnail_image, source_image)

    if len(kp_pairs) >= 4:
        title = "Found %d matches" % len(kp_pairs)
        logging.info(title)

        H, mask = find_homography(kp_pairs)
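        # assumed from usage below: H is the 3x3 thumbnail-to-source homography, mask flags inliers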

        new_thumbnail, corners, rotation = reconstruct_thumbnail(thumbnail_image, source_image, kp_pairs, H)

        print(json.dumps({
            "master": {
                "source": source_filename,
                "dimensions": {
                    "height": source_image.shape[0],
                    "width": source_image.shape[1],
                }
            },
            "thumbnail": {
                "source": thumbnail_filename,
                "dimensions": {
                    "height": thumbnail_image.shape[0],
                    "width": thumbnail_image.shape[1],
                }
            },
            "bounding_box": {
                "height": corners[0][1] - corners[0][0],
                "width": corners[1][1] - corners[1][0],
                "x": corners[1][0],
                "y": corners[0][0],
            },
            "rotation_degrees": rotation
        }))

        if save_reconstruction:
            new_filename = "%s.reconstructed.%s" % (thumbnail_basename, reconstruction_format)
            cv2.imwrite(new_filename, new_thumbnail)
            logging.info("Saved reconstructed thumbnail %s", new_filename)
    else:
        title = "Found only %d matches; skipping reconstruction" % len(kp_pairs)
        logging.warning(title)  # also keeps `title` defined for the display branch below
        new_thumbnail = corners = H = mask = None

    if display or save_visualization:
        vis_image = visualize_matches(source_image, thumbnail_image, new_thumbnail, corners, kp_pairs, mask)

    if save_visualization:
        vis_filename = "%s.visualized%s" % os.path.splitext(thumbnail_filename)
        cv2.imwrite(vis_filename, vis_image)
        logging.info("Saved match visualization %s", vis_filename)

    if display:
        cv2.imshow(title, vis_image)
        cv2.waitKey()
        cv2.destroyAllWindows()
Example #29
def main():
    imgOriginal = cv2.imread(r'C:\Users\dbsnail\ImageFolder\images.jpg')               # open image

    if imgOriginal is None:                             # if image was not read successfully
        print("error: image not read from file \n\n")        # print error message to std out
        os.system("pause")                                  # pause so user can see error message
        return                                              # and exit function (which exits program)
    
    imgGrayscale = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2GRAY)        # convert to grayscale

    imgBlurred = cv2.GaussianBlur(imgGrayscale, (5, 5), 0)              # blur
    
    imgCanny = cv2.Canny(imgBlurred, 100, 200)                          # get Canny edges
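    # hysteresis: gradients above 200 are strong edges; 100-200 kept only if connected to one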

    cv2.namedWindow("imgOriginal", cv2.WINDOW_AUTOSIZE)        # create windows, use WINDOW_AUTOSIZE for a fixed window size
    cv2.namedWindow("imgCanny", cv2.WINDOW_AUTOSIZE)           # or use WINDOW_NORMAL to allow window resizing

    cv2.imshow("imgOriginal", imgOriginal)         # show windows
    cv2.imshow("imgCanny", imgCanny)

    cv2.waitKey()                               # hold windows open until user presses a key

    cv2.destroyAllWindows()                     # remove windows from memory

    return
Example #30
	# clear the stream in preparation for the next frame and update
	# the FPS counter
	rawCapture.truncate(0)
	fps.update()
 
	# check to see if the desired number of frames have been reached
	if i == args["num_frames"]:
		break
 
# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
 
# do a bit of cleanup
cv2.destroyAllWindows()
stream.close()
rawCapture.close()
camera.close()

# create a *threaded* video stream, allow the camera sensor to warm up,
# and start the FPS counter
print("[INFO] sampling THREADED frames from `picamera` module...")
vs = PiVideoStream().start()
time.sleep(2.0)
fps = FPS().start()
 
# loop over some frames...this time using the threaded stream
while fps._numFrames < args["num_frames"]:
	# grab the frame from the threaded video stream and resize it
	# to have a maximum width of 400 pixels, then update the FPS counter
	frame = vs.read()
	frame = imutils.resize(frame, width=400)
	fps.update()

# stop the timer, report FPS, and clean up as above
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
cv2.destroyAllWindows()
vs.stop()
Example #31
def hueFinder(image, verbosity=0):
	"""Given an image of a fruit, it finds the center of the fruit and draws a radius from the center to approximate the hue."""
	image_bw = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2GRAY)
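	# inverted binary adaptive threshold: 11x11 Gaussian-weighted local mean, offset C=2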
	th1 = cv2.adaptiveThreshold(image_bw.copy(), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
	image_copy = image.copy()

	radMin = 999
	dpMin = 0
	for dp in range(1,10,1):
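		# sweep the accumulator resolution ratio dp, keeping the smallest radius found under 200 px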
		if(verbosity>1):
			image = image_copy.copy()
		circ = cv2.HoughCircles(image_bw.copy(), cv2.HOUGH_GRADIENT, dp, minDist = 400, minRadius=80)
		if(circ is not None):
			for c in circ:
				x,y,r =c[0].astype("int")
				if(radMin>r and r<200):
					radMin=r
					dpMin=dp
				if(verbosity>1):
					print(dp)
					cv2.circle(image,(x,y),r,(0,255,0),2)
					showImage(image,title=str(dp),waitTime=500)
		else:
			if(verbosity>1):
				print("Hello", dp)
	if(verbosity>1):
		image = image_copy.copy()

	if dpMin == 0:
		dpMin = 1  # fall back: the sweep found no circle and dp must be >= 1
	circ = cv2.HoughCircles(image_bw.copy(), cv2.HOUGH_GRADIENT, dpMin, minDist=400, minRadius=80)

	if(circ is not None):
		imageHSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
		x,y,r = circ[0,0].astype("int")
		print(radMin)
		if(radMin>110):
			radMin=70
		hues = []
		values = []
		imageMasked = np.zeros(imageHSV.shape[:2])
		for i in range(0,imageHSV.shape[0]):
			for j in range(0,imageHSV.shape[1]):
				dx = i-y
				dy = j-x
				if (((dx**2)+(dy**2)) <= (radMin-10)**2) and imageHSV[i][j][0]<60 and imageHSV[i][j][0]>23:
					imageMasked[i][j]=imageHSV[i][j][0]
					#if(imageHSV[i][j][2]<200):
					hues.append(imageHSV[i][j][0])
					values.append(imageHSV[i][j][2])

		if(verbosity>0):
			# showImage(imageMasked, title="Masked Image", waitTime = 5000)
			plt.imshow(imageMasked)
			plt.colorbar()
			plt.show()
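		# linear decision rule over mean hue/value; the weights were presumably fit offline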

		return ("GREEN" if (0.26307*np.mean(values) + (-1.76579)*np.mean(hues))<(-0.00985) else "YELLOW", np.mean(hues), np.mean(values))

	else:
		cv2.destroyAllWindows()
		return ("UNKNOWN",-1,-1)