Example #1
def main():
   global preview

   parse_args(sys.argv[1:])
   cam = PiCamera()
   cam.rotation = -90
   cam.resolution = (width, height)
   cam.framerate = 60
   rawCapture = PiRGBArray(cam, size=(width, height))

   startTime = time.time()
   endTime = time.time() + capTime
   frames = 0

   for image in cam.capture_continuous(rawCapture, format="bgr", use_video_port=True):
      if image is None:
         print('capture failed')
         break
      frames += 1
      frame = image.array

      if preview: 
         cv2.imshow("frame", frame)
         key = cv2.waitKey(1) & 0xFF


      if time.time() > endTime:
         break
      rawCapture.truncate(0)

   print "Average Framerate for " + str(frames) + \
      " frames was: " + str(float(frames) / capTime) + "fps"
   cv2.destroyAllWindows()
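Note that the reported average divides by the fixed capTime even though the loop may exit slightly after endTime; since startTime is already recorded, dividing by the measured elapsed time is a little more accurate. A minimal variant using the same variables:

   elapsed = time.time() - startTime
   print("Average Framerate for " + str(frames) +
      " frames was: " + str(float(frames) / elapsed) + "fps")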
Example #2
        def runCam():
                # initialize the camera and grab a reference to the raw camera capture
                camera = PiCamera()
                camera.resolution = (640, 480)
                camera.vflip = True
                camera.framerate = 72
                rawCapture = PiRGBArray(camera, size=(640, 480))

                camera.start_preview()

                time.sleep(0.1)

                # capture frames from the camera
                for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
                        # grab the raw NumPy array representing the image, then initialize the timestamp
                        # and occupied/unoccupied text
                        image = frame.array
                 
                        # show the frame
                        cv2.imshow("Frame", image)
                        key = cv2.waitKey(1) & 0xFF
                 
                        # clear the stream in preparation for the next frame
                        rawCapture.truncate(0)
                 
                        # if the `q` key was pressed, break from the loop
                        if key == ord("q"):
                                break
Example #3
class PiVideoStream:
	def __init__(self, resolution=(320, 240), framerate=32, rotate=0):
		# initialize the camera and stream
		self.camera = PiCamera()
		self.camera.rotation = rotate
		self.camera.resolution = resolution
		self.camera.framerate = framerate
		self.rawCapture = PiRGBArray(self.camera, size=resolution)
		self.stream = self.camera.capture_continuous(self.rawCapture,
			format="bgr", use_video_port=True)

		# initialize the frame and the variable used to indicate
		# if the thread should be stopped
		self.frame = None
		self.stopped = False

	def start(self):
		# start the thread to read frames from the video stream
		t = Thread(target=self.update, args=())
		t.daemon = True
		t.start()
		return self

	def update(self):
		# keep looping infinitely until the thread is stopped
		for f in self.stream:
			# grab the frame from the stream and clear the stream in
			# preparation for the next frame
			self.frame = f.array
			self.rawCapture.truncate(0)

			# if the thread indicator variable is set, stop the thread
			# and release camera resources
			if self.stopped:
				self.stream.close()
				self.rawCapture.close()
				self.camera.close()
				return

	def read(self):
		# return the frame most recently read
		return self.frame

	def stop(self):
		# indicate that the thread should be stopped
		self.stopped = True
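A typical way to use this threaded stream (a minimal sketch, assuming the class above is in scope and time is imported):

vs = PiVideoStream(resolution=(320, 240), framerate=32, rotate=180).start()
time.sleep(2.0)     # give the capture thread time to produce a first frame
frame = vs.read()   # latest BGR NumPy array, or None if nothing captured yet
vs.stop()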
Example #4
File: main3.py Project: caygen/PoliceDC
class CameraThread (threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        # Camera setup
        self.camera = PiCamera()
        self.camera.resolution = (640, 480)
        self.camera.framerate = 32
        self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
        self.camera.vflip = True
        self.camera.hflip = True
        self.camera.video_stabilization = True
        
        # Camera warmup
        time.sleep(0.1)
        
        # Camera configuration
        self.camera.exposure_mode = 'off'
        self.camera.awb_mode = 'off'
        self.camera.awb_gains = 1.5
        self.camera.iso = 100
        self.camera.shutter_speed = 6660

        
    def run(self):
        global ALLSTOP
        global target
        global last_seen
        global last_time

        print "Camera thread start"
        

        for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
            # Grab frame
            big = frame.array
            image = cv2.resize(big, (res_var, int(big.shape[0]*res_var/big.shape[1])), interpolation = cv2.INTER_AREA)
            image = image[30:127, :]  # img[y:y+h, x:x+w]
            
            # Cleanup
            self.rawCapture.truncate(0)
            
            # Get HSV image
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            
            # Color Filter
            target.there, coords, target.where, image = ColorFilter2(image, hsv, [red_lower, blue_lower], [red_upper, blue_upper], (0,255,0), minArea)
            
            if target.there:
                if target.where < center_range and target.where > -center_range:
                    last_seen = "center"
                elif target.where <= center_range:
                    last_seen = "right"
                elif target.where >= -center_range:
                    last_seen = "left"
                last_time = time.clock()
            
            #print "Target", target.there, last_time, last_seen

            if ALLSTOP:
                break
Example #5
def rpiCapture(opts):

    global _frame
    global _frameCounter

    # initialize the camera and grab a reference to the raw camera capture
    camera = PiCamera()
    camera.resolution = CAPTURE_RES 
    camera.framerate = 4 
    rawCapture = PiRGBArray(camera, size=CAPTURE_RES)
    
    # allow the camera to warmup
    time.sleep(0.1)
     
    # capture frames from the camera
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        # grab the raw NumPy array representing the image, then initialize the timestamp
        # and occupied/unoccupied text
        with _frameLock:
            # copy the frame handled by the video capturing structure, so it won't be modified by subsequent captures
            _frame = numpy.copy(frame.array)
            if _frameCounter == 0:
                initStream(frame, opts)
            _frameCounter += 1

        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)
        
        if _stopThreads:
            return
Example #6
class image_publisher:

  def __init__(self):

    self.camera = PiCamera()
    self.camera.resolution = (1024, 768) #(1920, 1080)
    self.camera.ISO = 100
    # note: 'sa', 'awb' and 'co' below are raspistill-style option names, not real
    # PiCamera attributes (picamera's equivalents are saturation, awb_mode and
    # contrast), so these three assignments have no effect on the camera
    self.camera.sa = 100
    self.camera.awb = "flash"
    self.camera.co = 100
    self.rawCapture = PiRGBArray(self.camera)
    
    time.sleep(0.1)

    self.image_pub = rospy.Publisher("image_topic",Image)
    self.small_image_pub = rospy.Publisher("small_image_topic",Image)
    self.bridge = CvBridge()

  def publishImage(self):
    for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
        try:
          cv_image = frame.array
        except CvBridgeError as e:
          print(e)
        # cv_image = (cv_image * 0.5).astype(numpy.uint8)
        self.rawCapture.truncate(0)
        try:
          self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
          small = cv2.resize(cv_image, (0,0), fx=0.5, fy=0.5)
          self.small_image_pub.publish(self.bridge.cv2_to_imgmsg(small, "bgr8"))
        except CvBridgeError as e:
          print("problem")
          print(e)
Example #7
class ThePiCamera(Camera):
    """
    Class for Raspberry Pi camera frame retrieval.
    """
    def __init__(self):
        """
        Constructor for ThePiCamera. This initializes parameters for Raspberry
        Pi camera capture.
        """
        self.camera = PiCamera()
        self.camera.resolution = (640, 480)
        self.camera.framerate = 32
        self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
        time.sleep(0.1) # allow the camera to warm up

    def get_iterator(self):
        """
        Returns an iterator for obtaining a continuous stream of camera frames.
        """
        return self.camera.capture_continuous(self.rawCapture, format='bgr', use_video_port=True)

    def get_frame(self, raw_frame):
        """
        Retrieves the camera frame returned by the iterator, converted into a
        2D array.
        """
        array = raw_frame.array
        self.rawCapture.truncate(0)
        return array

    def destroy(self):
        """
        Cleans up memory used for Raspberry Pi camera capture.
        """
        self.camera.close()
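A sketch of how the iterator and frame helpers above fit together (assumes the class is imported and cv2 is available):

cam = ThePiCamera()
for raw in cam.get_iterator():
    frame = cam.get_frame(raw)  # BGR array; also resets the capture buffer
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cam.destroy()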
Example #8
class VideoThread(Thread):
	def __init__(self):
		Thread.__init__(self)
		self.camera = picamera.PiCamera()
		self.camera.vflip = True
		self.camera.framerate = 30
		self.camera.resolution = (640, 480)
		# note: use_video_port and quality are not PiCamera attributes (they are
		# per-capture arguments in picamera), so these two assignments have no effect
		self.camera.use_video_port = True
		self.camera.quality = 85

		self.rawCapture = PiRGBArray(self.camera, size=(640, 480))

	def run(self):
		time.sleep(1) # warm up the camera

		for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
			image = frame.array

			cv2.imshow("Frame", image)
			key = cv2.waitKey(1) & 0xFF

			self.rawCapture.truncate(0)

			if key == ord("q"):
				break
Example #9
class FrameGrabber(threading.Thread):
    def __init__(self, frameQueue, angleQueue):
        threading.Thread.__init__(self)
        self.frameQueue = frameQueue
        self.angleQueue = angleQueue
        self.camera = PiCamera()
        self.camera.hflip = CAMERA_HFLIP
        self.camera.vflip = CAMERA_VFLIP          
        self.camera.resolution = (CAMERA_WIDTH, CAMERA_HEIGHT)
        self.camera.framerate = CAMERA_FRAMERATE
        self.rawCapture = PiRGBArray(self.camera, size=(CAMERA_WIDTH, CAMERA_HEIGHT))
        self.startCaptureTime = time.time()
        self.sc = ServoController()

    def run(self):
        # allow the camera to warmup
        runtime = time.time()
        sleeptime = min(runtime - self.startCaptureTime,1)
        time.sleep(sleeptime)

        # capture frames from the camera
        while not shutdownEvent.isSet():
            angle = self.angleQueue.get(block=True)
            self.sc.setCameraAngle(angle)
            ts = time.time()
            self.camera.capture(self.rawCapture, format='bgr', use_video_port=True)
            self.frameQueue.put((ts,self.rawCapture.array,angle))
            self.rawCapture.truncate(0)
Example #10
class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        #self.video = cv2.VideoCapture(0)
        self.camera = PiCamera()
        #self.rawCapture = PiRGBArray(self.camera)
        self.camera.resolution = (640, 480)
        self.camera.framerate = 32
        self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
 
        
        # allow the camera to warmup
        time.sleep(0.1)

        # If you decide to use video.mp4, you must have this file in the folder
        # as the main.py.
        # self.video = cv2.VideoCapture('video.mp4')
    
    def __del__(self):
        #self.video.release()
        pass
    
    def get_frame(self):
        # grab an image from the camera
        self.camera.capture(self.rawCapture, format="bgr")
        image = self.rawCapture.array
        self.rawCapture.truncate(0) 
        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()
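Classes shaped like this are commonly plugged into a Flask MJPEG streaming route; a hypothetical sketch (Flask, the route name and the multipart framing are assumptions, not part of the example above):

from flask import Flask, Response

app = Flask(__name__)

def gen(camera):
    while True:
        frame = camera.get_frame()  # JPEG bytes from the class above
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')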
Example #11
def handle_get_bubblescope_properties(req):
    with PiCamera() as camera:
        # Camera settings
        # I don't know what they mean
        camera.resolution = (img_width,img_height)
        camera.ISO = 100
        camera.sa = 100
        camera.awb = "flash"
        camera.co = 100

        raw_capture = PiRGBArray(camera)

        time.sleep(0.1)

        # Before doing anything, capture the first frame and find the inner circle
        camera.capture(raw_capture, format="bgr")
        image = raw_capture.array
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        circles = cv2.HoughCircles(image_gray, cv2.HOUGH_GRADIENT, 1, 200, param1=50, param2=40, minRadius=10, maxRadius=180)  # cv2.cv.CV_HOUGH_GRADIENT on OpenCV 2
        raw_capture.truncate(0)

        if circles is not None:
            x, y, r = circles[0][0]

            res = GetBubblescopePropertiesResponse()
            res.center = (x,y)
            res.inner_radius = r*1.2
            res.outer_radius = r*2.25
            return res
        else:
            print "no circle found\n"
            return GetBubblescopePropertiesResponse()
Example #12
class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        # self.video = cv2.VideoCapture(0)
        # If you decide to use video.mp4, you must have this file in the folder
        # as the main.py.
        # self.video = cv2.VideoCapture('video.mp4')
        # --------------- ARW -------------
        self.camera = PiCamera()
        self.camera.resolution = (640, 480)
        self.camera.framerate = 32
        self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
        # Get a generator object that serves up the frames
        self.frame_gen = self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True)
         
    def __del__(self):
#        self.video.release()
        pass

    def get_frame(self):
        #success, image = self.video.read()
        #   We are using Motion JPEG, but OpenCV defaults to capture raw images,
        #   so we must encode it into JPEG in order to correctly display the
        #   video stream.
        #ret, jpeg = cv2.imencode('.jpg', image)
        #return jpeg.tobytes()
        # --------------- ARW -------------
        frame = next(self.frame_gen)                    # get next frame
        image = frame.array
        ret, jpeg = cv2.imencode('.jpeg', image)        # encode jpeg to buffer
        self.rawCapture.truncate(0)                     # clear stream in prep for next frame
        return jpeg.tobytes()
Example #13
def video_thread():
    # enable the thread to modify the global variable 'latest_video_frame' (this variable will be accessed by functions doing video analysis or video streaming)
    global latest_video_frame   
    
    # create an instance of the RPI camera class:
    camera = PiCamera() 
    
    # rotate the camera view 180 deg (I have the RPI camera mounted upside down):
    camera.hflip = True
    camera.vflip = True 
    
    # set resolution and frame rate:
    camera.resolution = (640, 480)
    camera.framerate = 30
    
    # create a generator 'video_frame_generator' which will continuously capture video frames
    # from the camera and save them one by one in the container 'generator_output'.
    # ('video_frame_generator' is an infinite iterator: every time 'next()' is called on it,
    # e.g. in a for loop, it gets a video frame from the camera and saves it in 'generator_output')
    generator_output = PiRGBArray(camera, size=(640, 480))
    video_frame_generator = camera.capture_continuous(generator_output, format="bgr", use_video_port=True)
    
    # allow the camera to warm up:
    time.sleep(0.1)
    
    for item in video_frame_generator:
        # get the numpy array representing the latest captured video frame from the camera
        # and save it globally for everyone to access:
        latest_video_frame = generator_output.array 
        
        # clear the output container so it can store the next frame in the next loop cycle:
        # (please note that this is absolutely necessary!)
        generator_output.truncate(0)        
        
        # delay for 0.033 sec (for ~ 30 Hz loop frequency):
        time.sleep(0.033) 
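A consumer thread might poll the shared frame like this (a sketch; the processing step is a placeholder):

def analysis_thread():
    global latest_video_frame
    while True:
        if latest_video_frame is not None:
            frame = latest_video_frame.copy()  # copy so the capture thread can't overwrite it mid-use
            # ... run analysis or streaming on 'frame' here ...
        time.sleep(0.033)  # match the ~30 Hz capture loop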
Example #14
class CamThread(Thread):
	""" Thread to continuously read incoming frames from the raspberry pi camera
		and send those frames to all the clients connected to the server.
	"""
	def __init__(self):
		print('init CamThread')
		self.cam = PiCamera()
		self.cam.resolution = (480,320)
		self.cam.framerate = 25
		self.rawCap = PiRGBArray(self.cam)
		# Wait for the webcam to open
		sleep(0.2)

		self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

		Thread.__init__(self)
		self.daemon = True
		self.running = True

	def run(self):
		print('CamThread running')
		for frame in self.cam.capture_continuous(self.rawCap, format='bgr', use_video_port=True):
			if not self.running:
				break

			img = self.rawCap.array
			rows, cols, channels = img.shape

			if rows > 0 and cols > 0:
				# 180 degrees rotation
				img = self.rotate(img, 180)

				# Serialize and send
				img_ser = self.encode(img, '.jpg')
				for c in connected_clients:
					self.send(img_ser, (c, CLIENT_PORT))
			# Clear the camera buffer before grabbing the next frame
			self.rawCap.truncate(0)
		self.sock.close()
		self.cam.close()

	def encode(self, img, ext='.jpg'):
		return cv2.imencode(ext, img)[1].tobytes()

	def rotate(self, frame, angle):
		rows, cols, channels = frame.shape
		RotMat = cv2.getRotationMatrix2D((cols/2, rows/2), angle, 1)
		return cv2.warpAffine(frame, RotMat, (cols, rows))

	def send(self, data, addr):
		# Send the data split into packets of 1024 bytes
		while len(data) > 1024:
			self.sock.sendto(data[:1024], addr)
			data = data[1024:]
		if len(data) > 0:
			self.sock.sendto(data, addr)

	def stop(self):
		self.running = False
Example #15
def make_photo():
    timestamp = int(time())
    camera.resolution = (1920,1080)
    rawCapture = PiRGBArray(camera)
    camera.capture('photo_'+str(timestamp)+'.png')
    rawCapture.truncate(0)
    camera.resolution = (640, 480)
    rawCapture = PiRGBArray(camera)
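make_photo() here (and make_video() in Example #18 below) relies on a module-level camera object and the time helpers; a minimal sketch of the assumed setup:

from time import time, sleep
from picamera import PiCamera
from picamera.array import PiRGBArray

camera = PiCamera()
camera.resolution = (640, 480)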
Example #16
class CameraThread (threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        ## Camera
        self.camera = PiCamera()
        self.camera.resolution = (640, 480)
        self.camera.framerate = 32
        self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
        
        # Camera warmup
        time.sleep(0.1)

        # Init red & blue threads
        redThread = RedFilterThread(red_lower, red_upper, "red")
        redThread.daemon = True
        redThread.start()
        blueThread = BlueFilterThread(blue_lower, blue_upper, "blue")
        blueThread.daemon = True
        blueThread.start()
        
    def run(self):
        global ALLSTOP
        global targetPos
        global hasTarget
        global image
        global hsv

        print "Camera thread is running"

        for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
            # Grab frame
            image = cv2.flip(frame.array, 0)
            image = cv2.resize(image, (100, int(image.shape[0]*100/image.shape[1])), interpolation = cv2.INTER_AREA)

            # Get HSV image
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            
            # Cleanup
            self.rawCapture.truncate(0)

            # Compare
            targetPosList = list(set(targetPos_red).intersection(targetPos_blue))
            if len(targetPosList) > 0:
                targetPos = targetPosList[0]
                hasTarget = True
                print "Match at", targetPos
            else:
                targetPos = 0
                hasTarget = False
                print "No match"
                print "Red", targetPos_red
                print "Blue", targetPos_blue

            if ALLSTOP:
                break

            time.sleep(1)
Example #17
class OpenCVTEST:
    def __init__(self):
        # initialize the camera and grab a reference to the raw camera capture
        factor = 1.5
        self.camera = PiCamera()
        self.camera.resolution = (640, 480)
        self.camera.framerate = 32
        self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
        # allow the camera to warmup
        time.sleep(0.1)
        cv2.namedWindow("Video")

        self.pictureBuffer = deque()

        self.currentPicture = None
        self.firstPicture = None

    def showPicture(self, picture):
        cv2.imshow("Video", picture)

    def addPicture(self, picture):
        self.pictureBuffer.append(picture)

    def getCurrentPicture(self):
        return self.currentPicture

    def getPicture(self):
        return self.pictureBuffer.popleft()

    def forePlay(self):
        number = 0
        for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
            # grab the raw NumPy array representing the image, then initialize the timestamp
            # and occupied/unoccupied text
            self.currentPicture = frame.array
            self.addPicture(self.currentPicture)
            self.rawCapture.truncate(0)

            number += 1
            if number >= 100:
                break
        self.play()

    def play(self):
        for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
            # grab the raw NumPy array representing the image, then initialize the timestamp
            # and occupied/unoccupied text
            self.currentPicture = frame.array
            self.addPicture(self.currentPicture)
            self.showPicture(self.getPicture())
            self.rawCapture.truncate(0)

            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
        print("Bye bye!")
Example #18
def make_video():
    timestamp = int(time())
    camera.resolution = (1920,1080)
    rawCapture = PiRGBArray(camera)
    camera.start_recording('video_'+str(timestamp)+'.h264')
    sleep(10)
    camera.stop_recording()
    rawCapture.truncate(0)
    camera.resolution = (640, 480)
    rawCapture = PiRGBArray(camera)
Example #19
class PiVideoStream:
    def __init__(self, resolution=(640, 480), framerate=48):
        # initialize the camera and stream
        self.camera = PiCamera()
        #self.camera.video_stabilization = True
        self.camera.resolution = resolution
        self.camera.framerate = framerate
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(self.rawCapture,
            format="bgr", use_video_port=True)

        # initialize the frame and the variable used to indicate
        # if the thread should be stopped
        self.frame = None
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        for f in self.stream:
            # grab the frame from the stream and clear the stream in
            # preparation for the next frame
            self.frame = f.array
            self.rawCapture.truncate(0)

            # if the thread indicator variable is set, stop the thread
            # and release camera resources
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        # return the frame most recently read
        return self.frame

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
        
    # Settings for consistent image capture.
    # Must be called some time after the camera has started, once auto-exposure has settled
    def consistent(self):
        self.camera.shutter_speed = self.camera.exposure_speed
        self.camera.exposure_mode = 'off'
        g = self.camera.awb_gains
        self.camera.awb_mode = 'off'
        self.camera.awb_gains = g
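The consistent() method applies the usual picamera recipe for fixing exposure and white balance: it copies the current auto-exposure shutter speed and AWB gains back as fixed settings, so it only works as intended once the stream has been running long enough for them to settle. A usage sketch (assumes the class above is in scope and time is imported):

vs = PiVideoStream().start()
time.sleep(2.0)   # let auto-exposure and AWB settle
vs.consistent()   # freeze shutter speed and AWB gains from that point on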
Example #20
	def connect(self):
		sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

		server_address = (conf.ip, conf.videoport)

		sock.connect(server_address)

		# initialize the camera and grab a reference to the raw camera capture
		camera = PiCamera()
		camera.resolution = (640, 480)
		camera.framerate = 32
		rawCapture = PiRGBArray(camera, size=(640, 480))

		# allow the camera to warmup
		time.sleep(0.1)

		frm = 0

		# capture frames from the camera
		for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
			# grab the raw NumPy array representing the image, then initialize the timestamp
			# and occupied/unoccupied text
			image = frame.array

			gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

			gray = cv2.flip(gray,0)
			gray = cv2.flip(gray,1)

			frm = frm + 1
			if (frm >= 256):
				frm = 0

			data = np.zeros((640), dtype=np.uint8)
			data[0] = data[1] = data[2] = data[3] = data[5] = 32
			sent = sock.sendto(data, server_address)

			# for i in range(1,480):
			#    data = gray[i,:]
			#    sent = sock.sendto(data, server_address)

			data = gray.reshape(640*480, 1)
			sent = sock.sendto(data, server_address)


			#cv2.imshow("My Image", gray)

			if cv2.waitKey(1) & 0xFF == ord('q'):
				break

			# clear the stream in preparation for the next frame
			rawCapture.truncate(0)

			if not self.keeprunning:
				break
Example #21
class PiVideoStream:
    def __init__(self, resolution=(320, 240), format = "bgr", framerate=30):
        # initialize the camera and stream
        self.camera = PiCamera()
        time.sleep(.2) #let camera init
        self.camera.resolution = resolution
        self.camera.framerate = framerate
        self.camera.sharpness = -100
        self.camera.contrast = 50
        self.camera.saturation = 100
        self.camera.brightness = 40
        self.camera.awb_mode = 'off'
        self.camera.awb_gains = (1.29, 1.44)
        self.camera.shutter_speed = 800
        self.camera.exposure_mode = 'night'
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(self.rawCapture,
            format=format, use_video_port=True)
        time.sleep(.2) #let camera init

        # initialize the frame and the variable used to indicate
        # if the thread should be stopped
        self.frame = None
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        for f in self.stream:
            # grab the frame from the stream and clear the stream in
            # preparation for the next frame
            self.frame = f.array
            self.rawCapture.truncate(0)

            # if the thread indicator variable is set, stop the thread
            # and release camera resources
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        # return the frame most recently read
        return self.frame

    def stop(self):
        # flag thread to stop
        self.stopped = True
Example #22
    def run(self):
        try:
            with picamera.PiCamera() as camera:
                camera.resolution = (self.CAMERA_WIDTH, self.CAMERA_HEIGHT)
                camera.framerate = 32
                rawCapture = PiRGBArray(camera, size=(self.CAMERA_WIDTH, self.CAMERA_HEIGHT))

                time.sleep(0.1)
                # Now fix the values
                camera.shutter_speed = camera.exposure_speed
                camera.exposure_mode = 'off'
                g = camera.awb_gains #TODO: make this fixed?
                camera.awb_mode = 'off'
                camera.awb_gains = g

                for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
                    last_time = time.time()

                    image = frame.array
                    rawCapture.truncate(0)
                    if self.hsv:
                        hsvImage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
                        hsvImage = cv2.resize(hsvImage, (len(image[0]) // self.scale_down, len(image) // self.scale_down))
                        green_color_mask, blue_color_mask = self.run_hsv(hsvImage, self.callback)
                        cv2.imshow("hsv_image", hsvImage)
                    else:
                        self.run_bgr(image, self.callback)
                        # cv2.imshow("rgb_image", image)


                    # green_color_mask = cv2.inRange(image, self.green_lower_dark_bgr, self.green_upper_dark_bgr)
                    # cv2.imshow("green_color_mask", green_color_mask)
                    #
                    # cv2.waitKey(1)

                    # stop thread
                    if self._stopped():
                        self.__simLogger.debug("Stopping camera thread")
                        break

                    # sleep if processing of the camera image was faster than the
                    # target frame period (despite its name, MIN_FPS is used as a period in seconds)
                    sleep_time = float(MIN_FPS - (time.time() - last_time))
                    print "sleep time: ", sleep_time
                    if sleep_time > 0:
                        time.sleep(sleep_time)
            cv2.destroyAllWindows()
        except Exception as e:
            self.error_callback()
            print "Error: ", str(e), str(sys.exc_info()[0]) + ' - ' + traceback.format_exc()
            # self.__simLogger.critical("Camera exception: " + str(e) + str(
            # sys.exc_info()[0]) + ' - ' + traceback.format_exc())
        except (KeyboardInterrupt, SystemExit):
            self.error_callback()
            raise
Example #23
File: navim.py Project: halden718/quad
def navigateImg():
    moveTime = 0.5
    sleepTime = 2
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=(640, 480))
    e1 = 20.00000
    e2 = 20.00000
    time.sleep(0.1)
    dst = (320, 240)
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        try:
            # grab the raw NumPy array representing the image, then initialize the timestamp
            # and occupied/unoccupied text
            image = frame.array
            w = 0
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            lower_blue = np.array([100,50,50])
            upper_blue = np.array([120,255,255])
            mask = cv2.inRange(hsv, lower_blue, upper_blue)
            loc = getCenter(mask)
            print(dst[1]-loc[1])
            print(dst[0]-loc[0])
            if(abs(dst[1]-loc[1]) > e1):
                if(dst[1] > loc[1]):
                    print('b')
                    backwards(20+int((dst[1]-loc[1])/3))
                else:
                    print('f')
                    forwards(20-int((dst[1]-loc[1])/3))
            else:
                print('fb good')
                w = w + 1
            if(abs(dst[0]-loc[0]) > e2):
                if(dst[0] > loc[0]):
                    print('l')
                    left(20+int((dst[0]-loc[0])/3))
                else:
                    print('r')
                    right(20-int((dst[0]-loc[0])/3))
            else:
                print('lf good')
                w = w + 1
            if w == 2:
                print('drop')
            time.sleep(moveTime)
            zero()
            time.sleep(sleepTime)
            rawCapture.truncate(0)
        except OSError:
            print('io')
Example #24
class VideoStream(object):
    def __init__(self):
        self.camera = PiCamera()
        self.camera.resolution = (1920, 1080)
        self.preview_window = (0, 0, 640, 480)
        self.camera.framerate = 15
        self.stream = None
        self.rgb_array = PiRGBArray(self.camera, size=self.camera.resolution)

        # initialize the frame and the variable used to indicate
        # if the thread should be stopped

        self.frame = None
        self.preview_stopped = False
        self.capture_stopped = False

    def capture_hi_res(self):
        self.stream = self.camera.capture_continuous(self.rgb_array, use_video_port=True, splitter_port=2, format='bgr')
        Thread(target=self.capture_raw, args=()).start()

    def capture_raw(self):

        for f in self.stream:
            self.frame = f.array
            self.rgb_array.truncate(0)

            if self.capture_stopped:
                self.stream.close()
                self.rgb_array.close()
                self.camera.close()
                return


    def stop_capture(self):
        self.capture_stopped = True

    def preview_stream(self):

        # start the thread to read frames from the video stream
        Thread(target=self.preview, args=()).start()

    def preview(self):
        self.camera.start_preview(fullscreen=False, window=self.preview_window)

        if self.preview_stopped:
            self.camera.stop_preview()

    def read(self):
        return self.frame

    def stop_preview(self):
        self.preview_stopped = True
Example #25
def camera(fov_v, fov_h, threshold, landing_area_x, landing_area_y):
    #make the bitstream for the radio message
    radio_stream = bitstream.BitStream()

    # init camera and get a reference to the raw capture
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=(640, 480))

    #time for the camera to warm up which is apparently a thing, and we'd probably only have to do it once rather than every picture
    time.sleep(0.1)

    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        #get the data from the drone
        altitude, drone_coords, heading = read_drone_data()

        #get an image from the camera
        edges = frame.array #TODO FROM CAMERA
        rawCapture.truncate(0)

        #do edge detection on the image
        minVal, maxVal = find_canny_thresholds(altitude)  #TODO something about these minVal, maxVal based on altitude
        edges = cv2.Canny(edges, minVal, maxVal)
        rows, cols = edges.shape

        #create the decision grid
        decision_grid, grid_cols, grid_rows, grid_pix_x, grid_pix_y = create_decision_grid(altitude, fov_h, rows, cols, landing_area_x, landing_area_y)

        #create the radio message to be sent
        radio_stream.write(altitude, np.uint8) #1 byte total
        radio_stream.write(heading, np.uint16) #3 bytes total
        radio_stream.write((grid_rows, grid_cols), np.uint16) #7 bytes total
        radio_stream.write(drone_coords, float) #23 bytes total

        for row in range(grid_rows):
            for col in range(grid_cols):
                decision_grid[row][col] = len(np.where(edges[row*grid_pix_y:row*grid_pix_y+grid_pix_y, col*grid_pix_x:col*grid_pix_x+grid_pix_x] > 0)[0])
                if decision_grid[row][col] >= threshold:
                    radio_stream.write(1, bool)
                else:
                    radio_stream.write(0, bool)

        #radio can only handle a byte as the smallest unit of information, so we have to pad the signal
        pad = 8 - ((grid_rows * grid_cols) % 8)
        if pad == 8:
            pad = 0
        for i in range(pad):
            radio_stream.write(0, bool)

        message = radio_stream.read(str, 23 + (grid_rows*grid_cols + pad)/8)
Example #26
class Camera(threading.Thread):

	def __init__(self):
		self.image = []
		self.angle = 0
		
		# initialize the camera and grab a reference to the raw camera capture
		self.camera = PiCamera()
		self.camera.resolution = (640, 480)
		self.camera.framerate = 32
		self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
		time.sleep(0.1)

		#Set up thread
		threading.Thread.__init__(self)
		self.threadID = 100
		self.name = "camera"
		# Thread exits when set to True
		self.exitFlag = False
	 
	def run(self):
		# capture frames from the camera
		for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
			# grab the raw NumPy array representing the image, then initialize the timestamp
			# and occupied/unoccupied text
			self.image = frame.array
		 
			# show the frame
			# cv2.imshow("Frame", image)
			gb_lane_cfg = LaneCfg
			proc = {'houghlinesP':detect_lane_over_houghlinesP}
			self.angle = proc[gb_lane_cfg['set']['proc']](gb_lane_cfg['set']['proc'], self.image, gb_lane_cfg)


			#utilities.log("angle " + str(self.angle))
		 
			# clear the stream in preparation for the next frame
			self.rawCapture.truncate(0)
		 
			# if the exit flag has been set, break from the loop
			if self.exitFlag == True:
				break
		
		self.camera.close()

	def stop(self):
		print("stoping camera")
		self.exitFlag = True
Example #27
def camera_loop():
    use_pi = False
    try:
        from picamera import PiCamera
        from picamera.array import PiRGBArray
        use_pi = True
    except (ImportError, OSError):  # ImportError if picamera is missing, OSError if its native libraries fail to load
        print('Couldn\'t load picamera, using OpenCV video capture')
        use_pi = False

    if use_pi:
        print('Opening camera')
        camera = PiCamera()
        rawCapture = PiRGBArray(camera)

        camera.resolution = (640, 480)
        time.sleep(1)

        camera.start_preview()
        camera.awb_mode = 'off'
        camera.exposure_mode = 'off'

        # use initial values from config file
        camera.awb_gains = (config['camera']['awb_red_gain'], config['camera']['awb_red_gain'])
        camera.shutter_speed = config['camera']['shutter_speed']
        camera.iso = config['camera']['iso']

        print('Opened camera')

        # capture frames from the camera
        for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
            # grab the raw NumPy array representing the image
            image = frame.array

            handle_image(image)

            # clear the stream in preparation for the next frame
            rawCapture.truncate(0)

            # set all the settings if they changed in the config
            camera.awb_gains = (config['camera']['awb_red_gain'], config['camera']['awb_red_gain'])
            camera.shutter_speed = config['camera']['shutter_speed']
            camera.iso = config['camera']['iso']
    else:
        print('Opening camera')
        capture = cv2.VideoCapture(0)
        while True:
            success, img = capture.read()
            if success:
                handle_image(img)
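The loop reads its camera settings from a config mapping; the keys it touches imply a structure like this (values are illustrative, and note the code applies awb_red_gain to both AWB channels):

config = {
    'camera': {
        'awb_red_gain': 1.5,    # used for both the red and blue gains above
        'shutter_speed': 8000,  # microseconds
        'iso': 100,
    }
}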
Example #28
class PiVideoStream(object):
    def __init__(self, resolution=(320, 240), frame_rate=32):
        # Init camera and Stream
        self.camera = PiCamera()
        self.camera.resolution = resolution
        self.camera.framerate = frame_rate
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True)

        self.frame = None
        self.stopped = False

    def start(self):
        """
        Starts the update thread that reads the camera data
        :return:
        """
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        """
        Repeatedly stores the most recently captured frame
        :return:
        """
        for f in self.stream:
            # Store the current frame
            self.frame = f.array
            # Clear the array ready for the next frame
            self.rawCapture.truncate(0)

            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        """
        Gets the most recently read frame
        :return: Most recently captured RGB array from camera
        """
        return self.frame

    def stop(self):
        """
        Exits out of the update function/thread
        :return:
        """
        self.stopped = True
Example #29
class Eye:
    # relays information to 4 pins where all the steppers involving rotation should be plugged
    def __init__(self, resWidth, resHeight, framerate):
        self.camera = PiCamera()
        self.camera.resolution = (resWidth, resHeight)
        self.camera.framerate = framerate
        self.rawCapture = PiRGBArray(self.camera, size=(resWidth, resHeight))
        self.objectWidth = 50
        self.objectHeight = 50
        self.xPercent = resHeight/2
        self.yPercent = resWidth/2

    def getCenter(self):
        return (self.xPercent, self.yPercent)

    def recognizeFace(self, showPreview):
        face_cascade = cv2.CascadeClassifier('../CAM/haar/haarcascade_frontalface_default.xml')
        for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
            # grab the raw NumPy array representing the image, then initialize the timestamp
            # and occupied/unoccupied text
            image = frame.array

            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.1, 5)
            for (x,y,w,h) in faces:
                self.xPercent = float(x+(w/2))/float(self.camera.resolution[0])
                self.yPercent = float(y+(h/2))/float(self.camera.resolution[1])
                if showPreview:
                    cv2.rectangle(gray,(x,y),(x+w,y+h),(255,255,0),1)
            # print the coords
            print('face percents on screen')
            print((self.xPercent, self.yPercent))

            # show the frame
            if showPreview:
            	cv2.imshow("Frame", gray)

            key = cv2.waitKey(1) & 0xFF

            # clear the stream in preparation for the next frame
            self.rawCapture.truncate(0)

            # if the `c` key was pressed, break from the loop
            if key == ord("c"):
                break

    def followFaceWithArm(self, arm):
        face_cascade = cv2.CascadeClassifier('../CAM/haar/haarcascade_frontalface_default.xml')
        for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):

            image = frame.array

            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.1, 5)
            for (x,y,w,h) in faces:
                self.xPercent = float(x+(w/2))/float(self.camera.resolution[0])
                self.yPercent = float(y+(h/2))/float(self.camera.resolution[1])
                self.objectWidth = w
                self.objectHeight = h
                arm.positionPercent(self.xPercent,self.yPercent)

            print ('Face at percents...', self.xPercent,self.yPercent, '  Face dims...', self.objectWidth, self.objectHeight)
            # go to face

            key = cv2.waitKey(1) & 0xFF

            # clear the stream in preparation for the next frame
            self.rawCapture.truncate(0)

            # if the `c` key was pressed, break from the loop
            if key == ord("c"):
                break

    def camPreview(self):
        for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
            # grab the raw NumPy array representing the image, then initialize the timestamp
            # and occupied/unoccupied text
            image = frame.array

            cv2.imshow("Frame", image)
            key = cv2.waitKey(1) & 0xFF

            # clear the stream in preparation for the next frame
            self.rawCapture.truncate(0)

            # if the `x` key was pressed, break from the loop
            if key == ord("x"):
                break

    def videoLoop(self, outputWindow):
        try:
            for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
                image = frame.array
                grayImg = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                # cv2.putText(grayImg, 'FACE TRACKING, <f> to toggle', (0,45), 16, .35, (255,255,255), 1)
                outputWindow.drawGuidesAndText(grayImg, ['MM', 'FM', 'MF'])

                if outputWindow.robot.isTracking:
                    faces = outputWindow.robot.trackingHaar.detectMultiScale(grayImg, 1.1, 5)
                    for (x,y,w,h) in faces:
                        xPercent = float(x+(w/2))/float(outputWindow.robot.eye.camera.resolution[0])
                        yPercent = float(y+(h/2))/float(outputWindow.robot.eye.camera.resolution[1])
                        outputWindow.robot.arm.positionPercent(xPercent, yPercent)

                grayImg = Image.fromarray(grayImg)
                grayPhotoImg = ImageTk.PhotoImage(grayImg)
                if outputWindow.panel is None: # create and mount panel if it's not there
                    outputWindow.panel = Label(outputWindow.frame, image=grayPhotoImg)
                    outputWindow.panel.image = grayPhotoImg
                    outputWindow.panel.pack(padx=10, pady=10)
                else:
                    outputWindow.panel.configure(image=grayPhotoImg)
                    outputWindow.panel.image = grayPhotoImg
                self.rawCapture.truncate(0)

        except RuntimeError as e:
            print("[INFO] caught a RuntimeError")
Example #30
    # stream = picamera.PiCameraCircularIO(camera, seconds = video_preseconds)
    stream = picamera.PiCameraCircularIO(camera, size=3000000)
    # video recording into circular buffer from splitter port 1
    camera.start_recording(stream, format='h264', splitter_port=1)
    #camera.start_recording('test.h264', splitter_port=1)
    # wait 2 seconds for stable video data
    camera.wait_recording(2, splitter_port=1)
    # motion_detected = False
    print(camera.resolution)

    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        # grab the raw NumPy array representing the image
        rawCapture.truncate()
        rawCapture.seek(0)

        frame2 = frame.array
        frameNo = frameNo + 1
        img_rgb = frame2
        frame2arm = getCroppedImage(frame2, resetArmCrops)
        img_gray2arm = cv2.cvtColor(frame2arm, cv2.COLOR_BGR2GRAY)

        # tripSense()
        # print("GPIO", GPIO.input(sensor[0]))
        # print('Sensore', GPIO.input(sensor[1]), GPIO.input(sensor[2]))
        while (GPIO.input(sensor[0]) == GPIO.HIGH):
            # GPIO.output((segment7All[ballCounter % 10]), GPIO.LOW)
            # print('Ball Timer Awake ', ballCounter)
            done = False
Example #31
    def capture_thread(self, IPinver):
        global frame_image, camera
        ap = argparse.ArgumentParser()  # OpenCV initialization
        ap.add_argument("-b",
                        "--buffer",
                        type=int,
                        default=64,
                        help="max buffer size")
        args = vars(ap.parse_args())
        pts = deque(maxlen=args["buffer"])

        font = cv2.FONT_HERSHEY_SIMPLEX

        camera = picamera.PiCamera()
        camera.resolution = (640, 480)
        camera.framerate = 20
        rawCapture = PiRGBArray(camera, size=(640, 480))

        context = zmq.Context()
        footage_socket = context.socket(zmq.PUB)
        print(IPinver)
        footage_socket.connect("tcp://%s:5555" % IPinver)

        avg = None
        motionCounter = 0
        lastMotionCaptured = datetime.datetime.now()

        for frame in camera.capture_continuous(rawCapture,
                                               format="bgr",
                                               use_video_port=True):
            frame_image = frame.array
            timestamp = datetime.datetime.now()

            if FindColorMode:
                ####>>>OpenCV Start<<<####
                hsv = cv2.cvtColor(frame_image, cv2.COLOR_BGR2HSV)
                mask = cv2.inRange(hsv, colorLower, colorUpper)  # 1
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None
                if len(cnts) > 0:
                    cv2.putText(
                        frame_image,
                        "Target Detected",
                        (40, 60),
                        font,
                        0.5,
                        (255, 255, 255),
                        1,
                        cv2.LINE_AA,
                    )
                    c = max(cnts, key=cv2.contourArea)
                    ((x, y), radius) = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]),
                              int(M["m01"] / M["m00"]))
                    X = int(x)
                    Y = int(y)
                    if radius > 10:
                        cv2.rectangle(
                            frame_image,
                            (int(x - radius), int(y + radius)),
                            (int(x + radius), int(y - radius)),
                            (255, 255, 255),
                            1,
                        )

                    if Y < (240 - tor):
                        error = (240 - Y) / 5
                        outv = int(round((pid.GenOut(error)), 0))
                        servo.up(outv)
                        Y_lock = 0
                    elif Y > (240 + tor):
                        error = (Y - 240) / 5
                        outv = int(round((pid.GenOut(error)), 0))
                        servo.down(outv)
                        Y_lock = 0
                    else:
                        Y_lock = 1

                    if X < (320 - tor * 3):
                        error = (320 - X) / 5
                        outv = int(round((pid.GenOut(error)), 0))
                        servo.lookleft(outv)
                        servo.turnLeft(coe_Genout(error, 64))
                        X_lock = 0
                    elif X > (330 + tor * 3):
                        error = (X - 240) / 5
                        outv = int(round((pid.GenOut(error)), 0))
                        servo.lookright(outv)
                        servo.turnRight(coe_Genout(error, 64))
                        X_lock = 0
                    else:
                        move.motorStop()
                        X_lock = 1

                    if X_lock == 1 and Y_lock == 1:
                        switch.switch(1, 1)
                        switch.switch(2, 1)
                        switch.switch(3, 1)
                        moveCtrl(ultra.checkdist(), back_R, forward_R)
                    else:
                        move.motorStop()
                        switch.switch(1, 0)
                        switch.switch(2, 0)
                        switch.switch(3, 0)

                else:
                    cv2.putText(
                        frame_image,
                        "Target Detecting",
                        (40, 60),
                        font,
                        0.5,
                        (255, 255, 255),
                        1,
                        cv2.LINE_AA,
                    )
                    move.motorStop()
                ####>>>OpenCV Ends<<<####

            if WatchDogMode:
                gray = cv2.cvtColor(frame_image, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (21, 21), 0)

                if avg is None:
                    print("[INFO] starting background model...")
                    avg = gray.copy().astype("float")
                    rawCapture.truncate(0)
                    continue

                cv2.accumulateWeighted(gray, avg, 0.5)
                frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

                # threshold the delta image, dilate the thresholded image to fill
                # in holes, then find contours on thresholded image
                thresh = cv2.threshold(frameDelta, 5, 255,
                                       cv2.THRESH_BINARY)[1]
                thresh = cv2.dilate(thresh, None, iterations=2)
                cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
                cnts = imutils.grab_contours(cnts)
                # print('x')
                # loop over the contours
                for c in cnts:
                    # if the contour is too small, ignore it
                    if cv2.contourArea(c) < 5000:
                        continue

                    # compute the bounding box for the contour, draw it on the frame,
                    # and update the text
                    (x, y, w, h) = cv2.boundingRect(c)
                    cv2.rectangle(frame_image, (x, y), (x + w, y + h),
                                  (128, 255, 0), 1)
                    text = "Occupied"
                    motionCounter += 1
                    # print(motionCounter)
                    # print(text)
                    LED.colorWipe(255, 16, 0)
                    lastMotionCaptured = timestamp
                    switch.switch(1, 1)
                    switch.switch(2, 1)
                    switch.switch(3, 1)

                if (timestamp - lastMotionCaptured).seconds >= 0.5:
                    LED.colorWipe(255, 255, 0)
                    switch.switch(1, 0)
                    switch.switch(2, 0)
                    switch.switch(3, 0)

            if FindLineMode:
                cvFindLine()

            cv2.line(frame_image, (300, 240), (340, 240), (128, 255, 128), 1)
            cv2.line(frame_image, (320, 220), (320, 260), (128, 255, 128), 1)

            if FindLineMode and not frameRender:
                encoded, buffer = cv2.imencode(".jpg", frame_findline)
            else:
                encoded, buffer = cv2.imencode(".jpg", frame_image)

            jpg_as_text = base64.b64encode(buffer)
            footage_socket.send(jpg_as_text)

            rawCapture.truncate(0)
Example #32
class Camera(HDThread):
    def __init__(self, thread_name, logging, detection_queue, visibility_queue,
                 target_fps):
        super().__init__(thread_name, logging, target_fps)
        self.detection_queue = detection_queue  # type: queue.Queue
        self.visibility_queue = visibility_queue
        try:
            self.logging.info("{} - Init PiCamera...".format(thread_name))
            from picamera import PiCamera
            from picamera.array import PiRGBArray
            self.camera = PiCamera()
            self.camera.resolution = (640, 480)
            self.rawCapture = PiRGBArray(self.camera)
            self.picamera_mode = True
            self.logging.info("{} - Init PiCamera success".format(thread_name))
        except Exception:
            self.logging.info("{} - Init VideoCapture...".format(thread_name))
            self.camera = cv2.VideoCapture(0)
            self.logging.info(
                "{} - Init VideoCapture success".format(thread_name))
            self.picamera_mode = False

    def _run(self) -> None:
        image = self._from_camera()
        # if the queue is full, drop the oldest entry so consumers always get the freshest frame
        if self.detection_queue.full():
            self.detection_queue.get()
        self.detection_queue.put(image)
        if self.visibility_queue.full():
            self.visibility_queue.get()
        self.visibility_queue.put(image)

    def is_module_in_error(self):
        return self.in_error

    def _from_camera(self):
        temp_time = start_time = datetime.now()
        self.logging.debug("{} - Start.".format(self.thread_name))
        if self.picamera_mode:
            self.camera.capture(self.rawCapture,
                                format="bgr",
                                use_video_port=True)
            img = self.rawCapture.array
        else:
            ret, img = self.camera.read()
        self.logging.debug("{} - Capturing image. Duration={}".format(
            self.thread_name,
            datetime.now() - temp_time))

        if self.picamera_mode:
            temp_time = datetime.now()
            self.rawCapture.truncate(0)
            self.logging.debug("{} - Capturing truncate Duration={}".format(
                self.thread_name,
                datetime.now() - temp_time))

        iteration_time = datetime.now() - start_time
        self.iteration_time_sec = iteration_time.total_seconds()
        self.logging.debug("{} - End. Total Duration={}".format(
            self.thread_name, iteration_time))
        return img
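
# The drop-oldest pattern in _run() above (get() when full, then put()) is not
# atomic: another consumer can empty the queue between the two calls. A small
# race-tolerant variant using only the standard-library queue API (an
# illustration, not part of the original class):
import queue

def put_latest(q, item):
    # try to enqueue; if the queue is full, discard the oldest entry first
    while True:
        try:
            q.put_nowait(item)
            return
        except queue.Full:
            try:
                q.get_nowait()  # drop the stale item and retry the put
            except queue.Empty:
                pass  # another consumer drained it; just retry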
Example #33
0
def main():
    # create logger
    logger = logging.getLogger('home_security')
    logger.setLevel(logging.DEBUG)
    # create file handler which logs even debug messages
    fh = logging.FileHandler('home_security.log')
    fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    # create formatter and add it to the handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)

    #syslog = logging.handlers.SysLogHandler(address = '/dev/log')
    #syslog.setLevel(logging.ERROR)
    #logger.addHandler(syslog)

    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-c",
                    "--conf",
                    required=True,
                    help="path to the JSON configuration file")
    args = vars(ap.parse_args())

    # filter warnings, load the configuration and initialize the Dropbox
    # client
    warnings.filterwarnings("ignore")
    conf = json.load(open(args["conf"]))
    client = None

    # check to see if the Dropbox should be used
    if conf["use_dropbox"]:
        if conf["accessToken"]:
            accessToken = conf["accessToken"]
            userID = "*****@*****.**"
        else:
            # connect to dropbox and start the session authorization process
            #flow = DropboxOAuth2FlowNoRedirect(conf["dropbox_key"], conf["dropbox_secret"])
            #print "[INFO] Authorize this application: {}".format(flow.start())
            #authCode = raw_input("Enter auth code here: ").strip()

            # finish the authorization and grab the Dropbox client
            #(accessToken, userID) = flow.finish(authCode)
            print " ************* error *************"

        print "accessToken:{} userID:{}".format(accessToken, userID)

        # Create a dropbox object using an API v2 key
        dbx = dropbox.Dropbox(accessToken)

        #client = DropboxClient(accessToken)
        print "[SUCCESS] dropbox account linked"

    # initialize the camera and grab a reference to the raw camera capture
    camera = PiCamera()
    camera.resolution = tuple(conf["resolution"])
    camera.framerate = conf["fps"]
    rawCapture = PiRGBArray(camera, size=tuple(conf["resolution"]))

    # allow the camera to warmup, then initialize the average frame, last
    # uploaded timestamp, and frame motion counter
    print "[INFO] warming up..."
    time.sleep(conf["camera_warmup_time"])
    avg = None
    lastUploaded = datetime.datetime.now()
    dayNumber = lastUploaded.toordinal()
    motionCounter = 0

    # capture frames from the camera
    for f in camera.capture_continuous(rawCapture,
                                       format="bgr",
                                       use_video_port=True):
        # grab the raw NumPy array representing the image and initialize
        # the timestamp and movement flag
        frame = f.array
        timestamp = datetime.datetime.now()
        dayNumberNow = timestamp.toordinal()
        movement = False

        # resize the frame, convert it to grayscale, and blur it
        frame = imutils.resize(frame, width=600)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # if the average frame is None, initialize it
        if avg is None:
            print "[INFO] starting background model..."
            avg = gray.copy().astype("float")
            rawCapture.truncate(0)
            continue

        # accumulate the weighted average between the current frame and
        # previous frames, then compute the difference between the current
        # frame and running average
        cv2.accumulateWeighted(gray, avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on thresholded image
        thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
                               cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)

        (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
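        # NOTE: the three-value unpacking above matches the OpenCV 3.x API;
        # OpenCV 2.4 and 4.x return two values. A version-proof alternative
        # (an assumption, not in the original) would be:
        #   cnts = imutils.grab_contours(cv2.findContours(
        #       thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))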

        # loop over the contours. 0,0 is tlc. y increases down, x increase right
        x, y = 0, 0
        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            # if the contour is too small, y co-ord is too low ignore it
            if (cv2.contourArea(c) < conf["min_area"]) or ((y + h) < 320):
                continue

            # compute the bounding box for the contour, draw it on the frame,
            # and update the text
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            movement = True

        # draw the text and timestamp on the frame
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
        cv2.putText(frame, "x: {} y: {}".format(x, y), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
        cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (255, 255, 255), 1)

        # check to see if there is movement
        if movement:
            logger.info("movement detected")
            # check to see if enough time has passed between uploads
            if (timestamp -
                    lastUploaded).seconds >= conf["min_upload_seconds"]:
                # increment the motion counter
                motionCounter += 1
                # check to see if the number of frames with consistent motion is
                # high enough
                if motionCounter >= conf["min_motion_frames"]:
                    # check to see if dropbox should be used
                    if conf["use_dropbox"]:
                        # write the image to temporary file
                        t = TempImage()
                        cv2.imwrite(t.path, frame)
                        suffix = (dayNumberNow % 20) + 1  #(1..20)
                        new_path = "Public/SecurityDawson65_" + str(suffix)
                        # upload the image to Dropbox and clean up the temporary image
                        try:
                            path = "{base_path}/{timestamp}.jpg".format(
                                base_path=new_path, timestamp=ts)
                            logger.info("[UPLOAD] {}".format(path))
                            #client.put_file(path, open(t.path, "rb"))

                            # we want to overwrite any previous version of the file
                            contents = open(t.path, "rb").read()
                            meta = dbx.files_upload(
                                contents,
                                path,
                                mode=dropbox.files.WriteMode("overwrite"))
                        except Exception as e:
                            logger.exception("Network error. Upload failed")
                            time.sleep(30)  #wait for dropbox to recover
                        finally:
                            t.cleanup()

                    # update the last uploaded timestamp and reset the motion
                    # counter
                    lastUploaded = timestamp
                    motionCounter = 0
                else:
                    logger.info(
                        "failed min_motion_frames {}".format(motionCounter))
            else:
                logger.info("failed min_upload_seconds")

        # otherwise, no movement detected
        else:
            motionCounter = 0
            if dayNumber != dayNumberNow:
                #midnight. clear new folder
                suffix = (dayNumberNow % 20) + 1  #(1..20)
                new_path = "Public/SecurityDawson65_" + str(suffix)
                delete_files(dbx, logger, new_path)
                dayNumber = dayNumberNow
                logger.info("old files deleted for day %s" %
                            str(dayNumberNow % 20 + 1))

        # check to see if the frames should be displayed to screen
        if conf["show_video"]:
            # display the security feed
            cv2.imshow("Security Feed", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key is pressed, break from the loop
            if key == ord("q"):
                break

        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)
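
# A hedged sketch of the JSON configuration this main() expects; every key
# below is read somewhere in the code above, but the values are illustrative
# guesses, not the author's settings:
#
# {
#     "use_dropbox": true,
#     "accessToken": "YOUR_DROPBOX_TOKEN",
#     "resolution": [640, 480],
#     "fps": 16,
#     "camera_warmup_time": 2.5,
#     "delta_thresh": 5,
#     "min_area": 5000,
#     "min_upload_seconds": 3.0,
#     "min_motion_frames": 8,
#     "show_video": false
# }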
Example #34
0
class VideoCapturer:
    multiplier = int()
    height = int()
    width = int()
    camera = object()
    filename = str()
    rawCapture = object()
    fifoFilters = []  # note: class attribute, shared by every instance
    snapRate = 20

    def __init__(self, _height, _width, mult, frame_rate):
        self.multiplier = mult
        self.height = int(mult * _height)
        self.width = int(mult * _width)
        self.camera = PiCamera()
        # PiCamera expects (width, height) ordering
        self.camera.resolution = (self.width, self.height)
        self.camera.framerate = frame_rate
        self.rawCapture = PiRGBArray(self.camera,
                                     size=(self.width, self.height))
        time.sleep(0.1)
        self.filename = "temp.mp4"

    def runCam(self, loadFilters=False):
        count = 0
        prev = float()
        for frame in self.camera.capture_continuous(self.rawCapture,
                                                    format="bgr",
                                                    use_video_port=True):
            image = frame.array
            image = self.rotateImage(image, 270)
            b, g, r = cv2.split(image)
            if loadFilters:
                for funct in self.fifoFilters:
                    b = funct(b)
            else:
                b = image
            count = count + 1
            if count > 1:
                # blend with the previous frame; addWeighted stays in uint8 range
                b = cv2.addWeighted(b, 0.5, prev, 0.5, 0)
            prev = b
            cv2.imshow("Frame", b)
            if (count > 0 and count % self.snapRate == 0):
                cv2.imwrite(str(count) + ".png", b)

            key = cv2.waitKey(1) & 0xFF
            self.rawCapture.truncate(0)
            if key == "ord":
                break
        self.camera.close()
        cv2.destroyAllWindows()

    # works for single channel only
    def addFilter(self, newFilterFunction):
        self.fifoFilters.append(newFilterFunction)

    def rotateImage(self, arr, angle):
        if angle > 89:
            arr = np.rot90(arr)
        if angle > 179:
            arr = np.rot90(arr)
        if angle > 269:
            arr = np.rot90(arr)
        return arr
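
# rotateImage() above chains up to three np.rot90 calls; for right-angle
# rotations the same effect can be had in a single call (an equivalent rewrite
# offered as a sketch, not a drop-in change to the class):
import numpy as np

def rotate_image(arr, angle):
    # np.rot90 with k rotates by k * 90 degrees counter-clockwise
    return np.rot90(arr, k=(angle // 90) % 4)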
Example #35
0
    def cam(self):
        # initialize the camera and grab a reference to the raw camera capture
        camera = PiCamera()
        height = 500
        width = 500
        camera.resolution = (width, height)
        camera.framerate = 60
        #camera.color_effects=(128,128)
        camera.iso = 500
        #camera.image_effect='denoise'
        #camera.exposure_mode = 'night'
        rawCapture = PiRGBArray(camera, size=(width, height))

        # allow the camera to warmup
        time.sleep(0.5)

        logging.basicConfig(
            level=logging.DEBUG,
            format='[%(levelname)s] (%(threadName)-9s) %(message)s',
        )

        try:
            for frame in camera.capture_continuous(rawCapture,
                                                   format="bgr",
                                                   use_video_port=True):
                # grab the raw NumPy array representing the image
                image = frame.array
                logging.debug("Thread")
                #print image
                #print cv2.mean(image)
                template_y = 0
                template_x = 0
                grid = 0
                width_size = int(width / self.grid_column)
                height_size = int(height / self.grid_row)
                for i in range(self.grid_row):
                    template_x = 0
                    for j in range(self.grid_column):
                        #t = threading.Thread(target=process_eachgrid, args=(image,template_x,template_y,grid,width_size,height_size))
                        #t.start()
                        #t.join()
                        self.process_eachgrid(image, template_x, template_y,
                                              grid, width_size, height_size)
                        template_x = template_x + width_size
                        grid = grid + 1
                    template_y = template_y + height_size
                self.draw_grid(image, height, width, width_size, height_size)
                #print com.motion_start
                # clear the stream in preparation for the next frame
                rawCapture.truncate(0)
                # show the image
                cv2.imshow("Security Feed", image)
                key = cv2.waitKey(1) & 0xFF

                # if the `q` key was pressed, break from the loop
                if key == ord("q"):
                    break
                #if not com.motion_start:
                #break

        finally:
            #com.motion_start= False
            cv2.destroyWindow("Security Feed")
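
    # draw_grid() and process_eachgrid() are defined elsewhere in this class;
    # a hypothetical minimal draw_grid matching the call above might look like
    # this (illustrative only, not the original implementation):
    def draw_grid(self, image, height, width, width_size, height_size):
        # vertical grid lines
        for x in range(width_size, width, width_size):
            cv2.line(image, (x, 0), (x, height), (0, 255, 0), 1)
        # horizontal grid lines
        for y in range(height_size, height, height_size):
            cv2.line(image, (0, y), (width, y), (0, 255, 0), 1)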
Example #36
0
import sys
import time

import cv2
from picamera import PiCamera
from picamera.array import PiRGBArray

camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))

# sys.argv[0] is the script itself; take the cascade path from the first
# argument when given, otherwise fall back to the bundled filename
cascPath = sys.argv[1] if len(sys.argv) > 1 else "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)

for frame in camera.capture_continuous(rawCapture,
                                       format="bgr",
                                       use_video_port=True):
    # Capture frame-by-frame
    frame = frame.array
    rawCapture.truncate(0)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
    )

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # Display the resulting frame (waitKey is required for imshow to refresh)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
Example #37
0
def run():
    relay_pin = [26]
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(relay_pin, GPIO.OUT)
    GPIO.output(relay_pin, 0)

    # `with` closes the file automatically; avoid shadowing the dict builtin
    with open('labels', 'rb') as f:
        labels = pickle.load(f)

    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 30
    raw_capture = PiRGBArray(camera, size=(640, 480))

    face_cascade = cv2.CascadeClassifier("haarcascade_face.xml")
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read("trainer.yml")

    font = cv2.FONT_HERSHEY_SIMPLEX

    init_time = 0
    playback_started = False
    for frame in camera.capture_continuous(raw_capture,
                                           format="bgr",
                                           use_video_port=True):
        frame = frame.array
        # flips the camera
        frame = cv2.flip(frame, -1)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray,
                                              scaleFactor=1.5,
                                              minNeighbors=5)
        for (x, y, w, h) in faces:
            roi_gray = gray[y:y + h, x:x + w]

            id_, conf = recognizer.predict(roi_gray)
            matched_name = ""
            for name, value in labels.items():
                if value == id_:
                    print(name + " conf: " + str(conf))
                    matched_name = name

            if conf >= 70 and conf < 105:
                GPIO.output(relay_pin, 1)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                cv2.putText(frame, matched_name + str(conf), (x, y), font, 2,
                            (0, 0, 255), 2, cv2.LINE_AA)

                # music section
                with open("user_profiles.json", "r") as user_profiles:
                    user_dict = json.load(user_profiles)
                    try:

                        if (not playback_started):
                            song = user_dict[matched_name]
                            pygame.mixer.init()
                            pygame.mixer.music.load(song)
                            pygame.mixer.music.play()

                            init_time = time.time()
                            playback_started = True
                        if (playback_started):
                            curr_time = time.time()
                            if curr_time - init_time > 10:
                                playback_started = False
                                pygame.mixer.music.fadeout(5000)
                        #while pygame.mixer.music.get_busy() == True:
                        # pygame.time.Clock().tick(100000)
                        # break;

                        # pygame.mixer.music.fadeout(20000)

                    except KeyError:
                        print(
                            "Song not found! Please set up your user profile.")

            else:
                GPIO.output(relay_pin, 0)

        cv2.imshow('frame', frame)
        key = cv2.waitKey(1)

        raw_capture.truncate(0)

        if key == 27:
            break

    cv2.destroyAllWindows()
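
# run() above looks a recognized name up in user_profiles.json and plays the
# mapped song; a sketch of the expected file layout (the names and paths are
# illustrative, only the name-to-song mapping is implied by the code):
#
# {
#     "alice": "/home/pi/music/alice_theme.mp3",
#     "bob": "/home/pi/music/bob_theme.mp3"
# }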
Example #38
0
class VideoStream(object):

    def __init__(self, src=None, usePiCamera=False, resolution=(320, 240),
                 framerate=30, format='bgr', trigger=None):
        # initialize the camera and stream
        if usePiCamera:
            self.camera = PiCamera()
        else:
            if isinstance(src, (UnifiedCamera, PiCamera)):
                self.camera = src
            else:
                self.camera = UnifiedCamera(src)
        self.resolution = resolution
        self.framerate = framerate
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self._warm_up_time = 2
        self._creation_time = time()

        # initialize the frame and the variable used to indicate
        # if the thread should be stopped
        self.frame = None
        self._stop = True  # thread is stopped or non existent
        if trigger is None:
            trigger = Event()
        self.trigger = trigger
        self.format = format
        self._thread_free = Event()
        self._order = Event()
        self._lock = RLock()
        self._thread = None
        self._frame_cache = None
        # the optimization flag lets the camera wait until it is needed
        self._optimize = True

    @property
    def resolution(self):
        return self.camera.resolution

    @resolution.setter
    def resolution(self, value):
        self.camera.resolution = value

    @property
    def framerate(self):
        return self.camera.framerate

    @framerate.setter
    def framerate(self, value):
        self.camera.framerate = value

    def _update_func(self):
        #print("camera thread {} started".format(self._thread))
        # initialize events
        #toggle = True
        try:
            df = time() - self._creation_time
            if df < self._warm_up_time:
                # allow to warm up if camera is opened too quickly
                sleep(self._warm_up_time-df)
            self.camera.start_preview()  # start camera
            # NOTE: a stock PiCamera's capture() returns None (the frame lands
            # in self.rawCapture.array); this line relies on UnifiedCamera's
            # capture() returning the image directly
            self.frame = self.camera.capture(self.rawCapture, self.format)
            if self.frame is None:
                raise CameraError("camera '{}' not working".format(self.camera))
            self._thread_free.set()  # notify thread is free to receive orders
            # keep looping infinitely until the thread is stopped
            while not self._stop:
                # because self.trigger can be shared, self.read() may have set
                # it from another thread and it can be cleared elsewhere; when
                # self._thread_free is not set the loop still has to produce a
                # frame, so it must not block here
                if self._optimize:
                #    self.trigger.clear()  # make the thread wait until event
                #    #self.__debug("event waiting in {}'s thread".format(id(self)))
                    if self.framerate:
                        self.trigger.wait(1/self.framerate)  # wait until read function or event calls
                    else:
                        self.trigger.wait(10)
                #    self._thread_free.clear()  # tell thread is busy
                #    #self.__debug("event was unblocked in {}'s thread".format(id(self)))
                #    self._order.set()  # there is an order

                # if the thread indicator variable is set, stop the thread
                # and resource camera resources
                #if self._stop:
                #    break

                self.frame = self.camera.capture(self.rawCapture, self.format)
                #print("{}taking photo in {}".format((""," ")[toggle],id(self)))
                #toggle = not toggle
                # notify frame was produced and thread is free
                # as quickly as possible
                #self._thread_free.set()
                self.rawCapture.truncate(0)
                if self._frame_cache is None and self.trigger.is_set():
                    self._frame_cache = self.frame
                    self._order.set()
        finally:
            # ending thread
            #self.rawCapture.close()
            self.camera.close()
            self._stop = True
            self._thread_free.set()  # prevents blocking in main
            self._order.set()  # prevent blocking in main
            #print("camera thread {} ended".format(self._thread))

    def read(self):
        if self.trigger.is_set():
            self._order.wait()
            return self._frame_cache
        return self.frame

    def clear_order(self):
        self._frame_cache = None
        self._order.clear()

    def get_frame(self):
        """
        safely give frame from latest read
        """
        if self._stop:
            raise RuntimeError("{} must be started".format(type(self)))
        return self.read()

    def start(self):
        # start the thread to read frames from the video stream
        with self._lock:
            # several threads may try to start the stream at once; if it was
            # closed by another thread while we waited on the lock, open the
            # thread again to prevent inconsistencies
            if self.closed():
                self._thread = t = Thread(target=self._update_func, args=())
                t.daemon = False
                self._stop = False  # thread started
                self._thread_free.clear()
                t.start()
                self._thread_free.wait(10)
                if self._stop:
                    raise CameraError("camera '{}' not ready".format(self.camera))
        return self

    def close(self):
        with self._lock:
            if not self.closed():
                # indicate that the thread should be stopped
                self._stop = True
                self._thread_free.clear()
                self.trigger.set()  # un-pause threads
                self._thread.join(10)
                if not self.closed():
                    raise Exception("Thread didn't close")

    def closed(self):
        with self._lock:
            return self._thread is None or not self._thread.is_alive()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    @classmethod
    def __debug(cls, data=None, start_debug=False):
        if not hasattr(cls, "checks"):
            cls.checks = Counter()
            cls.check_cum = set()

        if start_debug and data in cls.check_cum:
            cls.checks[frozenset(cls.check_cum)] += 1
            cls.check_cum = set()
            cls.check_cum.add(data)
        else:
            cls.check_cum.add(data)
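
# A minimal usage sketch for the class above; that src=0 selects a numeric
# device index is an assumption about UnifiedCamera, which comes from the
# surrounding project:
if __name__ == "__main__":
    with VideoStream(src=0, resolution=(320, 240), framerate=30) as vs:
        frame = vs.get_frame()  # latest frame captured by the worker thread
        print(frame if frame is None else frame.shape)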
Example #39
0
class Camera(MultiProcessing):

    _showImage = True

    _camera = None
    _rawCapture = None

    _cascade = None
    _nested = None

    __cameraResolutionX = 640
    __cameraResolutionY = 480

    # -1 = no face, 0 = max left, 1 = max right
    __posXFaceKey = MultiProcessing.get_next_key()
    # -1 = no face, 0 = max bottom, 1 = max top
    __posYFaceKey = MultiProcessing.get_next_key()

    _released = False

    _delay_seconds = 1
    _delay_seconds_when_idle = 1.5

    def __init__(self):
        print("camera init")
        super().__init__(prio=20)
        self.posXFace = -1
        self.posYFace = -1
        super().StartUpdating()

    ## multi process properties START ##

    @property
    def posXFace(self):
        return self.GetSharedValue(self.__posXFaceKey)

    @posXFace.setter
    def posXFace(self, value):
        self.SetSharedValue(self.__posXFaceKey, value)

    @property
    def posYFace(self):
        return self.GetSharedValue(self.__posYFaceKey)

    @posYFace.setter
    def posYFace(self, value):
        self.SetSharedValue(self.__posYFaceKey, value)

    ## multi process properties END ##

    def detect(self, img, cascade):
        rects = cascade.detectMultiScale(
            img,
            scaleFactor=1.3,
            minNeighbors=3,
            minSize=(int(self.__cameraResolutionX / 15),
                     int(self.__cameraResolutionY / 15)),
            flags=cv2.CASCADE_SCALE_IMAGE)
        if len(rects) == 0:
            return []
        rects[:, 2:] += rects[:, :2]
        return rects

    def draw_rects(self, img, rects, color):
        for x1, y1, x2, y2 in rects:
            cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

    def Update(self):

        print("camera start")

        cv2.destroyAllWindows()

        # initialize the camera and grab a reference to the raw camera capture
        self._camera = PiCamera()
        self._camera.resolution = (self.__cameraResolutionX,
                                   self.__cameraResolutionY)
        self._camera.framerate = 32
        self._rawCapture = PiRGBArray(
            self._camera
        )  #, size=(self._camera.resolution.width, self._camera.resolution.height))

        # allow the camera to warmup
        time.sleep(0.1)

        cascade_fn = "/home/pi/opencv-3.1.0/data/haarcascades/haarcascade_frontalface_alt.xml"
        nested_fn = "/home/pi/opencv-3.1.0/data/haarcascades/haarcascade_eye.xml"
        #cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_default.xml")
        #nested_fn  = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml")

        self._cascade = cv2.CascadeClassifier(cascade_fn)
        self._nested = cv2.CascadeClassifier(nested_fn)

        for frame in self._camera.capture_continuous(self._rawCapture,
                                                     format="bgr",
                                                     use_video_port=True):

            if (super().updating_ended == True):
                return

            # grab the raw NumPy array representing the image, then initialize the timestamp
            # and occupied/unoccupied text
            image = frame.array

            # local modules (note: this import runs on every frame and could
            # be hoisted out of the loop)
            #from video import create_capture
            from common import clock, draw_str

            #self._camera.capture(self._rawCapture, format="bgr")
            #image = self._rawCapture.array

            cv2.imshow('image', image)

            # clear the stream in preparation for the next frame
            self._rawCapture.truncate(0)

            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            gray = cv2.equalizeHist(gray)

            t = clock()
            rects = self.detect(gray, self._cascade)

            if (self._showImage == True):
                vis = image.copy()
                self.draw_rects(vis, rects, (0, 255, 0))

            dt = 0

            found_something = False

            if not self._nested.empty():
                posX = -1
                posY = -1
                bestWidth = -1
                for x1, y1, x2, y2 in rects:
                    width = x2 - x1
                    if (width > bestWidth):
                        bestWidth = width
                        posX = (x1 +
                                (x2 - x1) / 2) / self._camera.resolution.width
                        posY = (y1 +
                                (y2 - y1) / 2) / self._camera.resolution.height
                    if (self._showImage == True):
                        roi = gray[y1:y2, x1:x2]
                        vis_roi = vis[y1:y2, x1:x2]
                        subrects = self.detect(roi.copy(), self._nested)
                        self.draw_rects(vis_roi, subrects, (255, 0, 0))
                self.posXFace = posX
                self.posYFace = posY

                dt = clock() - t

                if (posX != -1):
                    #print('camera time: %.1f ms' % (dt*1000))
                    found_something = True

            if (self._showImage == True):
                draw_str(vis, (20, 20), 'time: %.1f ms' % (dt * 1000))
                cv2.imshow('facedetect', vis)

            if (found_something == True):
                time.sleep(self._delay_seconds)
            else:
                time.sleep(self._delay_seconds_when_idle)

    def ResetFace(self):
        self.posXFace = -1
        self.posYFace = -1

    def Release(self):
        if (self._released == False):
            self._released = True
            print("shutting down camera")
            super().EndUpdating()

    def __del__(self):
        self.Release()
Example #40
0
font = cv2.FONT_HERSHEY_SIMPLEX

fps_navg = 5  # how many frames to average over for the FPS timer
fps_times = []

fourcc = cv2.VideoWriter_fourcc(*"MJPG")
out = cv2.VideoWriter("output2.avi", fourcc, 5.0, (447, 448))  # size must match the cropped frame below

with picam as camera:
    camera.resolution = (xres, yres)
    stream = PiRGBArray(camera, size=(xres, yres))

    for frame in camera.capture_continuous(stream,
                                           format="bgr",
                                           use_video_port=True):
        image = frame.array
        image = cv2.flip(image, -1)
        #cv2.circle(image, (448, 416), 100, (0, 0, 0), -1)
        image = image[int(yres / 4.8):yres - int(yres / 3.318),
                      int(xres / 4.053):xres - int(xres / 3.8)].copy()

        cv2.imshow("Press q to exit", image)
        out.write(image)
        key = cv2.waitKey(1) & 0xFF

        stream.truncate(0)

        if key == ord("q"):
            out.release()
            break
Example #41
0
        right.stop()

    else:
        #move forward
        left.start(100)
        right.start(100)
        sleep(0)
        forward()
        sleep(0.2)
        left.stop()
        right.stop()

    if cv2.waitKey(1) == 27:
        break
##################################################################################################################

    raw_cap.truncate(0)
    #raw_cap.seek(0)
    frame_cnt = frame_cnt + 1
    #if the picam has captured 10 seconds of video leave the loop and stop recording
    if (frame_cnt > 100):
        #cam.stop_preview()
        #cam.close()
        break
###################################################################################################################

print "Ending...."

GPIO.cleanup()
cv2.destroyAllWindows()
Example #42
0
class camera():
    def __init__(self):
        conf_file = open(
            "/home/max/projects/catflap/ros_catkin_ws/src/catflap_image_collector/scripts/conf.yaml",
            'r')
        self.cfg = yaml.safe_load(conf_file)  # safe_load avoids executing arbitrary YAML tags
        conf_file.close()
        self.telegram_publisher = rospy.Publisher('telegram_message',
                                                  String,
                                                  queue_size=150)

    def camera_init(self):
        self.camera = PiCamera()
        self.camera.resolution = self.cfg["camera"]["resolution"]
        self.camera.rotation = self.cfg["camera"]["rotation"]
        self.camera.framerate = self.cfg["camera"]["framerate"]
        self.rawCapture = PiRGBArray(self.camera,
                                     size=self.cfg["camera"]["resolution"])
        self.telegram_publisher.publish("camera init")
        self.lock = Lock()

    def classifier_init(self):
        self.cascade = cv2.CascadeClassifier(
            self.cfg["camera"]["classifier"]["path"])
        bg_temp = cv2.imread(self.cfg["camera"]["background_image"])
        bg_temp = bg_temp[:, 150:390, :]
        self.bg = cv2.cvtColor(bg_temp, cv2.COLOR_BGR2GRAY)

    def get_cat_faces(self):
        pass

    def detect_prey(self, cnt, h, area_threshold=200):
        # check if a contour reaches below height h;
        # if the contour area below height h exceeds the threshold, return True
        prey_detected = False
        ind_list = []
        for i, point in enumerate(cnt):
            if cnt[i][0][1] <= h:
                ind_list.append(i)
        prey_cnt = np.delete(cnt, ind_list, 0)
        if len(prey_cnt) > 0 and cv2.contourArea(prey_cnt) >= area_threshold:
            prey_detected = True
        else:
            prey_detected = False

        return (prey_detected, prey_cnt)

    def move_cnts(self, cnt, x, y):
        for i, point in enumerate(cnt):
            cnt[i][0][0] = cnt[i][0][0] + x
            cnt[i][0][1] = cnt[i][0][1] + y
        return cnt

    def get_biggest_contour(self, cnts):
        # the contour with the largest area (same result as sorting by area
        # in descending order and taking the first element)
        return max(cnts, key=cv2.contourArea)

    def action(self, trust):
        self.lock.acquire()
        # take a picture, detect
        prey_detected = False
        catsnout_detected = False
        counter = 6
        settings = ""
        for frame in self.camera.capture_continuous(self.rawCapture,
                                                    format="bgr",
                                                    use_video_port=True):
            self.image = frame.array
            self.rawCapture.truncate(0)
            counter -= 1
            if counter < 1:
                break
        n = datetime.now()
        timestamp_text = "{0:04d}_{1:02d}_{2:02d}_{3:02d}_{4:02d}_{5:02d}_{6:01d}".format(
            n.year, n.month, n.day, n.hour, n.minute, n.second,
            int(n.microsecond / 100000))
        settings = "{0}: ".format(timestamp_text)
        settings = settings + "{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}, {11}, {12}, {13}, {14}, {15}, {16}, {17}".format(
            self.camera.sharpness, self.camera.contrast,
            self.camera.brightness, self.camera.saturation, self.camera.ISO,
            self.camera.exposure_compensation, self.camera.exposure_mode,
            self.camera.meter_mode, self.camera.awb_mode,
            self.camera.analog_gain, self.camera.awb_gains,
            self.camera.digital_gain, self.camera.drc_strength,
            self.camera.exposure_speed, self.camera.iso,
            self.camera.saturation, self.camera.sensor_mode,
            self.camera.shutter_speed)
        settings = settings + "\n"
        try:
            file = open("/home/max/camera_settings.txt", "a")
            file.write(settings)
            file.close()
        except Exception:
            rospy.logdebug("Unexpected Error: {0}".format(sys.exc_info()[0]))

        filename = "/home/max/Pictures/raw/{0}.png".format(timestamp_text)
        cv2.imwrite(filename, self.image)
        # region of interest, background area
        self.image = self.image[:, 150:390, :]

        image_text = "no catsnout detected"
        ## detection
        rects = self.cascade.detectMultiScale(self.image,
                                              scaleFactor=1.1,
                                              minNeighbors=5,
                                              minSize=(67, 50),
                                              maxSize=(240, 195),
                                              flags=cv2.CASCADE_SCALE_IMAGE)
        if len(rects) > 0:
            catsnout_detected = True
            # reset picture sequence when catsnout was detected
            self.picture_sequence = 0
            for (x, y, w, h) in rects:
                (x1, y1) = (x + int(w * 4. / 5.), y + h + h)
                if y1 > self.image.shape[0]:
                    y1 = self.image.shape[0]
                # paint catsnout detection window in green
                cv2.rectangle(self.image, (x, y), (x1, y1), (0, 255, 0), 2)
            # grayscale image
            img = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
            # histogram equalization
            img = cv2.equalizeHist(img)
            # extract region of interest
            roi = img[y:y1, x:x1]
            # extract region of interest in the background image
            bg_roi = self.bg[y:y1, x:x1]
            bg_roi = cv2.multiply(bg_roi, .6)
            # subtract the roi from background
            diff_roi = cv2.subtract(roi, bg_roi)
            blur = cv2.GaussianBlur(diff_roi, (5, 5), 0)
            ret, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_OTSU)
            # erode and dilate -> remove small blobs
            #thresh = cv2.erode(thresh, None, iterations=1)
            #thresh = cv2.dilate(thresh, None, iterations=2)
            #thresh = cv2.erode(thresh, None, iterations=1)
            #thresh = cv2.dilate(thresh, None, iterations=2)
            # invert
            thresh = cv2.bitwise_not(thresh)
            # find contours
            image_c, cnts, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST,
                                                        cv2.CHAIN_APPROX_SIMPLE)
            # look for biggest contour
            biggest_cnt = self.get_biggest_contour(cnts)
            # shift contour coordinates from roi to orig image
            biggest_cnt_moved = self.move_cnts(biggest_cnt, x, y)
            # draw contour
            cv2.drawContours(self.image, [biggest_cnt_moved],
                             -1, (0, 60, 100),
                             thickness=1)
            # detect prey
            prey_detected, prey_cnt = self.detect_prey(biggest_cnt_moved,
                                                       y + int(h / 5 * 4))
            if prey_detected:
                # draw detection in red
                cv2.drawContours(self.image, [prey_cnt],
                                 -1, (0, 0, 255),
                                 thickness=2)
                image_text = "prey detected"
                trust = trust * 0.33
                filename_mod = "/home/max/Pictures/classified/cat_prey/{0}_prey.png".format(
                    timestamp_text)
            else:
                image_text = "no prey detected"
                trust = trust * 2.0
                filename_mod = "/home/max/Pictures/classified/cat_no_prey/{0}_no_prey.png".format(
                    timestamp_text)
        else:
            trust = trust * 0.9
            filename_mod = "/home/max/Pictures/classified/no_catsnout/{0}_no_catsnout.png".format(
                timestamp_text)

        cv2.putText(self.image, timestamp_text, (2, 14),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 0, 255), 2)
        cv2.putText(self.image, image_text, (2, 32), cv2.FONT_HERSHEY_SIMPLEX,
                    0.66, (0, 0, 255), 2)
        cv2.putText(self.image, "{0:.2f}/3.00".format(trust), (2, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.52, (0, 0, 255), 2)

        cv2.imwrite(filename_mod, self.image)
        self.telegram_publisher.publish(filename_mod)
        self.lock.release()
        return (catsnout_detected, prey_detected)
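
# camera_init()/classifier_init() above read a YAML configuration; every key
# below is referenced in the code, while the values are illustrative guesses:
#
# camera:
#   resolution: [640, 480]
#   rotation: 0
#   framerate: 30
#   background_image: /home/max/background.png
#   classifier:
#     path: /home/max/cascade.xml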
Example #43
0
class Sender:

    '''
    :param str ip: the IP of the groundstation
    :param int port: the receiving port of the groundstation
    :param int stream_quality: the JPEG compression quality (0-100)
    :param tuple stream_size: the size of the streamed video (width, height)
    :param float fps: the target frame rate of the stream
    '''

    def __init__(self, ip="192.168.192.101", port=5800, stream_quality=15, stream_size=(352, 256), fps=4.0):

        self.ip = ip  # IP of ground station
        self.port = port  # port of socket
        self.stream_quality = stream_quality  # JPEG quality 0-100
        self.stream_size = stream_size
        self.streaming = False  # flag of streaming video
        self.recording = False  # flag of recording video
        # If True, the user requested streamStop() but __streamThread() has
        # not finished stopping yet
        self.stoppingStreamThread = False
        self.fps = fps

        self.sock = socket.socket()
        # self.sock.setsockopt(socket.SOL_SOCKET, IN.SO_BINDTODEVICE, "zt2lrwgvd2"+'\0')

        print("Connecting to socket %s:%d" % (self.ip, self.port))
        self.sock.connect((self.ip, self.port))

        # Enable instant reconnection and disable timeout system
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        print("Connected!")

        # initialize the camera and grab a reference to the raw camera capture
        self.camera = PiCamera()
        self.camera.resolution = "1024x768"
        self.camera.rotation = 180
        self.camera.exposure_compensation = 5  # -25 to 25
        self.rawCapture = PiRGBArray(self.camera, size=self.stream_size)

        print("Stream quality:", self.stream_quality)
        print("Stream size:", self.stream_size)
        print("FPS:", self.fps)


    def changeQuality(self, qty):
        if qty > 100:
            qty = 100
        if qty < 0:
            qty = 0
        self.stream_quality = qty
        print("Video quality: %i" % qty)

    def changeResolution(self, res):
        if res == "HD":
            self.stream_size = (704, 512)
            self.rawCapture = PiRGBArray(self.camera, size=self.stream_size)
            self.streamRestart()
            print("Change to High Res.")

        if res == "SD":
            self.stream_size = (352, 256)
            self.rawCapture = PiRGBArray(self.camera, size=self.stream_size)
            self.streamRestart()
            print("Change to Low Res.")

    def _videoFileName(self):
        now = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')
        return '/data/v-{}.h264'.format(now)

    def recordingStart(self):
        # Start highres. video recording
        if self.recording == False:
            print("Recording Start")
            self.recording = True
            video_file = self._videoFileName()
            self.camera.start_recording(video_file)
            print("Recording video to "+video_file)

    def recordingStop(self):
        if self.recording == True:
            print("Recording Stop")
            self.recording = False
            self.camera.stop_recording()

    # Continue the recording in the specified output; close existing output
    def splitRecording(self):
        if self.recording == True:
            video_file = self._videoFileName()
            self.camera.split_recording(video_file)
            print("Split video to "+video_file)
        else:
            print("No video is recording")

    def streamStart(self):
        if self.streaming == False:
            self.streaming = True
            if self.stoppingStreamThread == False:
                print("Video stream started")
                t = threading.Thread(target=self.__streamThread)
                t.start()
            else:
                print("Video stream start again.")
                self.stoppingStreamThread = False

    def streamStop(self):
        if self.streaming == True and self.stoppingStreamThread == False:
            self.streaming = False
            self.stoppingStreamThread = True

    def streamRestart(self):
        self.streamStop()
        while self.stoppingStreamThread == True:
            time.sleep(0.01)  # avoid busy-waiting while the old thread winds down
        self.streamStart()

    def increaseExposure(self):
        if self.camera.exposure_compensation < 25:
            self.camera.exposure_compensation = self.camera.exposure_compensation + 5
            print("Exposure compensation:", self.camera.exposure_compensation)

    def reduceExposure(self):
        if self.camera.exposure_compensation > -25:
            self.camera.exposure_compensation = self.camera.exposure_compensation - 5
            print("Exposure compensation:", self.camera.exposure_compensation)

    def changeFPS(self, data):
        if data > 0 and data < 60:
            self.fps = data
        print("FPS:", self.fps)

    # encode the capture image + timestamp
    # send it by socket
    def __streamThread(self):
        for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True, resize=self.stream_size):
            t = time.time()
            image = frame.array
            encode_param = [IMWRITE_JPEG_QUALITY, self.stream_quality]
            result, imgencode = imencode('.jpg', image, encode_param)
            data = numpy.array(imgencode)

            # Append a 13-character timestamp; keep everything as bytes
            # (Python 3 cannot concatenate bytes and str)
            stringData = data.tobytes() + str(time.time()).ljust(13).encode()

            # Send the size of the data for efficient unpacking
            self.sock.sendall(str(len(stringData)).ljust(16).encode())

            # Send the data
            self.sock.sendall(stringData)

            # Clear the stream in preparation for the next frame
            self.rawCapture.truncate(0)

            # Check if request to stop the stream thread
            if self.streaming == False:
                self.stoppingStreamThread = False
                print("Video stream stop")
                break

            # Limit FPS to save bandwidth
            s = 1/self.fps-(time.time()-t)
            if s < 0:
                s = 0
            time.sleep(s)
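
# A ground-station sketch matching the wire format used by __streamThread()
# above: a 16-byte ASCII length header, then the JPEG bytes with a
# 13-character timestamp appended (the bind address is an assumption; the
# port mirrors the Sender default):
import socket

import cv2
import numpy as np

def recv_exact(conn, n):
    # read exactly n bytes from the socket
    buf = b""
    while len(buf) < n:
        chunk = conn.recv(n - len(buf))
        if not chunk:
            raise ConnectionError("socket closed")
        buf += chunk
    return buf

server = socket.socket()
server.bind(("0.0.0.0", 5800))
server.listen(1)
conn, _ = server.accept()
while True:
    length = int(recv_exact(conn, 16).decode().strip())  # 16-byte length header
    payload = recv_exact(conn, length)
    jpeg, timestamp = payload[:-13], payload[-13:]       # 13-char timestamp tail
    frame = cv2.imdecode(np.frombuffer(jpeg, np.uint8), cv2.IMREAD_COLOR)
    cv2.imshow("ground station", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break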
Example #44
0
def signal():
    camera = PiCamera()
    camera.resolution = (320, 240)
    camera.framerate = 40
    rawCapture = PiRGBArray(camera, size=(320, 240))
    camera.shutter_speed = 50
    camera.iso = 1600
    camera.rotation = 180

    time.sleep(1)

    ret = True
    counter = 0
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        # Capture frame-by-frame

        # keep only the top 120 rows of the 240-row frame
        img = frame.array[:120]
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        cont = img.copy()
        canny = cv2.Canny(gray, 30, 250)

        contours, hierarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        new_contour = []
        counter += 1
        hm = 50  # placeholder; the commented-out parent-contour lookup below once set this
        get_number = 0

        for i in range(0, len(contours)):
            epsilon = 0.1 * cv2.arcLength(contours[i], True)
            approx = cv2.approxPolyDP(contours[i], epsilon, closed=True)

            if (abs(cv2.contourArea(contours[i])) > 0):
                # if(abs(cv2.contourArea(contours[i])) > 0 and hierarchy[0][i][0] > -1):

                # xm, ym, wm, hm = cv2.boundingRect(contours[hierarchy[0][i][0]])
                if ((hm) > 40):
                    x, y, w, h = cv2.boundingRect(contours[i])
                    rel = h / w
                    addh = int(h / 8)
                    addw = int(w / 8)
                    if (rel > 1.5 and h > 20 and w > 10 and w < 40 and h < 55 and y > 5 and (y + h) < 235 and x < 220):
                        # if(rel > 1.3):
                        # cv2.polylines(cont, [contours[i]], False, (0,255,0), 2)
                        number = \
                            cv2.threshold(gray[y - addh:y + h + addh, x - addw:x + w + addw], 30, 255,
                                          cv2.THRESH_BINARY)[1]
                        try:
                            if number[0][0].all() == 0:
                                number = cv2.bitwise_not(number)

                            nonzeroes = np.count_nonzero(number) / np.prod(number.shape)
                            if number[0][0].all() != 0 and number[:][0].all() and nonzeroes > 0.65 and nonzeroes < 0.88:
                                get_number = tesseract(number)

                                if (show_pictures == "yes"):
                                    cv2.rectangle(cont, (x - addw, y - addh), (x + w + addw, y + h + addh), (0, 0, 255),
                                                  1)

                                    cv2.imshow('number', cv2.resize(number, None, fx=5, fy=5))
                                    cv2.putText(cont, "h={}  w={} c={}".format(h, w, cv2.isContourConvex(contours[i])),
                                                (x + w + addw + 5, y), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255), 1)
                                    cv2.putText(cont,
                                                "r={:.2f} a={} n={}".format(rel, abs(cv2.contourArea(contours[i])), i),
                                                (x + w + addw + 5, y + 15), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255), 1)
                                    cv2.putText(cont, "x={} y={}".format(x, y),
                                                (x + w + addw + 5, y + 30), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255), 1)
                                    cv2.putText(cont, "number={} l={}".format(get_number, len(approx)),
                                                (x + w + addw + 5, y + 45), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 0), 1)
                                    print(str(get_number) + " " + str(
                                        np.count_nonzero(number) / np.prod(number.shape)) + " " + str(
                                        np.count_nonzero(number)) + " " + str(np.prod(number.shape)))

                        except Exception:
                            count = 1  # padded slice fell outside the frame or OCR failed; skip it
        if (show_pictures == "yes"):
            cv2.imshow('canny', canny)
            cv2.imshow('cont', cont)

        #    cv2.destroyAllWindows()

        try:
            if int(get_number) > 0:
                sound.play(str(int(get_number)))
        except:
            print("not a number")

        rawCapture.truncate(0)
        # waitKey is needed, else the picture won't display with imshow
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

def Follow_Line_Up_Top(camera,
                       timeout,
                       testMode=False,
                       intersectionQueue=[],
                       intersectionSpeeds=[],
                       intersectionTimes=[],
                       intersectionPeriods=[],
                       robot=loadRobot('ROBOSON.json')):
    # NOTE: the mutable list defaults and the loadRobot() call are evaluated
    # once at definition time; intersectionPeriods.pop(0) below will mutate
    # the shared default between calls

    # Adjusting robot variables
    # IncreaseTime = False
    # firstInter = True
    baseSpeed = robot.speed.base_top
    maxSpeed = robot.speed.max_top
    minSpeed = robot.speed.min_top
    numberOfLinesRequiredForIntersectionMode = robot.number_of_lines_required_for_inersection_mode

    # Value used for the binary filter
    BIN_CUT = robot.line_finder.binary_cut

    # Loading PID values
    MultiCoefficient = robot.pid.total_top
    PCoefficient = robot.pid.pro_top
    DCoefficient = robot.pid.der_top
    offlineExponential = robot.pid.off_line_top
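    # This excerpt only computes the line offset (deltaX); the coefficients
    # loaded above are presumably combined later in a standard PD law, e.g.
    # (a sketch, not code from the original):
    #   correction = MultiCoefficient * (PCoefficient * currentDeltaX
    #                 + DCoefficient * (currentDeltaX - previousDeltaX) / dt)
    # with offlineExponential shaping the response when the line is lost.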

    #camera = PiCamera()
    #camera.color_effects = (128, 128)
    cameraResolution = robot.line_finder.resolution
    #camera.resolution = (cameraResolution[0], cameraResolution[1])
    rawCapture = PiRGBArray(camera,
                            size=(cameraResolution[0], cameraResolution[1]))

    # Check serial port issues:
    # if there is no serial connection, this function
    # simply returns the untouched queue
    try:
        ser = serial.Serial("/dev/ttyS0", 9600)
        ser.flushInput()
        serialByteArray = []
    except serial.SerialException:
        rawCapture.truncate(0)
        return intersectionQueue

    # Toggle whether the frames are displayed
    if testMode:
        cv2.namedWindow("Original_frame", cv2.WINDOW_NORMAL)
        cv2.namedWindow("binary", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("binary", 100, 100)
        cv2.resizeWindow("Original_frame", 100, 100)

    # Reads width of the line from the file
    with open('LineWidth.txt') as f:
        black_line_width = int(f.readline())

    # Minimum width for a fork
    fork_min_width = robot.fork_black_line_min_width_multiplier * black_line_width
    post_line_width = robot.post_black_line_width * black_line_width

    # Kernel setup
    n = 3
    kernel = np.array([[0, 1 / n, 0] * n])

    # Camera initialization and start
    camera.capture(rawCapture, format='bgr')
    frame = rawCapture.array[:, :, 0]
    rawCapture.truncate(0)

    # Retrieving the height and the width of the frame
    height = len(frame)
    width = len(frame[0])

    # Sensor lines used for the line follower
    # You could change the distances
    linesY = np.linspace(
        2 / 3 * height - 5, height - 10, 5,
        dtype=int)  #np.linspace(20, height-5, 5, dtype = int)#

    # The capture stream must be truncated before the next frame
    rawCapture.truncate(0)

    # List of errors and times for PID plots
    if testMode:
        errorList = []
        timeList = []

    # Initializing distances for derivative calculation
    currentDeltaX = 0
    previousDeltaX = 0

    # Initializing times
    currentTime = time.time()
    frameEndTime = time.time()
    frameStartTime = time.time()

    # Intersection mode determines how the line following proceeds
    intersectionMode = False
    # This value changes based on the last element in the intersection queue
    intersectionDirection = None
    # Number of sensor lines detecting an intersection, where the left and
    # right spikes are more than a single line apart
    numberOfLinesDetectingIntersection = 0

    # Used to defer the intersections if detected too soon
    lineFollowingStartTime = time.time()

    lastIntersectionDetectionPeriod = intersectionPeriods.pop(0)
    lastIntersectionDetectedTime = time.time()

    startFollowTopLine = time.time()

    # There are two ways to exit this main loop
    # 1) The serial connection is lost
    # 2) queue of the turns has reached character "X"

    # Main for loop starting
    # Each frame is taken as a 3-channel grayscale image
    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):

        if time.time() - startFollowTopLine > timeout:
            serialByteArray = [0, 0, 0, 0]
            ser.write(bytes(serialByteArray))  # pyserial expects bytes, not a list
            rawCapture.truncate(0)
            return intersectionQueue

        # Algorithm start time
        algorithmStartTime = time.time()

        # Sample frame taken time
        frameStartTime = time.time()
        frameTakingTime = frameStartTime - frameEndTime
        #print("Frame taking time:", frameTakingTime)

        # Appending time to the list (only created in test mode)
        if testMode:
            timeList.append(frameStartTime)

        # Save the previous delta X, used for the derivative term
        previousDeltaX = currentDeltaX

        frameArray = frame.array
        # Creating a grayscale copy of the frame by taking only one of the
        # channels and only the rows used by the line-following algorithm;
        # this step is included to reduce the computation time
        # (fancy indexing already copies, so an extra .copy() is unnecessary)
        frameCopy = frameArray[linesY]
        frameCopyTime = time.time()
        #print("transform time", frameCopyTime - algorithmStartTime)

        # Taking the kernel of the image
        kerneledImage = cv2.filter2D(frameCopy, -1, kernel)[:, :, 0]
        kernelTime = time.time()
        #print("kernel time: ",kernelTime-frameCopyTime)

        # Blurring the image
        blurredImage = cv2.GaussianBlur(kerneledImage, (5, 5), 0)
        blurTime = time.time()
        #print("blur time: ",blurTime-kernelTime)

        # Binary filter for the image
        ret, binaryImage = cv2.threshold(blurredImage, BIN_CUT, 255,
                                         cv2.THRESH_BINARY)
        binTime = time.time()
        #print("bin time: ",binTime-blurTime)

        # Creating the matrix
        pathMatrix = binaryImage

        # Median center value
        # This value is a cumulative result from all the lines of sensors
        currentDeltaX = 0

        # list of deltaXs
        # A list of the deltaXs indicated by each line
        deltaXList = []

        # Reset this counter; it is incremented in the loop below. If the
        # number of sensor lines detecting an intersection drops to zero,
        # we must exit intersection mode.
        numberOfLinesDetectingIntersection = 0

        if not intersectionMode:

            # A loop for the calculations for each line of the sensors
            for line_index in range(0, len(linesY)):
                lineY = linesY[line_index]
                line = pathMatrix[line_index]

                # taking the derivative of the lines
                dline = diff(line)

                if line[0] == 0:
                    dline[0] = 1
                if line[-1] == 0:
                    dline[-1] = 1

                # Here we can either choose the two highest points
                # or simply trust that there will only be 2 values
                # These indicate the two edges of the line
                #top_two_indices = sorted(range(len(dline)), key = lambda i: dline[i])[-2:]
                edgeIndices = [
                    i for i in range(0, len(dline)) if dline[i] != 0
                ]

                # Case for when two or more edges are detected
                if (len(edgeIndices) >= 2):
                    leftmostEdge = edgeIndices[0]
                    rightmostEdge = edgeIndices[-1]

                    lineWidthDetected = rightmostEdge - leftmostEdge
                    #print("Line width detected: ",lineWidthDetected)
                    #print(len(edgeIndices))
                    #print("_______________________________________________")

                    # Checking to see if a fork is detected
                    if (lineWidthDetected >= fork_min_width
                            and len(edgeIndices) >= 4):

                        intersectionDirection = intersectionQueue[0]
                        numberOfLinesDetectingIntersection += 1

                        if (intersectionDirection == "L"):
                            thisLineDeltaX = rightmostEdge - width / 2
                        elif (intersectionDirection == "R"):
                            thisLineDeltaX = leftmostEdge - width / 2
                        elif (intersectionDirection == "X"):

                            serialByteArray = [100, 0, 100, 0]
                            ser.write(serialByteArray)

                            rawCapture.truncate(0)
                            return intersectionQueue

                        deltaXList.append(thisLineDeltaX)

                    elif (lineWidthDetected >= post_line_width):
                        intersectionDirection = intersectionQueue[0]
                        numberOfLinesDetectingIntersection += 1

                        if (intersectionDirection == "L"):
                            thisLineDeltaX = rightmostEdge - width / 2
                        elif (intersectionDirection == "R"):
                            thisLineDeltaX = leftmostEdge - width / 2
                        elif (intersectionDirection == "X"):
                            serialByteArray = [100, 0, 100, 0]
                            ser.write(serialByteArray)
                            rawCapture.truncate(0)
                            return intersectionQueue

                        deltaXList.append(thisLineDeltaX)

                    else:
                        # Finding the center of the line
                        lineCenterX = int((leftmostEdge + rightmostEdge) / 2)

                        # Draw circle for showing
                        # cv2.circle(frame, (lineCenterX, lineY), 2, (255,0,0), -1)

                        # Offset of the detected line centre from the frame centre
                        # (matches the equivalent branch in intersection mode)
                        thisLineDeltaX = lineCenterX - width / 2
                        deltaXList.append(thisLineDeltaX)

                # Case for when only one edge is detected
                elif (len(edgeIndices) == 1):
                    if dline[0] == 0:
                        onlyEdge = edgeIndices[-1]
                    if dline[-1] == 0:
                        onlyEdge = edgeIndices[0]

                    # Cases for when the edge is to the left or the right
                    if (onlyEdge >= width / 2):
                        lineCenterX = onlyEdge + black_line_width / 2
                    if (onlyEdge < width / 2):
                        lineCenterX = onlyEdge - black_line_width / 2
                    thisLineDeltaX = lineCenterX - width / 2
                    deltaXList.append(thisLineDeltaX)
                    print("F**K THE POLICE")

                # Case for when we are off the line
                # Exponentially grow the delta values to steer back to the line
                else:
                    thisLineDeltaX = offlineExponential * (abs(
                        previousDeltaX)) * (-1 if previousDeltaX < 0 else 1)
                    deltaXList.append(thisLineDeltaX)

            #going into intersection mode
            if (numberOfLinesDetectingIntersection >=
                    numberOfLinesRequiredForIntersectionMode
                    and time.time() - lastIntersectionDetectedTime >
                    lastIntersectionDetectionPeriod):

                #intersectionMode = True
                intersectionDirection = intersectionQueue.pop(0)
                intersectionSpeed = intersectionSpeeds.pop(0)
                intersectionTime = intersectionTimes.pop(0)
                lastIntersectionDetectionPeriod = intersectionPeriods.pop(0)
                thisTime = time.time()
                goLeft = int(intersectionDirection == 'L')
                while (time.time() - thisTime < intersectionTime):
                    ser.write([
                        intersectionSpeed, goLeft, intersectionSpeed,
                        int(not goLeft)
                    ])
                ser.write([
                    int(intersectionSpeed / 3),
                    int(not goLeft),
                    int(intersectionSpeed / 3), goLeft
                ])
                time.sleep(0.05)
                ser.write([0, 0, 0, 0])
                print("intersection mode timestamp", time.time())
                print("this intersection period was ",
                      time.time() - lastIntersectionDetectedTime)
                print("__________")
                lastIntersectionDetectedTime = time.time()

        if intersectionMode:

            # Important!
            # In this loop the intersection direction is already determined:
            # we must have popped it off the intersectionQueue in a previous
            # iteration for the state machine to switch into intersection mode

            # Also note: if the intersection direction indicated "X",
            # we would have exited the program by now,
            # so there is no need to check for it

            # A loop for the calculations for each line of the sensors
            for line_index in range(0, len(linesY)):
                lineY = linesY[line_index]
                line = pathMatrix[line_index]

                # taking the derivative of the lines
                dline = diff(line)

                if line[0] == 0:
                    dline[0] = 1
                if line[-1] == 0:
                    dline[-1] = 1

                # Here we can either choose the two highest points
                # or simply trust that there will only be 2 values
                # These indicate the two edges of the line
                #top_two_indices = sorted(range(len(dline)), key = lambda i: dline[i])[-2:]
                edgeIndices = [
                    i for i in range(0, len(dline)) if dline[i] != 0
                ]

                # Case for when two or more edges are detected
                if (len(edgeIndices) >= 2):
                    leftmostEdge = edgeIndices[0]
                    rightmostEdge = edgeIndices[-1]

                    lineWidthDetected = rightmostEdge - leftmostEdge
                    #print("Line width detected: ",lineWidthDetected)

                    # Checking to see if a fork is detected
                    if (lineWidthDetected >= fork_min_width):

                        numberOfLinesDetectingIntersection += 1

                        if (intersectionDirection == "L"):
                            thisLineDeltaX = rightmostEdge - width / 2
                        elif (intersectionDirection == "R"):
                            thisLineDeltaX = leftmostEdge - width / 2
                        elif (intersectionDirection == "X"):
                            serialByteArray = [100, 0, 100, 0]
                            ser.write(serialByteArray)
                            rawCapture.truncate(0)
                            return intersectionQueue

                        deltaXList.append(thisLineDeltaX)

                    else:
                        # Finding the center of the line
                        lineCenterX = int((leftmostEdge + rightmostEdge) / 2)

                        # Draw circle for showing
                        # cv2.circle(frame, (lineCenterX, lineY), 2, (255,0,0), -1)

                        thisLineDeltaX = lineCenterX - width / 2
                        deltaXList.append(thisLineDeltaX)

                # Case for when only one edge is detected
                elif (len(edgeIndices) == 1):

                    if dline[0] == 0:
                        onlyEdge = edgeIndices[-1]
                    if dline[-1] == 0:
                        onlyEdge = edgeIndices[0]

                    # Cases for when the edge is to the left or the right
                    if (onlyEdge >= width / 2):
                        lineCenterX = onlyEdge + black_line_width / 2
                    if (onlyEdge < width / 2):
                        lineCenterX = onlyEdge - black_line_width / 2
                    thisLineDeltaX = lineCenterX - width / 2
                    deltaXList.append(thisLineDeltaX)

                # Case for when we are off the line
                # Exponentially grow the delta values to steer back to the line
                else:
                    thisLineDeltaX = offlineExponential * (abs(
                        previousDeltaX)) * (-1 if previousDeltaX < 0 else 1)
                    deltaXList.append(thisLineDeltaX)

            if (numberOfLinesDetectingIntersection == 0):
                #and time.time() - intersectionStartTime >= 0.3
                print("exiting intersection mode!")
                intersectionMode = False

        # Taking the median of the delta Xs
        currentDeltaX = statistics.median(deltaXList)
        #print("Delta X measured: ", currentDeltaX)

        forLoopTime = time.time()
        #print("for loop time: ", forLoopTime - binTime)

        #
        # PID calculations
        #

        # Finding the derivative time
        # The time it took from taking the last frame
        derivativeDeltaTime = frameTakingTime + (forLoopTime -
                                                 algorithmStartTime)

        # finding the derivative
        dXdT = (currentDeltaX - previousDeltaX) / derivativeDeltaTime

        # Finding the error value given the constants
        error_value = int(MultiCoefficient *
                          (DCoefficient * dXdT + PCoefficient * currentDeltaX))

        duty_cycle_left = baseSpeed + error_value
        duty_cycle_right = baseSpeed - error_value
        #print(duty_cycle_left, duty_cycle_right)

        # Reassigning the duty cycle values based on the cutoffs
        # Left Wheel
        if duty_cycle_left > maxSpeed:
            duty_cycle_left = maxSpeed
            #print("F**K LEFT")
        elif duty_cycle_left < minSpeed:
            duty_cycle_left = minSpeed
            #print("F**K LEFT")

        # Right wheel
        if duty_cycle_right > maxSpeed:
            duty_cycle_right = maxSpeed
            #print("F**K RIGTH")
        elif duty_cycle_right < minSpeed:
            duty_cycle_right = minSpeed
            #print("F**K RIGTH")

        calcTime = time.time()
        #print("Calculation time: ", calcTime - forLoopTime)

        # Serial communication to the BluePill
        serialByteArray.append(abs(duty_cycle_left))
        if (duty_cycle_left > 0):
            serialByteArray.append(1)
        else:
            serialByteArray.append(0)

        serialByteArray.append(abs(duty_cycle_right))
        if (duty_cycle_right > 0):
            serialByteArray.append(1)
        else:
            serialByteArray.append(0)

        #print("Byte array sent to the BluePill: ",serialByteArray)

        # This try/except block returns the unfinished
        # queue in case the serial communication is lost mid-run
        try:
            ser.write(serialByteArray)
            serialByteArray = []
        except serial.SerialException:
            rawCapture.truncate(0)
            return intersectionQueue

        serialTime = time.time()
        #print("serial time: ", serialTime - calcTime)

        # Loop is now complete
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        algorithmEndTime = time.time()
        #print("Full algorithm time: ", algorithmEndTime - algorithmStartTime)

        # Showing the frame in test mode only
        if testMode:
            cv2.imshow("Original_frame", frameArray)
            cv2.imshow("binary", binaryImage)

        # Truncating required for the frames
        rawCapture.truncate(0)

        # End of this frame
        frameEndTime = time.time()

    camera.close()
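
The steering logic above reduces to a proportional-plus-derivative update on the lateral offset, clamped to the duty-cycle cutoffs. A minimal sketch of that calculation, with hypothetical gains and speeds standing in for PCoefficient, DCoefficient, baseSpeed and the min/max cutoffs:

def pd_duty_cycles(delta_x, prev_delta_x, dt,
                   p_coeff=0.5, d_coeff=0.1, multi_coeff=1.0,
                   base_speed=60, min_speed=0, max_speed=100):
    # derivative of the lateral offset over the frame-to-frame interval
    dxdt = (delta_x - prev_delta_x) / dt
    # combined error term, mirroring the error_value computation above
    error = int(multi_coeff * (d_coeff * dxdt + p_coeff * delta_x))
    # clamp both wheels to the allowed duty-cycle range
    left = max(min_speed, min(max_speed, base_speed + error))
    right = max(min_speed, min(max_speed, base_speed - error))
    return left, right

# e.g. pd_duty_cycles(12, 8, 0.033) -> a clamped (left, right) pair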
示例#46
0
    def run(self):
        """ initialize the camera and grab a
            reference to the raw camera capture """
        camera = PiCamera()
        camera.resolution = (640, 480)
        # camera.hflip = True
        # camera.vflip = True

        camera.awb_mode = 'off'
        # Start off with low gains
        rg, bg = (1, 1.8)
        camera.awb_gains = (rg, bg)

        rawCapture = PiRGBArray(camera)

        # allow the camera to warmup
        time.sleep(0.1)

        # grab an image from the camera
        camera.capture(rawCapture, format="bgr")

        # Show the full unbalanced image
        # image = rawCapture.array
        # cv2.imshow("Initial", image)

        rawCapture.seek(0)
        rawCapture.truncate()

        for i in range(10):
            camera.capture(rawCapture, format="bgr")
            # get the centre section, average RGB channels
            imagecentre = rawCapture.array[190:290, 270:370]

            # if i == 9:
            #   cv2.imshow("Image" + str(i), imagecentre)
            b, g, r = (np.mean(imagecentre[..., i]) for i in range(3))
            print('R:%5.2f, B:%5.2f = (%5.2f, %5.2f, %5.2f)' %
                  (rg, bg, r, g, b))

            # Coarse step: a large channel imbalance can trigger both this
            # block and the finer one below in the same pass
            if abs(r - g) > 20:
                if r > g:
                    rg -= 0.1
                else:
                    rg += 0.1
            if abs(b - g) > 20:
                if b > g:
                    bg -= 0.1
                else:
                    bg += 0.1

            # Fine step: keep nudging the gains while the channels differ
            if abs(r - g) > 2:
                if r > g:
                    rg -= 0.1
                else:
                    rg += 0.1
            if abs(b - g) > 1:
                if b > g:
                    bg -= 0.1
                else:
                    bg += 0.1
            camera.awb_gains = (rg, bg)
            rawCapture.seek(0)
            rawCapture.truncate()

        # display the image on screen and wait for a keypress
        # camera.capture(rawCapture, format="bgr")
        # final_image = rawCapture.array
        # cv2.imshow("Final", final_image)
        # cv2.waitKey(0)

        f = open('rbgains.txt', 'w')
        f.write("R:%5.2f\nB:%5.2f\n" % (rg, bg))
        f.close()
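
On a later run the calibrated gains can be read back and applied before capturing. A small sketch, assuming the "R:"/"B:" file format written to rbgains.txt above:

def load_awb_gains(path='rbgains.txt'):
    # parse lines of the form "R: 1.30" / "B: 1.80"
    gains = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.partition(':')
            if value:
                gains[key.strip()] = float(value)
    return gains['R'], gains['B']

# rg, bg = load_awb_gains()
# camera.awb_mode = 'off'
# camera.awb_gains = (rg, bg)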
示例#47
0
    def aggiungi_persona(self):
        d = {}
        face_cascade = cv2.CascadeClassifier(
            'dataBase/haarcascade_frontalface_default_face.xml')

        # load the dictionary
        self.carica_personale(d)

        # check whether the person is already in the database
        flag = False
        while (flag == False):
            print(" ")
            nome = input("enter first and last name: ")
            persone = d.values()
            if (nome not in persone):
                flag = True
            else:
                print("person already in the database... try again")

        # assign the person an id: the smallest integer key not yet in use
        # (scanning up to nKey + 1 so a new id is assigned even when the
        # existing keys are consecutive)
        listaKey = d.keys()
        nKey = len(listaKey)
        num = 1
        while num <= nKey + 1:
            if (num not in listaKey):
                d[num] = nome  # add the person to the database
                break
            num = num + 1

        # initialise the PiCamera
        camera = PiCamera()
        camera.resolution = (640, 480)
        camera.framerate = 32
        rawCapture = PiRGBArray(camera, size=(640, 480))
        time.sleep(0.1)

        # face-capture procedure
        print("")
        print("bring your face close to the camera...")
        time.sleep(1)
        print("3...")
        time.sleep(1)
        print("2...")
        time.sleep(1)
        print("1...")
        time.sleep(1)
        print(" ")
        print('Capturing 20 faces...')
        print(" ")

        # start the PiCamera and build the face database
        cont = 0
        for frame in camera.capture_continuous(rawCapture,
                                               format="bgr",
                                               use_video_port=True):
            # frame as a numpy array
            image = frame.array
            # convert the image to grayscale
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            # find the face: faces stores the face coordinates found with the xml cascade file
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)

            for (x, y, w, h) in faces:
                # save the cropped face in grayscale
                cv2.imwrite(
                    "dataBase/dataBaseVolti/User." + str(num) + "." +
                    str(cont + 1) + ".jpg", gray[y:y + h, x:x + w])
                cont = cont + 1
                print('face number ' + str(cont))
                rawCapture.truncate(0)
                cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
                cv2.waitKey(100)

            # show the frames
            cv2.imshow("Frame", image)
            rawCapture.truncate(0)
            cv2.waitKey(1)
            if (cont >= 20):
                print('Capture complete!')
                break

        # build the binary database
        self.crea_databaseBinario()

        cv2.destroyAllWindows()
        cv2.waitKey(1)
        cv2.waitKey(1)
        cv2.waitKey(1)
        cv2.waitKey(1)

        camera.close()
        self.salva_personale(d)
        d = {}
        domanda = input("Do you want to add another person? Y/N: ")
        if (domanda == 'Y' or domanda == 'y'):
            self.aggiungi_persona()
        else:
            print("")
            print("fine opzioni")
示例#48
            ang = 90 + ang
        setpoint = 100  # setpoint: the x position treated as the origin for x_min
        error = int(x_min - setpoint)
        ang = int(ang)
        Motor_speed(45, (error * kp) + (ang * ap))
        box = cv2.boxPoints(
            blackbox
        )  # obtain the corner points of the minAreaRect so it can be drawn as a contour on the image
        box = np.int0(box)
        cv2.drawContours(
            image, [box], 0, (0, 0, 255),
            3)  # draw the contour on the image so that we can see it
        cv2.putText(image, str(ang), (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 0, 255), 2)
        cv2.putText(image, str(error), (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (255, 0, 0), 2)
        cv2.line(image, (int(x_min), 90), (int(x_min), 110), (255, 0, 0), 3)

    cv2.imshow("original with line", image_Binary)  # show the variable that holds the image array
    rawCapture.truncate(0)  # empty the buffer so that the next frame can be written to it
    key = cv2.waitKey(1) & 0xFF  # read a key
    if key == ord("q"):  # if the pressed key is the character q, break out of the loop and finish
        break

GPIO.output(40, GPIO.LOW)  # turn the light of the camera off
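
The fragment above follows the usual minAreaRect pattern: fit a rotated rectangle to the line contour, derive an angle and a lateral error, and draw the box for feedback. A self-contained sketch of that pattern (the contour and the setpoint value are placeholders; 100 matches the setpoint used above):

import cv2
import numpy as np

def rect_from_contour(contour):
    # minAreaRect returns ((cx, cy), (w, h), angle)
    blackbox = cv2.minAreaRect(contour)
    (x_min, y_min), (w_min, h_min), ang = blackbox
    # corner points of the rotated rectangle, drawable as a contour
    box = np.int0(cv2.boxPoints(blackbox))
    return box, x_min, ang

# box, x_min, ang = rect_from_contour(contour)
# error = int(x_min - 100)  # lateral error relative to the setpoint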
示例#49
0
time.sleep(0.1)

for frameBGR in cam.capture_continuous(raw, format="bgr", use_video_port=True):
    imgBGR = frameBGR.array
    imgBW = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)

    listFace = faceCascade.detectMultiScale(
        imgBW,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        #v2.X
        #flags=cv2.cv.CV_HAAR_SCALE_IMAGE

        #v3.1+
        flags=cv2.CASCADE_SCALE_IMAGE)

    for (x, y, w, h) in listFace:
        cv2.rectangle(imgBGR, (x, y), (x + w, y + h), (255, 255, 0), 4)

    print len(listFace)
    cv2.imshow('Video', imgBGR)

    key = cv2.waitKey(1) & 0xFF

    raw.truncate(0)  # Clear stream

    if key == ord("q"):
        break
示例#50
0
class Camera(PiCamera):
    def __init__(self,
                 sonar=None,
                 clientAddr=None,
                 clientPort=5555,
                 resolution=(640, 480)):
        PiCamera.__init__(self)
        self.resolution = resolution
        self.mode = None
        self.sonar = sonar

        self.rawCapture = PiRGBArray(self)
        self.rawCapture.truncate(0)
        self.rawCapture.seek(0)

        # Start stream
        #self.stream()

    def stream(self, addr='192.168.0.169', port=5555):
        '''Send live stream'''

        # Now connect the camera socket
        context = zmq.Context()
        footageSocket = context.socket(zmq.PUB)
        footageSocket.connect('tcp://{}:{}'.format(addr, port))

        self.rawCapture.truncate(0)
        self.rawCapture.seek(0)

        #font = cv2.FONT_HERSHEY_SIMPLEX
        for frame in self.capture_continuous(output=self.rawCapture,
                                             format="bgr",
                                             use_video_port=True):

            image = frame.array

            # draw cross on image
            #cv2.line(image,(300,240),(340,240),(128,255,128),1)
            #cv2.line(image,(320,220),(320,260),(128,255,128),1)

            if self.mode == 'opencv':
                pass  # TODO: implement OpenCV modes
            '''
            try:
                if self.sonar.distance < 8:
                    cv2.putText(image,'%s m'%str(round(self.sonar.distance,2)),
                                (40,40), font, 0.5,(255,255,255),
                                1,cv2.LINE_AA)
            except:
                pass
            '''

            # send image to client
            encoded, buffer = cv2.imencode('.jpg', image)
            jpg_as_text = base64.b64encode(buffer)
            footageSocket.send(jpg_as_text)
            #footageSocket.send(image)
            #send_array(footageSocket,image)

            self.rawCapture.truncate(0)
            self.rawCapture.seek(0)
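
The stream() method above publishes base64-encoded JPEG frames over a zmq PUB socket, so a matching viewer only needs the inverse pipeline. A sketch of such a client; the port matches the default above, and since the camera side connect()s out, the viewer bind()s:

import base64
import cv2
import numpy as np
import zmq

context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.bind('tcp://*:5555')            # the camera side connect()s to us
socket.setsockopt(zmq.SUBSCRIBE, b'')  # subscribe to everything

while True:
    jpg_as_text = socket.recv()
    buffer = base64.b64decode(jpg_as_text)
    frame = cv2.imdecode(np.frombuffer(buffer, dtype=np.uint8),
                         cv2.IMREAD_COLOR)
    cv2.imshow('stream', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break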
示例#51
0
class Tracker:
    '''
        iFollow robot class
    '''
    def __init__(self, colors, frameSize):
        """
            Args: List of 2 color tuples: [(0,0,0),(255,255,255)]
        """
        self.colors = colors
        # capturing video through webcam
        self.camera = PiCamera()
        self.rawCapture = PiRGBArray(self.camera)
        self.status = "Running"
        self.minRadius = 20
        self.cameraWidth = frameSize

    def __str__(self):
        '''
        Object Representation
        '''
        return "Ok!"

    def prepareColors(self):
        colors = self.colors
        Min = colors[0]
        Max = colors[1]
        return (np.array(list(Min), np.uint8), np.array(list(Max), np.uint8))

    def detectObject(self, mask, frame):
        contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
        if len(contours) > 0:
            c = max(contours, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            # only proceed if the radius meets a minimum size
            if radius > self.minRadius:
                # draw the circle and centroid on the frame,
                # then update the list of tracked points
                cv2.circle(frame, (int(x), int(y)), int(radius),
                           (255, 255, 255), 2)
                cv2.circle(frame, center, 2, (0, 255, 0), -1)
                return x
        return None

    def createMask(self, hsv, frame):
        masks = []
        color = self.prepareColors()
        # keep only the pixels that fall inside the configured color range
        mask = cv2.inRange(hsv, color[0], color[1])
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        # Morphological transformation, dilation
        kernel = np.ones((5, 5), "uint8")
        mask = cv2.dilate(mask, kernel)
        #mask = cv2.bitwise_and(frame, frame, mask = mask)
        return mask

    def getCoordinate(self, blur, frame=np.array([False, False])):
        # keep looping
        # grab the current frame
        if not frame.all():
            stream = io.BytesIO()
            camera = self.camera
            camera.resolution = (400, 400)
            camera.start_preview()
            stream = PiRGBArray(camera)
            camera.capture(stream, format='bgr')
            # At this point the image is available as stream.array
            frame = stream.array
        #image = self.rawCapture.array
        # resize the frame, blur it, and convert it to the HSV color space
        frame = imutils.resize(frame, width=self.cameraWidth)
        frame = cv2.flip(frame, 1)
        blurred = cv2.GaussianBlur(frame, (11, 11), 0) if blur else frame
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        mask = self.createMask(hsv, frame)
        self.rawCapture.truncate(0)
        # Track each color:
        return self.detectObject(mask, frame)
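
A hedged usage sketch for the Tracker class above; the HSV bounds are hypothetical green-ish values and the frame size matches the class's resize target:

green_bounds = [(29, 86, 100), (60, 255, 255)]
tracker = Tracker(green_bounds, frameSize=400)

# with the default frame argument, getCoordinate() captures a frame internally
x = tracker.getCoordinate(blur=True)
if x is not None:
    print("object centre x:", x)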
示例#52
def talker():

    #rospy.init_node('green_talker_rpi', anonymous=True)
    #pub = rospy.Publisher('green_chatter_rpi', ballCor, queue_size=50)
    r = rospy.Rate(10)  # 10hz
    # Initialize the green ball color
    #greenLower = (29,86,100)
    #greenUpper = (60,255,255)

    global msg
    pts = deque(maxlen=64)

    # Grab the webcam
    #camera = cv2.VideoCapture(0)
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 62
    rawCapture = PiRGBArray(camera, size=(640, 480))
    time.sleep(0.1)

    # msg.heading = "RPI-1 talker started"
    # msg.color = "RED"
    # msg.x = 0
    # msg.y = 0
    # msg.radius = 0
    # msg.upperH = greenUpper[0]
    # msg.upperS = greenUpper[1]
    # msg.upperV = greenUpper[2]
    # msg.lowerH = greenLower[0]
    # msg.lowerS = greenLower[0]
    # msg.lowerV = greenLower[0]
    global detected
    detected = False

    # keep looping
    for myframe in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        # grab the current frame
        #(grabbed, frame) = camera.read()
        frame = myframe.array

        # resize the frame, blur it, and convert it to the HSV
        # color space
        frame = imutils.resize(frame, width=600)
        # blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # construct a mask for the color "green", then perform
        # a series of dilations and erosions to remove any small
        # blobs left in the mask
        mask = cv2.inRange(hsv, greenLower, greenUpper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)

        # find contours in the mask and initialize the current
        # (x, y) center of the ball
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        center = None

        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use
            # it to compute the minimum enclosing circle and
            # centroid
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            #rospy.loginfo(radius)

            # only proceed if the radius meets a minimum size
            if radius > 5:
                # draw the circle and centroid on the frame,
                # then update the list of tracked points
                cv2.circle(frame, (int(x), int(y)), int(radius),
                           (0, 255, 255), 2)
                cv2.circle(frame, center, 5, (0, 0, 255), -1)
                msg.x = int(x)
                msg.y = int(y)
                msg.radius = int(radius)
                #rospy.loginfo(detected)
                if detected == False:
                    rospy.loginfo(" " + msg.color + " ball detected ")
                    detected = True

                #detected = True

        else:
            #rospy.loginfo(detected)
            if detected == True:
                rospy.loginfo("Can't find the ball")
                rospy.loginfo("Sending request to neighbours to find " + msg.color + " ball.")
                #rospy.loginfo(msg)
                pub.publish(msg)
                detected = False

                #print "Publishing to Lap cam"
                #r.sleep()
                #return 0

        # update the points queue
        #pts.appendleft(center)

        # loop over the set of tracked points
        #for i in xrange(1, len(pts)):
            # if either of the tracked points are None, ignore them
            #if pts[i - 1] is None or pts[i] is None:
            #    continue

            # otherwise, compute the thickness of the line and
            # draw the connecting lines
            #thickness = int(np.sqrt(64 / float(i + 1)) * 2.5)
            #cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

        # show the frame to our screen
        #cv2.imshow("Frame", frame)
        rawCapture.truncate(0)
示例#53
    list_results = [[0] * 7 for x in xrange(num_pics + 1)]
    list_results[0] = [
        'Trial',
        't (take pic)',
        't (save pic)',
        't (SIFT)',
        'Found? (SIFT)',
        't (ArUco)',
        'Found? (ArUco)',
    ]
    for i in range(num_pics):
        print '%0.2f: Trial %03d' % ((time.time() - time_start), i)
        list_results[i + 1][0] = i

        time_meas = time.time()
        rawCapt.truncate(0)
        camera.capture(rawCapt, format='bgr', use_video_port=True)
        pic = rawCapt.array
        list_results[i + 1][1] = time.time() - time_meas

        time_meas = time.time()
        path = 'pic_%s_Trial%03d.jpg' % (filename, i)
        cv2.imwrite(path, pic)
        list_results[i + 1][2] = time.time() - time_meas

        list_results[i + 1][3], list_results[i + 1][4] = find_target(
            pic, 'sift')

        list_results[i + 1][5], list_results[i + 1][6] = find_target(
            pic, 'aruco')
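
The trial loop above fills list_results as a header row plus one row of timings per trial; persisting it is a one-liner with the csv module. A sketch, where the output filename is a hypothetical choice:

import csv

def save_results(list_results, path='timing_results.csv'):
    # header row followed by one row per trial, as built above
    with open(path, 'w') as f:
        csv.writer(f).writerows(list_results)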
示例#54
0
def cv_Demo2():
    rawCapture = PiRGBArray(camera)
    camera.iso = 100
    # Wait for the automatic gain control to settle
    time.sleep(2)
    global angle
    global marker_distance_cm
    # Now fix the values
    camera.shutter_speed = camera.exposure_speed
    camera.exposure_mode = 'off'
    g = camera.awb_gains
    camera.awb_mode = 'off'
    camera.awb_gains = g
    # allow the camera to warmup
    time.sleep(0.1)
    camera.framerate = 32
    aruco_dictionary = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()
    timer = 0
    xwidth = 54 * (math.pi / 180)
    ywidth = 41 * (math.pi / 180)
    marker_flag = 1
    # start continuous picture taking
    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        marker_found = False

        # convert to grayscale (cvtColor returns the converted image,
        # so the result must be assigned back)
        image = frame.array
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            image, aruco_dictionary, parameters=parameters)
        if type(ids) == numpy.ndarray:
            print("Marker found")
            marker_found = True
            output_array = numpy.ndarray(len(ids))
            index = 0
            for i in ids:
                for j in i:
                    output_array[index] = j
                    print("Marker Indexes:", end=" ")
                    print(j)
                    # Determine the angle of the marker from the center of the camera
                    xcenter = int(
                        (corners[0][0][0][0] + corners[0][0][2][0]) / 2)
                    ycenter = int(
                        (corners[0][0][0][1] + corners[0][0][2][1]) / 2)
                    yheight = abs(
                        int(corners[0][0][0][1] - corners[0][0][2][1]))
                    marker_distance_ft = 490 / yheight
                    print(yheight)
                    print("Marker Distance in ft: ", end="")
                    print(marker_distance_ft)
                    print("Rounding: ", end="")
                    print(round(marker_distance_ft))

                    print(marker_distance_cm)
                    #                    marker_distance_cm = [int(ele) for ele in str(marker_distance_cm) if ele.isdigit()]
                    #print("Marker Distance in m: ", end="")
                    #print(marker_distance_m)
                    imageMiddleX = image.shape[1] / 2
                    xdist = (xcenter - image.shape[1] / 2)
                    ydist = (ycenter - image.shape[0] / 2)

                    xangle = (xdist / image.shape[1]) * xwidth
                    yangle = (ydist / image.shape[0]) * ywidth

                    # Calculate the angle from the z-axis to the center point
                    # First calculate distance (in pixels to screen) on z-axis
                    a1 = xdist / math.tan(xangle)
                    a2 = ydist / math.tan(yangle)
                    a = (a1 + a2) / 2
                    distance = math.sqrt(pow(xdist, 2) + pow(ydist, 2))
                    #Calculate final angle for each Aruco
                    angle = math.atan2(distance, a) * (180 / math.pi)
                    print("Correction: ", end="")
                    marker_distance_ft = marker_distance_ft * 0.96 - (
                        -2.5244 * pow(angle * math.pi / 180, 2) + 0.027)
                    print(marker_distance_ft)
                    marker_distance_cm = marker_distance_ft * 30.48  # 1 ft = 30.48 cm
                    marker_distance_cm = int(marker_distance_cm)
                    if (xcenter > imageMiddleX):
                        angle *= -1
                        if (angle < -3.8 and angle > -4.3):
                            angle += 4
                    else:
                        angle += 4
                    if (angle > 0):
                        angle -= 2
                    display_angle = str(angle)
                    print("Angle from camera:", angle, "degrees")
                    lcd.message = ("Beacon Detected \nAngle: %s" %
                                   (display_angle))
                    marker_flag = 1
                    index += 1
                    print()
                    break
        rawCapture.truncate(0)
        if marker_found == False:
            print("No markers found")
            if marker_flag == 1:
                lcd.clear()
                marker_flag = 0
            lcd.message = ("Beacon Not\nDetected")
        else:
            break
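
The angular offset in cv_Demo2 comes from a linear pixel-to-angle mapping: the marker's offset from the frame centre, as a fraction of the frame, times the field of view along that axis (the 54- and 41-degree xwidth/ywidth constants). Factored into a helper:

import math

def pixel_to_angle(px, axis_len, fov_rad):
    # offset from the frame centre, as a fraction of the frame,
    # scaled by the field of view along that axis
    offset = px - axis_len / 2
    return (offset / axis_len) * fov_rad

# horizontal angle of column 480 in a 640-wide frame:
# pixel_to_angle(480, 640, 54 * math.pi / 180)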
示例#55
0
File: 1.py  Project: ZYM-PKU/Raspberry-3B
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2  # import the required libraries

camera = PiCamera()
camera.resolution = (640, 480)  # set the resolution
camera.framerate = 32  # set the framerate
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)  # wait for the camera module to initialize

for frame in camera.capture_continuous(rawCapture,
                                       format="bgr",
                                       use_video_port=True):
    image = frame.array
    cv2.imshow("Frame", image)  # 显示图像
    key = cv2.waitKey(1) & 0xFF  # 等待按键
    rawCapture.truncate(0)  # 准备下一副图像
    if key == ord("q"):
        break
示例#56
0
class Camera():  #Camera object - handles everything image related
    def __init__(self):
        print("initializing Pi Camera...")

        self.cap = PiCamera()  #create Raspberry Pi camera module
        self.cap.resolution = (640, 480)  #define capture resolution
        self.cap.rotation = 90  #compensate for camera orientation
        self.cap.framerate = 30  #define capture rate
        self.cap.led = False  #disable camera status indicator LED
        self.rawCapture = PiRGBArray(self.cap,
                                     size=(640,
                                           480))  #image numpy array object

        self.frame = None
        self.captureThread = Thread(
            target=self.__captureFrame
        )  #frame retrieval thread - reduce I/O latency
        self.captureThread.daemon = True
        self.captureThread.do_run = True
        self.captureThread.start()

        self.net = cv2.dnn.readNetFromCaffe(
            "camera/deploy.prototext.txt",
            "camera/res10_300x300_ssd_iter_140000.caffemodel")
        self.minimumConfidence = 0.5

        print("done")

    def __captureFrame(self):  #frame retrieval thread
        for capture in self.cap.capture_continuous(
                self.rawCapture, format='bgr',
                use_video_port=True):  #capture frames in an infinite iterator
            self.frame = capture.array
            self.rawCapture.truncate(0)

            if (self.captureThread.do_run == False):
                break

    def averageGraySpace(
            self, grayFrame
    ):  #calculate average light level from frame in gray space
        average = np.mean(grayFrame.flatten())
        return average

    def getFrame(self):  #retrieve frame from camera as an array
        return self.frame

    def convertGray(self, frame):  #convert RGB frame to gray space
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        return gray

    def getFaces(
            self,
            frame):  #find faces in frame array with the res10 SSD face detector
        self.net.setInput(
            cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                                  (300, 300), (104.0, 177.0, 123.0)))
        faces = self.net.forward()
        return faces

    def saveFrame(self, frame, name="image"):  #save frame array to disk
        cv2.imwrite(name, frame)

    def __del__(self):  #close safely
        print("stopping Pi Camera...")
        self.captureThread.do_run = False
        self.captureThread.join()
        self.cap.close()
        print("done")
示例#57
0
class VideoStream:
    """Camera object"""
    def __init__(self, resolution=(640, 480), framerate=30, PiOrUSB=2, src=0):

        # Create a variable to indicate if it's a USB camera or PiCamera.
        # PiOrUSB = 1 will use PiCamera. PiOrUSB = 2 will use USB camera.
        self.PiOrUSB = PiOrUSB

        if self.PiOrUSB == 1:  # PiCamera
            # Import packages from picamera library
            from picamera.array import PiRGBArray
            from picamera import PiCamera

            # Initialize the PiCamera and the camera image stream
            self.camera = PiCamera()
            self.camera.resolution = resolution
            self.camera.framerate = framerate
            self.rawCapture = PiRGBArray(self.camera, size=resolution)
            self.stream = self.camera.capture_continuous(self.rawCapture,
                                                         format="bgr",
                                                         use_video_port=True)

            # Initialize variable to store the camera frame
            self.frame = []

        if self.PiOrUSB == 2:  # USB camera
            # Initialize the USB camera and the camera image stream
            self.stream = cv2.VideoCapture(src)
            ret = self.stream.set(3, resolution[0])
            ret = self.stream.set(4, resolution[1])
            #ret = self.stream.set(5,framerate) #Doesn't seem to do anything so it's commented out

            # Read first frame from the stream
            (self.grabbed, self.frame) = self.stream.read()

        # Create a variable to control when the camera is stopped
        self.stopped = False

    def start(self):
        # Start the thread to read frames from the video stream
        Thread(target=self.update, args=()).start()
        return self

    def update(self):

        if self.PiOrUSB == 1:  # PiCamera

            # Keep looping indefinitely until the thread is stopped
            for f in self.stream:
                # Grab the frame from the stream and clear the stream
                # in preparation for the next frame
                self.frame = f.array
                self.rawCapture.truncate(0)

                if self.stopped:
                    # Close camera resources
                    self.stream.close()
                    self.rawCapture.close()
                    self.camera.close()

        if self.PiOrUSB == 2:  # USB camera

            # Keep looping indefinitely until the thread is stopped
            while True:
                # If the camera is stopped, stop the thread
                if self.stopped:
                    # Close camera resources
                    self.stream.release()
                    return

                # Otherwise, grab the next frame from the stream
                (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        # Return the most recent frame
        return self.frame

    def stop(self):
        # Indicate that the camera and thread should be stopped
        self.stopped = True
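
A usage sketch for the VideoStream class above: start() launches the update() thread, read() polls the latest frame, stop() shuts it down. The window name is an arbitrary choice:

import cv2

vs = VideoStream(resolution=(640, 480), framerate=30, PiOrUSB=2).start()
while True:
    frame = vs.read()
    if frame is None or len(frame) == 0:
        continue  # the first frame may not be ready yet
    cv2.imshow("preview", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
vs.stop()
cv2.destroyAllWindows()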
示例#58
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model",
                        help="File path of Tflite model.",
                        required=True)
    parser.add_argument("--label",
                        help="File path of label file.",
                        required=True)
    parser.add_argument("--top_k", help="keep top k candidates.", default=3)
    parser.add_argument("--threshold",
                        help="threshold to filter results.",
                        default=0.5,
                        type=float)
    parser.add_argument("--width",
                        help="Resolution width.",
                        default=640,
                        type=int)
    parser.add_argument("--height",
                        help="Resolution height.",
                        default=480,
                        type=int)
    args = parser.parse_args()

    # Initialize window.
    cv2.namedWindow(
        WINDOW_NAME,
        cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE | cv2.WINDOW_KEEPRATIO)
    cv2.moveWindow(WINDOW_NAME, 100, 200)

    # Initialize engine.
    engine = DetectionEngine(args.model)
    labels = read_label_file(args.label) if args.label else None

    # Generate random colors.
    last_key = sorted(labels.keys())[len(labels.keys()) - 1]
    colors = visual.random_colors(last_key)

    elapsed_list = []
    resolution_width = args.width
    resolution_height = args.height
    with picamera.PiCamera() as camera:

        camera.resolution = (resolution_width, resolution_height)
        camera.framerate = 30
        _, width, height, channels = engine.get_input_tensor_shape()
        rawCapture = PiRGBArray(camera)

        # allow the camera to warmup
        time.sleep(0.1)

        try:
            for frame in camera.capture_continuous(rawCapture,
                                                   format="rgb",
                                                   use_video_port=True):
                rawCapture.truncate(0)

                # input_buf = np.frombuffer(stream.getvalue(), dtype=np.uint8)
                image = frame.array
                im = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                input_buf = PIL.Image.fromarray(image)

                # Run inference.
                ans = engine.detect_with_image(
                    input_buf,
                    threshold=args.threshold,
                    keep_aspect_ratio=False,
                    relative_coord=False,
                    top_k=args.top_k,
                )
                elapsed_ms = engine.get_inference_time()

                # Display result.
                if ans:
                    for obj in ans:
                        label_name = "Unknown"
                        if labels:
                            label_name = labels[obj.label_id]
                        caption = "{0}({1:.2f})".format(label_name, obj.score)

                        # Draw a rectangle and caption.
                        box = obj.bounding_box.flatten().tolist()
                        visual.draw_rectangle(im, box, colors[obj.label_id])
                        visual.draw_caption(im, box, caption)

                # Calc fps.
                elapsed_list.append(elapsed_ms)
                avg_text = ""
                if len(elapsed_list) > 100:
                    elapsed_list.pop(0)
                    avg_elapsed_ms = np.mean(elapsed_list)
                    avg_text = " AGV: {0:.2f}ms".format(avg_elapsed_ms)

                # Display fps
                fps_text = "{0:.2f}ms".format(elapsed_ms)
                visual.draw_caption(im, (10, 30), fps_text + avg_text)

                # display
                cv2.imshow(WINDOW_NAME, im)
                if cv2.waitKey(10) & 0xFF == ord("q"):
                    break

        finally:
            camera.stop_preview()

    # When everything done, release the window
    cv2.destroyAllWindows()
示例#59
	def showVideo(self,click_allow_arg):
		lower_IR = np.array([0,0,50]) # lower bound of CV filter
		upper_IR = np.array([255,255,255]) # upper bound of CV filter
		rawCapture = PiRGBArray(self.cam_handle, size=self.cam_res) # capture to array
		m = PyMouse() # make mouse object
		prev_maxLoc = (0,0) # init previous maxLoc
		try:
			for frame in self.cam_handle.capture_continuous(rawCapture, format = 'bgr', use_video_port=True):
				image = frame.array # get the array values for that frame
				mask = cv2.inRange(image, lower_IR, upper_IR) # mask according to lower/upper filter values
				(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(mask) # find min/max values of image
				true_pnt = self.get_true_point(maxLoc)
				print true_pnt #DEBUG INFO PRINTOUT
				adjusted_maxx = int(((true_pnt[0]) * self.output_res[0])/(self.screen_res[0])) # take the distance from the edge and perform dimensional analysis
				adjusted_maxy = int(((true_pnt[1]) * self.output_res[1])/(self.screen_res[1])) # take the distance from the edge and perform dimensional analysis
				if click_allow_arg: # if user wants to have clicks when IR found
					if maxLoc != (0,0): # if not on the default location			
						m.press(adjusted_maxx,adjusted_maxy, 1) # press the "mouse" button at location found
					elif prev_maxLoc != (0,0) and maxLoc == (0,0): # if the current value is the default and the last value wasn't the default, release
						m.release(prev_maxLoc[0],prev_maxLoc[1], 1) # release the "mouse" button
				else: # only move the mouse, no clicking
					if maxLoc != (0,0): # if not on the default location
						m.move(adjusted_maxx,adjusted_maxy) # move the mouse
				prev_maxLoc = (adjusted_maxx,adjusted_maxy) # set previous to be current max loc
				cv2.circle(image, maxLoc, 21, (255,0,0), 2) # place circle on brightest spot
				cv2.imshow("TestWindow", cv2.resize(image,(160,120))) # show the image in a tiny window
				cv2.waitKey(1) & 0xFF # needed for preview
				rawCapture.seek(0) # return to the 0th byte
				rawCapture.truncate() # clear array for next frame
				print "Mouse at: ", m.position() #DEBUG INFO PRINTOUT
		except KeyboardInterrupt: # used to quit the function
			rawCapture.seek(0) # return to the 0th byte
			rawCapture.truncate() # clear array for next frame
示例#60
-1
def main():
	
	print("Starting Program")
	
	# Init camera
	camera = PiCamera()
	camera.resolution = (640,480)
	camera.framerate = 24
	rawCapture = PiRGBArray(camera, size=(640,480))
	
	# allow camera to warmup
	time.sleep(0.1)
	
	# capture frames from camera
	for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
		image = frame.array
		
		cv2.imshow("Frame", image)
		key = cv2.waitKey(1) & 0xFF
		rawCapture.truncate(0)
		
		if key == ord("q"):
				break
	
	print("Completed!")
	
	return 0