Example #1
def main():
   global preview

   parse_args(sys.argv[1:])
   cam = PiCamera()
   cam.rotation = -90
   cam.resolution = (width, height)
   cam.framerate = 60
   rawCapture = PiRGBArray(cam, size=(width, height))

   startTime = time.time()
   endTime = time.time() + capTime
   frames = 0

   for image in cam.capture_continuous(rawCapture, format="bgr", use_video_port=True):
      if image is None:
         print('capture failed')
         break
      frames += 1
      frame = image.array

      if preview: 
         cv2.imshow("frame", frame)
         key = cv2.waitKey(1) & 0xFF


      if time.time() > endTime:
         break
      rawCapture.truncate(0)

   print "Average Framerate for " + str(frames) + \
      " frames was: " + str(float(frames) / capTime) + "fps"
   cv2.destroyAllWindows()
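main() relies on module-level globals (width, height, capTime, preview) that parse_args is expected to populate. A minimal sketch of such a parser, assuming those names (the flags themselves are assumptions, not part of the original):

import argparse

def parse_args(args):
    # populate the globals that main() reads
    global width, height, capTime, preview
    p = argparse.ArgumentParser()
    p.add_argument("--width", type=int, default=640)
    p.add_argument("--height", type=int, default=480)
    p.add_argument("--time", type=float, default=10.0, help="capture time in seconds")
    p.add_argument("--preview", action="store_true")
    opts = p.parse_args(args)
    width, height = opts.width, opts.height
    capTime = opts.time
    preview = opts.preview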
Example #2
def runCam():
        # initialize the camera and grab a reference to the raw camera capture
        camera = PiCamera()
        camera.resolution = (640, 480)
        camera.vflip = True
        camera.framerate = 72
        rawCapture = PiRGBArray(camera, size=(640, 480))

        camera.start_preview()

        time.sleep(0.1)

        # capture frames from the camera
        for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
                # grab the raw NumPy array representing the image
                image = frame.array

                # show the frame
                cv2.imshow("Frame", image)
                key = cv2.waitKey(1) & 0xFF

                # clear the stream in preparation for the next frame
                rawCapture.truncate(0)

                # if the `q` key was pressed, break from the loop
                if key == ord("q"):
                        break
Example #3
class CameraThread (threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        # Camera setup
        self.camera = PiCamera()
        self.camera.resolution = (640, 480)
        self.camera.framerate = 32
        self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
        self.camera.vflip = True
        self.camera.hflip = True
        self.camera.video_stabilization = True
        
        # Camera warmup
        time.sleep(0.1)
        
        # Camera configuration
        self.camera.exposure_mode = 'off'
        self.camera.awb_mode = 'off'
        self.camera.awb_gains = 1.5
        self.camera.iso = 100
        self.camera.shutter_speed = 6660

        
    def run(self):
        global ALLSTOP
        global target
        global last_seen
        global last_time

        print "Camera thread start"
        

        for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
            # Grab frame
            big = frame.array
            image = cv2.resize(big, (res_var, int(big.shape[0]*res_var/big.shape[1])), interpolation=cv2.INTER_AREA)
            image = image[30:127, :]  # img[y:y+h, x:x+w]
            
            # Cleanup
            self.rawCapture.truncate(0)
            
            # Get HSV image
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            
            # Color Filter
            target.there, coords, target.where, image = ColorFilter2(image, hsv, [red_lower, blue_lower], [red_upper, blue_upper], (0,255,0), minArea)
            
            if target.there:
                if -center_range < target.where < center_range:
                    last_seen = "center"
                elif target.where <= -center_range:
                    last_seen = "right"
                else:
                    last_seen = "left"
                last_time = time.time()  # time.clock() was removed in Python 3.8
            
            #print "Target", target.there, last_time, last_seen

            if ALLSTOP:
                 break
Example #4
class PiVideoStream:
	def __init__(self, resolution=(320, 240), framerate=32, rotate=0):
		# initialize the camera and stream
		self.camera = PiCamera()
		self.camera.rotation = rotate
		self.camera.resolution = resolution
		self.camera.framerate = framerate
		self.rawCapture = PiRGBArray(self.camera, size=resolution)
		self.stream = self.camera.capture_continuous(self.rawCapture,
			format="bgr", use_video_port=True)

		# initialize the frame and the variable used to indicate
		# if the thread should be stopped
		self.frame = None
		self.stopped = False

	def start(self):
		# start the thread to read frames from the video stream
		t = Thread(target=self.update, args=())
		t.daemon = True
		t.start()
		return self

	def update(self):
		# keep looping infinitely until the thread is stopped
		for f in self.stream:
			# grab the frame from the stream and clear the stream in
			# preparation for the next frame
			self.frame = f.array
			self.rawCapture.truncate(0)

			# if the thread indicator variable is set, stop the thread
			# and release camera resources
			if self.stopped:
				self.stream.close()
				self.rawCapture.close()
				self.camera.close()
				return

	def read(self):
		# return the frame most recently read
		return self.frame

	def stop(self):
		# indicate that the thread should be stopped
		self.stopped = True
Example #5
class FrameGrabber(threading.Thread):
    def __init__(self, frameQueue, angleQueue):
        threading.Thread.__init__(self)
        self.frameQueue = frameQueue
        self.angleQueue = angleQueue
        self.camera = PiCamera()
        self.camera.hflip = CAMERA_HFLIP
        self.camera.vflip = CAMERA_VFLIP          
        self.camera.resolution = (CAMERA_WIDTH, CAMERA_HEIGHT)
        self.camera.framerate = CAMERA_FRAMERATE
        self.rawCapture = PiRGBArray(self.camera, size=(CAMERA_WIDTH, CAMERA_HEIGHT))
        self.startCaptureTime = time.time()
        self.sc = ServoController()

    def run(self):
        # allow the camera to warmup
        runtime = time.time()
        sleeptime = min(runtime - self.startCaptureTime,1)
        time.sleep(sleeptime)

        # capture frames from the camera
        while not shutdownEvent.is_set():
            angle = self.angleQueue.get(block=True)
            self.sc.setCameraAngle(angle)
            ts = time.time()
            self.camera.capture(self.rawCapture, format='bgr', use_video_port=True)
            self.frameQueue.put((ts,self.rawCapture.array,angle))
            self.rawCapture.truncate(0)
Example #6
class VideoThread(Thread):
	def __init__(self):
		Thread.__init__(self)
		self.camera = picamera.PiCamera()
		self.camera.vflip = True
		self.camera.framerate = 30
		self.camera.resolution = (640, 480)
		# note: use_video_port and quality are per-capture/recording arguments in
		# picamera, not PiCamera attributes; use_video_port is passed to
		# capture_continuous in run() below

		self.rawCapture = PiRGBArray(self.camera, size=(640, 480))

	def run(self):
		time.sleep(1) # warm up the camera

		for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
			image = frame.array

			cv2.imshow("Frame", image)
			key = cv2.waitKey(1) & 0xFF

			self.rawCapture.truncate(0)

			if key == ord("q"):
				break
Example #7
class ThePiCamera(Camera):
    """
    Class for Raspberry Pi camera frame retrieval.
    """
    def __init__(self):
        """
        Constructor for ThePiCamera. This initializes parameters for Raspberry
        Pi camera capture.
        """
        self.camera = PiCamera()
        self.camera.resolution = (640, 480)
        self.camera.framerate = 32
        self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
        time.sleep(0.1) # allow the camera to warm up

    def get_iterator(self):
        """
        Returns an iterator for obtaining a continuous stream of camera frames.
        """
        return self.camera.capture_continuous(self.rawCapture, format='bgr', use_video_port=True)

    def get_frame(self, raw_frame):
        """
        Retrieves the camera frame returned by the iterator, converted into a
        2D array.
        """
        array = raw_frame.array
        self.rawCapture.truncate(0)
        return array

    def destroy(self):
        """
        Cleans up memory used for Raspberry Pi camera capture.
        """
        self.camera.close()
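A minimal usage sketch for ThePiCamera (an assumption, not part of the original; it presumes cv2 is imported and that the Camera base class imposes no extra requirements):

cam = ThePiCamera()
try:
    for raw_frame in cam.get_iterator():
        frame = cam.get_frame(raw_frame)  # BGR array; also resets the stream
        cv2.imshow("frame", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
finally:
    cam.destroy()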
Example #8
class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        # self.video = cv2.VideoCapture(0)
        # If you decide to use video.mp4, you must have this file in the folder
        # as the main.py.
        # self.video = cv2.VideoCapture('video.mp4')
        # --------------- ARW -------------
        self.camera = PiCamera()
        self.camera.resolution = (640, 480)
        self.camera.framerate = 32
        self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
        # Get a generator object that serves up the frames
        self.frame_gen = self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True)
         
    def __del__(self):
#        self.video.release()
        pass

    def get_frame(self):
        #success, image = self.video.read()
        #   We are using Motion JPEG, but OpenCV defaults to capture raw images,
        #   so we must encode it into JPEG in order to correctly display the
        #   video stream.
        #ret, jpeg = cv2.imencode('.jpg', image)
        #return jpeg.tobytes()
        # --------------- ARW -------------
        frame = next(self.frame_gen)                    # get next frame (Python 3)
        image = frame.array
        ret, jpeg = cv2.imencode('.jpeg', image )       # jpeg to buffer
        self.rawCapture.truncate(0)                     # clear stream in prep for next frame
        return jpeg.tobytes()
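The Motion JPEG comments suggest this class feeds an HTTP MJPEG stream; a hedged sketch of a Flask route that could consume get_frame() (the route name and multipart framing are assumptions, not part of the original):

from flask import Flask, Response

app = Flask(__name__)

def mjpeg_stream(camera):
    # wrap each JPEG frame in multipart boundaries
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(mjpeg_stream(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')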
Example #9
class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        #self.video = cv2.VideoCapture(0)
        self.camera = PiCamera()
        #self.rawCapture = PiRGBArray(self.camera)
        self.camera.resolution = (640, 480)
        self.camera.framerate = 32
        self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
 
        
        # allow the camera to warmup
        time.sleep(0.1)

        # If you decide to use video.mp4, you must have this file in the folder
        # as the main.py.
        # self.video = cv2.VideoCapture('video.mp4')
    
    def __del__(self):
        #self.video.release()
        pass
    
    def get_frame(self):
        # grab an image from the camera
        self.camera.capture(self.rawCapture, format="bgr")
        image = self.rawCapture.array
        self.rawCapture.truncate(0) 
        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()
Example #10
def video_thread():
    # let this thread modify the global 'latest_video_frame', which is read by
    # the video-analysis and video-streaming code
    global latest_video_frame   
    
    # create an instance of the RPI camera class:
    camera = PiCamera() 
    
    # rotate the camera view 180 deg (I have the RPI camera mounted upside down):
    camera.hflip = True
    camera.vflip = True 
    
    # set resolution and frame rate:
    camera.resolution = (640, 480)
    camera.framerate = 30
    
    # create a generator which continuously captures video frames from the
    # camera; each next() call on it stores a new frame in 'generator_output'
    generator_output = PiRGBArray(camera, size=(640, 480))
    video_frame_generator = camera.capture_continuous(generator_output, format="bgr", use_video_port=True)
    
    # allow the camera to warm up:
    time.sleep(0.1)
    
    for item in video_frame_generator:
        # get the numpy array representing the latest captured video frame from the camera
        # and save it globally for everyone to access:
        latest_video_frame = generator_output.array 
        
        # clear the output container so it can store the next frame in the next loop cycle:
        # (please note that this is absolutely necessary!)
        generator_output.truncate(0)        
        
        # delay for 0.033 sec (for ~ 30 Hz loop frequency):
        time.sleep(0.033) 
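A sketch of a consumer reading latest_video_frame from another thread (an assumption, not in the original; process() is a placeholder, and the copy() prevents the producer from overwriting the array mid-use):

import threading

latest_video_frame = None

def analysis_thread():
    # poll the shared frame at roughly 10 Hz
    while True:
        frame = latest_video_frame
        if frame is not None:
            process(frame.copy())  # process() is a placeholder
        time.sleep(0.1)

threading.Thread(target=video_thread, daemon=True).start()
threading.Thread(target=analysis_thread, daemon=True).start()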
Example #11
def rpiCapture(opts):

    global _frame
    global _frameCounter

    # initialize the camera and grab a reference to the raw camera capture
    camera= PiCamera()
    camera.resolution = CAPTURE_RES 
    camera.framerate = 4 
    rawCapture = PiRGBArray(camera, size=CAPTURE_RES)
    
    # allow the camera to warmup
    time.sleep(0.1)
     
    # capture frames from the camera
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        # grab the raw NumPy array representing the image
        with _frameLock:
            # copy the frame handled by the capture structure so it won't be
            # modified by subsequent captures
            _frame = numpy.copy(frame.array)
            if _frameCounter == 0:
                initStream(frame, opts)
            _frameCounter += 1

        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)
        
        if _stopThreads :
            return
Example #12
class image_publisher:

  def __init__(self):

    self.camera = PiCamera()
    self.camera.resolution = (1024, 768) #(1920, 1080)
    self.camera.iso = 100           # was 'ISO' (deprecated alias)
    self.camera.saturation = 100    # was 'sa' (raspistill-style abbreviation)
    self.camera.awb_mode = "flash"  # was 'awb'
    self.camera.contrast = 100      # was 'co'
    self.rawCapture = PiRGBArray(self.camera)
    
    time.sleep(0.1)

    self.image_pub = rospy.Publisher("image_topic", Image, queue_size=10)
    self.small_image_pub = rospy.Publisher("small_image_topic", Image, queue_size=10)
    self.bridge = CvBridge()

  def publishImage(self):
    for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
        cv_image = frame.array
        # cv_image = (cv_image * 0.5).astype(numpy.uint8)
        self.rawCapture.truncate(0)
        try:
          self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
          small = cv2.resize(cv_image, (0,0), fx=0.5, fy=0.5)
          self.small_image_pub.publish(self.bridge.cv2_to_imgmsg(small, "bgr8"))
        except CvBridgeError as e:
          print("problem")
          print(e)
Example #13
def handle_get_bubblescope_properties(req):
    with PiCamera() as camera:
        # Camera settings (raspistill-style abbreviations mapped to the
        # picamera property names)
        camera.resolution = (img_width, img_height)
        camera.iso = 100           # was 'ISO'
        camera.saturation = 100    # was 'sa'
        camera.awb_mode = "flash"  # was 'awb'
        camera.contrast = 100      # was 'co'

        raw_capture = PiRGBArray(camera)

        time.sleep(0.1)

        # Before doing anything, capture the first frame and find the inner circle
        camera.capture(raw_capture, format="bgr")
        image = raw_capture.array
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        circles = cv2.HoughCircles(image_gray, cv2.HOUGH_GRADIENT, 1, 200, param1=50, param2=40, minRadius=10, maxRadius=180)  # cv2.cv.CV_HOUGH_GRADIENT in OpenCV 2
        raw_capture.truncate(0)

        if circles is not None:
            x, y, r = circles[0][0]

            res = GetBubblescopePropertiesResponse()
            res.center = (x,y)
            res.inner_radius = r*1.2
            res.outer_radius = r*2.25
            return res
        else:
            print "no circle found\n"
            return GetBubblescopePropertiesResponse()
Example #14
class CamThread(Thread):
	""" Thread to continuously read incoming frames from the raspberry pi camera
		and send those frames to all the clients connected to the server.
	"""
	def __init__(self):
		print('init CamThread')
		self.cam = PiCamera()
		self.cam.resolution = (480,320)
		self.cam.framerate = 25
		self.rawCap = PiRGBArray(self.cam)
		# Wait for the webcam to open
		sleep(0.2)

		self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

		Thread.__init__(self)
		self.daemon = True
		self.running = True

	def run(self):
		print('CamThread running')
		for frame in self.cam.capture_continuous(self.rawCap, format='bgr', use_video_port=True):
			if not self.running:
				break

			img = self.rawCap.array
			rows, cols, channels = img.shape

			if rows > 0 and cols > 0:
				# 180 degrees rotation
				img = self.rotate(img, 180)

				# Serialize and send
				img_ser = self.encode(img, '.jpg')
				for c in connected_clients:
					self.send(img_ser, (c, CLIENT_PORT))
			# Clear the camera buffer before grabbing the next frame
			self.rawCap.truncate(0)
		self.sock.close()
		self.cam.close()

	def encode(self, img, ext='.jpg'):
		return cv2.imencode(ext, img)[1].tobytes()

	def rotate(self, frame, angle):
		rows, cols, channels = frame.shape
		RotMat = cv2.getRotationMatrix2D((cols/2, rows/2), angle, 1)
		return cv2.warpAffine(frame, RotMat, (cols, rows))

	def send(self, data, addr):
		# Send the data split into packets of 1024 bytes
		while len(data) > 1024:
			self.sock.sendto(data[:1024], addr)
			data = data[1024:]
		if len(data) > 0:
			self.sock.sendto(data, addr)

	def stop(self):
		self.running = False
Example #15
def make_photo():
    timestamp = int(time())
    camera.resolution = (1920, 1080)
    camera.capture('photo_' + str(timestamp) + '.png')
    camera.resolution = (640, 480)
Example #16
class CameraThread (threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        ## Camera
        self.camera = PiCamera()
        self.camera.resolution = (640, 480)
        self.camera.framerate = 32
        self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
        
        # Camera warmup
        time.sleep(0.1)

        # Init red & blue threads
        redThread = RedFilterThread(red_lower, red_upper, "red")
        redThread.daemon = True
        redThread.start()
        blueThread = BlueFilterThread(blue_lower, blue_upper, "blue")
        blueThread.daemon = True
        blueThread.start()
        
    def run(self):
        global ALLSTOP
        global targetPos
        global hasTarget
        global image
        global hsv

        print "Camera thread is running"

        for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
            # Grab frame
            image = cv2.flip(frame.array, 0)
            image = cv2.resize(image, (100, int(image.shape[0]*100/image.shape[1])), interpolation=cv2.INTER_AREA)

            # Get HSV image
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            
            # Cleanup
            self.rawCapture.truncate(0)

            # Compare
            targetPosList = list(set(targetPos_red).intersection(targetPos_blue))
            if len(targetPosList) > 0:
                targetPos = targetPosList[0]
                hasTarget = True
                print "Match at", targetPos
            else:
                targetPos = 0
                hasTarget = False
                print "No match"
                print "Red", targetPos_red
                print "Blue", targetPos_blue

            if ALLSTOP:
                 break

            time.sleep(1)
Example #17
class OpenCVTEST:
    def __init__(self):
        # initialize the camera and grab a reference to the raw camera capture
        factor = 1.5
        self.camera = PiCamera()
        self.camera.resolution = (640, 480)
        self.camera.framerate = 32
        self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
        # allow the camera to warmup
        time.sleep(0.1)
        cv2.namedWindow("Video")

        self.pictureBuffer = deque()

        self.currentPicture = None
        self.firstPicture = None

    def showPicture(self, picture):
        cv2.imshow("Video", picture)

    def addPicture(self, picture):
        self.pictureBuffer.append(picture)

    def getCurrentPicture(self):
        return self.currentPicture

    def getPicture(self):
        return self.pictureBuffer.popleft()

    def forePlay(self):
        number = 0
        for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
            # grab the raw NumPy array representing the image, then initialize the timestamp
            # and occupied/unoccupied text
            self.currentPicture = frame.array
            self.addPicture(self.currentPicture)
            self.rawCapture.truncate(0)

            number += 1
            if number >= 100:
                break
        self.play()

    def play(self):
        for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
            # grab the raw NumPy array representing the image, then initialize the timestamp
            # and occupied/unoccupied text
            self.currentPicture = frame.array
            self.addPicture(self.currentPicture)
            self.showPicture(self.getPicture())
            self.rawCapture.truncate(0)

            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
        print("Bye bye!")
Example #18
def make_video():
    timestamp = int(time())
    camera.resolution = (1920, 1080)
    camera.start_recording('video_' + str(timestamp) + '.h264')
    sleep(10)
    camera.stop_recording()
    camera.resolution = (640, 480)
Example #19
	def connect(self):
		sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

		server_address = (conf.ip, conf.videoport)

		sock.connect(server_address)

		# initialize the camera and grab a reference to the raw camera capture
		camera = PiCamera()
		camera.resolution = (640, 480)
		camera.framerate = 32
		rawCapture = PiRGBArray(camera, size=(640, 480))

		# allow the camera to warmup
		time.sleep(0.1)

		frm = 0

		# capture frames from the camera
		for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
			# grab the raw NumPy array representing the image, then initialize the timestamp
			# and occupied/unoccupied text
			image = frame.array

			gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

			gray = cv2.flip(gray,0)
			gray = cv2.flip(gray,1)

			frm = frm + 1
			if frm >= 256:
				frm = 0

			data = np.zeros((640), dtype=np.uint8)
			data[0] = data[1] = data[2] = data[3] = data[5] = 32
			sent = sock.sendto(data, server_address)

			# for i in range(1,480):
			#    data = gray[i,:]
			#    sent = sock.sendto(data, server_address)

			# note: one datagram of 640*480 bytes exceeds the usual UDP payload
			# limit (~64 KB); the commented row-by-row loop above avoids this
			data = gray.reshape(640*480, 1)
			sent = sock.sendto(data, server_address)


			#cv2.imshow("My Image", gray)

			if cv2.waitKey(1) & 0xFF == ord('q'):
			  break

			# clear the stream in preparation for the next frame
			rawCapture.truncate(0)

			if not self.keeprunning:
				break
Example #20
class PiVideoStream:
    def __init__(self, resolution=(640, 480), framerate=48):
        # initialize the camera and stream
        self.camera = PiCamera()
        #self.camera.video_stabilization = True
        self.camera.resolution = resolution
        self.camera.framerate = framerate
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(self.rawCapture,
            format="bgr", use_video_port=True)

        # initialize the frame and the variable used to indicate
        # if the thread should be stopped
        self.frame = None
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        for f in self.stream:
            # grab the frame from the stream and clear the stream in
            # preparation for the next frame
            self.frame = f.array
            self.rawCapture.truncate(0)

            # if the thread indicator variable is set, stop the thread
            # and release camera resources
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        # return the frame most recently read
        return self.frame

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
        
    # Settings for consistent image capture.
    # Must be called some time after the camera has started, once
    # auto-exposure and white balance have settled.
    def consistent(self):
        self.camera.shutter_speed = self.camera.exposure_speed
        self.camera.exposure_mode = 'off'
        g = self.camera.awb_gains
        self.camera.awb_mode = 'off'
        self.camera.awb_gains = g
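A usage sketch for the consistent() helper above (assumed, not part of the original): start the stream, give auto-exposure and AWB a couple of seconds to settle, then lock the settings so every subsequent frame is exposed identically.

vs = PiVideoStream(resolution=(640, 480), framerate=48).start()
time.sleep(2)     # let auto-exposure and white balance settle
vs.consistent()   # freeze shutter speed and AWB gains

while True:
    frame = vs.read()
    if frame is None:
        continue
    # ... process frame ...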
Example #21
    def run(self):
        try:
            with picamera.PiCamera() as camera:
                camera.resolution = (self.CAMERA_WIDTH, self.CAMERA_HEIGHT)
                camera.framerate = 32
                rawCapture = PiRGBArray(camera, size=(self.CAMERA_WIDTH, self.CAMERA_HEIGHT))

                time.sleep(0.1)
                # Now fix the values
                camera.shutter_speed = camera.exposure_speed
                camera.exposure_mode = 'off'
                g = camera.awb_gains #TODO: make this fixed?
                camera.awb_mode = 'off'
                camera.awb_gains = g

                for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
                    last_time = time.time()

                    image = frame.array
                    rawCapture.truncate(0)
                    if self.hsv:
                        hsvImage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
                        hsvImage = cv2.resize(hsvImage, (len(image[0]) // self.scale_down, len(image) // self.scale_down))
                        green_color_mask, blue_color_mask = self.run_hsv(hsvImage, self.callback)
                        cv2.imshow("hsv_image", hsvImage)
                    else:
                        self.run_bgr(image, self.callback)
                        # cv2.imshow("rgb_image", image)


                    # green_color_mask = cv2.inRange(image, self.green_lower_dark_bgr, self.green_upper_dark_bgr)
                    # cv2.imshow("green_color_mask", green_color_mask)
                    #
                    # cv2.waitKey(1)

                    # stop thread
                    if self._stopped():
                        self.__simLogger.debug("Stopping camera thread")
                        break

                    # sleep if processing of camera image was faster than minimum frames per second
                    sleep_time = float(MIN_FPS - (time.time() - last_time))
                    print "sleep time: ", sleep_time
                    if sleep_time > 0:
                        time.sleep(sleep_time)
            cv2.destroyAllWindows()
        except Exception as e:
            self.error_callback()
            print "Error: ", str(e), str(sys.exc_info()[0]) + ' - ' + traceback.format_exc()
            # self.__simLogger.critical("Camera exception: " + str(e) + str(
            # sys.exc_info()[0]) + ' - ' + traceback.format_exc())
        except (KeyboardInterrupt, SystemExit):
            self.error_callback()
            raise
Example #22
class PiVideoStream:
    def __init__(self, resolution=(320, 240), format = "bgr", framerate=30):
        # initialize the camera and stream
        self.camera = PiCamera()
        time.sleep(.2) #let camera init
        self.camera.resolution = resolution
        self.camera.framerate = framerate
        self.camera.sharpness = -100
        self.camera.contrast = 50
        self.camera.saturation = 100
        self.camera.brightness = 40
        self.camera.awb_mode = 'off'
        self.camera.awb_gains = (1.29, 1.44)
        self.camera.shutter_speed = 800
        self.camera.exposure_mode = 'night'
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(self.rawCapture,
            format=format, use_video_port=True)
        time.sleep(.2) #let camera init

        # initialize the frame and the variable used to indicate
        # if the thread should be stopped
        self.frame = None
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        for f in self.stream:
            # grab the frame from the stream and clear the stream in
            # preparation for the next frame
            self.frame = f.array
            self.rawCapture.truncate(0)

            # if the thread indicator variable is set, stop the thread
            # and release camera resources
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        # return the frame most recently read
        return self.frame

    def stop(self):
        # flag thread to stop
        self.stopped = True
Example #23
def navigateImg():
    moveTime = 0.5
    sleepTime = 2
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=(640, 480))
    e1 = 20.00000
    e2 = 20.00000
    time.sleep(0.1)
    dst = (320, 240)
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        try:
            # grab the raw NumPy array representing the image, then initialize the timestamp
            # and occupied/unoccupied text
            image = frame.array
            w = 0
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            lower_blue = np.array([100,50,50])
            upper_blue = np.array([120,255,255])
            mask = cv2.inRange(hsv, lower_blue, upper_blue)
            loc = getCenter(mask)
            print(dst[1]-loc[1])
            print(dst[0]-loc[0])
            if(abs(dst[1]-loc[1]) > e1):
                if(dst[1] > loc[1]):
                    print('b')
                    backwards(20+int((dst[1]-loc[1])/3))
                else:
                    print('f')
                    forwards(20-int((dst[1]-loc[1])/3))
            else:
                print('fb good')
                w = w + 1
            if(abs(dst[0]-loc[0]) > e2):
                if(dst[0] > loc[0]):
                    print('l')
                    left(20+int((dst[0]-loc[0])/3))
                else:
                    print('r')
                    right(20-int((dst[0]-loc[0])/3))
            else:
                print('lf good')
                w = w + 1
            if w == 2:
                print('drop')
            time.sleep(moveTime)
            zero()
            time.sleep(sleepTime)
            rawCapture.truncate(0)
        except OSError:
            print('io')
Example #24
class VideoStream(object):
    def __init__(self):
        self.camera = PiCamera()
        self.camera.resolution = (1920, 1080)
        self.preview_window = (0, 0, 640, 480)
        self.camera.framerate = 15
        self.stream = None
        self.rgb_array = PiRGBArray(self.camera, size=self.camera.resolution)

        # initialize the frame and the variable used to indicate
        # if the thread should be stopped

        self.frame = None
        self.preview_stopped = False
        self.capture_stopped = False

    def capture_hi_res(self):
        self.stream = self.camera.capture_continuous(self.rgb_array, use_video_port=True, splitter_port=2, format='bgr')
        Thread(target=self.capture_raw, args=()).start()

    def capture_raw(self):

        for f in self.stream:
            self.frame = f.array
            self.rgb_array.truncate(0)

            if self.capture_stopped:
                self.stream.close()
                self.rgb_array.close()
                self.camera.close()
                return


    def stop_capture(self):
        self.capture_stopped = True

    def preview_stream(self):

        # start the thread to read frames from the video stream
        Thread(target=self.preview, args=()).start()

    def preview(self):
        self.camera.start_preview(fullscreen=False, window=self.preview_window)

        # poll until asked to stop, then tear the preview down (the original
        # checked the flag only once, so stop_preview() could never run)
        while not self.preview_stopped:
            time.sleep(0.5)
        self.camera.stop_preview()

    def read(self):
        return self.frame

    def stop_preview(self):
        self.preview_stopped = True
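Because the preview uses the camera's preview port and the capture runs on splitter port 2, both can run concurrently; a usage sketch for the class above (assumed, not in the original):

import time

vs = VideoStream()
vs.preview_stream()   # on-screen preview window
vs.capture_hi_res()   # background 1080p BGR capture on splitter port 2
time.sleep(10)
vs.stop_preview()     # preview() polls the flag and stops the preview
vs.stop_capture()     # capture_raw() then closes the stream and the camera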
Example #25
def camera(fov_v, fov_h, threshold, landing_area_x, landing_area_y):
    #make the bitstream for the radio message
    radio_stream = bitstream.BitStream()

    # init camera and get a reference to the raw capture
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=(640, 480))

    # give the camera time to warm up (only needed once, not before every picture)
    time.sleep(0.1)

    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        #get the data from the drone
        altitude, drone_coords, heading = read_drone_data()

        #get an image from the camera
        edges = frame.array #TODO FROM CAMERA
        rawCapture.truncate(0)

        #do edge detection on the image
        minVal, maxVal = find_canny_thresholds(altitude)  #TODO something about these minVal, maxVal based on altitude
        edges = cv2.Canny(edges, minVal, maxVal)
        rows, cols = edges.shape

        #create the decision grid
        decision_grid, grid_cols, grid_rows, grid_pix_x, grid_pix_y = create_decision_grid(altitude, fov_h, rows, cols, landing_area_x, landing_area_y)

        #create the radio message to be sent
        radio_stream.write(altitude, np.uint8) #1 byte total
        radio_stream.write(heading, np.uint16) #3 bytes total
        radio_stream.write((grid_rows, grid_cols), np.uint16) #7 bytes total
        radio_stream.write(drone_coords, float) #23 bytes total

        for row in range(grid_rows):
            for col in range(grid_cols):
                decision_grid[row][col] = len(np.where(edges[row*grid_pix_y:row*grid_pix_y+grid_pix_y, col*grid_pix_x:col*grid_pix_x+grid_pix_x] > 0)[0])
                if decision_grid[row][col] >= threshold:
                    radio_stream.write(1, bool)
                else:
                    radio_stream.write(0, bool)

        #radio can only handle a byte as the smallest unit of information, so we have to pad the signal
        pad = 8 - ((grid_rows * grid_cols) % 8)
        if pad == 8:
            pad = 0
        for i in range(pad):
            radio_stream.write(0, bool)
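        # worked example (not in the original): a 5x6 grid has 30 cells, and
        # 30 % 8 = 6, so pad = 2 bits lands the stream on a byte boundary;
        # a 24-cell grid would give pad = 8, which the check above resets to 0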

        message = radio_stream.read(str, 23 + (grid_rows*grid_cols + pad)/8)
Example #26
class Camera(threading.Thread):

	def __init__(self):
		
		
		self.image = []
		self.angle = 0
		
		# initialize the camera and grab a reference to the raw camera capture
		self.camera = PiCamera()
		self.camera.resolution = (640, 480)
		self.camera.framerate = 32
		self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
		time.sleep(0.1)

		#Set up thread
		threading.Thread.__init__(self)
		self.threadID = 100
		self.name = "camera"
		#Thread exits when sent to True
		self.exitFlag = False
	 
	def run(self):
		# capture frames from the camera
		for frame in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
			# grab the raw NumPy array representing the image, then initialize the timestamp
			# and occupied/unoccupied text
			self.image = frame.array
		 
			# show the frame
			# cv2.imshow("Frame", image)
			gb_lane_cfg = LaneCfg
			proc = {'houghlinesP':detect_lane_over_houghlinesP}
			self.angle = proc[gb_lane_cfg['set']['proc']](gb_lane_cfg['set']['proc'], self.image, gb_lane_cfg)


			#utilities.log("angle " + str(self.angle))
		 
			# clear the stream in preparation for the next frame
			self.rawCapture.truncate(0)
		 
			# exit the loop when stop() has set the flag
			if self.exitFlag:
				break
		
		self.camera.close()

	def stop(self):
		print("stoping camera")
		self.exitFlag = True
Example #27
def camera_loop():
    use_pi = False
    try:
        from picamera import PiCamera
        from picamera.array import PiRGBArray
        use_pi = True
    except (ImportError, OSError):
        print('Couldn\'t load picamera, using OpenCV video capture')
        use_pi = False

    if use_pi:
        print('Opening camera')
        camera = PiCamera()
        rawCapture = PiRGBArray(camera)

        camera.resolution = (640, 480)
        time.sleep(1)

        camera.start_preview()
        camera.awb_mode = 'off'
        camera.exposure_mode = 'off'

        # use initial values from config file
        camera.awb_gains = (config['camera']['awb_red_gain'], config['camera']['awb_red_gain'])
        camera.shutter_speed = config['camera']['shutter_speed']
        camera.iso = config['camera']['iso']

        print('Opened camera')

        # capture frames from the camera
        for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
            # grab the raw NumPy array representing the image
            image = frame.array

            handle_image(image)

            # clear the stream in preparation for the next frame
            rawCapture.truncate(0)

            # set all the settings if they changed in the config
            camera.awb_gains = (config['camera']['awb_red_gain'], config['camera']['awb_red_gain'])
            camera.shutter_speed = config['camera']['shutter_speed']
            camera.iso = config['camera']['iso']
    else:
        print('Opening camera')
        capture = cv2.VideoCapture(0)
        while True:
            success, img = capture.read()
            if success:
                handle_image(img)
Example #28
class PiVideoStream(object):
    def __init__(self, resolution=(320, 240), frame_rate=32):
        # Init camera and Stream
        self.camera = PiCamera()
        self.camera.resolution = resolution
        self.camera.framerate = frame_rate
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True)

        self.frame = None
        self.stopped = False

    def start(self):
        """
        Starts the update thread that reads the camera data
        :return:
        """
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        """
        Repeatedly stores the most recently captured frame
        :return:
        """
        for f in self.stream:
            # Store the current frame
            self.frame = f.array
            # Clear the array ready for the next frame
            self.rawCapture.truncate(0)

            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        """
        Gets the most recently read frame
        :return: Most recently captured RGB array from camera
        """
        return self.frame

    def stop(self):
        """
        Exits out of the update function/thread
        :return:
        """
        self.stopped = True
Example #29
class VideoStream:
    def __init__(self):
        ''' Constructor '''

        self.camera = PiCamera() # Create PiCamera object
        self._rawCapture = PiRGBArray(self.camera, size=CAMERA_RES)

        # Setup camera settings
        self.camera.resolution = CAMERA_RES # Set camera resolution
        # self.camera.mode = CAMERA_MODE # Set the aspect ratio of the camera
        self.camera.framerate = CAMERA_FPS # Set camera fps
        # self.camera.shutter_speed = CAMERA_SHUTTER_SPEED # Set camera shutter speed
        # self.camera.brightness = CAMERA_BRIGHTNESS # Set camera brightness
        # self.camera.awb_mode = CAMERA_AWB_MODE # Set camera auto white balance

        # Create a thread for getting images from the camera
        self.thread = Thread(target=self.update, args=())
        self.frame = None
        self.running = False

        self.t0 = time.time()
        self.tt = self.t0
        self.i = 0
        self.it = 0
        self.fps_list = []

        time.sleep(2) # Delay

    def start(self):
        ''' Begin reading the camera frames '''
        
        self.running = True # Sets main boolean to True
        
        self.thread.start()# Starts thread

        PRINT('Started video stream.', SUCCESS)
        
        return self

    def update(self):
        ''' The main thread loop for getting the current camera images '''

        # Main loop
        while self.running:
            self.i += 1
            self.it += 1
            
            # Get a picture from the camera and save it to a variable which can be accessed outside of the class
            self.camera.capture(self._rawCapture, format="bgr", use_video_port=True)
            self.frame = self._rawCapture.array

            # Clears stream for next frame
            self._rawCapture.truncate(0)

            # Updates t
            self.t = time.time()
            
            if self.t - self.t0 >= 1:
                self.fps_list += [self.i]
                
                # print(self.it / (self.t - self.t0), self.i, sum(self.fps_list) / len(self.fps_list))

                self.t0 = self.t
                self.i = 0

            time.sleep(0.02) # Delay
            
    def read(self):
        ''' Get the last frame the camera has read '''
        
        return self.frame

    def stop(self):
        ''' Stop capturing frames and stop running the thread '''
        
        self.running = False # Sets main boolean to False
        
        self.thread.join() # Ends thread
        self.camera.close() # Closes camera connection

        PRINT('Closed video stream.', SUCCESS)
Example #30
# ---------------------------------------------------------
# TODO: feel free to change the size of the image you want
# and remember to change the image size in mbot_img_t.lcm
# ---------------------------------------------------------
width = 192
height = 144
# ---------------------------------------------------------

lc = lcm.LCM("udpm://239.255.76.67:7667?ttl=1")
pygame.init()
pygame.display.set_caption("MBot Tracking")
screen = pygame.display.set_mode([width, height])
camera = PiCamera()
camera.resolution = (width, height)
camera.framerate = 24
rawCapture = PiRGBArray(camera, size=(width, height))
time.sleep(0.5)

for frame in camera.capture_continuous(rawCapture,
                                       format="bgr",
                                       use_video_port=True):
    image = frame.array
    if (flip_h == 1 & flip_v == 0):
        image = cv2.flip(image, 1)
    elif (flip_h == 0 & flip_v == 1):
        image = cv2.flip(image, 0)
    elif (flip_h == 1 & flip_v == 1):
        image = cv2.flip(image, -1)

    timestamp = int(time.time() * 1000)
    img_msg = mbot_image_t()
Example #31
from Object_detection import Objdet, timer
import cv2

#import RPi.GPIO as io
#io.setmode(io.BOARD)

test = Objdet(r"..\..\object_detection")

test.loadGraph("ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb",
               r"..\..\object_detection\data\mscoco_label_map.pbtxt", 90)

from picamera.array import PiRGBArray
from picamera import PiCamera

camera = PiCamera()
resolution = (640, 480)  # assumed value; 'resolution' was undefined in the original
camera.resolution = resolution
rawCapture = PiRGBArray(camera, size=resolution)

#yaw = io.PWM(12)
#yaw.start(50)

for i in range(250):
    camera.capture(rawCapture, "rgb")
    test.run_Detection(rawCapture.array)  # .array is a property, not a method
    rawCapture.truncate(0)                # reset the buffer before the next capture
    for j in range(len(test.boxes[0])):
        if test.classes[0][j] == 1:
            mid = (test.boxes[0][j][3] - test.boxes[0][j][1]) / 2
            print(mid)

#yaw.stop()
Example #32
from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2
import time
import io
import numpy
import mysql.connector as mariadb

camera = PiCamera()
stream = io.BytesIO()
camera.resolution = (320, 240)
camera.framerate = 30
Raw_Capture = PiRGBArray(camera, size=(320, 240))


def insertorUpdate(Id, Name):
    mariadb_connection = mariadb.connect(user='******',
                                         password='******',
                                         database='Face_Info')
    cursor = mariadb_connection.cursor()
    cmd = "SELECT * FROM Face_Information WHERE ID=" + str(Id)
    cursor.execute(cmd)
    isRecordExist = 0
    for row in cursor:
        isRecordExist = 1
    if (isRecordExist == 1):
        cmd = "UPDATE Face_Information SET Name=" + str(
            Name) + " WHERE ID=" + str(Id)
    else:
        cmd = "INSERT INTO Face_Information(ID,Name) VALUES(" + str(
            Id) + "," + str(Name) + ")"
Example #33
info_formatter = logging.Formatter('%(name)s - %(asctime)-15s - %(levelname)s: %(message)s')
# Set a custom formatter for data log
data_formatter = logging.Formatter('%(name)s , %(asctime)-15s , %(message)s')
# Logger objects creation
info_logger    = logzero.setup_logger(name='info_logger', logfile=dir_path+'/data01.csv', formatter=info_formatter)
data_logger    = logzero.setup_logger(name='data_logger', logfile=dir_path+'/data02.csv', formatter=data_formatter)

# Set up the camera
cam = PiCamera()
# Set the resolution
cam.resolution = CAM_RESOLUTION
# Set the framerate
cam.framerate = CAM_FRAMERATE

# Set rawCapture
rawCapture = PiRGBArray(cam, size = CAM_RESOLUTION)
#--------------------------------

# FUNCTIONS
#--------------------------------
def get_latlon():
    '''
    This function returns a tuple with the position of the ISS.
    In particular it returns:
        - DMS position string with the coordinates in DMS format
          (with meridians and parallels reference -> N/S W/E)
        - latitude decimal degrees rounded by 8 decimal digits
        - longitude decimal degrees rounded by 8 decimal digits
    '''
    # Get the lat/lon values from ephem
    iss.compute()
Example #34
activity = "\r\n"
x=0
x1=0 +x
y=0
y1=0 + y
frameNo = 0
ballCounter = 0
videoReadyFrameNo = 0
video_preseconds = 3

with picamera.PiCamera() as camera:
    camera.resolution = setResolution()
    camera.video_stabilization = True
    camera.annotate_background = True
    camera.rotation = 180
    rawCapture = PiRGBArray(camera, size=camera.resolution)
    # setup a circular buffer
    # stream = picamera.PiCameraCircularIO(camera, seconds = video_preseconds)
    stream = picamera.PiCameraCircularIO(camera, size = 4000000)
    # video recording into circular buffer from splitter port 1
    camera.start_recording(stream, format='h264', splitter_port=1)
    #camera.start_recording('test.h264', splitter_port=1)
    # wait 2 seconds for stable video data
    camera.wait_recording(2, splitter_port=1)
    # motion_detected = False
    print(camera.resolution)

    for frame in camera.capture_continuous(rawCapture,format="bgr",  use_video_port=True):
        # grab the raw NumPy array representing the image, then clear the
        # stream in preparation for the next frame
        image = rawCapture.array
        rawCapture.truncate(0)
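        # Hedged continuation (not in the original): once motion is detected,
        # the circular buffer can be dumped with PiCameraCircularIO.copy_to(),
        # preserving the footage leading up to the event; motion_detected()
        # is a placeholder predicate.
        if motion_detected(image):
            camera.wait_recording(video_preseconds, splitter_port=1)  # record a bit past the event
            stream.copy_to('motion.h264')  # write the buffered H.264 data to disk
            stream.clear()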
Example #35
#cap = cv2.VideoCapture(0)

window = "Feed"
cv2.namedWindow(window, cv2.WINDOW_AUTOSIZE)

bMin = 0
bMax = 255
gMin = 0
gMax = 255
rMin = 210
rMax = 255
minBGR = np.array([bMin, gMin, rMin])
maxBGR = np.array([bMax, gMax, rMax])

feed = PiRGBArray(camera, camera.resolution)
stream = io.BytesIO()

for rawFrame in camera.capture_continuous(feed,
                                          format="bgr",
                                          use_video_port=True):
    frame = rawFrame.array
    # empty = np.empty((1088,1920,3), dtype=np.uint8)
    # camera.capture(imageBGR, 'bgr')

    # write raw image to file
    # cv2.imwrite('rawImage.png',imageBGR)

    # convert original image to rgb
    # imageRGB = cv2.cvtColor(imageBGR, cv2.COLOR_BGR2RG
    # print(type(frame))
Example #36
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import sys
import config

camera = PiCamera()
camera.resolution = (1024, 768)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(1024, 768))

time.sleep(1)

faceCascade = cv2.CascadeClassifier(config.HAAR_FACES)

for frame in camera.capture_continuous(rawCapture,
                                       format="bgr",
                                       use_video_port=True):

    image = frame.array

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=config.HAAR_SCALE_FACTOR,
        minNeighbors=config.HAAR_MIN_NEIGHBORS,
        minSize=config.HAAR_MIN_SIZE,
        flags=cv2.CASCADE_SCALE_IMAGE)
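    # Hedged continuation (the original snippet ends above): draw each
    # detection, show the frame, and clear the stream before the next capture.
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("faces", image)
    rawCapture.truncate(0)  # required before capture_continuous yields the next frame
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break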
Example #37
SERVER = socket.gethostbyname(socket.gethostname())
ADDR = ("169.254.186.249", PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# server.close()
server.bind(ADDR)
print("port:", PORT)
# camera initialization
cam = PiCamera()
cam_res = (320, 240)
cam.resolution = cam_res
cam.framerate = 24
rawCapture = PiRGBArray(cam, size=cam_res)
FRAME_LENGTH = cam_res[0] * cam_res[1] * 3 + 163  # 230563
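# 320 * 240 pixels * 3 bytes per BGR pixel = 230400 bytes of raw image data;
# the extra 163 bytes are presumably header/metadata added by the sender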


def run_client():
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy)
    ssh.connect("169.254.130.140", username="******", password="******")
    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
        "python Documents/client_no_length.py")


def handle_client(conn, addr, q):
    global _finish

    print(f"[NEW CONNECTION] {addr} connected.")
Example #38
def motionmain():
	pin_num = 22
	# filter warnings, load the configuration and initialize the Dropbox
	warnings.filterwarnings("ignore")

	#setup gpio
	GPIO1.setmode(GPIO1.BCM)
	# GPIO 23 & 17 set up as inputs, pulled up to avoid false detection.
	# Both ports are wired to connect to GND on button press.
	# So we'll be setting up falling edge detection for both
	GPIO1.setup(pin_num, GPIO1.IN, pull_up_down=GPIO1.PUD_UP)
	#dropbox:
	with open("/home/pi/Desktop/pisecuritysystem/permissions.json") as f:
		data = json.load(f)
	client = dbx.Dropbox(data['db-token'])

	# initialize the camera and grab a reference to the raw camera capture
	camera = PiCamera()
	#default 640x480 - decrease to go faster
	#motion-detect camera resolution
	camera.resolution = (640,480)
	rawCapture = PiRGBArray(camera, size=(640,480))

	# allow the camera to warmup, then initialize the average frame, last
	# uploaded timestamp, and frame motion counter
	print("[INFO] warming up...")
	time.sleep(2.5)
	avg = None
	lastUploaded = datetime.datetime.now()
	motionCounter = 0
	text = ""
	name = ""

	# capture frames from the camera
	for f in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
		# grab the raw NumPy array representing the image and initialize
		# the timestamp and occupied/unoccupied text
		frame = f.array
		timestamp = datetime.datetime.now()

		# resize the frame, convert it to grayscale, and blur it
		#frame=500 default, decrease it to go faster
		frame = imutils.resize(frame, width=500)
		gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
		gray = cv2.GaussianBlur(gray, (21, 21), 0)

		# if the average frame is None, initialize it
		if avg is None:
			print("[INFO] starting background model...")
			avg = gray.copy().astype("float")
			rawCapture.truncate(0)
			continue

		# accumulate the weighted average between the current frame and
		# previous frames, then compute the difference between the current
		# frame and running average
		cv2.accumulateWeighted(gray, avg, 0.5)
		frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

		# threshold the delta image, dilate the thresholded image to fill
		# in holes, then find contours on thresholded image
		thresh = cv2.threshold(frameDelta, 5, 255, cv2.THRESH_BINARY)[1]
		thresh = cv2.dilate(thresh, None, iterations=2)
		cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
		cnts = imutils.grab_contours(cnts)  # handles the OpenCV 3 vs 4 return-value difference

		# loop over the contours
		for c in cnts:
			# if the contour is too small, ignore it
			if cv2.contourArea(c) < 5000:
				continue

			# compute the bounding box for the contour, draw it on the frame,
			# and update the text
			(x, y, w, h) = cv2.boundingRect(c)
			cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
			text = "!"

		# draw the text and timestamp on the frame
		ts = timestamp.strftime("%A_%d_m_%Y_%I:%M:%S%p")
		cv2.putText(frame, "{}".format(ts), (10, 20),
			cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

		# check to see if the room is occupied
		if text == "!":
			# check to see if enough time has passed between uploads
			if (timestamp - lastUploaded).seconds >= 3.0:
				# increment the motion counter
				motionCounter += 1

				# check to see if the number of frames with consistent motion is
				# high enough
				if motionCounter >= 8: #originally 8
					print("Capturing image.")
					t = TempImage()
					cv2.imwrite(t.path, frame)
					name = "{base}{timestamp}".format(base="", timestamp=ts)
					os.rename(t.path[3:], "{new}.jpg".format(new=name))
					print("[UPLOAD] {}".format(ts))
					with open("/home/pi/Desktop/pisecuritysystem/{name}.jpg".format(name=name), "rb") as f:
						client.files_upload(f.read(), "/{name}.jpg".format(name=name), mute = True)
					os.remove("{name}.jpg".format(name=name))
					# update the last uploaded timestamp and reset the motion
					# counter
					lastUploaded = timestamp
					motionCounter = 0
					text=""

		# otherwise, the room is not occupied
		else:
			motionCounter = 0
			text=""

		# clear the stream in preparation for the next frame
		rawCapture.truncate(0)
		if GPIO1.input(pin_num) == False:
			print("button pressed")
			print("exit now")
			break
	GPIO1.cleanup()
	time.sleep(.25) #pause for .25 seconds
	camera.close()
	print("camera closed")
	time.sleep(.25)
Example #39
Kernel_size=15
low_threshold=5
high_threshold=10

rho=1
threshold=10
theta=np.pi/180
minLineLength=0
maxLineGap=0

#Initialize camera
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 10
rawCapture = PiRGBArray(camera, size=(640, 480))

for f in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # CAPTURE FRAME-BY-FRAME
    frame = f.array
    time.sleep(0.1)
    #frame = imutils.resize(frame, width=500)
    #Convert to Grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #Blur image to reduce noise; the bigger Kernel_size is, the blurrier the image
    blurred = cv2.GaussianBlur(gray, (Kernel_size, Kernel_size), 0)
    
    #Perform Canny edge detection.
    #A pixel whose gradient is higher than high_threshold is considered an edge;
    #one whose gradient is lower than low_threshold is rejected (not an edge).
    #Larger high_threshold values therefore yield fewer edges.
Example #40
    FLOW = DropboxOAuth2FlowNoRedirect(CONF["dropbox_key"],
                                       CONF["dropbox_secret"])
    print("[INFO] Authorize this application: {}".format(FLOW.start()))

    AUTH_CODE = input("Enter auth code here: ").strip()

    # finish the authorization and grab the Dropbox client
    (ACCESS_TOKEN, USER_ID) = FLOW.finish(AUTH_CODE)
    CLIENT = DropboxClient(ACCESS_TOKEN)
    print("[SUCCESS] dropbox account linked")

# initialize the camera and grab a reference to the raw camera capture
CAMERA = PiCamera()
CAMERA.resolution = tuple(CONF["resolution"])
CAMERA.framerate = CONF["fps"]
RAW_CAPTURE = PiRGBArray(CAMERA, size=tuple(CONF["resolution"]))

# allow the camera to warmup, then initialize the average frame, last
# uploaded timestamp, and frame motion counter
print "[INFO] warming up..."
time.sleep(CONF["camera_warmup_time"])
AVG = None
LAST_UPLOADED = datetime.datetime.now()
MOTION_COUNTER = 0

STREAM = picamera.PiCameraCircularIO(CAMERA, seconds=20)
CAMERA.start_recording(STREAM, format='h264', quality=23)

# set up vlc subprocess for streaming
CMDLINE = [
    'cvlc', '-vvv', 'stream:///dev/stdin', '--sout',
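Example #40 stops mid-way through the VLC command line; the 20-second circular buffer it sets up is normally flushed to disk once motion is confirmed. A minimal sketch of that step, using the STREAM object above (the output filename is illustrative):

# Hedged sketch: dump the buffered video to a file when motion is detected.
# PiCameraCircularIO provides copy_to() for exactly this purpose.
STREAM.copy_to('/home/pi/motion_event.h264', seconds=20)
STREAM.clear()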
Example #41
0
gp.output(16, True)
gp.output(21, True)
gp.output(22, True)

print(
    'Starting the calibration. Press the space bar to exit this part of the program.\n'
)
print(
    'Press (s) to save the image you want, and press (c) to see the next frame without saving it.'
)

i = 0

camera = PiCamera()
camera.resolution = (2560, 720)
rawCapture = PiRGBArray(camera)

sleep(2)
k = 0
while True:
    # take an image from channel 1
    # Start Reading Camera images
    gp.output(7, False)
    gp.output(11, False)
    gp.output(12, True)
    time.sleep(0.1)
    camera.capture(rawCapture, format="bgr")
    frameR = rawCapture.array
    rawCapture.truncate(0)
    gp.output(7, False)
    gp.output(11, True)
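The calibration loop above is cut off after switching camera channels; below is a sketch of the keyboard handling that the printed prompts describe, assuming frameR from the capture above and i as the saved-image counter (the window name and filename pattern are illustrative):

# Hedged sketch: save on 's', skip on 'c', leave calibration on the space bar.
cv2.imshow("calibration", frameR)
key = cv2.waitKey(0) & 0xFF
if key == ord('s'):
    cv2.imwrite('calib_%02d.png' % i, frameR)
    i += 1
elif key == ord(' '):
    cv2.destroyWindow("calibration")
    # break out of the surrounding while True loop at this point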
Example #42
0
    def camera():
        global effect
        camera = PiCamera()
        camera.resolution = (640, 480)
        camera.shutter_speed = 0
        camera.framerate = 20
        camera.rotation = 0
    
        camera.brightness = 50    #(0 to 100)
        camera.sharpness = 0      #(-100 to 100)
        camera.contrast = 0       #(-100 to 100)
        camera.saturation = 0     #(-100 to 100)
        camera.iso = 0            #(automatic)(100 to 800)
        camera.exposure_compensation = 0   #(-25 to 25)
        camera.exposure_mode = 'auto'
        camera.meter_mode = 'average'
        camera.awb_mode = 'auto'
        camera.hflip = False
        camera.vflip = False
        camera.crop = (0.0, 0.0, 1.0, 1.0)
        rawCapture = PiRGBArray(camera, size=camera.resolution)  
        last_e ='none'
        camera_val = 0

        last_show_content_list = []
        show_content_list = []
        change_type_val  = []
        change_type_dict = {"resolution":[2592,1944], "brightness":50, "contrast":0, "sharpness":0, "saturation":0, "iso":0, "exposure_compensation":0, "exposure_mode":'auto', \
            "meter_mode":'average',"awb_mode":'auto',"drc_strength":'off'}
        ptStart_1 = (0,80)
        ptEnd_1 = (320, 80)
        ptStart_2 = (0,160)
        ptEnd_2 = (320,160)
        ptStart_3 = (107,0)
        ptEnd_3 = (107,240)
        ptStart_4 = (215,0)
        ptEnd_4 = (215,240)

        point_color = (100,100,100) # BGR
        thickness = 1 
        lineType = 8
        try:
            while True:
                for frame in camera.capture_continuous(rawCapture, format="rgb",use_video_port=True):# use_video_port=True
                    img = frame.array
                    img = cv2.resize(img, (320,240), interpolation=cv2.INTER_AREA)

                    img2gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # frames above are captured as RGB
                    Ras_Cam.detect_obj_parameter['clarity_val'] = round(cv2.Laplacian(img2gray, cv2.CV_64F).var(),2)
                    img = Ras_Cam.human_detect_func(img)
                    # cv2.rectangle(img, (280,10), (310,20), (255,255,255))
                    # cv2.rectangle(img, (310,13), (311,17), (255,255,255))
                    # cv2.rectangle(img, (282,12), (int((1-round(4.3 - power_val(),3)) / 1 * 26 + 282),18), (0,255,0),thickness=-1)

                    if Ras_Cam.detect_obj_parameter['horizontal_line'] == True:
                        cv2.line(img, ptStart_1, ptEnd_1, point_color, thickness, lineType)
                        cv2.line(img, ptStart_2, ptEnd_2, point_color, thickness, lineType)
                        cv2.line(img, ptStart_3, ptEnd_3, point_color, thickness, lineType)
                        cv2.line(img, ptStart_4, ptEnd_4, point_color, thickness, lineType)
                    
                    # change_camera_setting
                    if Ras_Cam.detect_obj_parameter['change_setting_flag'] == True:
                        

                        # change_setting_cmd = "camera." + Ras_Cam.detect_obj_parameter['change_setting_type'] + '=' + str(Ras_Cam.detect_obj_parameter['change_setting_val'])

                        # change_type_dict[Ras_Cam.detect_obj_parameter['change_setting_type']] = Ras_Cam.detect_obj_parameter['change_setting_val']
                        # change_type_val.append(change_setting_cmd)
                        #
                        
                        change_val = Ras_Cam.detect_obj_parameter['change_setting_val']
                        change_type_name = Ras_Cam.detect_obj_parameter['change_setting_type']
                        if type(change_val) == str:
                            change_val = "\"" + str(Ras_Cam.detect_obj_parameter['change_setting_val']) + "\""
                            change_setting_cmd = "camera." + change_type_name + '=' +  change_val
                            # change_setting_cmd = "camera." + change_type_name + '=' + str(Ras_Cam.detect_obj_parameter['change_setting_val'])
                            # print(change_setting_cmd)
                            exec(change_setting_cmd)
                            # change_type_dict[change_type_name] = change_val.replace("'","")
                            change_type_dict[change_type_name] = change_val.replace("\"","")
                        else:
                            change_setting_cmd = "camera." + change_type_name + '=' + str(Ras_Cam.detect_obj_parameter['change_setting_val'])
                            # print(change_setting_cmd)
                            exec(change_setting_cmd)
                            change_type_dict[change_type_name] = Ras_Cam.detect_obj_parameter['change_setting_val']
                        Ras_Cam.detect_obj_parameter['change_setting_flag'] = False
                        # print(type(Ras_Cam.detect_obj_parameter['change_setting_val']))
                        # print(Ras_Cam.detect_obj_parameter['change_setting_val'])
                    if Ras_Cam.detect_obj_parameter['content_num'] != 0:

                        for i in range(Ras_Cam.detect_obj_parameter['content_num']):
                            exec("Ras_Cam.detect_obj_parameter['process_si'] = Ras_Cam.detect_obj_parameter['process_content_" + str(i+1) + "'" + "]")
                            cv2.putText(img, str(Ras_Cam.detect_obj_parameter['process_si'][0]),Ras_Cam.detect_obj_parameter['process_si'][1],cv2.FONT_HERSHEY_SIMPLEX,Ras_Cam.detect_obj_parameter['process_si'][3],Ras_Cam.detect_obj_parameter['process_si'][2],2)
                    
                    if Ras_Cam.detect_obj_parameter['setting_flag'] == True:
                        setting_type = Ras_Cam_SETTING[Ras_Cam.detect_obj_parameter['setting']]
                        if setting_type == "resolution":
                            Ras_Cam.detect_obj_parameter['setting_val'] = Ras_Cam.detect_obj_parameter['setting_resolution']
                            # print(Ras_Cam.detect_obj_parameter['change_setting_type'])
                            # print(list(Ras_Cam.detect_obj_parameter['setting_resolution']))
                            change_type_dict["resolution"] = list(Ras_Cam.detect_obj_parameter['setting_resolution'])
                            cv2.putText(img, 'resolution:' + str(Ras_Cam.detect_obj_parameter['setting_resolution']),(10,20),cv2.FONT_HERSHEY_SIMPLEX,0.6,(255,255,255),2)
                        # elif setting_type == "shutter_speed":  
                        #     change_type_dict["shutter_speed"] = change_type_dict["shutter_speed"]
                        #     cv2.putText(img, 'shutter_speed:' + str(Ras_Cam.detect_obj_parameter['change_setting_val']),(10,20),cv2.FONT_HERSHEY_SIMPLEX,0.6,(255,255,255),2)
                        else:
                            cmd_text = "Ras_Cam.detect_obj_parameter['setting_val'] = camera." + Ras_Cam_SETTING[Ras_Cam.detect_obj_parameter['setting']]
                            # print('mennu:',Ras_Cam.detect_obj_parameter['setting_val'])
                            # print("test: ",Ras_Cam_SETTING[Ras_Cam.detect_obj_parameter['setting']])
                            exec(cmd_text)
                            cv2.putText(img, setting_type + ': ' + str(Ras_Cam.detect_obj_parameter['setting_val']),(10,20),cv2.FONT_HERSHEY_SIMPLEX,0.6,(255,255,255),2)




                    e = EFFECTS[Ras_Cam.detect_obj_parameter['eff']]
                    # change_type_dict['ifx'] = EFFECTS[Ras_Cam.detect_obj_parameter['eff']]
                    
                    
                    if last_e != e:
                        camera.image_effect = e
                    last_e = e
                    if last_e != 'none':
                        cv2.putText(img, str(last_e),(0,15),cv2.FONT_HERSHEY_SIMPLEX,0.6,(204,209,72),2)

                        
                    if Ras_Cam.detect_obj_parameter['photo_button_flag'] == True:
                        camera.close()
                        break

    # def time_lapse_photography(flag):
    #     Ras_Cam.detect_obj_parameter['time_lapse_photography'] = flag

                    if Ras_Cam.detect_obj_parameter['time_lapse_photography'] == True:
                        camera.close()
                        break
                            
    
                    Ras_Cam.img_array[0] = img
                    rawCapture.truncate(0)
                    # print("FPS:",round(time.time() - s_time,2),camera.framerate)


                # camera = PiCamera()
                imu_x,imu_y = imu_rotate()
                # print("change_type_val:",change_type_val)
                for i in change_type_val:
                    exec(i)
                if imu_y < 35 and imu_y >-35 and imu_x <= 90 and imu_x > 45:
                    # if Ras_Cam.detect_obj_parameter['setting_resolution'][0] < 3040:
                    #     camera.resolution = (Ras_Cam.detect_obj_parameter['setting_resolution'][1],Ras_Cam.detect_obj_parameter['setting_resolution'][0])
                    # else:
                    # camera.resolution = (Ras_Cam.detect_obj_parameter['setting_resolution'][1],Ras_Cam.detect_obj_parameter['setting_resolution'][0])
                    # camera.rotation = 270
                    change_type_dict['rotation'] = 270
                    image_width, image_height = change_type_dict['resolution'][1],change_type_dict['resolution'][0]
                elif imu_y < 35 and imu_y >-35 and imu_x < -45 and imu_x >= -90:
                    # if Ras_Cam.detect_obj_parameter['setting_resolution'][0] < 3040:
                    #     camera.resolution = (Ras_Cam.detect_obj_parameter['setting_resolution'][1],Ras_Cam.detect_obj_parameter['setting_resolution'][0])
                    # else:
                    # camera.resolution = (Ras_Cam.detect_obj_parameter['setting_resolution'][1],Ras_Cam.detect_obj_parameter['setting_resolution'][0])
                    # camera.rotation = 90
                    image_width, image_height  = change_type_dict['resolution'][1],change_type_dict['resolution'][0]
                    change_type_dict['rotation'] = 90
                elif imu_y < -65 and imu_y >=-90 and imu_x < 45 and imu_x >= -45:
                    # camera.resolution = Ras_Cam.detect_obj_parameter['setting_resolution']
                    # camera.rotation = 180
                    image_width, image_height = change_type_dict['resolution'][0],change_type_dict['resolution'][1]
                    change_type_dict['rotation'] = 180
                else:
                    image_width, image_height = change_type_dict['resolution'][0],change_type_dict['resolution'][1]
                    change_type_dict['rotation'] = 0
                    # camera.resolution = Ras_Cam.detect_obj_parameter['setting_resolution']

                # camera.image_effect = e
                # rawCapture = PiRGBArray(camera, size=camera.resolution) 
                picture_time = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
                Ras_Cam.detect_obj_parameter['picture_path'] = '/home/pi/Pictures/rascam_picture_file/' + picture_time + '.jpg'
                # print(Ras_Cam.detect_obj_parameter['picture_path']) 


                # camera.close() 
                # print(camera.brightness,camera.sharpness,camera.contrast,camera.saturation,camera.iso,camera.exposure_compensation,camera.exposure_mode,camera.meter_mode,camera.awb_mode,camera.shutter_speed)
                a_t = "sudo raspistill" +  " -t 250"  + " -w " + str(image_width) + " -h " + str(image_height) + " -br " + str(change_type_dict['brightness']) + " -co " + str(change_type_dict['contrast']) \
                + " -sh " + str(change_type_dict['sharpness']) + " -sa " + str(change_type_dict['saturation']) + " -ISO " + str(change_type_dict['iso']) + " -ev " + str(change_type_dict['exposure_compensation']) + " -ex " + str(change_type_dict['exposure_mode']) + " -mm " + str(change_type_dict['meter_mode']) \
                + " -rot " + str(change_type_dict['rotation']) +" -ifx " + str(EFFECTS[Ras_Cam.detect_obj_parameter['eff']]) + " -awb " + str(change_type_dict['awb_mode']) + " -drc " + str(change_type_dict['drc_strength']) + " -o " + Ras_Cam.detect_obj_parameter['picture_path']

                # a_t = "sudo raspistill" +  " -t 250"  +  " -ss " + "0" + " -o " + Ras_Cam.detect_obj_parameter['picture_path']
                run_command(a_t)
                # camera.capture(Ras_Cam.detect_obj_parameter['picture_path'])
                # cv2.imread()
                if Ras_Cam.detect_obj_parameter['watermark_flag'] == True:
                    add_text_to_image(Ras_Cam.detect_obj_parameter['picture_path'],'Shot by Rascam','Sunfounder')

                if Ras_Cam.detect_obj_parameter['google_upload_flag'] == True:
                    upload(file_path='/home/pi/Pictures/rascam_picture_file/', file_name=picture_time + '.jpg')

                #init again
                # camera.close()
                camera = PiCamera()
                for i in change_type_dict.items():
                    if type(i[1]) != str:
                        change_setting_cmd = "camera." + i[0] + '=' + str(i[1])
                        # print(change_setting_cmd)
                        # exec(change_setting_cmd)
                    else:
                        change_setting_cmd = "camera." + i[0] + '=' + "'" + str(i[1]) + "'"
                        # print(change_setting_cmd)
                        # exec(change_setting_cmd)                        
                camera.resolution = (640,480)
                camera.shutter_speed = 0
                camera.framerate = 20
                camera.rotation = 0
                camera.image_effect = e
                rawCapture = PiRGBArray(camera, size=camera.resolution) 
                Ras_Cam.detect_obj_parameter['photo_button_flag'] = False
                   
        finally:
            camera.close()
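A note on the raspistill call above: assembling the command by string concatenation is fragile if any value ever contains a space. A sketch of the same invocation built as an argument list (only a subset of the flags is repeated; the rest follow the same pattern, and subprocess.run stands in for run_command):

import subprocess

# Hedged sketch: the raspistill flags from above, passed as a list so no
# shell quoting is involved.
cmd = ["sudo", "raspistill", "-t", "250",
       "-w", str(image_width), "-h", str(image_height),
       "-br", str(change_type_dict['brightness']),
       "-co", str(change_type_dict['contrast']),
       "-rot", str(change_type_dict['rotation']),
       "-o", Ras_Cam.detect_obj_parameter['picture_path']]
subprocess.run(cmd, check=True)
Example #43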
SUIT_WIDTH = 70
SUIT_HEIGHT = 100

# If using a USB Camera instead of a PiCamera, change PiOrUSB to 2
PiOrUSB = 2

if PiOrUSB == 1:
    # Import packages from picamera library
    from picamera.array import PiRGBArray
    from picamera import PiCamera

    # Initialize PiCamera and grab reference to the raw capture
    camera = PiCamera()
    camera.resolution = (IM_WIDTH,IM_HEIGHT)
    camera.framerate = 10
    rawCapture = PiRGBArray(camera, size=(IM_WIDTH,IM_HEIGHT))

if PiOrUSB == 2:
    # Initialize USB camera
    cap = cv2.VideoCapture(0)

# Use counter variable to switch from isolating Rank to isolating Suit
i = 1

for Name in ['Ace','Two','Three','Four','Five','Six','Seven','Eight',
             'Nine','Ten','Jack','Queen','King','Spades','Diamonds',
             'Clubs','Hearts']:

    filename = Name + '.jpg'

    print('Press "p" to take a picture of ' + filename)
Example #44
0
GPIO.setup(in3, GPIO.OUT)
GPIO.setup(in4, GPIO.OUT)
pwm1 = GPIO.PWM(enable1, 100)  # Created a PWM object
pwm2 = GPIO.PWM(enable2, 100)
pwm1.start(40)  #right wheel
pwm2.start(40)  #left wheel
GPIO.output(in1, True)
GPIO.output(in2, False)
GPIO.output(in3, False)
GPIO.output(in4, True)

#initialisations
camera = PiCamera()
camera.resolution = (320, 240)
camera.framerate = 60
rawCapture = PiRGBArray(camera, size=(320, 240))
time.sleep(0.1)

initialised = False

turnlist = ["left"]
turnum = 0

#to check if prev state was straight
#prevstraight=True

for frame in camera.capture_continuous(rawCapture,
                                       format="bgr",
                                       use_video_port=True):
    image = frame.array
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
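The line-follower loop above ends at the grayscale conversion; one plausible continuation is sketched below, steering by the centroid of the dark line (threshold and duty-cycle values are illustrative, not from the example):

# Hedged sketch: find the line's centroid and adjust the wheel PWM toward it.
_, mask = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY_INV)  # line darker than floor
M = cv2.moments(mask)
if M['m00'] > 0:
    cx = int(M['m10'] / M['m00'])    # x position of the line centroid
    if cx < 140:                     # line to the left: slow the left wheel
        pwm2.ChangeDutyCycle(25)
        pwm1.ChangeDutyCycle(40)
    elif cx > 180:                   # line to the right: slow the right wheel
        pwm1.ChangeDutyCycle(25)
        pwm2.ChangeDutyCycle(40)
    else:
        pwm1.ChangeDutyCycle(40)
        pwm2.ChangeDutyCycle(40)
rawCapture.truncate(0)               # clear the stream for the next frame
Example #45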
#extracting single color from video (red ball)
# 1. Take each frame
# 2. Convert from BGR to HSV (Hue Saturation Value)
# 3. Threshold image for range of red
# 4. Extract red object

import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera

camera = PiCamera()
camera.resolution = (640,480)
camera.framerate = 28
frameCapture = PiRGBArray(camera,size = (640,480))

for frame in camera.capture_continuous(frameCapture,format = "bgr", use_video_port=True):

    image = frame.array

    image = cv2.flip(image,0) #flip image to correct way up
    image = cv2.flip(image,1) #flip image left to right
    #result = image #(for normal video)

    hsv = cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
    #Hue (0,179), Saturation (0,255), Value (0,255)
    redLower = np.array([50,50,110])
    redUpper = np.array([240,240,130])
    mask = cv2.inRange(hsv, redLower, redUpper)        # 3. threshold for the red range
    result = cv2.bitwise_and(image, image, mask=mask)  # 4. extract the red object
    frameCapture.truncate(0)                           # clear the stream for the next frame
Example #46
PATH = 'data/dislike/train/like'

# name of each image + a number
img_name = 'like'

# the first image will have this number appended to its name
start_index = 150

# number of frames to record
img_count = 200

# initialize the camera
camera = PiCamera()
camera.resolution = res
camera.framerate = fps
rawCapture = PiRGBArray(camera, size=res)

# pause the program for 0.1 seconds so the camera can warm up
time.sleep(0.1)

# first loop
# only serves to show the camera output on the display
for frame in camera.capture_continuous(rawCapture,
                                       format='bgr',
                                       use_video_port=True):

    # the current frame as a NumPy array
    img = frame.array

    # convert the frame from BGR to grayscale
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
Example #47
# raspi cam lapse
import cv2
from picamera.array import PiRGBArray
from picamera import PiCamera
import datetime
import time

camera = PiCamera()
camera.resolution = (640, 480)

count_max = 10

if __name__ == '__main__':
    count = 0

    stream = PiRGBArray(camera)

    try:
        while True:
            camera.capture(stream, 'bgr', use_video_port=True)
            img = stream.array

            count += 1
            if count > count_max:
                now = datetime.datetime.now()
                filename = './log_' + now.strftime('%Y%m%d_%H%M%S') + '.jpg'
                print(filename)
                cv2.imwrite(filename, img)
                count = 0

            stream.seek(0)
            stream.truncate()
    except KeyboardInterrupt:
        camera.close()
Example #48
0
class VideoStream:
    """Camera object"""
    def __init__(self, resolution=(640, 480), framerate=30, PiOrUSB=1, src=0):

        self.resolution = resolution
        self.framerate = framerate
        # Create a variable to indicate if it's a USB camera or PiCamera.
        # PiOrUSB = 1 will use PiCamera. PiOrUSB = 2 will use USB camera.
        self.PiOrUSB = PiOrUSB
        self.src = src
        self.init()

    def restart(self):
        self.init()

    def init(self):
        if self.PiOrUSB == 1:  # PiCamera
            # Import packages from picamera library
            from picamera.array import PiRGBArray
            from picamera import PiCamera

            # Initialize the PiCamera and the camera image stream
            self.camera = PiCamera()
            self.camera.resolution = self.resolution
            self.camera.framerate = self.framerate
            self.rawCapture = PiRGBArray(self.camera, size=self.resolution)
            self.stream = self.camera.capture_continuous(self.rawCapture,
                                                         format="bgr",
                                                         use_video_port=True)

            # Initialize variable to store the camera frame
            self.frame = []

        if self.PiOrUSB == 2:  # USB camera
            # Initialize the USB camera and the camera image stream
            self.stream = cv2.VideoCapture(self.src)
            ret = self.stream.set(3, self.resolution[0])  # 3 = CAP_PROP_FRAME_WIDTH
            ret = self.stream.set(4, self.resolution[1])  # 4 = CAP_PROP_FRAME_HEIGHT
            ret = self.stream.set(
                5, self.framerate
            )  # 5 = CAP_PROP_FPS; setting it doesn't seem to have any effect

            # Read first frame from the stream
            (self.grabbed, self.frame) = self.stream.read()

        # Create a variable to control when the camera is stopped
        self.stopped = False

    def start(self):
        # Start the thread to read frames from the video stream
        Thread(target=self.update, args=()).start()
        return self

    def update(self):

        if self.PiOrUSB == 1:  # PiCamera

            # Keep looping indefinitely until the thread is stopped
            for f in self.stream:
                # Grab the frame from the stream and clear the stream
                # in preparation for the next frame
                self.frame = f.array
                self.rawCapture.truncate(0)

                if self.stopped:
                    # Close camera resources and exit the thread
                    self.stream.close()
                    self.rawCapture.close()
                    self.camera.close()
                    return

        if self.PiOrUSB == 2:  # USB camera

            # Keep looping indefinitely until the thread is stopped
            while True:
                # If the camera is stopped, stop the thread
                if self.stopped:
                    # Close camera resources
                    self.stream.release()
                    return

                # Otherwise, grab the next frame from the stream
                (self.grabbed, frame) = self.stream.read()
                if (self.grabbed):
                    self.frame = frame
                else:
                    self.stop()
                    self.stream.release()
                    self.restart()
                time.sleep(1 / self.framerate / 2)

    def read(self):
        # Return the most recent frame
        return self.frame

    def stop(self):
        # Indicate that the camera and thread should be stopped
        self.stopped = True
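A short usage sketch for the VideoStream class above (window name and quit key are illustrative; assumes cv2 and time are imported):

# Hedged sketch: typical use of the threaded VideoStream class.
vs = VideoStream(resolution=(640, 480), framerate=30, PiOrUSB=2).start()
time.sleep(1.0)                    # give the thread time to deliver frames
try:
    while True:
        frame = vs.read()          # most recent frame, updated by the thread
        cv2.imshow("Frame", frame)
        if (cv2.waitKey(1) & 0xFF) == ord('q'):
            break
finally:
    vs.stop()
    cv2.destroyAllWindows()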
Example #49
0
 def __init__(self, resolution=(640, 480), framerate=32):
     # initialize the camera, apply the requested settings, and grab a
     # reference to the raw camera capture
     self.camera = PiCamera()
     self.camera.resolution = resolution
     self.camera.framerate = framerate
     self.rawCapture = PiRGBArray(self.camera, size=resolution)
Example #50
0
import argparse
import cv2
import time
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera


# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
	help="path to the input image")
args = vars(ap.parse_args())

# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
rawCapture = PiRGBArray(camera)
# allow the camera to warmup
time.sleep(0.1)
 
# grab an image from the camera
camera.capture(rawCapture, format="bgr")
image = rawCapture.array

# load the image, convert it to grayscale, blur it slightly,
# and threshold it
#image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY_INV)[1]  # threshold value: 60
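This snippet ends after thresholding; the customary next step on such a binary image is contour extraction, sketched here (the indexing line handles the OpenCV 3 vs 4 return-value difference):

# Hedged sketch: pull contours out of the threshold map and draw them.
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[1] if len(cnts) == 3 else cnts[0]  # OpenCV 3 vs OpenCV 4
cv2.drawContours(image, cnts, -1, (0, 255, 0), 2)
cv2.imshow("Contours", image)
cv2.waitKey(0)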

Example #51
0
def setup():
    """ Initial setup: create the camera, grab the raw capture, and warm up """
    camera = PiCamera()
    rawCapture = PiRGBArray(camera)
    time.sleep(0.1)
    return camera, rawCapture
Example #52
0
                "--face",
                required=True,
                help="path to where the face cascade resides")
ap.add_argument("-v", "--video", help="path to the (optional) video file")
ap.add_argument("-d", "--display", help="display on or off")
args = vars(ap.parse_args())

# initialize the camera and grab a reference to the raw camera
# capture
camera = PiCamera()
camera.resolution = (camH, camW)
camera.framerate = FR  #32 by default, changed
# camera.vflip = True
# camera.hflip = True
camera.rotation = rotateAngle
rawCapture = PiRGBArray(camera, size=(camH, camW))

# construct the face detector and allow the camera to warm
# up
fd = FaceDetector(args["face"])
time.sleep(0.1)
lastTime = time.time()

# send to 0
ser.write('0\n')
# print ser.readline()

# capture frames from the camera
for f in camera.capture_continuous(rawCapture,
                                   format="bgr",
                                   use_video_port=True):
Example #53
0
class QRDetector(object):
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=(640, 480))
    time.sleep(0.5)

    app = Flask(__name__)

    @app.route('/stream')
    def stream():
        return Response(gen(),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    def gen():
        while True:
            frame = get_frame()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

    def get_frame():
        camera.capture(rawCapture, format="bgr", use_video_port=True)
        frame = rawCapture.array
        decoded_objs = decode(frame)
        frame = display(frame, decoded_objs)
        ret, jpeg = cv2.imencode('.jpg', frame)
        rawCapture.truncate(0)

        return jpeg.tobytes()

    def decode(frame):
        decoded_objs = pyzbar.decode(frame, scan_locations=True)
        for obj in decoded_objs:
            print(datetime.now().strftime('%H:%M:%S.%f'))
            print('Type: ', obj.type)
            print('Data: ', obj.data)

        return decoded_objs

    def display(frame, decoded_objs):
        for decoded_obj in decoded_objs:
            left, top, width, height = decoded_obj.rect
            frame = cv2.rectangle(frame, (left, top),
                                  (left + width, height + top), (0, 255, 0), 2)

        return frame

    if __name__ == '__main__':
        app.run(host="0.0.0.0", debug=False, threaded=True)

    def __init__(self, flip=False):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        decoded_objs = self.decode(frame)
        frame = self.draw(frame, decoded_objs)
        return frame

    def decode(self, frame):
        decoded_objs = pyzbar.decode(frame, scan_locations=True)
        for obj in decoded_objs:
            print(datetime.now().strftime('%H:%M:%S.%f'))
            print('Type: ', obj.type)
            print('Data: ', obj.data)
        return decoded_objs

    def draw(self, frame, decoded_objs):
        for obj in decoded_objs:
            left, top, width, height = obj.rect
            frame = cv2.rectangle(frame, (left, top),
                                  (left + width, height + top), (0, 0, 255), 2)
            data = obj.data.decode('utf-8')
            cv2.putText(frame, data, (left, top), cv2.FONT_HERSHEY_PLAIN, 2,
                        (0, 0, 255))
        return frame
Example #54
0
    def run(self, callback=None):
        # Setup the camera
        self.camera = PiCamera()
        self.camera.resolution = (320, 240)
        self.camera.framerate = 60
        self.rawCapture = PiRGBArray(self.camera, size=(320, 240))

        # Load a cascade file for detecting faces
        self.face_cascade = cv2.CascadeClassifier(
            '/home/pi/opencv-3.1.0/data/lbpcascades/lbpcascade_frontalface.xml'
        )

        # Capture frames from the camera
        for frame in self.camera.capture_continuous(self.rawCapture,
                                                    format="bgr",
                                                    use_video_port=True):
            image = frame.array

            # Use the cascade file we loaded to detect faces
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = self.face_cascade.detectMultiScale(gray)

            if len(faces) == 0:
                #print ("No Face Found " )
                t = time.time() - self.t0
                if callback is not None:
                    callback(loop_counter=t)  #,rotation_x=last_x)

                #self.my_mini_brain.face_flag = "SERVO "
                if t > self.variable_of_time_change:
                    self.variable_of_time_change = t + int(
                        random.uniform(0, 1) * 10.0) + 5.0
                    #self.app.random_pos()

            else:
                print("Found " + str(len(faces)) + " face(s)")
                #self.my_mini_brain.face_flag = "FACE "
                n_face = 0
                # Draw a rectangle around every face and move the motor towards the face
                for (x, y, w, h) in faces:
                    n_face += 1
                    cv2.rectangle(image, (x, y), (x + w, y + h),
                                  (100, 255, 100), 2)
                    #cv2.putText( image, "Face No." + str( len( faces ) ), ( x, y ), cv2.FONT_HERSHEY_SIMPLEX, 0.5, ( 0, 0, 255 ), 2 )

                    tx = x + w / 2
                    ty = y + h / 2
                    if n_face == 1:
                        #HORIZONTAL

                        if (self.cx - tx > 10
                                and self.my_mini_brain.xdeg <= 190):
                            #self.my_mini_brain.xdeg += 5
                            #self.my_mini_brain.update_head_position()
                            pass
                        elif (self.cx - tx < -10
                              and self.my_mini_brain.xdeg >= 90):
                            #self.my_mini_brain.xdeg -= 5
                            #self.my_mini_brain.update_head_position()
                            pass
                        #VERTICAL

                        if (self.cy - ty > 10 and self.my_mini_brain.ydeg >=
                                self.my_mini_brain.y_min_deg):
                            #self.my_mini_brain.ydeg += 5
                            #self.my_mini_brain.update_head_position()
                            pass
                        elif (self.cy - ty < -10 and self.my_mini_brain.ydeg <=
                              self.my_mini_brain.y_max_deg):
                            #self.my_mini_brain.ydeg -= 5
                            #self.my_mini_brain.update_head_position()
                            pass

            # Calculate and show the FPS
            self.fps = self.fps + 1
            sfps = self.fps / (time.time() - self.t_start)
            cv2.putText(image, "FPS : " + str(int(sfps)), (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

            # Show the frame
            cv2.imshow("Frame", image)
            cv2.waitKey(1)

            # Clear the stream in preparation for the next frame
            self.rawCapture.truncate(0)
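detectMultiScale is called with default parameters above; a sketch of the commonly tuned arguments (the values shown are illustrative, not from the example):

# Hedged sketch: the same cascade call with explicit tuning knobs.
faces = self.face_cascade.detectMultiScale(
    gray,
    scaleFactor=1.1,      # image pyramid step between detection scales
    minNeighbors=5,       # higher values give fewer, more reliable hits
    minSize=(30, 30))     # ignore candidate faces smaller than this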
Example #55
0
from picamera import PiCamera
from picamera.array import PiRGBArray
import numpy as np
import time

contour_area = 0
max_area = 0
contour_index = 0
contour = 0
i = 0
###############################################################################################################

cam = PiCamera()
cam.resolution = (320, 240)
cam.framerate = 60
raw_cap = PiRGBArray(cam, (320, 240))
frame_cnt = 0
for frame in cam.capture_continuous(raw_cap,
                                    format="bgr",
                                    use_video_port=True,
                                    splitter_port=2,
                                    resize=(320, 240)):
    #time.sleep(1.0)
    #image = frame.next()
    #extract opencv bgr array of color frame
    color_image = frame.array
    #cv2.imshow("Input Video",color_image)
    #cv2.waitKey(1)
    ################################################################################################################

    img = color_image.copy()
Example #56
0
import xlrd
from openpyxl import load_workbook
from xlutils.copy import copy
from xlwt import Workbook

import cv2
from pyimagesearch.centroidtracker import CentroidTracker
from summary import summary

from picamera.array import PiRGBArray
from picamera import PiCamera

# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (400, 300)
camera.framerate = 15
rawCapture = PiRGBArray(camera, size=(400, 300))

# Initializing XLS file for data

Data_base = xlrd.open_workbook('Data_base.xls')
data = Data_base.sheet_by_index(0)
Feature = copy(Data_base)
sheetfeature = Feature.get_sheet(0)
row = int(data.cell_value(1, 7))

# initialize our centroid tracker and frame dimensions
ct = CentroidTracker()
(H, W) = (None, None)

# Initialize Age & Gender model Parameters along with other variables
Example #57
0
    authCode = raw_input("Enter auth code here:").strip()

    # finish the authorization and grab the Dropbox client
    (accessToken, userID) = flow.finish(authCode)
    client = Dropbox(accessToken)
    print "[SUCCESS] dropbox account linked"

if conf["use_pi"]:
    from picamera.array import PiRGBArray
    from picamera import PiCamera

    # initialize the camera and grab a reference to the raw capture
    camera = PiCamera()
    camera.resolution = tuple(conf["resolution"])
    camera.framerate = conf["fps"]
    rawCapture = PiRGBArray(camera, size=tuple(conf["resolution"]))

    # allow the camera module to warm up, then initialize the average frame, the last uploaded timestamp, and the motion frame counter
    print "[INFO] warming up..."
    time.sleep(conf["camera_warmup_time"])

    # capture frames from the camera
    for f in camera.capture_continuous(rawCapture,
                                       format="bgr",
                                       use_video_port=True):
        # grab the raw NumPy array representing the image and initialize the timestamp and occupied/unoccupied text
        frame = f.array
        deal_frame(frame, client, rawCapture)

        # exit handling
        key = cv2.waitKey(1)
Example #58
0
import cv2
import RPi.GPIO as GPIO

from threading import Thread

from picamera.array import PiRGBArray
from picamera import PiCamera
import time

# initialize the camera and grab a reference to the raw camera capture
resX = 240
resY = 180
camera = PiCamera()
camera.resolution = (resX, resY)
camera.framerate = 10
rawCapture = PiRGBArray(camera, size=(resX, resY))

print(time.strftime("%H_%M_%S"))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(
    time.strftime("%H_%M_%S") + '.avi', fourcc, 20.0, (resX, resY))

# initialize the HOG descriptor/person detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
detectFlag = 0
detectCounter = [0]
# allow the camera to warmup
time.sleep(0.1)

GPIO.setmode(GPIO.BOARD)
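The HOG descriptor is initialized above but the detection call is cut off; the standard invocation is sketched below (winStride/padding/scale values are illustrative), assuming img is a frame from the usual capture loop:

# Hedged sketch: run the HOG person detector and record the annotated frame.
(rects, weights) = hog.detectMultiScale(img, winStride=(4, 4),
                                        padding=(8, 8), scale=1.05)
for (x, y, w, h) in rects:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
out.write(img)    # out is the VideoWriter created above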
Example #59
-1
def main():
	
	print("Starting Program")
	
	# Init camera
	camera = PiCamera()
	camera.resolution = (640,480)
	camera.framerate = 24
	rawCapture = PiRGBArray(camera, size=(640,480))
	
	# allow camera to warmup
	time.sleep(0.1)
	
	# capture frames from camera
	for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
		image = frame.array
		
		cv2.imshow("Frame", image)
		key = cv2.waitKey(1) & 0xFF
		rawCapture.truncate(0)
		
		if key == ord("q"):
				break
	
	print("Completed!")
	
	return 0
	def showVideo(self,click_allow_arg):
		lower_IR = np.array([0,0,50]) # lower bound of CV filter
		upper_IR = np.array([255,255,255]) # upper bound of CV filter
		rawCapture = PiRGBArray(self.cam_handle, size=self.cam_res) # capture to array
		m = PyMouse() # make mouse object
		prev_maxLoc = (0,0) # init previous maxLoc
		try:
			for frame in self.cam_handle.capture_continuous(rawCapture, format = 'bgr', use_video_port=True):
				image = frame.array # get the array values for that frame
				mask = cv2.inRange(image, lower_IR, upper_IR) # mask according to lower/upper filter values
				(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(mask) # find min/max values of image
				true_pnt = self.get_true_point(maxLoc)
				print true_pnt #DEBUG INFO PRINTOUT
				adjusted_maxx = int(((true_pnt[0]) * self.output_res[0])/(self.screen_res[0])) # take the distance from the edge and perform dimensional analysis
				adjusted_maxy = int(((true_pnt[1]) * self.output_res[1])/(self.screen_res[1])) # take the distance from the edge and perform dimensional analysis
				if click_allow_arg: # if user wants to have clicks when IR found
					if maxLoc != (0,0): # if not on the default location			
						m.press(adjusted_maxx,adjusted_maxy, 1) # press the "mouse" button at location found
					elif prev_maxLoc != (0,0) and maxLoc == (0,0): # if the current value is the default and the last value wasn't the default, release
						m.release(prev_maxLoc[0],prev_maxLoc[1], 1) # release the "mouse" button
				else: # only move the mouse, no clicking
					if maxLoc != (0,0): # if not on the default location
						m.move(adjusted_maxx,adjusted_maxy) # move the mouse
				prev_maxLoc = (adjusted_maxx,adjusted_maxy) # set previous to be current max loc
				cv2.circle(image, maxLoc, 21, (255,0,0), 2) # place circle on brightest spot
				cv2.imshow("TestWindow", cv2.resize(image,(160,120))) # show the image in a tiny window
				cv2.waitKey(1) & 0xFF # needed for preview
				rawCapture.seek(0) # return to the 0th byte
				rawCapture.truncate() # clear array for next frame
				print "Mouse at: ", m.position() #DEBUG INFO PRINTOUT
		except KeyboardInterrupt: # used to quit the function
			rawCapture.seek(0) # return to the 0th byte
			rawCapture.truncate() # clear array for next frame