def main():
    checkImagePath()
    with picamera.PiCamera() as camera:
        # set up initial camera settings
        camera.resolution = fullFrameSize
        camera.exposure_mode = 'auto'
        camera.video_stabilization = True
        stream1 = picamera.PiCameraCircularIO(camera, seconds=5)
        try:
            camera.start_recording(stream1, format='h264')
            motion = Motion(camera)
            camera.iso = 300
            time.sleep(2)
            camera.exposure_mode = "off"
            g = camera.awb_gains
            camera.awb_mode = 'off'
            camera.awb_gains = g
            X_Train = motion.gatherTrainingData(iterations)
            #mu = calculateMu(X_Train)
            #sigma2 = motion.calculateSigma2(mu, X_Train)
            (mu, sigma2) = updateDistribution(X_Train)
            printMessage("Checking For Movement")
            motionStopCount = 0
            motionInProgress = False
            X_Train = np.matrix(np.zeros(X_Train.shape))
            i = 0
            while True:
                time.sleep(sleepTime)
                (prob, xi) = motion.checkForMotion(mu, sigma2)
                probabilityString = "Probability: " + str(1 - prob)
                # if not anomalous, add to new batch of training examples
                if (prob > epsilon):
                    X_Train[i] = xi
                    i += 1
                    if ((i % X_Train.shape[0]) == 0):
                        i = 0
                        (mu, sigma2) = updateDistribution(X_Train, mu, sigma2)
                # motion detected on this frame
                if (prob < epsilon):
                    # motion just started
                    if motionInProgress == False:
                        motionInProgress = True
                        printMessage(motionUni + " Motion Detected! " + showTime(),
                                     bottom=False)
                        stream2 = io.BytesIO()
                        camera.split_recording(stream2)
                        filename = getVideoName(motionPath)
                    # every time motion is detected, restart motionStopCount
                    motionStopCount = 0
                # motion is in progress but no longer being detected
                elif motionInProgress == True:
                    motionStopCount += 1
                    # we've gone framesToStopMotion w/out detecting motion
                    if motionStopCount > framesToStopMotion:
                        motionInProgress = False
                        printMessage(stopUni + " Motion Stopped " + showTime() + ' ',
                                     top=False)
                        camera.split_recording('/dev/null')
                        printMessage('Saving to ' + filename)
                        writeVideo(stream1, stream2, filename)
                        camera.split_recording(stream1)
                printSameLine(probabilityString)
        finally:
            camera.stop_recording()
            print('')
            print('ending')
    return
########## Main ##########
camera = picamera.PiCamera()

# function1: capturing still image
#camera.capture("/home/pi/Desktop/stillimage.jpg")

# function2: feeding data to motion detection analysis, in port 2
motionAnalysisPort = 2
camera.start_recording(
    '/dev/null',
    splitter_port=motionAnalysisPort,
    resize=(640, 480),
    format='h264',
    motion_output=DetectMotion(camera, size=(640, 480)))
camera.wait_recording(3, splitter_port=motionAnalysisPort)

# function3: recording HD video to upload and notify
stream = picamera.PiCameraCircularIO(camera, seconds=10)
camera.start_recording(stream, format="h264", splitter_port=1)
camera.wait_recording(10)
camera.stop_recording()
#write_video(stream)
# Timer args must be a one-element tuple, and the timer must be started
Timer(2, write_video, (stream,)).start()
sleep(10)
del camera
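# write_video() is referenced above but not shown. A minimal sketch of what
# it might look like, following the ring-buffer dump pattern from the
# picamera docs; the output filename and the assumption that the argument is
# a PiCameraCircularIO are mine, not from this project:
import io
import picamera

def write_video(stream):
    # Dump the circular buffer to disk, starting at the first SPS header so
    # the resulting H.264 file begins with a decodable keyframe.
    with stream.lock:
        for frame in stream.frames:
            if frame.frame_type == picamera.PiVideoFrameType.sps_header:
                stream.seek(frame.position)
                break
        with io.open('before.h264', 'wb') as output:
            while True:
                buf = stream.read1()
                if not buf:
                    break
                output.write(buf)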
def __init__(self, resolution=(1280, 960), framerate=10, pre_event_time=60,
             post_event_time=60, storage='/home/pi/Videos/', draw_rect=True,
             bg_decay=0.1, bg_framerate=1, bg_images=1, threshold=50,
             min_area=10, max_area=10000):
    """Wrapper for the picamera that detects bumblebees.

    Parameters
    ----------
    resolution : array_like
        Array-like object of length two. Sets the resolution of the camera.
    framerate : int
        The framerate of the video.
    pre_event_time : int
        No. of seconds of video to save before an event happened.
    post_event_time : int
        No. of seconds of video to save after an event happened.
    storage : str
        The directory to save images and videos.
    draw_rect : bool
        Whether or not to draw rectangles around all detected bumblebees.
    bg_decay : float
        The decay constant for the background computations.
    bg_framerate : int or float
        The framerate that is used to form the initial background.
    bg_images : int
        Total no. of images used to form the initial background.
    """
    super(BeeCamera, self).__init__(resolution=resolution, framerate=framerate)
    self.zoom = (0.15, 0.3, .8, .65)

    # Set instance variables.
    self.video_buffer = picamera.PiCameraCircularIO(
        camera=self, seconds=pre_event_time + post_event_time)
    self.background = self.create_background(bg_framerate, bg_images)
    self.bg_framerate = bg_framerate
    self.bg_images = bg_images
    self.storage = storage
    if not os.path.exists(storage):
        os.mkdir(storage)

    self.pre_event_time = pre_event_time
    self.post_event_time = post_event_time
    self.image = self.capture_image()
    self.draw_rect = draw_rect
    self.thresh = threshold

    # Start video recording.
    self.start_recording(self.video_buffer, format='h264')
    self.wait_recording(2)
    self.decay = bg_decay

    # Create object detector
    self.bumblebee_detector = BumblebeeDetector(filter_area=True,
                                                min_area=min_area,
                                                max_area=max_area,
                                                filter_circ=False,
                                                filter_convex=False)
        if vector_count > 11:
            self.detected = time.time()
            self.motion = True
        else:
            self.motion = False
        return self.motion


if __name__ == "__main__":
    if not os.path.exists('./temp'):
        os.makedirs('./temp')
    with picamera.PiCamera(framerate=24) as camera:
        camera.resolution = (600, 480)
        ring_buffer = picamera.PiCameraCircularIO(camera, seconds=10,
                                                  bitrate=1000000)
        detector = MotionDetector(camera)
        camera.start_recording(ring_buffer, motion_output=detector,
                               format='h264', bitrate=1000000)
        print("Started Camera")
        try:
            while True:
                print("Main Loop")
                camera.wait_recording(1)
                if not detector.motion:
                    print("no motion")
                    continue
#========= Global variables ========
CAMERA_OUT_PATH = '/home/pi/Desktop/'
WORKING_DIR = "/home/pi/Desktop/PiCameraNotifier/"
LOG_FILE_PATH = WORKING_DIR + 'run.log'
VIDEO_RECORDING_PORT = 1
MOTION_ANALYSIS_PORT = 2
logging.basicConfig(format='%(asctime)s %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p',
                    filename=LOG_FILE_PATH,
                    level=logging.INFO)
logging.info("=========== app launched ========")
camera = picamera.PiCamera()
camera.annotate_background = True
# buffer size estimated based on H.264 encoded data per frame
stream = picamera.PiCameraCircularIO(camera, seconds=10, bitrate=1300000)
scheduler = sched.scheduler(time.time, time.sleep)


def didReceiveCommand(command):
    global notificationHandler
    if command == "@check":
        logging.info("get system info")
        process = subprocess.Popen([WORKING_DIR + 'systemInfo.sh'],
                                   stdout=subprocess.PIPE)
        out, err = process.communicate()
        pushData = {'type': 'TEXT_MESSAGE', 'text': out}
        notificationHandler.pushToMobile(pushData)
    if command == "@snap":
        fileName = time.strftime("%Y%m%d_%I:%M:%S%p")  # '20170424_12:53:15AM'
def main():
    checkImagePath()
    with picamera.PiCamera() as camera:
        # set up initial camera settings
        camera.resolution = fullFrameSize
        camera.exposure_mode = 'auto'
        camera.video_stabilization = True
        stream1 = picamera.PiCameraCircularIO(camera, seconds=5)
        #camera.start_recording(stream1, format='h264')
        try:
            camera.start_recording(stream1, format='h264')
            motion = Motion(camera)
            camera.iso = 300
            time.sleep(2)
            camera.exposure_mode = "off"
            g = camera.awb_gains
            camera.awb_mode = 'off'
            camera.awb_gains = g
            motion.gatherTrainingData(iterations)
            i = 0
            mu = motion.calculateMu()
            sigma2 = motion.calculateSigma2(mu)
            # movementBuffer increases the average changes of each section
            mu += movementBuffer
            printMessage("Checking For Movement")
            motionStopCount = 0
            motionInProgress = False
            while True:
                time.sleep(sleepTime)
                prob = motion.checkForMotion(mu, sigma2)
                probabilityString = ("Probability of Movement: " +
                                     str(1 - prob) + " ")
                # motion detected
                if (prob < epsilon):
                    # motion just started
                    if motionInProgress == False:
                        motionInProgress = True
                        printMessage("Motion Detected! " + showTime() + motionUni,
                                     bottom=False)
                        stream2 = io.BytesIO()
                        camera.split_recording(stream2)
                        filename = getVideoName(motionPath)
                    # every time motion is detected, restart motionStopCount
                    motionStopCount = 0
                # motion is in progress but no longer being detected
                elif motionInProgress == True:
                    motionStopCount += 1
                    # we've gone framesToStopMotion w/out detecting motion
                    if motionStopCount > framesToStopMotion:
                        motionInProgress = False
                        printMessage("Motion Stopped " + showTime() + stopUni,
                                     top=False)
                        camera.split_recording('/dev/null')
                        printMessage('Saving to ' + filename)
                        writeVideo(stream1, stream2, filename)
                        camera.split_recording(stream1)
                printSameLine(probabilityString)
        finally:
            camera.stop_recording()
            print('')
            print('ending')
    return
def __init__(self):
    self.frame = None
    # seconds = the duration of the memory held in the stream memory
    self.buffer = picamera.PiCameraCircularIO(camera, seconds=35)
    self.condition = Condition()
def main():
    with picamera.PiCamera() as camera:
        camera.resolution = REC_RESOLUTION
        camera.framerate = REC_FRAMERATE
        # Let the camera settle for a bit. This avoids detecting motion when
        # it's just the white balance and exposure settling.
        time.sleep(2)

        # Set up all the stuff we need: an overlay to indicate when we're
        # recording, the ring-buffer we want to record to when we haven't
        # detected motion, the file-object we want to record video to when
        # we *have* detected motion, and finally the motion detector itself
        camera.start_preview()
        recording_overlay = create_recording_overlay(camera)
        ring_buffer = picamera.PiCameraCircularIO(camera, seconds=REC_SECONDS,
                                                  bitrate=REC_BITRATE)
        file_number = 1
        file_output = io.open(FILE_PATTERN % file_number, 'wb',
                              buffering=FILE_BUFFER)
        motion_detector = MotionDetector(camera)

        # Start recording data to the ring buffer and the motion detector
        # at the specified bitrates
        camera.start_recording(ring_buffer, format='h264',
                               bitrate=REC_BITRATE,
                               intra_period=REC_FRAMERATE,
                               motion_output=motion_detector)
        try:
            while True:
                # Motion not detected state:
                # In this state we just wait around for the motion detector to
                # notice something. We check whether the last motion detected
                # timestamp occurred in the last second
                print('Waiting for motion')
                while motion_detector.detected < time.time() - 1:
                    camera.wait_recording(1)

                # Transition to motion detected state:
                # Show the recording indicator, copy the content of the ring
                # buffer to the output file, then split the recording to the
                # output file. Note: because this is a file *we* opened
                # (instead of picamera opening it for us when we specify a
                # filename), we get to control when it closes, and picamera
                # doesn't move the file-pointer except when writing to it
                print('Motion detected (%d vectors)' % motion_detector.vector_count)
                print('Recording to %s' % file_output.name)
                recording_overlay.layer = 3
                with ring_buffer.lock:
                    for frame in ring_buffer.frames:
                        if frame.frame_type == picamera.PiVideoFrameType.sps_header:
                            ring_buffer.seek(frame.position)
                            break
                    while True:
                        buf = ring_buffer.read1()
                        if not buf:
                            break
                        file_output.write(buf)
                camera.split_recording(file_output)

                # Clear the ring buffer (the easiest way to do this is simply
                # to reconstruct it). Note to self: add a clear() method to
                # the next version...
                ring_buffer = picamera.PiCameraCircularIO(
                    camera, seconds=REC_SECONDS, bitrate=REC_BITRATE)

                # Motion detected state:
                # Wait for REC_SECONDS without motion
                while motion_detector.detected > time.time() - REC_SECONDS:
                    camera.wait_recording(1)

                # Transition back to motion not detected state:
                # Split the recording back to the ring buffer, hide the
                # recording indicator, and open the next output file
                recording_overlay.layer = 1
                camera.split_recording(ring_buffer)
                file_number += 1
                file_output.close()
                file_output = io.open(FILE_PATTERN % file_number, 'wb',
                                      buffering=FILE_BUFFER)
        finally:
            camera.stop_recording()
import picamera

camera = picamera.PiCamera(resolution=(640, 480))
# PiCameraCircularIO requires either a size or a seconds argument
stream = picamera.PiCameraCircularIO(camera, seconds=20)
camera.start_recording("recording.h264", format='h264')
camera.start_preview()
try:
    while True:
        camera.wait_recording()
finally:
    camera.stop_recording()
    camera.stop_preview()
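# The snippet above constructs a circular buffer but still records straight
# to disk. For reference, a minimal sketch of the usual buffered-recording
# pattern (assumes picamera 1.13 for copy_to()/clear(); some_event() is a
# hypothetical trigger, not part of the original example):
import picamera

def some_event():
    return False  # hypothetical trigger; replace with real detection logic

with picamera.PiCamera(resolution=(640, 480), framerate=30) as camera:
    # Keep roughly the last 20 seconds of H.264 in RAM.
    stream = picamera.PiCameraCircularIO(camera, seconds=20)
    camera.start_recording(stream, format='h264')
    try:
        while True:
            camera.wait_recording(1)
            if some_event():
                # Persist the most recent 10 s of footage, then reset the buffer.
                stream.copy_to('event.h264', seconds=10)
                stream.clear()
    finally:
        camera.stop_recording()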
def enableCircularCameraRecording(piVideoStream):
    # enable circular stream
    camera = piVideoStream.camera
    stream = picamera.PiCameraCircularIO(camera, seconds=30)
    camera.start_recording(stream, format='h264')
    return stream
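# A hypothetical caller for the helper above (piVideoStream is whatever
# camera wrapper this project uses; copy_to() is real picamera API):
stream = enableCircularCameraRecording(piVideoStream)
piVideoStream.camera.wait_recording(60)     # ... some time later ...
stream.copy_to('last30s.h264', seconds=30)  # dump the buffered footage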
def run(self):
    try:
        log.info("CamProcessorServer Started")
        camera_name = config.config['camera']['name']
        with CamMotionDetector(self._storage_path) as camMotionDetector:
            # Take a shot at boot, then every hour on the hour
            log.info("Start After Boot Picture")
            # LEDProcessor.green(True)
            with picamera.PiCamera() as camera:
                nowtime = datetime.now()
                imageFilename = "%s-%04d%02d%02d-%02d%02d%02d-Boot.jpg" % (
                    camera_name, nowtime.year, nowtime.month, nowtime.day,
                    nowtime.hour, nowtime.minute, nowtime.second)
                imageFilepath = self._periodic_path + imageFilename
                camera.resolution = (2592, 1944)
                # camera.hflip = True
                # camera.vflip = True
                #camera.exif_tags['EXIF.UserComment'] = b'Garden'
                camera.start_preview()
                # Camera warm-up time
                time.sleep(2)
                # LEDProcessor.green(False)
                camera.capture(imageFilepath)
                # LEDProcessor.green(True)
                remotePath = ("%04d" % (nowtime.year),
                              "%02d" % (nowtime.month),
                              "%02d" % (nowtime.day))
                FtpUploader.upload_move(remotePath, imageFilename, imageFilepath)
                # LEDProcessor.green(False)
            # LEDProcessor.red(False)
            # LEDProcessor.blue(False)
            self.lastPictureTime = datetime.utcnow().hour

            while not self._terminate:
                log.info("Start Motion detection")
                with picamera.PiCamera() as camera:
                    #camera.led = False
                    # LEDProcessor.camera(False)
                    camera.resolution = (1280, 720)
                    # camera.hflip = True
                    # camera.vflip = True
                    camera.framerate = 6
                    #camera.video_stabilization = True
                    stream = picamera.PiCameraCircularIO(camera, seconds=5)
                    camera.start_recording(stream, format='h264')
                    try:
                        while not self._terminate:
                            camera.wait_recording(0.2)
                            if camMotionDetector.detect_motion(camera, True):
                                self.process_motion(camera, stream,
                                                    camMotionDetector)
                            # 'datetime.utcnow().hour' takes about 25 usec
                            # 'time.gmtime()[3]' takes about 26 usec
                            utc_now_hour = datetime.utcnow().hour
                            if (utc_now_hour != self.lastPictureTime):
                                self.lastPictureTime = utc_now_hour
                                break
                    except Exception as e:
                        log.exception("Motion detection")
                    finally:
                        camera.stop_recording()

                if (self._terminate):
                    log.info("CamProcessorServer terminated %d" % (self._terminate))
                    break

                # Take a shot every hour on the hour
                log.info("Start Periodic Picture")
                # LEDProcessor.green(True)
                with picamera.PiCamera() as camera:
                    nowtime = datetime.now()
                    log.info('Periodic Picture')
                    imageFilename = "%s-%04d%02d%02d-%02d0000.jpg" % (
                        camera_name, nowtime.year, nowtime.month, nowtime.day,
                        nowtime.hour)
                    imageFilepath = self._periodic_path + imageFilename
                    camera.resolution = (2592, 1944)
                    # camera.hflip = True
                    # camera.vflip = True
                    #camera.exif_tags['EXIF.UserComment'] = b'Garden'
                    camera.start_preview()
                    # Camera warm-up time
                    time.sleep(1)
                    # LEDProcessor.green(False)
                    camera.capture(imageFilepath)
                    # LEDProcessor.green(True)
                    remotePath = ('Periodic',
                                  "%04d" % (nowtime.year),
                                  "%02d" % (nowtime.month),
                                  "%02d" % (nowtime.day))
                    FtpUploader.upload_move(remotePath, imageFilename, imageFilepath)
                    # LEDProcessor.green(False)
    except EnvironmentError as e:
        log.exception("CamProcessorServer Daemon failed: %d (%s)" %
                      (e.errno, e.strerror))
    except Exception as e:
        log.exception("CamProcessorServer Daemon failed (Exception)")
    finally:
        log.info("CamProcessorServer Daemon Exit.")
# initialize the camera and grab a reference to the raw camera capture
CAMERA = PiCamera()
CAMERA.resolution = tuple(CONF["resolution"])
CAMERA.framerate = CONF["fps"]
RAW_CAPTURE = PiRGBArray(CAMERA, size=tuple(CONF["resolution"]))

# allow the camera to warmup, then initialize the average frame, last
# uploaded timestamp, and frame motion counter
print "[INFO] warming up..."
time.sleep(CONF["camera_warmup_time"])
AVG = None
LAST_UPLOADED = datetime.datetime.now()
MOTION_COUNTER = 0

STREAM = picamera.PiCameraCircularIO(CAMERA, seconds=20)
CAMERA.start_recording(STREAM, format='h264', quality=23)

# set up vlc subprocess for streaming
CMDLINE = [
    'cvlc', '-vvv', 'stream:///dev/stdin', '--sout',
    '#standard{access=http,mux=ts,dst=192.168.0.8:8160}', ':demux=h264'
]
STREAMER = subprocess.Popen(CMDLINE, stdin=subprocess.PIPE)

POS = None
STREAM_INTERVAL = 0.1
DETECTION_INTERVAL = 0.1
COUNT = 0
print 'Start streaming'
while True:
    if AllowRetrigger == False:
        motiondetbef = True
        return True
    else:
        motiondetbef = False
        return False


camera = picamera.PiCamera()
camera.resolution = (1640, 1232)
if (vFlip == 'yes'):
    camera.vflip = True
if (hFlip == 'yes'):
    camera.hflip = True
camera.exposure_mode = expMode

stream = picamera.PiCameraCircularIO(camera, seconds=videobufferlength)
camera.start_recording(stream, format=VideoFormat)
try:
    while True:
        camera.wait_recording(1)
        if motion_detected():
            if (lightsOn == 'yes'):
                os.system(lightsOnCmd)
            print "Capturing"
            # Keep recording for 10 seconds and only then write the
            # stream to disk
            camera.wait_recording(VideoLength)
            fname = time.strftime(video_directory + VideoFileName)
            print 'Saving'
            stream.copy_to(fname)
            print "Done saving"
def run(self):
    jEvent = {}
    time.sleep(1800)

    # create the circular buffer
    stream = picamera.PiCameraCircularIO(camera,
                                         seconds=BUFFER_SECS,
                                         bitrate=CAMERA_BITRATE,
                                         splitter_port=MONITOR_SPLITTER)
    # a linear stream to store the "after" footage
    after = io.BytesIO()

    # start recording to the circular buffer
    self.camera.start_recording(stream, format='h264',
                                bitrate=CAMERA_BITRATE,
                                splitter_port=MONITOR_SPLITTER)

    # set up the IR sensor
    GPIO.setup(SENSOR_CHANNEL, GPIO.IN)
    self.camera.wait_recording(BUFFER_SECS, splitter_port=MONITOR_SPLITTER)
    try:
        # enter the loop
        while camera:
            if not GPIO.input(SENSOR_CHANNEL):
                GPIO.wait_for_edge(SENSOR_CHANNEL, GPIO.RISING)

            # set the timestamp
            jEvent['timestamp'] = int(time.time() * 1000)
            jEvent['datetime'] = time.strftime('%Y-%m-%d %H:%M')

            # As soon as we detect motion, split the recording to
            # record the frames "after" motion
            self.camera.split_recording(after, splitter_port=MONITOR_SPLITTER)

            # build the output filename
            outputFile = str(int(time.time()))

            # Write the "before" motion to disk as well
            beforeFileSize, beforeFileName = write_video(
                stream, outputFile + '.h264')

            # print '# Wait until motion is no longer detected'
            GPIO.wait_for_edge(SENSOR_CHANNEL, GPIO.FALLING)

            # calculate the activity duration
            jEvent['duration'] = int(
                (int(time.time() * 1000) - jEvent['timestamp']) / 1000)

            # then split recording back to the in-memory circular buffer
            self.camera.split_recording(stream, splitter_port=MONITOR_SPLITTER)

            # Write the "after" motion to disk
            afterFileSize, afterFileName = write_video(
                after, 'after' + outputFile + '.h264')

            if afterFileSize > 2500000:
                jEvent['filename'] = outputFile + '.mpg'
                # concatenate the two files
                concatSuccess = concat([beforeFileName, afterFileName],
                                       jEvent['filename'])
                if concatSuccess:
                    jEvent['YTid'] = pushToYouTube(jEvent['filename'])
                    if jEvent['YTid']:
                        removeFiles([beforeFileName, afterFileName,
                                     jEvent['filename']])
                else:
                    print 'Problem writing the compiled file, ' + outputFile + '... do it manually later'
            else:
                removeFiles([beforeFileName, afterFileName])

            jEvent['source'] = APP_NAME
            jEvent['activity'] = 'Movement'
            jEvent['mood'] = 'Active Baby!'
            jEvent['location'] = 'Cot'

            # the sensor takes about 10 seconds to turn off so
            # if the duration is less than 10 it is a hangover
            # from the last event or too short to bother
            if jEvent['duration'] > 10:
                logToNodeRed(jEvent)
            jEvent = {}
    finally:
        self.camera.stop_recording()
        GPIO.cleanup()
# After testing this on the pi, 30 seconds takes up approx 40 to 50% of the
# memory on a Pi Zero
streamMaxSeconds = 30

# Set resolutions up for capture and detection
captureWidth = 1024
captureHeight = 768

# Setup Camera
camera = PiCamera()
camera.resolution = (captureWidth, captureHeight)
camera.rotation = 270
camera.framerate = 30

# Setup Circular stream to monitor for motion
motionMonitorStream = picamera.PiCameraCircularIO(camera,
                                                  seconds=streamMaxSeconds)
print("The size is " + str(motionMonitorStream.size))
camera.start_recording(motionMonitorStream, format='h264',
                       motion_output=MotionDetector(camera))
print("Started monitoring for motion")

while True:
    time.sleep(60 * 10)
    print("The size of stream before clear " + str(motionMonitorStream.size))
    motionMonitorStream.clear()
    print("The size of stream after clear " + str(motionMonitorStream.size))
    print("Cleared out stream")
def roadtrip_handling(ser, q, event, q_crash):
    # this happens when mode is ROAD_R
    while event.isSet():  # check for that
        if (q.queue[0] == "ROAD_R"):
            print('roadtrip triggered')
            # if in roadtrip mode: set up
            date = (str(datetime.now().month) + str(datetime.now().day) +
                    str(datetime.now().year) + "_" +
                    str(datetime.now().hour) + "_" +
                    str(datetime.now().minute) + "_" +
                    str(datetime.now().second))
            filename = '//home//pi//Design//piCam//Video//' + 'MM' + date
            txtfile = filename + '.txt'
            f = open(txtfile, 'w+')  # open txt file
            while True:
                try:
                    camera = picamera.PiCamera()
                    camera.resolution = (640, 480)
                    camera.framerate = 30
                    break
                except:
                    continue
            camera.start_recording(filename + '.h264', format='h264')
            loc_temp = None  # placeholder loc variable
            while (q.queue[0] == "ROAD_R"):  # while true
                camera.wait_recording(1)
                # set up last known location
                try:
                    loc = parse_data(ser)
                    if (loc != None):
                        loc_temp = loc  # set last known location
                        f.write(loc)
                        f.write('\n')
                except:
                    pass
                # do crash handling
                if (len(q_crash.queue) != 0):
                    # this is so we dont get weird timing errors
                    crash_event = q_crash.queue[0]  # get last crash event w/o removal
                    if crash_event.isSet():
                        msg = "Last known location: " + str(loc_temp)
                        print(msg)
                        # send text
                        sendText(9319933268, "Crash detected. " + msg)
                        q_crash.queue.clear()  # clear queue
                time.sleep(0.05)
            # when state changes
            # need to account for keyboard interrupt :0
            camera.stop_recording()
            stream = picamera.PiCameraCircularIO(camera, seconds=30)
            camera.start_recording(stream, format='h264')
            camera.close()
            call(["MP4Box", "-fps", str(30), "-add", filename + '.h264',
                  filename + '.mp4'])
            call(["rm", filename + '.h264'])
            f.close()
            print('closed file')
        time.sleep(0.025)
#========= Global variables ========
CAMERA_OUT_PATH = '/home/pi/Desktop/'
WORKING_DIR = "/home/pi/Desktop/PiCameraNotifier/"
LOG_FILE_PATH = WORKING_DIR + 'run.log'
VIDEO_RECORDING_PORT = 1
MOTION_ANALYSIS_PORT = 2
logging.basicConfig(format='%(asctime)s %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p',
                    filename=LOG_FILE_PATH,
                    level=logging.INFO)
logging.info("=========== app launched ========")
camera = picamera.PiCamera()
camera.annotate_background = True
stream = picamera.PiCameraCircularIO(camera, seconds=10, bitrate=1300000)
scheduler = sched.scheduler(time.time, time.sleep)


def didReceiveCommand(command):
    global notificationHandler
    if command == "@check":
        logging.info("get system info")
        process = subprocess.Popen([WORKING_DIR + 'systemInfo.sh'],
                                   stdout=subprocess.PIPE)
        out, err = process.communicate()
        pushData = {'type': 'TEXT_MESSAGE', 'text': out}
        notificationHandler.pushToMobile(pushData)
    if command == "@snap":
        fileName = time.strftime("%Y%m%d_%I:%M:%S%p")  # '20170424_12:53:15AM'
        captureImage(fileName)
# Wipe the circular stream once we're done
stream.seek(0)
stream.truncate()

os.system('clear')
print("Motion Detection")
print("----------------")
print(" ")

with picamera.PiCamera() as camera:
    camera.resolution = (video_width, video_height)
    camera.framerate = 25
    camera.rotation = video_rotation
    camera.video_stabilization = True
    camera.annotate_background = True

    # setup a circular buffer
    stream = picamera.PiCameraCircularIO(camera, seconds=video_preseconds)
    # hi resolution video recording into circular buffer from splitter port 1
    camera.start_recording(stream, format='h264', splitter_port=1)
    #camera.start_recording('test.h264', splitter_port=1)
    # low resolution motion vector analysis from splitter port 2
    camera.start_recording('/dev/null', splitter_port=2,
                           resize=(motion_width, motion_height),
                           format='h264',
                           motion_output=MyMotionDetector(
                               camera, size=(motion_width, motion_height)))
    # wait some seconds for stable video data
    camera.wait_recording(2, splitter_port=1)
    motion_detected = False
    print("Motion Capture ready!")
    try:
        while True:
            # motion event must trigger this action here
            camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            if motion_detected:
def monitor(self, startdelay=0):
    if startdelay > 0:
        time.sleep(startdelay)
    self.status.setIndex(1, wv.myagents.app)  # waiting
    self.lastactive.setValue(time.time(), wv.myagents.app)
    picam = self.app.startCamera()
    splitter_port = self.app._getSplitterPort(self)
    resize = [self.rec_width.getValue(), self.rec_height.getValue()]
    camsize = self.app.camres()
    if resize == camsize:
        dsize = resize
        resize = None
    else:
        if camsize[0] < resize[0]:
            resize[0] = camsize[0]
        if camsize[1] < resize[1]:
            resize[1] = camsize[1]
        dsize = resize
    self.log(wv.loglvls.INFO,
             'triggered video using port %d and resize to %d / %d' %
             (splitter_port, dsize[0], dsize[1]))
    pretime = self.recordback.getValue()
    if pretime > 0:
        circstream = picamera.PiCameraCircularIO(
            picam, seconds=pretime + 1, splitter_port=splitter_port)
        picam.start_recording(circstream, format='h264',
                              sps_timing=self.withsps, resize=resize,
                              splitter_port=splitter_port)
        recorder = 'circ'
    else:
        recorder = 'none'
        circstream = None
    self.settrigger(self.recordnow, lambda wable: wable.getIndex() == 1)
    if self.cpudetect.getIndex() == 1:
        self.settrigger(self.app.activities['cpumove'].status,
                        lambda wable: wable.getIndex() == 2)
    if self.gpiodetect.getIndex() == 1:
        self.settrigger(self.app.activities['triggergpio'].status,
                        lambda wable: wable.getIndex() == 2)
    time.sleep(.1)
    aftertimeout = None
    while self.running:
        try:
            x = self.actionq.get(True, 2)
        except queue.Empty:
            pass
        for trigid, bits in self.activetriggers.items():
            if bits[1](bits[0]):
                trig = True  # set trig if any active trigger set
                break
        else:
            trig = False
        if trig:
            aftertimeout = None  # reset aftertimeout so it restarts when trigger stops
            if recorder == 'circ':
                # switch to file
                fpath = self.makefilename()
                recordingstart = time.time()
                recordingsequ = 1
                vformat = '.' + self.format.getValue()
                postpath = fpath.with_name(
                    fpath.name + '%03d' % recordingsequ).with_suffix(vformat)
                self.log(wv.loglvls.DEBUG,
                         '>>>>>>>>>>>>>>>>> trig split recording to %s' % postpath)
                picam.split_recording(str(postpath), splitter_port=splitter_port)
                prepath = fpath.with_name(fpath.name + '%03d' % 0).with_suffix(vformat)
                circstream.copy_to(str(prepath),
                                   seconds=self['recordback'].getValue())
                self.processfiles((True, prepath))
                circstream.clear()
                recorder = 'afile'
                self.recordcount.increment(agent=wv.myagents.app)
                self.lasttrigger.setValue(time.time(), wv.myagents.app)
                self.status.setIndex(2, wv.myagents.app)  # recording
            elif recorder == 'none':
                # start recording to file
                fpath = self.makefilename()
                prepath = None
                postpath = fpath.with_name(
                    fpath.name + '%03d' % 0).with_suffix('.' + self.format.getValue())
                self.log(wv.loglvls.DEBUG,
                         '>>>>>>>>>>>>>>>>>trig start recording to file %s' % postpath)
                picam.start_recording(str(postpath), resize=resize,
                                      sps_timing=self.withsps,
                                      splitter_port=splitter_port)
                recordingstart = time.time()
                recordingsequ = 0
                recorder = 'afile'
                self.recordcount.increment(agent=wv.myagents.app)
                self.lasttrigger.setValue(time.time(), wv.myagents.app)
                self.status.setIndex(2, wv.myagents.app)  # recording
            else:
                # already recording to file - carry on
                picam.wait_recording(splitter_port=splitter_port)
                # carry on recording - check for split recording file
                if self.splitrecord.getValue() > 0.01:
                    splitsecs = round(self.splitrecord.getValue() * 60)
                    if time.time() > recordingstart + splitsecs:
                        vformat = '.' + self.format.getValue()
                        postpath = fpath.with_name(
                            fpath.name + '%03d' % (recordingsequ + 1)).with_suffix(vformat)
                        picam.split_recording(str(postpath),
                                              splitter_port=splitter_port)
                        if vformat == '.h264':
                            self.processfiles((True, fpath.with_name(
                                fpath.name + '%03d' % recordingsequ).with_suffix(vformat)))
                        recordingsequ += 1
                        recordingstart = time.time()
                        self.log(wv.loglvls.DEBUG,
                                 '>>>>>>>>>>>>>>>>>trig split recording and continue')
                    else:
                        self.log(wv.loglvls.DEBUG,
                                 '>>>>>>>>>>>>>>>>>trig check and continue')
        else:
            # no triggers present (now) - what were we doing?
            if recorder == 'circ':
                if self.recordback.getValue() == 0:
                    # no re-trigger time now so close that down
                    self.log(wv.loglvls.DEBUG,
                             '>>>>>>>>>>>>>>>>>not trig stop circ recorder')
                    picam.stop_recording(splitter_port=splitter_port)
                    circstream = None
                    recorder = None
                else:
                    self.log(wv.loglvls.DEBUG,
                             '>>>>>>>>>>>>>>>>>not trig circ check and continue')
                    # carry on recording to circ buff
                    picam.wait_recording(splitter_port=splitter_port)
            elif recorder == 'none':
                if self.recordback.getValue() > 0:
                    # turn on circ buffer record
                    self.log(wv.loglvls.DEBUG,
                             '>>>>>>>>>>>>>>>>>not trig start circ recording')
                    circstream = picamera.PiCameraCircularIO(
                        picam, seconds=pretime + 1, splitter_port=splitter_port)
                    picam.start_recording(circstream, resize=resize,
                                          format='.' + self.format.getValue(),
                                          sps_timing=self.withsps,
                                          splitter_port=splitter_port)
                    recorder = 'circ'
                else:
                    self.log(wv.loglvls.DEBUG,
                             '>>>>>>>>>>>>>>>>>not trig carry on not recording')
                    pass  # nothing to do here
            else:
                # we're recording to file
                if aftertimeout is None:
                    self.log(wv.loglvls.DEBUG,
                             '>>>>>>>>>>>>>>>>>not trig start post record timeout')
                    aftertimeout = time.time() + self.recordfwd.getValue()
                elif time.time() < aftertimeout:
                    # waiting for post trigger timeout
                    self.log(wv.loglvls.DEBUG,
                             '>>>>>>>>>>>>>>>>>not trig and waiting for timeout - carry on')
                else:
                    # we're done now - go back to waiting state
                    pretime = self.recordback.getValue()
                    if pretime > 0:
                        if circstream is None:
                            self.log(wv.loglvls.DEBUG,
                                     '>>>>>>>>>>>>>>>>>not trig split recording to circ buffer - making circ buffer')
                            circstream = picamera.PiCameraCircularIO(
                                picam, seconds=pretime + 1,
                                splitter_port=splitter_port)
                        else:
                            self.log(wv.loglvls.DEBUG,
                                     '>>>>>>>>>>>>>>>>>not trig split recording to circ buffer - re-use circ buffer')
                        picam.split_recording(circstream,
                                              splitter_port=splitter_port)
                        recorder = 'circ'
                    else:
                        picam.stop_recording(splitter_port=splitter_port)
                        circstream = None
                        self.log(wv.loglvls.DEBUG,
                                 '>>>>>>>>>>>>>>>>>not trig stop recording')
                        recorder = 'none'
                    self.status.setIndex(1, wv.myagents.app)
                    if self.format.getValue() == 'h264':
                        self.processfiles((False, fpath.with_name(
                            fpath.name + '%03d' % recordingsequ).with_suffix('.h264')))
    if recorder == 'circ':
        picam.stop_recording(splitter_port=splitter_port)
        circstream = None
        recorder = 'none'
    elif recorder == 'afile':
        # we're recording to file
        picam.stop_recording(splitter_port=splitter_port)
        circstream = None
        recorder = 'none'
        self.log(wv.loglvls.DEBUG, '>>>>>>>>>>>>>>>>>final stop recording')
        if self.format.getValue() == 'h264':
            self.processfiles((False, fpath.with_name(
                fpath.name + '%03d' % recordingsequ).with_suffix('.h264')))
    if not self.procthread is None:
        self.procqueue.put('stop')
    self.app._releaseSplitterPort(self, splitter_port)
    self.status.setIndex(0, wv.myagents.app)
    self.lastactive.setValue(time.time(), wv.myagents.app)
    self.cleartrigger('*')
    self.monthread = None
import picamera
import time
import io

cam = picamera.PiCamera(framerate=10)
cam.start_preview(alpha=0)  # invisible preview - better for autofocus?
time.sleep(1)
preview_stream = picamera.PiCameraCircularIO(cam, seconds=1)
cam.start_recording(preview_stream, format='mjpeg', resize=(1024, 600))
cam.wait_recording(3)
for f in preview_stream.frames:
    print(f)
last_frame = list(preview_stream.frames)[-1]  # last frame
data = io.BytesIO()
preview_stream.copy_to(data, first_frame=last_frame)
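# Each MJPEG frame is a standalone JPEG, so the bytes copied above can be
# decoded directly. A small follow-on sketch (assumes Pillow is installed;
# the output filename is an assumption):
from PIL import Image

data.seek(0)
img = Image.open(data)  # the copied range starts at the last JPEG frame
img.save('last_frame.jpg')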
def cam_vars():
    camera.resolution = (1920, 1080)
    camera.framerate = QUES[1]['framerate']
    camera.shutter_speed = QUES[1]['framedur']
    camera.ISO = QUES[1]['ISO']
    camera.hflip = True
    camera.vflip = True
    camera.exposure_mode = 'off'


batch_ID = ('{0:02d}'.format(batch_num))
print "Waiting for TTL on port 17"
FN[0]['recfile'] = (FN[0]['recdir'] + "/" + QUES[0]['sessionID'] + "_" +
                    QUES[2]['host_name'] + "_" + str(batch_ID) + ".h264")

with picamera.PiCamera() as camera:
    stream = picamera.PiCameraCircularIO(camera,
                                         seconds=(QUES[1]['trialdur']) / 1000)
    cam_vars()
    camera.start_preview()
    camera.preview.window = (0, 0, 960)  # note: preview.window expects (x, y, w, h)
    camera.start_recording(stream, format='h264')  # required before wait_recording
    camera.wait_recording(120)
    camera.stop_recording()
    camera.stop_preview()
    for frame in stream.frames:
        if frame.header:
            stream.seek(frame.position)
            break
    with io.open(FN[0]['recfile'], 'wb') as output:
        while True:
            data = stream.read1()
            if not data:
                break
            output.write(data)
    kernel = np.ones((3, 3), np.uint8)
    # erode for 2 iterations
    erosion = cv2.erode(modified_img, kernel, iterations=2)
    # dilate once
    dilation = cv2.dilate(erosion, kernel, iterations=1)
    return dilation


if __name__ == "__main__":
    with picamera.PiCamera() as camera:
        #with array.PiRGBArray(camera) as output:
        with picamera.PiCameraCircularIO(camera, size=1) as stream:
            ip = ImageProc()
            # connect to server socket (laptop)
            client_socket = socket.socket()
            client_socket.connect((IP, PORT))
            connection = client_socket.makefile('wb')
            img_counter = 0
            encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
            while True:
                sleep(.1)
                # set camera configuration
def main():
    args = get_args()
    parser = configparser.ConfigParser()
    path = args.config
    if not (isfile(path) and access(path, R_OK)):
        print(path + " is not a valid config file. Creating " + DEFAULT_CONFIG)
        create_config(DEFAULT_CONFIG, parser)
        path = DEFAULT_CONFIG
    parser.read(path)
    options = merge_two_dicts(dict(parser.items('jeepers')), vars(args))
    logging_files = setup_logger(options)
    logging.getLogger('jeepers').info("==================================")
    logging.getLogger('jeepers').info("Started jeepers")
    logging.getLogger('jeepers').info("Set up logging")
    if options['daemon']:
        spawn_daemon(file_handlers=logging_files)
        logging.getLogger('jeepers').info("Split off daemon")

    # TODO: Implement program
    # Create camera
    with picamera.PiCamera() as camera:
        camera.resolution = (1280, 720)
        # Start recording high-res circularIO
        hi_res_stream = picamera.PiCameraCircularIO(camera, seconds=20)
        camera.start_recording(hi_res_stream, format="h264")
        # Start recording low-res stream
        motion_detector = image_processor.MotionDetector((640, 384))
        # Enter main loop
        camera.start_preview()
        while True:
            camera.wait_recording(0.1)
            camera.capture(motion_detector, format="bgr", splitter_port=0,
                           use_video_port=True, resize=(640, 384))
            if motion_detector.motion:
                camera.annotate_text = "Motion Detected"
            else:
                camera.annotate_text = ""
            # If motion:
            #   Split circularIO
            connection = network.get_connection(parser)
            #   Send circularIO to network
            hi_res_stream.copy_to(connection)
            #   replace circularIO stream with network stream
            camera.stop_recording()
            camera.start_recording(connection, format="h264")
            camera.wait_recording(20)
            #   Extend loop delay to four minutes
            # If not motion:
            #   Close the network stream
            #   Replace the network stream with circularIO
            connection.close()

    handlers = logging.getLogger('jeepers').handlers
    for handler in handlers:
        handler.close()
        logging.getLogger('jeepers').removeHandler(handler)
    logging.shutdown()
def main(args=None):
    parser = _make_argument_parser()
    args = parser.parse_args(args)

    # Reconstruct the input resolution to include color channel
    input_res = (args.input_resolution[0], args.input_resolution[1], 3)
    SINGLE_FRAME_SIZE_RGB = input_res[0] * input_res[1] * input_res[2]

    # Initialize the camera, set the resolution and framerate
    try:
        camera = picamera.PiCamera()
    except picamera.exc.PiCameraMMALError:
        print("\nPiCamera failed to open, do you have another task using it "
              "in the background? Is your camera connected correctly?\n")
        sys.exit("Connect your camera and kill other tasks using it to run "
                 "this sample.")

    # Initialize the buffer for picamera to hold the frame
    # https://picamera.readthedocs.io/en/release-1.13/api_streams.html?highlight=PiCameraCircularIO
    stream = picamera.PiCameraCircularIO(camera, size=SINGLE_FRAME_SIZE_RGB)

    # All essential camera settings
    camera.resolution = input_res[0:2]
    camera.framerate = args.camera_frame_rate
    camera.brightness = args.camera_brightness
    camera.shutter_speed = args.camera_shutter_speed
    camera.video_stabilization = args.camera_video_stablization

    # Record to the internal CircularIO
    camera.start_recording(stream, format="rgb")

    # Load model
    model = xnornet.Model.load_built_in()
    if "person" not in model.classes:
        sys.exit(model.name + " doesn't classify 'person', exiting.")

    print("Xnor CLI Surveillance Demo")
    print("Model: {}".format(model.name))
    print("  version {!r}".format(model.version))

    # A counter that will record the consecutive number of frames in which a
    # person is detected
    person_detected = 0
    detected_last_frame = False
    bounding_boxes = []

    while person_detected < args.detection_confidence:
        detected_this_frame = False
        # Get the frame from the CircularIO buffer.
        cam_buffer = stream.getvalue()
        # The camera has not written anything to the CircularIO yet,
        # thus no frame has been captured
        if len(cam_buffer) != SINGLE_FRAME_SIZE_RGB:
            continue
        # Pass the corresponding RGB data
        model_input = xnornet.Input.rgb_image(input_res[0:2], cam_buffer)
        # Evaluate
        results = model.evaluate(model_input)
        for result in results:
            local_person_detected = False
            if type(result) is xnornet.BoundingBox:
                local_person_detected = result.class_label.label == 'person'
            elif type(result) is xnornet.ClassLabel:
                local_person_detected = result.label == 'person'
            else:
                raise ValueError("Unsupported xnornet inference result")
            # If we already detected a person in this frame, we don't want to
            # over count.
            if local_person_detected and not detected_this_frame:
                if person_detected < args.detection_confidence:
                    # If we haven't confirmed, then increase our confidence.
                    if detected_last_frame:
                        person_detected += 1
                    # If we didn't confirm last frame but we detected in
                    # this frame, we want to reset our confidence.
                    else:
                        person_detected = 1
            # We detected a person in this frame
            if local_person_detected:
                detected_this_frame = True
            # Update the history
            detected_last_frame = detected_this_frame
            # If it's a detection model, we want to save the coordinates of
            # the bounding box for drawing purposes if we confirmed that a
            # person was detected.
            if type(result) is xnornet.BoundingBox and \
                    person_detected >= args.detection_confidence:
                bounding_boxes.append(result.rectangle)

        if person_detected >= args.detection_confidence:
            # Classification model
            if len(bounding_boxes) == 0:
                print("Person detected!")
            else:
                # Detection model
                print("{} person detected!".format(len(bounding_boxes)))
            image = _convert_to_pillow_img(cam_buffer, input_res)
            if not (args.no_draw_bounding_box) and len(bounding_boxes) != 0:
                image = _draw_bounding_box(image, bounding_boxes, input_res,
                                           args.bounding_box_color)
            _save_image_to_disk(image, args.output_filename)
        else:
            print("Detecting...")

    print("Cleaning up...")
    camera.stop_recording()
    camera.close()
y1 = 0 + y
frameNo = 0
ballCounter = 0
ballFrameNo = 1
videoReadyFrameNo = 0
video_preseconds = 3

with picamera.PiCamera() as camera:
    camera.resolution = setResolution()
    camera.video_stabilization = True
    camera.annotate_background = True
    camera.rotation = 180
    rawCapture = PiRGBArray(camera, size=camera.resolution)
    # setup a circular buffer
    # stream = picamera.PiCameraCircularIO(camera, seconds=video_preseconds)
    stream = picamera.PiCameraCircularIO(camera, size=3000000)
    # video recording into circular buffer from splitter port 1
    camera.start_recording(stream, format='h264', splitter_port=1)
    #camera.start_recording('test.h264', splitter_port=1)
    # wait 2 seconds for stable video data
    camera.wait_recording(2, splitter_port=1)
    # motion_detected = False
    print(camera.resolution)
    for frame in camera.capture_continuous(rawCapture, format="bgr",
                                           use_video_port=True):
        # grab the raw NumPy array representing the image, then initialize
        # the timestamp and occupied/unoccupied text
        rawCapture.truncate()
        rawCapture.seek(0)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--classfile', '-c', dest='classfile', required=True)
    parser.add_argument('--threshold', '-t', dest='threshold', required=False,
                        type=float, default=0.5)
    parser.add_argument('--out_dir', '-o', dest='out_dir', required=False,
                        type=str, default='./')
    parser.add_argument('--capture_delay', dest='capture_delay',
                        required=False, type=float, default=5.0)
    parser.add_argument('--capture_length', dest='capture_length',
                        required=False, type=int, default=20)
    parser.add_argument('--debug', '-d', dest='debug', required=False,
                        action='store_true')
    # Crop box in fractions of the image width. By default the full camera
    # image is processed.
    parser.add_argument('--cropbox_left', dest='cropbox_left', required=False,
                        type=float, default=0.0)
    parser.add_argument('--cropbox_right', dest='cropbox_right',
                        required=False, type=float, default=1.0)
    parser.add_argument('--cropbox_top', dest='cropbox_top', required=False,
                        type=float, default=0.0)
    parser.add_argument('--cropbox_bottom', dest='cropbox_bottom',
                        required=False, type=float, default=1.0)
    parser.set_defaults(debug=False)
    args = parser.parse_args()

    # There are two models available for the image classification task:
    # 1) MobileNet based (image_classification.MOBILENET), which has 59.9%
    #    top-1 accuracy on ImageNet;
    # 2) SqueezeNet based (image_classification.SQUEEZENET), which has 45.3%
    #    top-1 accuracy on ImageNet.
    model_type = image_classification.MOBILENET

    # Read the class list from a text file
    with open(args.classfile) as f:
        classes = [line.strip() for line in f]

    print('Starting camera detection, using the following classes:')
    for label in classes:
        print(' ', label)
    print('Threshold:', args.threshold)
    print('Debug mode:', args.debug)
    print('Capture Delay:', args.capture_delay)
    debug_out = args.out_dir if args.debug else ''

    with ImageInference(image_classification.model(model_type)) as inference:
        with picamera.PiCamera(resolution=(1920, 1080)) as camera:
            stream = picamera.PiCameraCircularIO(camera,
                                                 seconds=args.capture_length)
            camera.start_recording(stream, format='h264')
            while True:
                detection, image, inference_data = detect_object(
                    inference, camera, classes, args.threshold, debug_out,
                    (args.cropbox_left, args.cropbox_right),
                    (args.cropbox_top, args.cropbox_bottom))
                if detection:
                    detect_time = int(time.time())
                    camera.wait_recording(args.capture_delay)
                    video_file = 'capture_%d.mpeg' % detect_time
                    image_file = 'capture_%d.jpg' % detect_time
                    stream.copy_to(os.path.join(args.out_dir, video_file))
                    stream.flush()
                    debug_output(image, inference_data, args.out_dir,
                                 image_file)
                    print('Wrote video file to',
                          os.path.join(args.out_dir, video_file))
                    camera.wait_recording(
                        max(args.capture_length - args.capture_delay, 0))
def get_video(self):
    # Define Variables
    capture_time = self.video_capture_length
    preroll = self.video_preroll
    capture_video = self.capture_video
    camera_res = (self.camera_res_len, self.camera_res_wid)
    image = numpy.empty((camera_res[1], camera_res[0], 3), dtype=numpy.uint8)
    capture_counter = 0

    # Set up Circular Buffer Settings
    video_stream = picamera.PiCameraCircularIO(camera_device,
                                               seconds=capture_time)
    camera_device.start_preview()
    camera_device.start_recording(video_stream, format='h264')
    my_now = datetime.now()

    while True:
        if capture_counter < 8:
            # Set up a waiting time difference
            my_later = datetime.now()
            difference = my_later - my_now
            seconds_past = difference.seconds
            camera_device.wait_recording(1)
            logging.debug('Analyzing Surroundings')
            if seconds_past > preroll + 1:
                # Take Picture for the Model
                camera_device.capture(image, 'bgr', resize=camera_res,
                                      use_video_port=True)
                camera_device.wait_recording(1)
                # Take Picture for Azure
                image_name = "image-{0}.jpg".format(
                    my_later.strftime("%Y%m%d%H%M%S"))
                image_path = "{0}/{1}".format(SCRIPT_DIR, image_name)
                camera_device.capture(image_path)
                camera_device.wait_recording(1)
                print("Prediction Threshold: {}".format(self.prediction_threshold))
                # Make Prediction with the first picture
                logging.debug('Prediction Captured')
                word, predict_value = self.model_predict(image)
                # Give time here for model predictions
                camera_device.wait_recording(3)
                logging.debug('Prediction Returned')
                my_now = datetime.now()
                if word is None:
                    logging.debug('No Event Registered')
                    capture_video = False
                    # Format specifically for the bad-images folder
                    bad_image_folder = "{0}/badimages".format(
                        self.picture_container_name)
                    # Send Picture to the Bad Images folder on Azure that can
                    # be used to retrain
                    self.azure_upload_from_path(bad_image_folder, image_name,
                                                image_path, 'image/jpeg')
                elif word is not None and predict_value < self.prediction_threshold:
                    logging.debug('Prediction Value Too Low')
                    capture_video = False
                    # Format specifically for the bad-images folder
                    bad_image_folder = "{0}/badimages".format(
                        self.picture_container_name)
                    # Send Picture to the Bad Images folder on Azure that can
                    # be used to retrain
                    self.azure_upload_from_path(bad_image_folder, image_name,
                                                image_path, 'image/jpeg')
                    camera_device.wait_recording(2)
                else:
                    # See what we got back from the model
                    logging.debug('Event Registered')
                    capture_video = True
                    print('Prediction(s): {}'.format(word))
                    # Format specifically for the good-images folder
                    good_image_folder = "{0}/goodimages".format(
                        self.picture_container_name)
                    # Send the Picture to the Good Images folder on Azure
                    self.azure_upload_from_path(good_image_folder, image_name,
                                                image_path, 'image/jpeg')
                    camera_device.wait_recording(2)
                    # Once it is uploaded, delete the image
                    os.remove(image_path)
                    break
                # If we don't break by finding the right prediction, stay in
                # the loop
                seconds_past = 0
                capture_counter = capture_counter + 1
                # Delete the image from the OS folder to save space
                os.remove(image_path)
        else:
            camera_device.stop_recording()
            return

    # Create directory to save the video that we get if we are told to
    # capture video
    start_time = my_later
    base_dir = SCRIPT_DIR
    video_dir = "myvideos"
    video_dir_path = "{0}/{1}".format(base_dir, video_dir)
    if not os.path.exists(video_dir_path):
        os.makedirs(video_dir_path)
    video_start_time = start_time - timedelta(seconds=preroll)

    # We will have two separate files, one for before and one for after the
    # event was triggered
    # Before:
    before_event = "video-{0}-{1}.h264".format(
        "before", video_start_time.strftime("%Y%m%d%H%M%S"))
    before_event_path = "{0}/{1}/{2}".format(base_dir, video_dir, before_event)
    before_mp4 = before_event.replace('.h264', '.mp4')
    before_mp4_path = "{0}/{1}/{2}".format(base_dir, video_dir, before_mp4)
    before_path_temp = "{0}.tmp".format(before_mp4_path)
    # After:
    after_event = "video-{0}-{1}.h264".format(
        "after", video_start_time.strftime("%Y%m%d%H%M%S"))
    after_event_path = "{0}/{1}/{2}".format(base_dir, video_dir, after_event)
    after_mp4 = after_event.replace('.h264', '.mp4')
    after_mp4_path = "{0}/{1}/{2}".format(base_dir, video_dir, after_mp4)
    after_path_temp = "{0}.tmp".format(after_mp4_path)
    # Full combined video path
    full_path = "video-{0}-{1}.mp4".format(
        "full", video_start_time.strftime("%Y%m%d%H%M%S"))
    full_video_path = "{0}/{1}/{2}".format(base_dir, video_dir, full_path)
    # Create a json file to reference the given event
    json_file_name = "video-description-{0}.json".format(
        video_start_time.strftime("%Y%m%d%H%M%S"))
    json_file_path = "{0}/{1}/{2}".format(base_dir, video_dir, json_file_name)

    if capture_video == True:
        # Save the video to the file path specified
        camera_device.split_recording(after_event_path)
        video_stream.copy_to(before_event_path, seconds=preroll)
        camera_device.wait_recording(preroll + 5)
        # Convert to MP4 format for viewing
        self.save_video(before_event_path, before_path_temp, before_mp4_path)
        self.save_video(after_event_path, after_path_temp, after_mp4_path)
        # Upload Before Videos to Azure Blob Storage
        before_video_folder = "{0}/{1}".format(self.video_container_name,
                                               'beforevideo')
        self.azure_upload_from_path(before_video_folder, before_mp4,
                                    before_mp4_path, 'video/mp4')
        # Upload After Videos to Azure Blob Storage
        after_video_folder = "{0}/{1}".format(self.video_container_name,
                                              'aftervideo')
        self.azure_upload_from_path(after_video_folder, after_mp4,
                                    after_mp4_path, 'video/mp4')
        # Combine the two mp4 videos into one and save it
        full_video = "MP4Box -cat {0} -cat {1} -new {2}".format(
            before_mp4_path, after_mp4_path, full_video_path)
        self.run_shell(full_video)
        logging.debug('Combining Full Video')
        # Upload Video to Azure Blob Storage
        full_video_folder = "{0}/{1}".format(self.video_container_name,
                                             'fullvideo')
        self.azure_upload_from_path(full_video_folder, full_path,
                                    full_video_path, 'video/mp4')
        # Create json and fill it with information
        self.write_json_to_file(video_start_time, word, predict_value,
                                full_path, json_file_path)
        # Upload json to Azure Blob Storage
        self.azure_upload_from_path(self.json_container_name, json_file_name,
                                    json_file_path, 'application/json')

    # End Things
    shutil.rmtree(video_dir_path)
    camera_device.stop_recording()
def main(ashow=True, debug=False, fastmode=False, wsserver=None,
         logfilename=None):
    global config
    global rerun_main
    rerun_main = False
    show = 1 if ashow else 0

    # here we go
    logging.info("<<<<<<<<<<<<<< starting tracker >>>>>>>>>>>>")

    if fastmode == False:
        fastmode = config.conf['fastMode']
    if debug:
        config.conf['debug'] = True

    # I am testing if camera.capture is fast enough for us
    capture = True

    # where are we
    logging.info(get_raspi_revision())

    # get screen resolution (0,0) if no monitor is connected
    screen_w, screen_h = get_screen_resolution()

    # preview
    #preview = False if(screen_w == 0 and screen_h == 0) else config.conf['preview']
    preview = config.conf['preview']

    # annotation
    an_height = 24
    an_black = picamera.Color('black')
    an_white = picamera.Color('white')

    #- open picamera device
    with picamera.PiCamera() as camera:
        #- determine camera module
        revision = camera._revision.upper()
        logging.info("camera chip: %s" % revision)
        if revision == 'OV5647':
            # V1 module
            # 1280x720 has a bug. (wrong center value)
            if fastmode:
                # Full FOV! Area = 1200
                resx = 640   # 40
                resy = 480   # 30
                fps = 90
                mode = 7
                an_height = 16
            else:
                # Full FOV Area = 4800
                resx = 1280  # 80
                resy = 960   # 60
                fps = 42
                mode = 4
        elif revision == 'IMX219':
            # V2 module
            if fastmode:
                # 50% FOV :-/ Area = 1200
                #resx = 640  # 40
                #resy = 480  # 30
                #fps = 120
                #mode = 7
                #an_height = 16
                # 80% FOV Area = 2552
                resx = 928   # 960 # 60 # 58
                resy = 704   # 720 # 45 # 44
                fps = 90
                mode = 6
            else:
                # full FOV Area = 102x56 = 5712
                resx = 1632  # 102
                resy = 896   # 56
                fps = 40
                mode = 5
        else:
            raise ValueError('Unknown camera device')

        # evaluate crossing parameters
        ycross = config.conf['yCross']
        xcross = config.conf['xCross']

        # fastmode: if needed force x/ycross to middle of the screen
        if fastmode:
            if ycross > 0:
                # force ycross to the middle
                ycross = resy / 32
                config.conf['yCross'] = int(ycross)
                logging.warning("fastmode: force yCross to %d" % ycross)
            if xcross > 0:
                # force xcross to the middle
                xcross = resx / 32
                config.conf['xCross'] = int(xcross)
                logging.warning("fastmode: force xCross to %d" % xcross)

        #- check if the crossing line is in the center (this is not needed)
        if ycross > 0 and ycross != (resy / 32):
            logging.warning("Y crossing %d expected but %d given!" %
                            (resy / 32, ycross))
        if xcross > 0 and xcross != (resx / 32):
            logging.warning("X crossing %d expected but %d given!" %
                            (resx / 32, xcross))

        # setup camera resolution
        logging.info("camera resolution: %dx%d" % (resx, resy))
        camera.resolution = (resx, resy)

        # debugging mode
        if show:
            preview = True
            camera.framerate = 25
            x_disp = config.conf['offsetX']
            y_disp = config.conf['offsetY']
            width = resy / 2
            height = resx / 2
            #if fastmode:
            #    width = resy
            #    height = resx
            display = picamtracker.Display(caption='piCAMTracker',
                                           x=x_disp, y=y_disp,
                                           w=width, h=height)
        else:
            display = None
            camera.sensor_mode = mode
            if fastmode:
                camera.framerate_range = (60, fps)
            else:
                camera.framerate_range = (25, fps)

        logging.info("warm-up 2 seconds...")

        # setup serial port
        #serialPort = picamtracker.SerialIO.SerialCommunication(port=config.conf['serialPort'], options=config.conf['serialConf'])

        # setup GPIO
        greenLED = picamtracker.GPIOPort.gpioPort(
            config.conf['greenLEDPort'],
            is_active_low=config.conf['ledActiveLow'],
            duration=config.conf['signalLength'],
            start_blinks=3)
        redLED = picamtracker.GPIOPort.gpioPort(
            config.conf['redLEDPort'],
            duration=config.conf['signalLength'],
            is_active_low=config.conf['ledActiveLow'])
        sleep(1.0)
        picamtracker.GPIOPort.statusLED(config.conf['statusLEDPort'], on=True)

        logging.info("starting camera ...")

        # setup preview
        if preview:
            cl = np.zeros((resy, resx, 3), np.uint8)
            ycross = config.conf['yCross']
            if ycross > 0:
                if ycross >= int(resy / 16):
                    ycross = int(resy / 32)
                ym = 16 * ycross
                cl[ym, :, :] = 0xff  # horizontal line
            xcross = config.conf['xCross']
            if xcross > 0:
                if xcross >= int(resx / 16):
                    xcross = int(resx / 32)
                xm = 16 * xcross
                cl[:, xm, :] = 0xff  # vertical line

            #- preview settings
            px = int(config.conf['previewX'])
            py = int(config.conf['previewY'])
            if fastmode:
                pw = int(resx)
                ph = int(resy)
            else:
                pw = int(resx / 2)
                ph = int(resy / 2)
            rotation = int(config.conf['viewAngle'])
            if rotation == 90 or rotation == 270:
                hh = pw
                pw = ph
                ph = hh
            logging.info("preview w: %d, h: %d" % (pw, ph))
            camera.start_preview(fullscreen=False, window=(px, py, pw, ph),
                                 rotation=rotation)
            #camera.preview.fullscreen = False
            if show:
                camera.preview.alpha = 192
            else:
                camera.preview.alpha = 255

            #- overlay settings
            overlay = camera.add_overlay(source=cl.tobytes(),
                                         size=(resx, resy), format='rgb')
            overlay.fullscreen = False
            overlay.alpha = 32
            overlay.layer = 3
            overlay.window = (px, py, pw, ph)
            overlay.rotation = rotation

        # set exposure mode
        #camera.exposure_mode = 'auto'
        camera.exposure_mode = 'sports'
        camera.exposure_compensation = config.conf["exposure"]

        # >>> debug
        # camera.annotate_frame_num = True

        # setup UDP broadcaster
        if 'IPUDPBEEP' in config.conf and re.match('.*\.255$', config.conf['IPUDPBEEP']):
            udpThread = picamtracker.UDPBeep.udpBeep(config.conf['IPUDPBEEP'], 4445)
            udpThread.event.set()
        else:
            udpThread = None

        # setup used objects
        vstream = picamera.PiCameraCircularIO(
            camera, seconds=config.conf['videoLength'])
        writer = picamtracker.Writer(camera, stream=vstream, config=config,
                                     wsserver=wsserver)
        vwriter = picamtracker.vWriter(stream=vstream, config=config)
        tracker = picamtracker.Tracker(camera, greenLed=greenLED,
                                       redLed=redLED, config=config,
                                       udpThread=udpThread, capture=capture)

        # assign external command interface
        cmds = picamtracker.CommandInterface(config=config)
        cmds.subscribe(tracker.set_maxDist, 'maxDist')
        cmds.subscribe(tracker.set_trackMaturity, 'trackMaturity')
        cmds.subscribe(tracker.testCrossing, 'testBeep')
        cmds.subscribe(config.set_storeParams, 'storeParams')
        cmds.subscribe(setFastMode, 'fastMode')
        cmds.subscribe(setLoggingLevel, 'loggingLevel')

        # enable overwritten camera's analyse callback
        with picamtracker.MotionAnalyser(camera, tracker, display, show,
                                         config, vwriter) as output:
            prctl.set_name('python')

            # local variables
            loop = 0
            t_wait = 0.5
            old_frames = 0
            auto_mode = -1
            #if fastmode:
            #    auto_mode = 10
            last_auto_mode = time()
            fps = 25.0

            # start camera
            camera.annotate_text_size = an_height
            camera.annotate_foreground = an_white if camera.analog_gain > 5 else an_black
            camera.start_recording(output=vstream, format='h264',
                                   level='4.2', motion_output=output)

            # assign external commands to internal functions
            cmds.subscribe(output.set_vMax, 'vMax')
            cmds.subscribe(output.set_vMin, 'vMin')
            cmds.subscribe(output.set_maxArea, 'maxArea')
            cmds.subscribe(output.set_minArea, 'minArea')
            cmds.subscribe(output.set_sadThreshold, 'sadThreshold')
            cmds.subscribe(output.set_debug, 'debug')
            cmds.subscribe(output.set_baseB, 'baseB')
            cmds.subscribe(output.set_exposure, 'exposure')
            cmds.subscribe(output.set_extend, 'extension')

            # assign GPIO pin to enable debugging
            if config.conf['debugInputPort']:
                picamtracker.GPIOPort.addCallback(
                    config.conf['debugInputPort'], output.debug_button)
            try:
                # go into endless camera recording loop and wake up every
                # t_wait seconds
                while True:
                    global temp
                    loop += 1
                    # check temperature/light every minute
                    if loop % 120 == 0:
                        temp = get_temp()
                        camera.annotate_foreground = an_white if camera.analog_gain > 5 else an_black
                        logging.debug(
                            "analog_gain: %3.1f exposure_speed: %d (%3.1f fps)" %
                            (float(camera.analog_gain), camera.exposure_speed, fps))
                    # update statistics every second
                    if loop & 1:
                        add_text = ""
                        sep = ""
                        if tracker.noise > 0.8:
                            add_text += " NOISY"
                            sep = " +"
                        if camera.analog_gain > 7:
                            add_text = add_text + sep + " DARK"
                        if temp > max_temp:
                            add_text = add_text + sep + " HOT (%4.1f)" % temp
                        if len(add_text):
                            add_text += " !"
                        frames = output.processed_frames
                        fps = (frames - old_frames) / (2 * t_wait)
                        old_frames = frames
                        camera.annotate_text = "%s (%3.1f fps) %s" % (
                            dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                            fps, add_text)

                    # check for restart
                    if rerun_main:
                        break

                    # crossing event happened?
                    delay, frame, motion = tracker.getStatus()
                    if frame != 0:
                        # if crossing detected -> take a snapshot of the event
                        #t0 = time()
                        if capture:
                            writer.update_hits(delay, frame, motion,
                                               tracker.image.copy())
                        else:
                            writer.takeSnapshot(delay, frame, motion)
                        tracker.releaseLock()
                        #print("capture time: %4.2fms" % (1000.0 * (time() - t0)))

                    # check for USB stick every 60 seconds
                    camera.wait_recording(t_wait)
            except KeyboardInterrupt:
                logging.error("Got keyboard interrupt")
                pass
            finally:
                # stop camera and preview
                #serialPort.terminated = True
                greenLED.terminated = True
                redLED.terminated = True
                if udpThread:
                    udpThread.terminated = True
                camera.stop_recording()
                if preview:
                    camera.stop_preview()
                    camera.remove_overlay(overlay)
                # stop all threads
                if display is not None:
                    display.terminated = True
                cmds.stop()
                tracker.stop()
                writer.stop()
                vwriter.stop()
                # wait and join threads
                sleep(0.5)
                if display is not None:
                    display.join()
                #serialPort.join()
                if udpThread:
                    udpThread.join()
                greenLED.join()
                redLED.join()
                cmds.join()
                tracker.join()
                writer.join()
                vwriter.join()
                picamtracker.GPIOPort.statusLED(config.conf['statusLEDPort'],
                                                on=False)
                picamtracker.GPIOPort.cleanup()
                #config.write()

    logging.info("<<<<<<<<<<<< tracker ended >>>>>>>>>>>>")
    return rerun_main