class Binoculars(object):
    def __init__(self, are_some_movement):
        logger.info("Building binoculars")
        self._lens = PiCamera()
        self._lens.rotation = 180
        self.buffer = PiCameraCircularIO(self._lens, seconds=PRESECONDS)
        self.watcher = Watcher(self._lens, are_some_movement)
        self.is_recording = False

    def start_watching(self):
        self._lens.start_recording(self.buffer, format='h264',
                                   resize=RECORD_RESOLUTION, splitter_port=1)
        self._lens.start_recording('/dev/null', format='h264',
                                   resize=MOTION_RESOLUTION, splitter_port=2,
                                   motion_output=self.watcher)

    def start_recording(self, filename):
        self.is_recording = True
        self._lens.split_recording(filename, splitter_port=1)

    def stop_recording(self):
        self.is_recording = False
        self._lens.split_recording(self.buffer, splitter_port=1)

    def save_buffer(self, filename):
        self.buffer.copy_to(filename)

    def get_image(self):
        stream = io.BytesIO()
        self._lens.capture(stream, format='jpeg')
        stream.seek(0)
        image = Image.open(stream)
        return image

    def record_video(self, time=10):
        filename = datetime.now().strftime("record-%Y%m%d-%H:%M:%S.h264")
        self._lens.start_recording(os.path.join(SAVE_FOLDER, filename))
        self._lens.wait_recording(time)
        self._lens.stop_recording()

    def take_picture(self):
        logger.info("Taking picture")
        image = self.get_image()  # was: i.get_image(); image was never assigned
        filename = datetime.now().strftime("capture-%Y%m%d-%H:%M:%S.jpg")
        image.save(os.path.join(SAVE_FOLDER, filename))
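A hypothetical driver loop for the Binoculars class above. Watcher is not shown, so the assumption that it invokes are_some_movement(True/False) as motion starts and stops is a guess; SAVE_FOLDER comes from the original snippet.

import os
import time
from datetime import datetime

def are_some_movement(moving):
    # Assumed callback signature: Watcher passes a boolean motion state.
    stamp = datetime.now().strftime("%Y%m%d-%H:%M:%S")
    if moving and not binoculars.is_recording:
        # Divert live frames to disk, then persist the pre-motion buffer.
        binoculars.start_recording(os.path.join(SAVE_FOLDER, stamp + "-after.h264"))
        binoculars.save_buffer(os.path.join(SAVE_FOLDER, stamp + "-before.h264"))
    elif not moving and binoculars.is_recording:
        # Send frames back into the in-memory circular buffer.
        binoculars.stop_recording()

binoculars = Binoculars(are_some_movement)
binoculars.start_watching()
while True:
    time.sleep(1)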
def __init__(self, size=10, frameRate=40, hflip=False, vflip=False):
    """A wrapper class for the Raspberry Pi camera using the picamera
    python library. The size parameter sets the camera resolution to
    size * (64, 48)."""
    self.active = False
    try:
        if type(size) is not int:
            raise TypeError("Size must be an integer")
        elif 1 <= size <= 51:
            self.size = size
            self.hRes = size * 64
            self.vRes = size * 48
        else:
            raise ValueError("Size must be in range 1 to 51")
    except (TypeError, ValueError):  # was: except TypeError or ValueError,
        raise                        # which only ever caught TypeError
    self.picam = PiCamera()
    self.picam.resolution = (self.hRes, self.vRes)
    self.picam.framerate = frameRate
    self.picam.hflip = hflip
    self.picam.vflip = vflip
    time.sleep(1)
    self.stream = PiCameraCircularIO(self.picam, seconds=1)
    self.frameRateTimer = timer.Timer()
    self.frameRateFilter = filters.Filter1D(maxSize=21)
    self.start()
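The start() called at the end of __init__ is not shown; a minimal sketch of what it might do with the one-second circular stream created above (the stop() counterpart is also an assumption):

def start(self):
    # Feed the one-second circular buffer so a fresh frame is always
    # available to whoever reads self.stream.
    self.picam.start_recording(self.stream, format='h264')
    self.active = True

def stop(self):
    self.picam.stop_recording()
    self.active = False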
def start_buffer_recording(camera, BITRATE, SHADOWPLAY_TIME):
    print("Creating shadowplay circular buffer...")
    # Divide by ~4: the bitrate is about a quarter of normal, so a
    # buffer sized for 10 s would actually hold ~40 s of footage.
    shadowplay_stream = PiCameraCircularIO(camera,
                                           seconds=ceil(SHADOWPLAY_TIME / 4.2))
    camera.start_recording(shadowplay_stream, format='h264',
                           bitrate=BITRATE, splitter_port=1)
    return shadowplay_stream
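A sketch of the matching "save the last N seconds" trigger for the buffer returned above; the function name and output path are made up:

def save_shadowplay(shadowplay_stream, path='shadowplay.h264'):
    # copy_to starts at a keyframe (an SPS header by default), so the
    # resulting clip is independently playable.
    shadowplay_stream.copy_to(path)
    shadowplay_stream.clear()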
def start(cam):
    # Set up the globals used by the rest of the module.
    global camera, stream
    camera = cam
    stream = PiCameraCircularIO(camera, seconds=record_time_before_detection)
    camera.start_recording(stream, format='h264')
    camera.wait_recording(5)  # make sure the recording has started
    print("recording initialized")
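Hypothetical companions to start(): divert frames to disk when a detection fires, then return to the buffer when it ends. The function names are assumptions; only the camera/stream globals come from start().

def detection_started(filename):
    # Record the frames "after" detection to a file...
    camera.split_recording(filename + '_after.h264')
    # ...and dump the pre-detection seconds held in the buffer.
    stream.copy_to(filename + '_before.h264')
    stream.clear()

def detection_ended():
    # Resume recording into the in-memory circular buffer.
    camera.split_recording(stream)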
def __init__(self):
    # TODO: Make resolution configurable
    # Initialize the camera
    self.camera = PiCamera()
    self.camera.resolution = (1280, 720)
    # Record to a circular stream for retroactive recording
    self.stream = PiCameraCircularIO(
        self.camera, seconds=sum(CameraHandler.retroactive_time))
    # Start recording
    self.camera.start_recording(self.stream, format='h264')
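A sketch of a method CameraHandler might expose to persist the retroactive buffer. retroactive_time is only ever summed above, so treating it as a (before, after) pair of seconds is an assumption, as is the method name:

def save_retroactive(self, path):
    self.camera.wait_recording(0)  # surface any pending encoder errors
    # Flush everything currently held in the circular buffer to disk.
    self.stream.copy_to(path)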
def run(self):
    with PiCamera() as camera:
        count = 0
        camera.resolution = (1280, 720)
        stream = PiCameraCircularIO(camera, seconds=10)
        camera.start_recording(stream, format='h264')
        try:
            camera.wait_recording(3)
            while True:
                camera.wait_recording(0)
                if self.detect_motion(camera):
                    print('Motion detected!')
                    if not self.dryrun:
                        timestamp = datetime.now().isoformat('_')
                        # As soon as we detect motion, split the recording to
                        # record the frames "after" motion
                        camera.split_recording(
                            '{}_capture_{:04d}.h264'.format(timestamp, count))
                        # Write the 10 seconds "before" motion to disk as well
                        stream.copy_to(
                            '{}_before_{:04d}.h264'.format(timestamp, count),
                            seconds=10)
                        stream.clear()
                    # Wait until motion is no longer detected, then split
                    # recording back to the in-memory circular buffer
                    while self.detect_motion(camera):
                        camera.wait_recording(0)
                    print('Motion stopped!')
                    camera.split_recording(stream)
                    count += 1
                if user_freespace_gb() < 0.5:
                    print("Closing program to avoid using all free space.")
                    print("There is {} GB remaining.".format(
                        user_freespace_gb()))
                    break
        except KeyboardInterrupt:
            print("Stopped")
        finally:
            camera.stop_recording()
            print("Motion detection values:")
            print("\tMin: {}".format(self.result_min))
            print("\tMax: {}".format(self.result_max))
            print("\tAvg: {}".format(self.result_avg))
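The detect_motion() that the loop above polls is not shown; a simple frame-differencing sketch with the same (self, camera) interface. The 128x96 resize and the threshold of 25 are arbitrary choices, not values from the source, and the min/max/avg bookkeeping the class prints is omitted:

import numpy as np
from picamera.array import PiRGBArray

def detect_motion(self, camera):
    # Grab a small RGB frame from the video port while recording continues.
    with PiRGBArray(camera, size=(128, 96)) as raw:
        camera.capture(raw, format='rgb', resize=(128, 96),
                       use_video_port=True)
        current = raw.array.astype(np.int16)
    prev, self._prev = getattr(self, '_prev', None), current
    if prev is None:
        return False
    # Mean absolute pixel difference between consecutive frames.
    return float(np.abs(current - prev).mean()) > 25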
def __init__(self, size=10, frameRate=40, horizontalflip=False, verticalflip=False):
    from picamera import PiCamera, PiCameraCircularIO
    self.active = False
    try:
        if type(size) is not int:
            raise TypeError("Size must be an integer")
        elif 1 <= size <= 51:
            self.size = size
            self.horizontal_resolution = size * 64
            self.vertical_resolution = size * 48
        else:
            raise ValueError("Size must be in range 1 to 51")
    except (TypeError, ValueError):  # was: except TypeError or ValueError
        raise
    self.picam = PiCamera()
    self.picam.resolution = (self.horizontal_resolution,
                             self.vertical_resolution)
    self.picam.framerate = frameRate
    # PiCamera exposes these as hflip/vflip; assigning
    # picam.horizontalflip (as the original did) does not flip the image.
    self.picam.hflip = horizontalflip
    self.picam.vflip = verticalflip
    time.sleep(1)
    self.stream = PiCameraCircularIO(self.picam, seconds=1)
    self.frameRateTimer = Timer()
    self.frameRateFilter = Filter1D(maxSize=21)
    self.start()
## Set initial values (while camera warms up)
result, magnitude = False, 0
with PiCamera() as camera:
    with DetectMotion(camera) as output:
        global current_status
        """Set initial twilight settings and camera attributes. Set up the
        stream, start recording, then wait five seconds for the camera to
        warm up (reduces false positives)."""
        prior_status, illumination = _check_twilight()
        camera = _set_attributes(prior_status, camera)[1]
        sleep(5)
        stream = PiCameraCircularIO(camera, seconds=seconds)
        camera.start_recording(stream, format=formats[0],
                               motion_output=output)
        try:
            while True:
                """Update the twilight status and check whether the current
                status matches the prior status. If it does, simply check for
                motion; if not, daylight has changed and we must update the
                camera attributes."""
                current_status, illumination = _check_twilight(illumination)
                if current_status == prior_status:
                    """Use camera.wait_recording(1) to wait on the video
                    encoder for a 1-second timeout. Adjust the timeout as
                    necessary for your specific application."""
                    if result:
                        """If result is set, motion was detected. Capture the
                        start time, set the file name, record the magnitude
                        of motion, and ...
import io
import random
from picamera import PiCamera, PiCameraCircularIO
from gpiozero import DistanceSensor  # import was missing in the original

sensor = DistanceSensor(24, 23)

def motion_detected():
    print("detected")
    return sensor.distance <= 0.2

print("start")
camera = PiCamera()
stream = PiCameraCircularIO(camera, seconds=20)
camera.start_recording(stream, format='h264')
print("ready")
try:
    while True:
        print("while")
        # camera.wait_recording(1)
        if motion_detected():
            print('motion detected')
            camera.start_preview()
            camera.wait_recording(10)
            camera.stop_preview()
            stream.copy_to('motion.h264')
finally:
    camera.stop_recording()
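One caveat with the loop above: stream.copy_to('motion.h264') overwrites the same file on every detection. A timestamped variant (an assumption, not in the source) would keep every clip:

from datetime import datetime

# Inside the if motion_detected(): block, instead of 'motion.h264':
stream.copy_to(datetime.now().strftime('motion-%Y%m%d-%H%M%S.h264'))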
def setup_email(account, password):
    server = smtplib.SMTP("smtp.gmail.com", 587)
    server.ehlo()
    server.starttls()
    server.login(account, password)
    return server


if __name__ == "__main__":
    doorbell = Doorbell()
    stream_seconds = 5
    try:
        print("creating camera")
        camera = PiCamera()
        stream = PiCameraCircularIO(camera, seconds=stream_seconds)
        camera.start_recording(stream, format='h264')
        print("creating twilio client")
        twilio_client = Client(config.TWILIO_SID, config.TWILIO_TOKEN)
        print("creating S3 client")
        s3 = boto3.resource("s3",
                            aws_access_key_id=config.AWS_KEY_ID,
                            aws_secret_access_key=config.AWS_SECRET)
        print("connecting to SMTP server")
        smtp = setup_email(config.EMAIL_ACCT, config.EMAIL_PASSWORD)
        doorbell.add_handler(LEDHandler(OUTPUT_PIN))
        doorbell.add_handler(
            SmsHandler(s3, twilio_client, camera,
                       recipients=config.SMS_RECIPIENTS))
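The Doorbell and handler classes are not shown; a sketch of a handler that saves the last stream_seconds of video when the bell rings. The on_ring() method name is a guess at the handler interface Doorbell.add_handler expects:

class ClipHandler:
    def __init__(self, camera, stream, out_path="ring.h264"):
        self.camera = camera
        self.stream = stream
        self.out_path = out_path

    def on_ring(self):
        # Keep rolling briefly so the clip includes the moment after
        # the press, then flush the circular buffer to disk.
        self.camera.wait_recording(2)
        self.stream.copy_to(self.out_path)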
class TensorCamera:
    """Wrapping in a class to capture the tensor and camera state in order
    to avoid some verbose functions or structures"""

    def __init__(self, width=640, height=480):
        frame_rate_calc = 1
        freq = cv2.getTickFrequency()
        font = cv2.FONT_HERSHEY_SIMPLEX
        self.IM_WIDTH = width
        self.IMG_HEIGHT = height
        self.objectsOfInterest = [
            'person', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'bear',
            'teddy bear'
        ]
        self.minScore = 0.40
        ipv4 = os.popen('ip addr show eth0').read().split("inet ")[1].split("/")[0]
        self.camId = ipv4[-3:]
        self.imageSaveDeltaSeconds = 10
        self.videoLoopSeconds = 5
        self.videoPreRecordSeconds = 1.5
        self.lastImageSaveTime = time.time()
        self.lastVideoSaveTime = time.time()
        self.saveVideoAtTime = time.time()
        self.videoLoopFlag = 0  # indicates the current stream will need to be recorded

        # Load the graph and merge it into the default graph. This setup
        # allows the same session to be used throughout.
        # tf_session is *not* thread-safe.
        self.graph = tf.Graph()
        with self.graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        self.tf_session = tf.Session(graph=self.graph)

        self.camera = PiCamera(resolution=(width, height), framerate=30)
        # Circular stream holding the rolling video buffer.
        self.stream = PiCameraCircularIO(self.camera, seconds=5)  # self.videoLoopSeconds
        self.camera.start_recording(self.stream, format='h264')
        # Capture and set all to zero to get a blank array of the right size
        self._frame = PiRGBArray(self.camera, size=(width, height))
        self._frame.truncate(0)

    def capture_and_tell(self):
        """Uses the tensorflow graph to capture an image, do magic, ask the
        sensor board for the distance from an object, perform some business
        logic, and then send an update to the sensor board with brake status.
        Returns the system status for brake, distance, and image.
        This is to be used by a server component."""
        # clear the buffer frame used for capture
        self._frame.truncate(0)
        self.camera.capture(self._frame, format="rgb", use_video_port=True)
        frame = np.copy(self._frame.array)
        frame.setflags(write=1)
        graph_ops = self.graph.get_operations()
        with self.graph.as_default():
            output_dict = run_inference_for_single_image(frame, graph_ops,
                                                         self.tf_session)

        now = datetime.now()
        dateString = now.strftime("%Y%m%d%H%M%S")
        detectionString = ''
        for i, score in enumerate(output_dict['detection_scores']):
            if score >= self.minScore:
                detectionClass = output_dict['detection_classes'][i]
                dname = category_index[detectionClass]['name']
                print(dname, score)
                detectionString += dname + '_'
        fname = dateString + detectionString + self.camId

        if detectionString:
            print(dateString, detectionString)
            # If enough time has elapsed, save an image.
            if time.time() - self.lastImageSaveTime > self.imageSaveDeltaSeconds:
                self.lastImageSaveTime = time.time()
                self.camera.capture(recording_folder + '/' + fname + '.jpg')
                print('time to save an image add the code')
            # If the video loop flag is unset and enough time has passed,
            # set it. Don't actually save the video here.
            if not self.videoLoopFlag:
                if time.time() - self.lastVideoSaveTime > self.videoLoopSeconds:
                    self.videoLoopFlag = 1
                    self.saveVideoAtTime = time.time() + (
                        self.videoLoopSeconds - self.videoPreRecordSeconds)
                    self.videoName = dateString + detectionString + self.camId

        # Save a video if it's time.
        if self.videoLoopFlag and (time.time() > self.saveVideoAtTime):
            self.videoLoopFlag = 0
            self.stream.copy_to(recording_folder + '/' + self.videoName + '.h264')
            print('time to save a video ', self.videoName)

        # Draw labeled bounding boxes for any detected objects whose score
        # is greater than 0.3.
        vis_util.visualize_boxes_and_labels_on_image_array(
            frame,
            output_dict['detection_boxes'],
            output_dict['detection_classes'],
            output_dict['detection_scores'],
            category_index,
            instance_masks=output_dict.get('detection_masks'),
            use_normalized_coordinates=True,
            line_thickness=6,
            min_score_thresh=0.3)

        # Putting this in the main loop and requiring it for every frame
        # could be faster, but this is low-hanging fruit. Does not respond
        # well to network hiccups.
        # Update the brake based on the detection and distance logic.
        # send_brake does not signal the light unless the state changes.
        # NOTE: Here we've used a static threshold for both object distance
        # and the detection score for when to signal the brake, because
        # those have seemed to work well in our testing. Obviously we don't
        # have a real vehicle to test this on, but if we did, we could
        # easily adjust this behavior: for example, make the score threshold
        # depend on the object distance, or store the last distance
        # measurement or two (as attributes on TensorCamera) to determine
        # whether and how fast an object was approaching.
        self._frame.truncate(0)

        # Convert the frame to RGB, then encode it into a PNG that can be
        # transmitted as raw bytes to be displayed on the head unit.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        (_, buf) = cv2.imencode('.png', rgb_frame)
        if detectionString:
            break_string = detectionString.replace('_', ' ')
        else:
            break_string = 'NA'
        # Return the dashboard payload.
        return {'image': buf.tobytes(), 'distance': "", 'brake': break_string}
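The docstring says capture_and_tell() is meant to feed a server component; a minimal Flask sketch of such an endpoint. Flask, the route, and the port are assumptions, since nothing above names a specific framework:

from flask import Flask, Response

app = Flask(__name__)
cam = TensorCamera()

@app.route('/frame')
def frame():
    payload = cam.capture_and_tell()
    # payload['brake'] and payload['distance'] could go into headers or
    # JSON; here we just stream the annotated PNG.
    return Response(payload['image'], mimetype='image/png')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8000)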
class Recorder:
    def __init__(self, camera, storage, h264_args,
                 temporary_recordings_output_path="./temp_recordings/",
                 record_seconds_after_motion=12,
                 max_recording_seconds=600,
                 record_seconds_before_motion=5,
                 ffmpeg_path="/usr/local/bin/ffmpeg",
                 convert_h264_to_mp4=True):
        self.camera = camera
        self.storage = storage
        self.h264_args = h264_args
        self.temporary_recordings_output_path = temporary_recordings_output_path
        self.record_seconds_after_motion = record_seconds_after_motion
        self.max_recording_seconds = max_recording_seconds
        self.timer = 0
        self.record_seconds_before_motion = record_seconds_before_motion
        self.ffmpeg_path = ffmpeg_path
        self.convert_h264_to_mp4 = convert_h264_to_mp4

        # Make sure the PiCameraCircularIO holds at least 20 seconds of
        # footage, since that is the minimum for it to work.
        if record_seconds_before_motion > 20:
            delayed_storage_length_seconds = record_seconds_before_motion
        else:
            delayed_storage_length_seconds = 20

        # Create the delayed frames stream.
        self.delayed_recording_stream = PiCameraCircularIO(
            self.camera, seconds=delayed_storage_length_seconds)
        # For some reason the PiCameraCircularIO has to be on splitter_port 1;
        # splitter ports 2 and 3 don't work.
        self.camera.start_recording(self.delayed_recording_stream,
                                    splitter_port=1, **h264_args)

    def report_motion(self):
        """Call this when there is motion. Starts the recording if it hasn't
        already been started; otherwise extends the running recording."""
        if self.timer == 0:
            self.timer = self.record_seconds_after_motion
            self._start_recording()
        else:
            self.timer = self.record_seconds_after_motion

    def _start_recording(self):
        """Starts the recording."""
        # Create the filename and path.
        current_time_string = datetime.datetime.now().strftime("%H-%M-%S")
        if not os.path.isdir(os.path.join(
                get_exec_dir(), self.temporary_recordings_output_path)):
            os.mkdir(os.path.join(
                get_exec_dir(), self.temporary_recordings_output_path))
        output_file_name = os.path.join(get_exec_dir(),
                                        self.temporary_recordings_output_path,
                                        current_time_string)
        print('Started recording ' + output_file_name)
        # Record the frames "after" motion. (The original also passed a
        # stray seconds=10 here, which picamera's split_recording ignores.)
        self.camera.split_recording(output_file_name + '_after.h264',
                                    splitter_port=1)
        # Write the seconds "before" motion to disk as well.
        self.delayed_recording_stream.copy_to(
            output_file_name + '_before.h264',
            seconds=self.record_seconds_before_motion)
        # Clear the delayed recording stream.
        self.delayed_recording_stream.clear()
        threading.Thread(target=self._start_countdown,
                         args=(output_file_name, ),
                         daemon=True).start()

    def _start_countdown(self, output_file_name):
        """Counts down from record_seconds_after_motion after motion is
        detected; stops recording when the timer reaches 0."""
        self.timer = self.record_seconds_after_motion
        recorded_time = 0
        while self.timer > 0 and not recorded_time > self.max_recording_seconds:
            time.sleep(1)
            recorded_time += 1
            self.timer -= 1
        # Split the recording back to the delayed frames stream.
        self.camera.split_recording(self.delayed_recording_stream,
                                    splitter_port=1)
        # Merge the two recordings.
        file_path = self._merge_recordings(output_file_name)
        # Put the h264 recording into an mp4 container.
        if self.convert_h264_to_mp4:
            file_path = self._put_in_mp4_container(file_path)
        # Store the recording in the right place.
        self.storage.store(file_path)

    def _merge_recordings(self, output_file_name):
        """Merges the two h264 recordings and deletes the old h264 files."""
        with open(output_file_name + "_before.h264", 'rb') as before:
            with open(output_file_name + "_after.h264", 'rb') as after:
                with open(output_file_name + ".h264", 'ab') as new:
                    new.write(before.read())
                    new.write(after.read())
        # Remove the separate files.
        try:
            os.remove(output_file_name + "_before.h264")
            os.remove(output_file_name + "_after.h264")
        except Exception as e:
            print(e)
        return output_file_name + ".h264"

    def _put_in_mp4_container(self, file_path):
        """Puts the h264 recording into an mp4 container."""
        output_file_path = file_path.replace("h264", "mp4")
        # ffmpeg -i "before.h264" -c:v copy -f mp4 "myOutputFile.mp4"
        subprocess.call([self.ffmpeg_path, '-i', file_path,
                         '-c:v', 'copy', '-f', 'mp4', output_file_path],
                        stdin=subprocess.PIPE)
        # Remove the h264 file.
        try:
            os.remove(file_path)
        except Exception as e:
            print(e)
        return output_file_path
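Hypothetical wiring for the Recorder class above. DiskStorage and the h264_args contents are assumptions: storage only needs a store(path) method, and h264_args is forwarded as keyword arguments to PiCamera.start_recording.

import shutil
from picamera import PiCamera

class DiskStorage:
    def store(self, path):
        # Assumes /home/pi/recordings/ already exists.
        shutil.move(path, "/home/pi/recordings/")

camera = PiCamera(resolution=(1280, 720), framerate=30)
recorder = Recorder(camera, DiskStorage(), {'format': 'h264'})
# Call report_motion() from a motion detector; repeated calls while a
# recording is running simply reset the after-motion countdown.
recorder.report_motion()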