def click():
    """Take a picture!

    You can customize this function to do whatever you want to the
    picture.  Run it from the terminal with ``python3 this_script.py``.
    """
    camera = None  # FIX: bind before try so the finally block cannot NameError
    try:
        camera = start_camera()
        camera.annotate_background = Color('pink')
        camera.annotate_foreground = Color('blue')
        camera.annotate_text_size = 30
        date_str = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        camera.annotate_text = "Took a picture at "
        camera.annotate_text += date_str
        camera.image_effect = 'none'
        # choices include none, negative, solarize, sketch, denoise,
        # emboss, oilpaint, hatch, gpen, pastel, watercolor, film, blur,
        # saturation, colorswap, washedout, posterise, colorpoint, cartoon
        sleep(2)  # wait for the camera to be ready
        camera.capture(get_full_image_name('img_%s.jpg' % date_str))
        print('You took a photo!')
    finally:
        # even if picture fails, make sure the next person can use the camera
        if camera is not None:
            camera.close()
def capture(now):
    """Capture one full-sensor still, stamped with `now`, into /home/pi/shots/.

    `now` is a datetime used both for the overlay text and the file name.
    """
    try:
        camera.resolution = (3280, 2464)
        # less resolution but full sensor 1640x1232
        # full resolution: 3280x2464
        # exposure modes: off auto night nightpreview backlight spotlight
        # sports snow beach verylong fixedfps antishake fireworks
        camera.exposure_mode = 'antishake'
        # awb modes: off auto sunlight cloudy shade tungsten fluorescent
        # incandescent flash horizon
        camera.awb_mode = 'horizon'
        # init subtitle
        camera.annotate_text_size = 50
        camera.annotate_background = Color('blue')
        camera.annotate_foreground = Color('yellow')
        date_time = now.strftime("%Y-%m-%d_%H-%M-%S-%f")
        print('date: ' + date_time)
        camera.annotate_text = "HORIZONCAM: " + date_time
        camera.capture('/home/pi/shots/horizon_' + date_time + '.jpg')
        # for speed in speedlist:
        #     camera.shutter_speed = speed  # shutter speed in microseconds
        #     print(speed)
        #     camera.capture('/home/pi/shots/earthcam_' + date_time + '(' + str(speed) + ').jpg')
    finally:
        # FIX: was `camera.close` without parentheses - a no-op attribute
        # access, so the camera was never actually released.
        camera.close()
def _initialize_camera(config_camera):
    """Create and configure a PiCamera from the `config_camera` dict.

    Reads resolution, contrast, brightness and the optional annotation
    settings, forces a black-and-white color effect, and returns the
    ready-to-use camera object.
    """
    logging.info("Initialize Camera")
    # Parse configuration options
    resolution_height = config_camera["resolution_height"]
    resolution_width = config_camera["resolution_width"]
    annotate_text_size = config_camera["annotate_text_size"]
    annotate_text = config_camera["annotate_text"]
    annotate_foreground = config_camera["annotate_foreground"]
    annotate_background = config_camera["annotate_background"]
    camera = PiCamera()
    logging.debug("Image Resolution: Height=[%s]; Width=[%s]" %
                  (resolution_height, resolution_width))
    # FIX: PiCamera.resolution takes (width, height); the original passed
    # (height, width), swapping the axes for any non-square configuration.
    camera.resolution = (resolution_width, resolution_height)
    # turn camera to black and white
    camera.color_effects = (128, 128)
    camera.contrast = config_camera["contrast"]
    camera.brightness = config_camera["brightness"]
    camera.exposure_mode = 'auto'
    if config_camera["annotate"]:
        logging.debug("Set annotate text size to [%s]" % (annotate_text_size))
        camera.annotate_text_size = annotate_text_size
        camera.annotate_foreground = Color(annotate_foreground)
        camera.annotate_background = Color(annotate_background)
        # pad with spaces so the background band extends past the text
        text = ' ' + annotate_text + ' '
        logging.debug("Annotate text is [%s]" % (text))
        camera.annotate_text = text
    logging.info("Start Camera Preview")
    # camera.start_preview()
    return camera
def videoRecording(totalTime):
    """Record `totalTime` seconds of H.264 video with a start-time overlay.

    Returns the start-time string, which is also the output file's base name.
    NOTE(review): the "%Y:%m:%d:%I:%M:%S" format uses 12-hour %I with no
    AM/PM marker, so morning and evening recordings can collide - confirm
    this is intentional.
    """
    camera = PiCamera()
    try:
        currentTime = datetime.now().strftime("%Y:%m:%d:%I:%M:%S")
        print("Start time:" + currentTime)
        camera.annotate_background = Color("green")
        camera.annotate_foreground = Color("yellow")
        camera.annotate_text = "start time: " + currentTime
        camera.annotate_text_size = 50
        startTime = datetime.now().strftime("%Y:%m:%d:%I:%M:%S")
        print("Video start:" + startTime)
        camera.start_preview()
        camera.start_recording("./allVideoRecording/%s.h264" % startTime)
        sleep(totalTime)
        camera.stop_recording()
        camera.stop_preview()
        currentTime = datetime.now().strftime("%Y:%m:%d:%I:%M:%S")
        print("Video end:" + currentTime)
    finally:
        # FIX: close the camera even if recording raises, so the device
        # is not leaked for later callers.
        camera.close()
    return startTime
def update_annotation(self):
    """Refresh the on-video timestamp overlay once per second while recording."""
    while self.camera.recording:
        stamp = datetime.datetime.now().strftime("%Y%m%d, %H:%M:%S")
        self.camera.annotate_background = Color('blue')
        self.camera.annotate_foreground = Color('yellow')
        self.camera.annotate_text = stamp
        self.camera.annotate_text_size = 20
        sleep(1)
def main():
    """Command-line capture loop: take a photo every `sleeptime` seconds.

    Options: -r/--rotation (degrees), -s/--sleeptime (seconds).
    Runs until interrupted with Ctrl+C; other exceptions restart the loop.
    """
    rotation = 0
    sleeptime = 3
    run_program = True
    try:
        opts, args = getopt.getopt(sys.argv[1:], "r:s:",
                                   ["rotation=", "sleeptime="])
    except getopt.GetoptError as err:
        print(str(err))
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-r", "--rotation"):
            rotation = arg
        elif opt in ("-s", "--sleeptime"):
            sleeptime = arg
    print("Rotation : " + str(rotation))
    print("Sleeptime : " + str(sleeptime))
    while run_program:
        camera = None  # FIX: bind before try so `finally` cannot NameError
        try:
            camera = PiCamera()
            camera.rotation = int(rotation)
            # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
            # other options: (3280, 2464) full 4:3 sensor, (1920, 1080)
            camera.resolution = (1600, 1200)
            camera.annotate_text_size = 64
            camera.annotate_background = Color('black')
            camera.annotate_foreground = Color('white')
            for i in range(7200):
                sleep(int(sleeptime))
                filenamestamp = datetime.datetime.fromtimestamp(
                    time.time()).strftime('%Y_%m_%d_%H_%M_%S')
                annotationstamp = datetime.datetime.fromtimestamp(
                    time.time()).strftime('%Y-%m-%d %H:%M:%S')
                camera.annotate_text = annotationstamp + ' ' + str(i)
                # quality ranges 1 (low) to 100 (high); default is 85
                camera.capture('/home/pi/github/ip-cam/upl/%s_py.jpeg' %
                               filenamestamp, resize=(1600, 1200), quality=20)
            else:
                camera.close()
        except KeyboardInterrupt:
            print("Exception: ", sys.exc_info())
            run_program = False
        except Exception:
            # FIX: narrowed from a bare `except:` which also swallowed
            # SystemExit/GeneratorExit; behaviour otherwise unchanged.
            print("Exception: ", sys.exc_info())
        finally:
            if camera is not None:
                camera.close()
        if run_program:
            print("Restart program.")
    print("End program.")
def __init__(self):
    """Set up the camera-room app: config, logging, storage, display,
    camera and keyboard listener.

    Relies on module-level `config_filename`; side effects include
    creating the video directory, opening a log file, initialising
    pygame fullscreen and opening the Pi camera.
    """
    # configuration
    data_dir = '/home/pi/rpi/projects/rpicam/data'
    self.config = CameraRoomConfig()
    if isfile(config_filename):
        self.config.load(config_filename)
    self.config.video_dir = data_dir + '/' + self.config.data['project_name']
    self.config.clip_length = 10
    self.config.record_countdown = 3
    self.config.replay_count = 3
    # time that it sits on the "press button to record" message until it
    # goes back to playing past clips
    self.config.live_mode_idle_timeout = 30
    # time that the 'video will be replayed' message appears in between
    # plays for the new recording
    self.config.video_replay_msg_wait = 2
    self.config.default_fgcolor = 'yellow'
    self.config.default_bgcolor = 'blue'
    self.config.default_camera_text_size = self.config.data['annotate_text_size']
    self.config.screen_width = self.config.data['resolution'][0]
    self.config.screen_height = self.config.data['resolution'][1]
    self.config.background_color_rgb = (0, 0, 0)
    self.config.text_color_rgb = (255, 255, 255)
    # init logging
    logging.basicConfig(
        format=
        '%(asctime)-15s:%(levelname)s:%(filename)s#%(funcName)s(): %(message)s',
        level=logging.DEBUG,
        filename='/home/pi/rpi/projects/rpicam/log/camera-room.log')
    logging.debug("begin main")
    # init storage
    if not isdir(self.config.video_dir):
        makedirs(self.config.video_dir)
    # init subproc handle (used to spawn external players; stdout discarded)
    self.subproc = None
    self.devnull = open(os.devnull, "w")
    # initialize screen display
    pygame.init()
    pygame.mouse.set_visible(False)
    self.font = pygame.font.Font(None, 52)
    logging.debug("Initializing display at %sx%s" %
                  self.config.data['resolution'])
    self.display = pygame.display.set_mode(self.config.data['resolution'],
                                           pygame.FULLSCREEN)
    self.display_message("initializing camera...")
    # initialize camera
    self.camera = PiCamera(framerate=30,
                           resolution=self.config.data['resolution'])
    self.camera.rotation = self.config.data['rotation']
    self.camera.annotate_background = Color(self.config.default_bgcolor)
    self.camera.annotate_foreground = Color(self.config.default_fgcolor)
    self.camera.annotate_text_size = self.config.default_camera_text_size
    # apply config to camera
    if isfile(config_filename):
        self.config.setCamera(self.camera)
        self.config.apply()
    # initialize keyboard listener (Enter key handled in on_release)
    self.enter_pressed = False
    self.listener = Listener(on_release=self.on_release)
    self.listener.start()
def capture(self):
    """Take one timestamped still; the file path is remembered in self.fname."""
    stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    self.fname = self.path + stamp + '.jpg'
    # black-on-white human-readable timestamp overlay
    self.camera.annotate_text = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    self.camera.annotate_text_size = 36
    self.camera.annotate_foreground = Color('black')
    self.camera.annotate_background = Color('white')
    self.camera.capture(self.fname)
def __init__(self):
    """Initialise a 1080p / 15 fps Pi camera with a static 'Drone Video' banner."""
    cam = picamera.PiCamera()
    cam.resolution = (1920, 1080)
    cam.framerate = 15
    cam.annotate_text = "Drone Video"
    cam.annotate_background = Color('blue')
    cam.annotate_foreground = Color('yellow')
    self.camera = cam
def update_annotation(self):
    """While recording, refresh the timestamp overlay once per second
    (only when the timestamp option is enabled)."""
    while self.camera.recording:
        if self.timestamp == True:
            stamp = datetime.datetime.now().strftime(self.timestamp_format)
            self.camera.annotate_background = Color(self.timestamp_bgcolor)
            self.camera.annotate_foreground = Color(self.timestamp_fontcolor)
            self.camera.annotate_text = stamp
            self.camera.annotate_text_size = self.timestamp_fontsize
        sleep(1)
def start_camera_preview():
    """Show the preview for 5 seconds with a 'Recording in progress' banner."""
    camera.start_preview()
    # white-on-black banner text
    camera.annotate_foreground = Color('white')
    camera.annotate_background = Color('black')
    camera.annotate_text = " Recording in progress "
    sleep(5)
    camera.stop_preview()
def annotation(string, size):
    """Overlay `string` at text size `size` on a blue/yellow banner and
    save one capture."""
    cam.annotate_background = Color('blue')
    cam.annotate_foreground = Color('yellow')
    cam.annotate_text = string
    cam.annotate_text_size = size
    cam.start_preview()
    cam.capture('/home/pi/parth/effect_annotation_size.jpg')
    time.sleep(3)
    cam.stop_preview()
def main():
    """Run an on-device image classifier on the VisionKit camera stream.

    Parses model/label/shape arguments, streams inference results,
    drives servos from the top result, and (with --preview) mirrors the
    result text onto the camera preview.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', required=True,
                        help='Path to converted model file that can run on VisionKit.')
    parser.add_argument('--label_path', required=True,
                        help='Path to label file that corresponds to the model.')
    parser.add_argument('--input_height', type=int, required=True,
                        help='Input height.')
    parser.add_argument('--input_width', type=int, required=True,
                        help='Input width.')
    parser.add_argument('--input_layer', required=True,
                        help='Name of input layer.')
    parser.add_argument('--output_layer', required=True,
                        help='Name of output layer.')
    parser.add_argument('--num_frames', type=int, default=None,
                        help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument('--input_mean', type=float, default=128.0,
                        help='Input mean.')
    parser.add_argument('--input_std', type=float, default=128.0,
                        help='Input std.')
    parser.add_argument('--input_depth', type=int, default=3,
                        help='Input depth.')
    parser.add_argument('--threshold', type=float, default=0.1,
                        help='Threshold for classification score (from output tensor).')
    parser.add_argument('--top_k', type=int, default=3,
                        help='Keep at most top_k labels.')
    parser.add_argument('--preview', action='store_true', default=False,
                        help='Enables camera preview in addition to printing result to terminal.')
    parser.add_argument('--show_fps', action='store_true', default=False,
                        help='Shows end to end FPS.')
    args = parser.parse_args()
    model = inference.ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, args.input_height, args.input_width, args.input_depth),
        input_normalizer=(args.input_mean, args.input_std),
        compute_graph=utils.load_compute_graph(args.model_path))
    labels = read_labels(args.label_path)
    # sensor_mode=4 is the full-field-of-view binned mode on the v2 sensor
    with PiCamera(sensor_mode=4, resolution=(1640, 1232),
                  framerate=30) as camera:
        if args.preview:
            camera.start_preview()
        with inference.CameraInference(model) as camera_inference:
            # num_frames=None runs forever
            for result in camera_inference.run(args.num_frames):
                processed_result = process(result, labels, args.output_layer,
                                           args.threshold, args.top_k)
                # steer servos using the single best classification
                send_signal_to_servos(processed_result[0])
                message = get_message(processed_result, args.threshold,
                                      args.top_k)
                if args.show_fps:
                    message += '\nWith %.1f FPS.' % camera_inference.rate
                print(message)
                if args.preview:
                    camera.annotate_foreground = Color('black')
                    camera.annotate_background = Color('white')
                    # PiCamera text annotation only supports ascii.
                    camera.annotate_text = '\n %s' % message.encode(
                        'ascii', 'backslashreplace').decode('ascii')
        if args.preview:
            camera.stop_preview()
def configureCamera(self, camera):
    """Apply overlay colours and resolution; when the door is closed,
    switch the camera into a long-exposure night configuration."""
    camera.annotate_foreground = Color('white')
    camera.annotate_background = Color('black')
    camera.resolution = (IMG_WIDTH, IMG_HEIGHT)
    if self.doorStatus != DoorStatus.CLOSED:
        return
    # Door closed: dark scene - boost contrast/brightness, slow the frame
    # rate and take a 4-second exposure, then let the sensor settle.
    camera.contrast = 100
    camera.brightness = 80
    camera.framerate = Fraction(1, 6)
    camera.iso = 800
    camera.exposure_mode = 'night'
    camera.shutter_speed = 4000000
    time.sleep(5)
def camContrast():
    """Preview a sweep of every contrast value from -100 to 99, labelling
    each frame, then restore the default contrast."""
    camera.start_preview()
    camera.annotate_background = Color('blue')
    camera.annotate_foreground = Color('yellow')
    for i in range(-100, 100):
        camera.contrast = i
        camera.annotate_text = "Contrast = " + str(i)
        sleep(0.15)
    camera.stop_preview()
    camera.contrast = 0  # back to the default
    # FIX: removed the trailing bare `camera.annotate_background` and
    # `camera.annotate_foreground` expressions - they only read the
    # attributes and discarded the result (no-ops, apparently a
    # half-written reset of the overlay colours).
def camBrightness():
    """Preview a sweep of every brightness value from 0 to 99, labelling
    each frame, then restore the default brightness."""
    camera.start_preview()
    camera.annotate_background = Color('blue')
    camera.annotate_foreground = Color('yellow')
    for i in range(100):
        camera.brightness = i
        camera.annotate_text = "Brightness = " + str(i)
        sleep(0.15)
    camera.stop_preview()
    camera.brightness = 50  # back to the default
    # FIX: removed the trailing bare `camera.annotate_background` and
    # `camera.annotate_foreground` expressions - no-op attribute reads,
    # apparently a half-written reset of the overlay colours.
def camera_pic():
    """Run a countdown photo session.

    Shows a fullscreen preview with a countdown, captures `num_pic` frames
    while stepping the motor forward, builds a GIF from them via gif(),
    deletes the temporary session folder and returns the GIF path.
    Returns None when interrupted with Ctrl+C.
    """
    try:
        # alpha: start preview semi-transparent (0-255)
        # NOTE(review): alpha is assigned but never passed to
        # start_preview() - confirm whether it was meant to be used.
        alpha = 200
        camera.start_preview(fullscreen=True)
        # configure overlay text
        camera.annotate_text_size = 160  # valid range is 6-160
        camera.annotate_background = Color('black')
        camera.annotate_foreground = Color('white')
        # wait 2 seconds before the countdown starts
        time.sleep(2)
        # countdown runs down: 5.. 4.. 3..
        for i in range(5, 2, -1):
            camera.annotate_text = "%s" % i
            time.sleep(.5)
        camera.annotate_text = ""
        # create the folder for this session's pictures
        pfad_temp = pfad_pics + '/pics_session'
        os.mkdir(pfad_temp)
        # capture num_pic frames, advancing the motor between shots
        for i in range(num_pic):
            camera.capture(pfad_temp + '/image{0:02d}.jpg'.format(i))
            motor.forward(0.001, 25)
            time.sleep(0.2)
        # stop the preview
        camera.stop_preview()
        # reset the motor to its start position
        motor.backwards(0.001, 100)
        motor.setStep(0, 0, 0, 0)
        # call gif(), passing the temporary session folder
        pfad_gif_return = gif(pfad_temp)
        # delete the contents of the session folder
        for root, dirs, files in os.walk(pfad_temp, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                os.rmdir(os.path.join(root, name))
        # delete the session folder itself
        os.rmdir(pfad_temp)
        return pfad_gif_return
    except KeyboardInterrupt:
        # if the program is interrupted with Ctrl+C, stop the preview
        camera.stop_preview()
def get_image():
    """Capture one 1080p still annotated with the time and fresh sensor readings."""
    # PiCamera docs recommend >= 2 s warm-up for auto-adjust; the original
    # left start_preview()/sleep commented out, so they stay out here too.
    now = strftime('%m%d%Y-%H%M')  # MMDDYYYY-HHMM
    camera.resolution = (1920, 1080)
    camera.brightness = 50
    camera.annotate_text_size = 30
    camera.annotate_foreground = Color('white')
    camera.annotate_background = Color('black')
    # refresh the readings before taking the photo
    log()
    # overlay: timestamp, temperature, humidity, pressure (Cr-separated)
    camera.annotate_text = now + Cr + Tep + Cr + Hum + Cr + Pres
    target = "/home/pi/tlapse/" + now + ".jpg"
    camera.capture(target)
def take_photo():
    """Preview for 5 s with a red-on-white banner, then save a timestamped capture."""
    mycamera.start_preview()
    mycamera.annotate_background = Color('white')
    mycamera.annotate_foreground = Color('red')
    mycamera.annotate_text = " SWS3009B - 2019"
    sleep(5)
    stamp = time.ctime()
    target = '/home/pi/Desktop/camera/cat' + str(stamp) + '.jpg'
    mycamera.capture(target)
    mycamera.stop_preview()
def takePicture(fileName):
    """Capture one stylised 1080p still ('film' effect, high contrast,
    desaturated) into `fileName`."""
    camera.resolution = (1920, 1080)
    camera.start_preview()
    # apply every setting in declaration order (dicts preserve insertion order)
    settings = {
        'image_effect': "film",
        'annotate_background': Color('black'),
        'annotate_foreground': Color('yellow'),
        'contrast': 100,
        'iso': 800,
        'saturation': -90,
        'annotate_text_size': 70,
        'annotate_text': "Pi rover view",
    }
    for attr, value in settings.items():
        setattr(camera, attr, value)
    time.sleep(2)  # let auto-exposure settle before the shot
    camera.capture(fileName)
    camera.stop_preview()
def show():
    """Power up the camera's USB hub port, preview for 5 s with a GPS
    label, then power the port back down and release the camera."""
    camera = PiCamera()
    label = "5sec"
    camera.hflip = True
    # camera.brightness = 50
    os.system("sudo ./hub-ctrl -h 1 -P 2 -p 1")  # hub port power on
    camera.start_preview()
    camera.annotate_background = Color('red')
    camera.annotate_foreground = Color('white')
    camera.annotate_text = label
    sleep(5)
    camera.stop_preview()
    os.system("sudo ./hub-ctrl -h 1 -P 2 -p 0")  # hub port power off
    camera.close()
def camPicCapture(numberOfPics, delay):
    """Capture `numberOfPics` stills, `delay` seconds apart, each with a
    timestamped banner, into /home/pi/Shared/images/."""
    camera.start_preview()
    camera.annotate_background = Color('blue')
    camera.annotate_foreground = Color('yellow')
    # NOTE(review): isoformat('0') uses '0' as the date/time separator -
    # plain isoformat() was probably intended; kept as-is.
    camera.annotate_text = " Hello duckpin world " + dt.datetime.now(
    ).isoformat('0')
    for i in range(numberOfPics):
        sleep(delay)
        camera.annotate_text = " Hello duckpin1 world pic " + dt.datetime.now(
        ).isoformat('0')
        camera.capture('/home/pi/Shared/images/aimage%s.jpg' % i)
    camera.stop_preview()
    camera.annotate_text = ""
    # FIX: removed the trailing bare `camera.annotate_background` /
    # `camera.annotate_foreground` expressions - no-op attribute reads.
def camVideoCapture(numberOfVideos, videoLength, delayBetweenVideos):
    """Record `numberOfVideos` clips of `videoLength` seconds each,
    waiting `delayBetweenVideos` seconds before each clip, with a
    timestamped banner, into /home/pi/Shared/videos/."""
    camera.start_preview()
    camera.annotate_background = Color('blue')
    camera.annotate_foreground = Color('yellow')
    for i in range(numberOfVideos):
        sleep(delayBetweenVideos)
        # NOTE(review): isoformat('0') uses '0' as the date/time separator -
        # plain isoformat() was probably intended; kept as-is.
        camera.annotate_text = " Hello duckpin1 world video " + dt.datetime.now(
        ).isoformat('0')
        camera.start_recording('/home/pi/Shared/videos/avideo%s.h264' % i)
        sleep(videoLength)
        camera.stop_recording()
    camera.stop_preview()
    camera.annotate_text = ""
    # FIX: removed the trailing bare `camera.annotate_background` /
    # `camera.annotate_foreground` expressions - no-op attribute reads.
def parseGPS(data, file_name_log):
    """Parse a $GPRMC NMEA sentence.

    On a valid fix: logs date/time/position/speed (plus CPU load and
    temperature) to `file_name_log`, pushes the speed to ThingSpeak and
    refreshes the camera's annotation overlay.  Silently ignores
    non-GPRMC sentences; returns early when no satellite fix ('V').
    """
    # print("raw:", data)  # prints raw data
    if data[0:6] == "$GPRMC":
        sdata = data.split(",")
        if sdata[2] == 'V':
            # FIX: parenthesised the Python 2 `print` statement (syntax
            # error under Python 3; identical output on both versions).
            print("no satellite data available")
            logging.debug(str(datetime.now()) + 'no satellite data available')
            return
        # UTC hour + 2 (fixed local-time offset)
        ora = (int(sdata[1][0:2]) + 2)
        # FIX: renamed local `time` -> `time_str`; it shadowed the stdlib
        # module name.
        time_str = str(ora) + ":" + sdata[1][2:4] + ":" + sdata[1][4:6]
        lat_float = decode(float(sdata[3]))  # latitude
        lat = str(lat_float)
        dirLat = sdata[4]  # latitude direction N/S
        lon_float = decode(float(sdata[5]))  # longitude
        lon = str(lon_float)
        dirLon = sdata[6]  # longitude direction E/W
        speed_int = (float(sdata[7]) * 1.852)  # knots -> km/h
        speed = str(speed_int)
        trCourse = sdata[8]  # true course
        date = sdata[9][0:2] + "/" + sdata[9][2:4] + "/" + sdata[9][4:6]  # date
        entry = date + " , " + time_str + " , " + lat + " , " + lon + " , " + speed + " , " + str(
            psutil.cpu_percent()) + " , " + measure_temp() + "\n"
        thingSpeak(speed)
        writeLog(entry, file_name_log)
        camera.annotate_background = Color('black')
        camera.annotate_text = "Date : %s, time : %s, latitude : %s(%s), longitude : %s(%s),speed : %s, CPU : %s, Temp : %s " % (
            date, time_str, lat, dirLat, lon, dirLon, speed,
            str(psutil.cpu_percent()), str(measure_temp()))
def __init__(self):
    """Open a 1080p / 15 fps camera, rotated 180 degrees, with a
    black banner showing the construction timestamp."""
    cam = PiCamera()
    cam.resolution = (1920, 1080)
    cam.framerate = 15
    cam.annotate_background = Color('black')
    cam.annotate_text = str(datetime.datetime.now())
    cam.rotation = 180
    self.camera = cam
def takePicture(self):
    """Show a 5-second on-screen countdown, capture a still into
    self.localDirectory and return its extension-less filename."""
    ret = None
    self.camera.start_preview()
    # self.camera.annotate_background = Color('black')
    self.camera.annotate_foreground = Color('white')
    self.camera.annotate_text_size = 100
    # countdown 5..1, one second per digit (same strings as before)
    for count in (5, 4, 3, 2, 1):
        self.camera.annotate_text = "\n\n %d " % count
        time.sleep(1)
    self.camera.annotate_text = ""
    time.sleep(1)
    filename = time.strftime("%y%m%d_%H%M%S")
    self.camera.capture(self.localDirectory + filename + ".jpg")
    ret = filename
    self.camera.stop_preview()
    return ret
def capturePicture(filename='/tmp/iantest.jpg'):
    """Capture one annotated still to `filename` (default /tmp/iantest.jpg)."""
    camera = PiCamera()
    try:
        # Heat the camera up
        camera.rotation = 180
        camera.start_preview()
        camera.annotate_background = Color('blue')
        camera.annotate_foreground = Color('yellow')
        camera.annotate_text_size = 20
        now = time.strftime("%c")
        camera.annotate_text = " Serena [TRB]: %s " % now
        time.sleep(.5)
        camera.capture(filename)
        time.sleep(.5)
        camera.stop_preview()
    finally:
        # FIX: the camera was never closed, leaking the device and
        # blocking later callers from opening it.
        camera.close()
    return
def frames():
    """Yield JPEG frames from the Pi camera, each annotated with the current time."""
    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)  # HS
        camera.framerate = 5  # HS
        time.sleep(2)  # let camera warm up
        buf = io.BytesIO()
        camera.annotate_foreground = Color('green')
        camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        for _ in camera.capture_continuous(buf, 'jpeg', use_video_port=True):
            # refresh the timestamp before handing out the frame
            camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            buf.seek(0)
            yield buf.read()
            # reset the stream for the next frame
            buf.seek(0)
            buf.truncate()
def imageTaker(): GPIO.setmode(GPIO.BCM) # camera = PiCamera() # dir = os.path.dirname(__file__) # camera.resolution = (2592,1944) # camera.framerate = 15 # dateAndTime = datetime.datetime.now() # dateAndTime = dateAndTime.strftime("%m/%d/%Y %H:%M:%S") camera = PiCamera() GPIO.setup(26, GPIO.OUT, initial=GPIO.LOW) time.sleep(5) for effect in camera.AWB_MODES: print(effect) camera.resolution = (2592, 1944) camera.framerate = 15 dateAndTime = datetime.datetime.now() dateAndTime = dateAndTime.strftime("%m/%d/%Y %H:%M:%S") imageName = '/home/pi/Documents/autoGrow/growPics/imageTroubleShoot/' + effect + 'image.png' camera.start_preview() camera.awb_mode = effect camera.annotate_foreground = Color('black') camera.annotate_text_size = 60 camera.annotate_text = dateAndTime camera.rotation = 180 time.sleep(5) camera.capture(imageName) camera.stop_preview() #time.sleep(5) GPIO.setup(26, GPIO.OUT, initial=GPIO.HIGH)
def home():
    """Index route.

    GET (or any request when no Pi camera is available) just renders the
    gallery; POST captures a timestamped image first, retrying once if
    the camera is busy.
    """
    hostname = gethostname()
    # If there is no Pi camera available (local dev) always do simple GET returns.
    if request.method == 'GET' or not PI_CAMERA_AVAILABLE:
        return render_template('index.html',
                               hostname=hostname,
                               images=get_image_list(),
                               disk_usage=get_disk_usage())
    # POST method only gets this far now
    now = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    image_name = f'image-{now}.jpg'
    image_path = os.path.join(STATIC_IMAGE_DIR, image_name)
    with PiCamera() as camera:
        camera.annotate_background = Color('green')
        camera.annotate_text = now
        try:
            camera.capture(image_path, quality=15)
        except exc.PiCameraMMALError:
            # Sometimes the camera will already be in use from something else
            time.sleep(3)
            try:
                # FIX: the retry passed a (dir, name) tuple straight to
                # capture(); use the joined path like the first attempt.
                camera.capture(image_path, quality=15)
            except Exception:
                app.logger.error('Error taking image.')
    return render_template('index.html',
                           hostname=hostname,
                           images=get_image_list(),
                           image_name=image_name,
                           disk_usage=get_disk_usage())