def cifferDetection(image_height, image_width, image_queue):
    start_time = time()
    with PiCamera() as camera:
        camera.resolution = (image_height, image_width)
        camera.framerate = 5
        camera.exposure_mode = 'sports'
        with array.PiRGBArray(camera) as stream:
            # Warm up the camera
            sleep(0.5)
            while True:
                capture_time = time()
                camera.capture(stream, 'rgb', use_video_port=True)
                # Process array for CNN prediction
                x = stream.array
                x = x.reshape((1, ) + x.shape)
                x = x * (1. / 255)
                image_queue.append(x)
                stream.truncate(0)
                if time() - start_time > 38:
                    break
def init_camera():
    camera = PiCamera()
    output = array.PiRGBArray(camera)
    camera.rotation = 0
    # Maximum resolution for this camera module
    camera.resolution = (2592, 1944)
    sleep(5)
    return camera, output
def getImgArray(self, asNDArray):
    # Capture a single RGB frame; return it as a numpy array or as the PiRGBArray
    output = pca.PiRGBArray(self.Acamera)
    self.Acamera.capture(output, 'rgb')
    if asNDArray:
        return output.array
    else:
        return output
def getImgArray(asNDArray):
    output = pca.PiRGBArray(camera1)
    camera1.capture(output, 'rgb')
    if asNDArray:
        return output.array
    else:
        return output
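# Usage sketch (assumption, not from the source): the module-level getImgArray()
# above relies on a global PiCamera instance named camera1. One way to provide it
# is via init_camera() further up; the call below is illustrative only.
camera1, _ = init_camera()
frame = getImgArray(asNDArray=True)   # numpy array of shape (height, width, 3)
print(frame.shape, frame.dtype)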
def capture_rgb():
    camera = PiCamera()
    stream = array.PiRGBArray(camera)
    camera.resolution = (32, 32)
    camera.start_preview()
    time.sleep(2)
    camera.capture(stream, 'rgb')
    print("RGB RGB")
    print(stream.array.shape)
    result = ddd_to_int(stream.array)
    print(result)
    return result
def getImgArray(self, asNDArray):
    print("before output")
    output = pca.PiRGBArray(self.C_camera)
    print("after output")
    print("\n")
    print("before capture")
    self.C_camera.capture(output, 'rgb')
    print("after capture")
    if asNDArray:
        return output.array
    else:
        return output
def get_image_fast_max(self):
    """
    Acquire a quick image using the PiRGBArray method - good for
    auto-exposure, but not for proper image analysis!
    """
    #print 'Getting image...',
    st = time.time()
    self.resolution = (int(self.Hpixels / 2), int(self.Vpixels / 2))
    with camarray.PiRGBArray(self) as stream:
        self.capture(stream, 'rgb')
        image = stream.array
    print('Elapsed time (get_image_fast):', time.time() - st)
    print('\tImage maximum value:', image[:, :, 0].max() * 4)
    return image[:, :, 0].max() * 4
def _stream_to_ros(self, camera, size):
    with piCamArray.PiRGBArray(camera, size=size) as stream:
        while self.power_on and not rospy.is_shutdown():
            yield stream
            snap_time = rospy.Time.now()  # - rospy.Duration(0.025)  # delay to handle sync error
            image = self.undistort(stream.array) if UNDISTORT else stream.array
            self.camera_info.header.stamp = snap_time
            self.publish_standard_camera(image, snap_time)
            # image with camera world transform for localization
            if self.skip_count < 1:
                self.publish_image_with_stable_transform(image, snap_time)
                self.skip_count = 5
            self.skip_count -= 1
            stream.truncate(0)
def run(self):
    with array.PiRGBArray(self._camera) as output:
        while self._running:
            self._camera.capture(output, 'rgb', use_video_port=True)
            if self._dedectGreenLight:
                self.isGreen = self._greenLightDedection.greenLightDedected(output)
                if self.isGreen:
                    self.stopGreenlightDedection()
            if self._dedectRomanNumber:
                self.imageQueue.put(output)
            output.truncate(0)
            sleep(0.01)
def _stream_raw_to_file(self, camera, size):
    with piCamArray.PiRGBArray(camera, size=size) as stream:
        frame_num = 0
        while frame_num < 100:
            yield stream
            print("Frame", frame_num)
            snap_time = rospy.Time.now()  # - rospy.Duration(0.025)  # delay to handle sync error
            image = stream.array
            self.camera_info.header.stamp = snap_time
            self.publish_standard_camera(image, snap_time)
            frame_num += 1
            self.output = io.open('calibration/calibrate%02d.jpg' % frame_num, 'wb')
            self.output.write(cv2.imencode(".jpg", image)[1])
            self.output.close()
            stream.truncate(0)
        self.turn_off()
def crosslightDetection(image_height, image_width, model):
    with PiCamera() as camera:
        camera.resolution = (image_height, image_width)
        camera.framerate = 3
        sleep(1)
        start_time = 0
        red_counter = 0
        with array.PiRGBArray(camera) as output:
            while True:
                camera.capture(output, 'rgb', use_video_port=True)
                x = output.array
                x = x.reshape((1, ) + x.shape)
                x = x * (1. / 255)
                prediction = np.around(model.predict(x), 3)[0][0]
                #print(prediction)
                if prediction > 0.80:
                    red_counter += 1
                    if red_counter == 5:
                        display.zero()
                        start_time = time()
                if time() - start_time > 15 and red_counter > 5:
                    print("0")
                    sys.stdout.flush()
                    break
                if prediction < 0.20 and red_counter > 5:
                    print("0")
                    sys.stdout.flush()
                    break
                output.truncate(0)
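# Usage sketch (assumption, not from the source): crosslightDetection() expects a
# binary classifier whose predict(x) returns values near 1.0 while the light is red
# and near 0.0 once it turns green. The Keras import and the file name
# 'crosslight_model.h5' are hypothetical; the surrounding module is assumed to
# provide display, np, sys, time and sleep as used above.
from tensorflow.keras.models import load_model

model = load_model('crosslight_model.h5')
crosslightDetection(32, 32, model)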
def take_and_process_picture():
    """
    1. a local stream for picture saving is initialised
    2. camera resolution is set and a picture is captured into the stream
    3. the captured image is read from the stream and split into the two sides
    4. the (now) two pictures are resized to the size of the field
    5. the mask for the hands is created
    6. the pictures are reduced to the hand positions
    7. the background/foreground noise is reduced
    8. the pictures are converted to grey tones
    9. the contours of the hands are searched and saved
    10. the middle of the contours is determined
    11. the new player positions are set
    :return: True when both players' hands were found - important for standby() when registering players
    """
    player1_reg = False
    player2_reg = False

    # code based on:
    # https://raspberrypi.stackexchange.com/questions/24232/picamera-taking-pictures-fast-and-processing-them
    # capture picture into stream
    stream = array.PiRGBArray(camera)
    camera.resolution = (640, 320)  # (x, y)
    # camera.start_preview()
    # sleep(0.5)
    camera.capture(stream, format="rgb", use_video_port=True)
    # camera.stop_preview()

    # turn data into a cv2 image
    # print(stream.array[0].size / 3, stream.array.size / stream.array[0].size)
    img = np.frombuffer(stream.getvalue(), dtype=np.uint8).reshape(320, 640, 3)
    # np.fliplr(img)

    # split the picture into two sides
    x = img[0].size // 3
    x = x if x % 2 == 0 else x - 1
    x = int(0.5 * x)
    max_y = img.size // img[0].size
    left = img[0:max_y, 0:x]
    right = img[0:max_y, x + 1:img[0].size // 3]

    # Resize the images to the size of the field
    left = cv2.resize(left, (32, 32))
    right = cv2.resize(right, (32, 32))

    # Define the skin colour range and check which pixels lie inside it
    red_lower = np.array([160, 150, 140], np.uint8)
    red_upper = np.array([200, 190, 190], np.uint8)
    # because of the lighting it's easier to detect a light colour
    # red_lower = np.array([200, 200, 200], np.uint8)
    # red_upper = np.array([255, 255, 255], np.uint8)

    # get the mask
    left_mask = cv2.inRange(left, red_lower, red_upper)
    right_mask = cv2.inRange(right, red_lower, red_upper)

    # apply mask
    left_hand = cv2.bitwise_and(left, left, mask=left_mask)
    right_hand = cv2.bitwise_and(right, right, mask=right_mask)

    # clean the skin colour area, removing noise and making it smaller
    # https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html
    kernel = np.ones((3, 3), "uint8")

    # outside noise cancelling
    left_hand = cv2.morphologyEx(left_hand, cv2.MORPH_OPEN, kernel)
    right_hand = cv2.morphologyEx(right_hand, cv2.MORPH_OPEN, kernel)

    # inside noise cancelling
    left_hand = cv2.morphologyEx(left_hand, cv2.MORPH_CLOSE, kernel)
    right_hand = cv2.morphologyEx(right_hand, cv2.MORPH_CLOSE, kernel)

    # making it smaller
    left_hand = cv2.erode(left_hand, kernel)
    right_hand = cv2.erode(right_hand, kernel)

    left_hand = cv2.cvtColor(left_hand, cv2.COLOR_RGB2GRAY)
    right_hand = cv2.cvtColor(right_hand, cv2.COLOR_RGB2GRAY)

    _, left_hand = cv2.threshold(left_hand, 150, 255, cv2.THRESH_BINARY)
    _, right_hand = cv2.threshold(right_hand, 150, 255, cv2.THRESH_BINARY)

    _, left_contours, _ = cv2.findContours(left_hand, 1, 2)  # cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
    _, right_contours, _ = cv2.findContours(right_hand, 1, 2)  # cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE

    # update the centers of player1 and player2
    # https://www.pyimagesearch.com/2016/02/01/opencv-center-of-contour/
    if not left_contours == []:
        print("player 1 detected!")
        middle = cv2.moments(left_contours[0])
        if middle["m00"] != 0:
            xt = player1.x[0]
            yt = player1.y[0]
            x = int(middle["m10"] / middle["m00"])
            y = int(middle["m01"] / middle["m00"])
            player1.set_position(x if not x == 0 else xt,
                                 y if not y == 0 else yt)
            player1_reg = True
    else:
        print("player 1 not detected.")

    if not right_contours == []:
        print("player 2 detected!")
        middle = cv2.moments(right_contours[0])
        if middle["m00"] != 0:
            xt = player2.x[0]
            yt = player2.y[0]
            x = int(middle["m10"] / middle["m00"])
            y = int(middle["m01"] / middle["m00"])
            player2.set_position(x + 32 if not x == 0 else xt,
                                 y if not y == 0 else yt)
            player2_reg = True
    else:
        print("player 2 not detected.")

    cv2.destroyAllWindows()
    return player1_reg and player2_reg
## takes a single image and converts it to a cv2 image (ready for image processing)
from picamera import PiCamera, array
from time import sleep
import socket, pickle
import numpy as np
import cv2
import copy

with PiCamera() as camera:
    with array.PiRGBArray(camera) as output:
        camera.resolution = (320, 240)
        camera.framerate = 24
        sleep(.25)
        current_img = np.empty((240 * 320 * 3), dtype=np.uint8)
        camera.capture(current_img, 'bgr')
        current_img = current_img.reshape((240, 320, 3))
        cv2.imwrite('image_simple.jpg', current_img)

# CONSTANTS -- right now a blueish image
# TODO: determine good bounds for our nav. object (& figure out nav. object)
RED_LOW = 0
RED_HIGH = 25
GREEN_LOW = 25
GREEN_HIGH = 100
BLUE_LOW = 0
BLUE_HIGH = 255
target_pixel_count = 0
total_count = 240 * 320
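# Minimal sketch (assumption, not from the source): apply the RGB bounds defined
# above to the captured frame and count how much of it matches. current_img was
# captured as 'bgr', so the bounds are arranged in BGR order for cv2.inRange().
lower = np.array([BLUE_LOW, GREEN_LOW, RED_LOW], dtype=np.uint8)
upper = np.array([BLUE_HIGH, GREEN_HIGH, RED_HIGH], dtype=np.uint8)
mask = cv2.inRange(current_img, lower, upper)      # 255 where a pixel is in range
target_pixel_count = int(cv2.countNonZero(mask))
print("target pixels: %d / %d (%.1f%%)"
      % (target_pixel_count, total_count, 100.0 * target_pixel_count / total_count))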
def run_camera(config_file_name):
    config = configparser.ConfigParser()
    config.read(config_file_name)
    cam_mode = int(config['Recording']['sensor_mode'])
    print(cam_mode)
    MAX_WIDTH = float(config['Recording']['sensor_width'])
    MAX_HEIGHT = float(config['Recording']['sensor_height'])
    cam_width = float(config['Recording']['zoom_w']) * MAX_WIDTH
    cam_height = float(config['Recording']['zoom_h']) * MAX_HEIGHT
    bg_scale_factor = float(config['Background']['scale_factor'])
    bg_width = round((cam_width * bg_scale_factor) / 32) * 32
    bg_height = round((cam_height * bg_scale_factor) / 16) * 16
    bg = Background(float(config['Background']['alpha']),
                    int(config['Background']['diff_threshold']),
                    float(config['Background']['area_threshold']),
                    int(config['Background']['delay']),
                    bg_width, bg_height)
    led_green = LED(16)
    led_yellow = LED(20)
    with PiCamera(sensor_mode=cam_mode,
                  resolution=(int(MAX_WIDTH), int(MAX_HEIGHT))) as cam:
        cam.framerate = int(config['Recording']['framerate'])
        cam.iso = int(config['Recording']['iso'])
        cam.zoom = (float(config['Recording']['zoom_x']),
                    float(config['Recording']['zoom_y']),
                    float(config['Recording']['zoom_w']),
                    float(config['Recording']['zoom_h']))
        cam.exposure_compensation = int(config['Recording']['exposure_compensation'])
        cam.color_effects = (128, 128)
        # The window parameter does not behave as the API documents: the window
        # remains small and only its position changes with different widths and heights.
        #cam.start_preview(fullscreen=False, window=(0, 0, int(cam_width/2), int(cam_height/2)))
        preview = cam.start_preview(fullscreen=False,
                                    window=(0, 10, int(cam_width), int(cam_height)))
        print('auto', config['Recording']['exposure_mode'])
        cam.exposure_mode = str(config['Recording']['exposure_mode'])
        cam.awb_mode = config['Recording']['awb_mode']
        cam.shutter_speed = int(config['Recording']['shutter_speed'])
        #time.sleep(20)
        print("Exposure : {}".format(cam.exposure_speed))
        recording = False
        #cam.wait_recording(5)
        last_split = time.time()
        still = None
        filename = ""
        with array.PiRGBArray(cam, size=(int(bg_width), int(bg_height))) as output:
            cam.capture(output, 'rgb', use_video_port=True,
                        resize=(int(bg_width), int(bg_height)))
            still = output.array[:, :, 0]
            time.sleep(5)
            output.truncate(0)
            while True:
                cam.capture(output, 'rgb', use_video_port=True,
                            resize=(int(bg_width), int(bg_height)))
                # tell the world we're still alive
                led_green.toggle()
                still = output.array[:, :, 0]
                bg.update_bg(still)
                output.truncate(0)
                if (not bg.is_active()) and recording:
                    led_yellow.off()
                    print('move file')
                    os.rename(filename,
                              config['Recording']['video_dir'] + "/" + filename)
                    cam.stop_recording()
                    recording = False
                elif bg.is_active() and not recording:
                    led_yellow.on()
                    print('start recording')
                    filename = (config['General']['feeder_id'] + '_' +
                                datetime.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S") +
                                '.h264')
                    cam.start_recording(filename,
                                        resize=(int(cam_width), int(cam_height)),
                                        quality=20)
                    recording = True
                    last_split = time.time()
                elif time.time() - last_split > int(config['Recording']['video_length']) and recording:
                    print('split recording')
                    filename_new = (config['General']['feeder_id'] + '_' +
                                    datetime.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S") +
                                    '.h264')
                    cam.split_recording(filename_new)
                    last_split = time.time()
                    os.rename(filename,
                              config['Recording']['video_dir'] + "/" + filename)
                    filename = filename_new
                if recording:
                    cam.wait_recording(float(config['Background']['bg_time']))
                else:
                    time.sleep(float(config['Background']['bg_time']))
        cam.stop_recording()
        cam.stop_preview()
#cam.start_preview(fullscreen=False, window=(0, 0, int(cam_width/2), int(cam_height/2)))
cam.start_preview(fullscreen=False, window=(0, 0, 640, 480))
print('auto', len(config['Recording']['exposure_mode']))
cam.exposure_mode = str(config['Recording']['exposure_mode'])
cam.awb_mode = config['Recording']['awb_mode']
cam.shutter_speed = int(config['Recording']['shutter_speed'])
#time.sleep(20)
recording = False
#cam.wait_recording(5)
last_split = time.time()
still = None
filename = ""
with array.PiRGBArray(cam, size=(int(bg_width), int(bg_height))) as output:
    cam.capture(output, 'rgb', use_video_port=True,
                resize=(int(bg_width), int(bg_height)))
    still = output.array[:, :, 0]
    time.sleep(5)
    output.truncate(0)
    while True:
        cam.capture(output, 'rgb', use_video_port=True,
                    resize=(int(bg_width), int(bg_height)))
        # tell the world we're still alive
        led_green.toggle()
import threading
import time

import picamera
import picamera.array as pa

import imageRecognizer  # project module providing bottle_detection()

# define the camera and its settings
camera_resolution = (400, 300)
camera = picamera.PiCamera()
camera.rotation = 180
camera.resolution = camera_resolution

# define the parameters
inaccuracy = 0.10
stop_percentage = 0.8
resolution_middle = camera_resolution[0] / 2

# define the buffer to use for picture processing
picture_buffer = pa.PiRGBArray(camera)


def get_signal(image):
    result = imageRecognizer.bottle_detection(image)
    if result is not None:
        print(result)
        (lowest_x, highest_x) = result
        middle_point = lowest_x + (highest_x - lowest_x) / 2
        if (highest_x - lowest_x) / camera_resolution[1] >= stop_percentage:
            print("[Thread processor]\t: send stopping signal to the queue")
            return 0
        elif middle_point <= resolution_middle - camera_resolution[0] * inaccuracy:
            print("[Thread processor]\t: send go left signal to the queue")
            return -1