Example #1
def picam_mmal(width, height, run, tflite=None):
    camera = mo.MMALCamera()
    camera.outputs[1].framesize = (width, height)
    camera.outputs[1].commit()

    event = Event()

    transform = CameraMO(width, height, run, event, tflite)

    # connect to camera out[1]
    transform.inputs[0].connect(camera.outputs[1])
    # enable the connection
    transform.connection.enable()

    # enable all components
    transform.enable()
    camera.enable()

    # Enable capture
    camera.outputs[1].params[mmal.MMAL_PARAMETER_CAPTURE] = True

    # Wait for event object to be set before returning
    event.wait()

    # Disable capture
    camera.outputs[1].params[mmal.MMAL_PARAMETER_CAPTURE] = False
    # Cleanup
    camera.close()
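CameraMO above is a project-specific transform component that is not shown in this extract. A minimal sketch of what such a component might look like, assuming the standard mo.MMALPythonComponent subclassing pattern (the constructor arguments and the processing body are assumptions of this sketch, not the original project's code):

class CameraMO(mo.MMALPythonComponent):
    def __init__(self, width, height, run, event, tflite=None):
        super(CameraMO, self).__init__()
        self._run = run          # user-supplied frame handler
        self._event = event      # set to tell picam_mmal() to tear down
        self._tflite = tflite    # optional TFLite interpreter

    def _handle_frame(self, port, buf):
        # Hand the raw frame to the user callback; when it reports completion,
        # set the event so picam_mmal() stops waiting
        if self._run(buf.data, self._tflite):
            self._event.set()
        # Returning False keeps the component receiving frames
        return False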
Example #2
    def __init__(self):
        threading.Thread.__init__(self)
        
        self.keepRunning=True
        
        camera = mo.MMALCamera()
        
        camera.outputs[0].framesize = (1920, 1080)
        camera.outputs[0].framerate = 1
        camera.outputs[0].format = mmal.MMAL_ENCODING_RGB24

        # Set the ISO (sensor gain)
        camera.control.params[mmal.MMAL_PARAMETER_ISO] = 800

        # Read-modify-write the AWB parameter to select the sunlight preset
        awb = camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
        awb.value = mmal.MMAL_PARAM_AWBMODE_SUNLIGHT
        camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = awb
        
        camera.outputs[0].commit()
        
        self.camera = camera

        self.set_shutter_speed(300)
                
        # r is presumably a Redis client created elsewhere in the module:
        # subscribe the command handlers and run the pub/sub listener in a
        # background thread
        p = r.pubsub(ignore_subscribe_messages=True)

        p.subscribe(**{messages.STOP_ALL: self.stop_all_handler,
                       messages.CMD_SET_SHUTTER_SPEED: self.set_shutter_speed_handler})

        self.thread = p.run_in_thread(sleep_time=0.01)
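The set_shutter_speed helper and the pub/sub handlers used above are defined elsewhere in the class. A minimal sketch of the shutter-speed pair, assuming the argument is in milliseconds (MMAL_PARAMETER_SHUTTER_SPEED expects microseconds) and that handlers receive standard redis-py message dicts; both assumptions are this sketch's, not the original project's:

    def set_shutter_speed(self, speed_ms):
        # MMAL expects the exposure time in microseconds
        self.camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = int(speed_ms * 1000)

    def set_shutter_speed_handler(self, message):
        # redis-py delivers messages as dicts with a 'data' payload
        self.set_shutter_speed(float(message['data']))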
Example #3
def main(output_filename):
    camera = mo.MMALCamera()
    preview = mo.MMALRenderer()
    encoder = mo.MMALVideoEncoder()
    clock = ClockSplitter()
    target = mo.MMALPythonTarget(output_filename)

    # Configure camera output 0
    camera.outputs[0].framesize = (640, 480)
    camera.outputs[0].framerate = 24
    camera.outputs[0].commit()

    # Configure H.264 encoder
    encoder.outputs[0].format = mmal.MMAL_ENCODING_H264
    encoder.outputs[0].bitrate = 2000000
    encoder.outputs[0].commit()
    p = encoder.outputs[0].params[mmal.MMAL_PARAMETER_PROFILE]
    p.profile[0].profile = mmal.MMAL_VIDEO_PROFILE_H264_HIGH
    p.profile[0].level = mmal.MMAL_VIDEO_LEVEL_H264_41
    encoder.outputs[0].params[mmal.MMAL_PARAMETER_PROFILE] = p
    encoder.outputs[0].params[
        mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER] = True
    encoder.outputs[0].params[mmal.MMAL_PARAMETER_INTRAPERIOD] = 30
    encoder.outputs[0].params[
        mmal.MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT] = 22
    encoder.outputs[0].params[mmal.MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT] = 22
    encoder.outputs[0].params[mmal.MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT] = 22

    # Connect everything up and enable everything (no need to enable capture on
    # camera port 0)
    clock.inputs[0].connect(camera.outputs[0])
    preview.inputs[0].connect(clock.outputs[0])
    encoder.inputs[0].connect(clock.outputs[1])
    target.inputs[0].connect(encoder.outputs[0])
    target.connection.enable()
    encoder.connection.enable()
    preview.connection.enable()
    clock.connection.enable()
    target.enable()
    encoder.enable()
    preview.enable()
    clock.enable()
    try:
        sleep(10)
    finally:
        # Disable everything and tear down the pipeline
        target.disable()
        encoder.disable()
        preview.disable()
        clock.disable()
        target.inputs[0].disconnect()
        encoder.inputs[0].disconnect()
        preview.inputs[0].disconnect()
        clock.inputs[0].disconnect()
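ClockSplitter is a custom Python component (in the picamera documentation it overlays a timestamp before splitting the stream to the preview and the encoder). A stripped-down, pass-through sketch assuming the MMALPythonComponent API, with the clock drawing omitted:

class ClockSplitter(mo.MMALPythonComponent):
    def __init__(self):
        # One input (implicit) and two outputs: one for the preview, one for
        # the encoder
        super(ClockSplitter, self).__init__(name='py.clock', outputs=2)
        self.inputs[0].supported_formats = {mmal.MMAL_ENCODING_I420}

    def _handle_frame(self, port, buf):
        # Copy each incoming frame to both outputs; a full implementation
        # would draw the clock overlay on the frame data first
        for output in self.outputs:
            out = output.get_buffer(False)
            if out:
                out.copy_from(buf)
                output.send_buffer(out)
        return False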
Example #4
    def __init__(self):
        self.camera = mmalobj.MMALCamera()  # camera instance
        self.encoder = mmalobj.MMALImageEncoder()  # encoder instance
        self.framesize = (1640, 1232)  # Good trade-off between resolution and frame rate
        self.q = Queue()  # Queue that holds images

        # Debug variables
        self.t_start = 0
        self.t_stop = 0
        self.ts_now = 0
        self.ts_last = 0
        self.counter = 0
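This __init__ only creates the components and the queue; the pipeline wiring is done elsewhere in the class. A hedged sketch of how the camera's video port might be fed into the JPEG encoder and the encoded frames pushed onto the queue, reusing the connect/commit/enable pattern from the other examples (the method names, the frame rate, and the JPEG format choice are assumptions):

    def setup_pipeline(self):
        # Configure the camera's video port (output 1) and commit the changes
        self.camera.outputs[1].framesize = self.framesize
        self.camera.outputs[1].framerate = 30
        self.camera.outputs[1].commit()

        # Emit JPEG frames from the encoder
        self.encoder.outputs[0].format = mmal.MMAL_ENCODING_JPEG
        self.encoder.outputs[0].commit()

        # Wire camera -> encoder, enable everything and start capture
        self.encoder.inputs[0].connect(self.camera.outputs[1])
        self.encoder.connection.enable()
        self.encoder.outputs[0].enable(self._image_callback)
        self.encoder.enable()
        self.camera.enable()
        self.camera.outputs[1].params[mmal.MMAL_PARAMETER_CAPTURE] = True

    def _image_callback(self, port, buf):
        # Push each completed frame onto the queue; a real implementation may
        # need to accumulate buffers when a frame spans more than one buffer
        if buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END:
            self.q.put({'data': buf.data, 'timestamp': buf.pts})
        return False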
Example #5
    def __init__(self, width, height):
        BaseCamera.__init__(self, width, height)

        self.camera = mo.MMALCamera()
        self.camera.outputs[1].framesize = (self.w, self.h)
        self.camera.outputs[1].commit()

        self.transform = self.CameraMO(self.w, self.h)

        # connect to camera out[1]
        self.transform.inputs[0].connect(self.camera.outputs[1])
        # enable the connection
        self.transform.connection.enable()

        # enable all components
        self.transform.enable()
        self.camera.enable()
Example #6
def main(output_filename):
    camera = mo.MMALCamera()
    preview = mo.MMALRenderer()
    encoder = mo.MMALVideoEncoder()
    clock = ClockSplitter()

    # Configure camera output 0
    camera.outputs[0].framesize = (640, 480)
    camera.outputs[0].framerate = 24
    camera.outputs[0].commit()

    # Configure H.264 encoder
    encoder.outputs[0].format = mmal.MMAL_ENCODING_H264
    encoder.outputs[0].bitrate = 2000000
    encoder.outputs[0].commit()
    p = encoder.outputs[0].params[mmal.MMAL_PARAMETER_PROFILE]
    p.profile[0].profile = mmal.MMAL_VIDEO_PROFILE_H264_HIGH
    p.profile[0].level = mmal.MMAL_VIDEO_LEVEL_H264_41
    encoder.outputs[0].params[mmal.MMAL_PARAMETER_PROFILE] = p
    encoder.outputs[0].params[mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER] = True
    encoder.outputs[0].params[mmal.MMAL_PARAMETER_INTRAPERIOD] = 30
    encoder.outputs[0].params[mmal.MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT] = 22
    encoder.outputs[0].params[mmal.MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT] = 22
    encoder.outputs[0].params[mmal.MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT] = 22
    output = io.open(output_filename, 'wb')
    def output_callback(port, buf):
        output.write(buf.data)
        return bool(buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_EOS)

    # Connect everything up (no need to enable capture on camera port 0)
    clock.connect(camera.outputs[0])
    preview.connect(clock.outputs[0])
    encoder.connect(clock.outputs[1])
    encoder.outputs[0].enable(output_callback)
    try:
        sleep(10)
    finally:
        preview.disconnect()
        encoder.disconnect()
        clock.disconnect()
Example #7
import time
#import cv2
import pickle
import csv
import os

# Imports used below but not shown in the original extract
# (GPIO is assumed to be RPi.GPIO)
import RPi.GPIO as GPIO
from picamera import mmalobj as mo

# Setting the camera-specific variables

# Breaking into the stream, clocks
global camera
camera = []
global streamOn
streamOn = False
global firstFrame_idx
firstFrame_idx = 0
GPUtimer = mo.MMALCamera()
global outdata
global trialNum
trialNum = 0
global justOff
justOff = False

# Setting up the GPIO interface
GPIO.setwarnings(False)
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
on_pin = 27
GPIO.setup(on_pin,GPIO.IN)
led_pin = 4
GPIO.setup(led_pin,GPIO.OUT)
GPIO.output(led_pin, GPIO.LOW)
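The extract ends after the GPIO setup. A hedged sketch of a polling loop that uses the pins configured above (how streamOn, trialNum and justOff are actually used by the project is an assumption here):

try:
    while True:
        if GPIO.input(on_pin) and not streamOn:
            # Trigger pin went high: light the LED and start a new trial
            GPIO.output(led_pin, GPIO.HIGH)
            streamOn = True
            trialNum += 1
        elif not GPIO.input(on_pin) and streamOn:
            # Trigger released: LED off and remember that we just stopped
            GPIO.output(led_pin, GPIO.LOW)
            streamOn = False
            justOff = True
        time.sleep(0.001)
finally:
    GPIO.cleanup()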
Example #8
    def __init__(self):

        self.__resolutions = {
            # 'short': [fps, (width, height)]
            '720p': [30, (1280, 720)],
            '1080p': [24, (1920, 1080)],
        }
        self.__config = ConfigParser()

        self.__config.read("/etc/raspberrydashcam/config.ini")

        # Set up the basic stuff
        self._title = self.__config["dashcam"]["title"]
        self._fps, self._resolution = self.__resolutions[
            self.__config["camera"]["resolution"]]

        self._bitrate = int(self.__config["camera"]["bitrate"])

        # Create the filename that we're going to record to.
        self._filename = datetime.now().strftime(
            "/mnt/storage/%Y-%m-%d-%H-%M-%S.mp4")

        # This is just a placeholder.
        self.__ffmpeg_instance = None

        # This is the ffmpeg command we will run
        self.__ffmpeg_command = [
            "/usr/bin/ffmpeg",
            #"-loglevel quiet -stats",
            "-async",
            "1",
            "-vsync",
            "1",
            "-ar",
            "44100",
            "-ac",
            "1",
            "-f",
            "s16le",
            "-f",
            "alsa",
            "-thread_queue_size",
            "10240",
            "-i",
            "hw:2,0",
            "-f",
            "h264",
            "-probesize",
            "10M",
            "-r",
            str(self._fps),
            "-thread_queue_size",
            "10240",
            "-i",
            "-",  # Read from stdin
            "-crf",
            "22",
            "-vcodec",
            "copy",
            "-acodec",
            "aac",
            "-ab",
            "128k",
            "-g",
            str(self._fps * 2),  # GOP should be double the fps.
            "-r",
            str(self._fps),
            "-f",
            "mp4",
            self._filename
        ]

        self.__camera = mo.MMALCamera()
        self.__preview = mo.MMALRenderer()
        self.__encoder = mo.MMALVideoEncoder()
        self.__DashCamData = DashCamData(title=self._title,
                                         resolution=self._resolution)

        # Here we start the ffmpeg process and at the same time
        # open up a stdin pipe to it.
        self.__ffmpeg_instance = subprocess.Popen(" ".join(
            self.__ffmpeg_command),
                                                  shell=True,
                                                  stdin=subprocess.PIPE)

        # Here we specify that the script should write to stdin of the
        # ffmpeg process.
        self.__target = mo.MMALPythonTarget(self.__ffmpeg_instance.stdin)

        # Setup resolution and fps
        self.__camera.outputs[0].framesize = self._resolution
        self.__camera.outputs[0].framerate = self._fps

        # Commit the two previous changes
        self.__camera.outputs[0].commit()

        # Do base configuration of encoder.
        self.__encoder.outputs[0].format = mmal.MMAL_ENCODING_H264
        self.__encoder.outputs[0].bitrate = self._bitrate

        # Commit the encoder changes.
        self.__encoder.outputs[0].commit()

        # Get current MMAL_PARAMETER_PROFILE from encoder
        profile = self.__encoder.outputs[0].params[mmal.MMAL_PARAMETER_PROFILE]

        # Modify the profile
        # Set the profile to MMAL_VIDEO_PROFILE_H264_HIGH
        profile.profile[0].profile = mmal.MMAL_VIDEO_PROFILE_H264_HIGH
        profile.profile[0].level = mmal.MMAL_VIDEO_LEVEL_H264_41

        # Now make sure the encoder gets the modified profile
        self.__encoder.outputs[0].params[mmal.MMAL_PARAMETER_PROFILE] = profile

        # The following settings are borrowed from other examples;
        # their exact effect has not been worked out yet.
        self.__encoder.outputs[0].params[
            mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER] = True
        self.__encoder.outputs[0].params[mmal.MMAL_PARAMETER_INTRAPERIOD] = 48
        self.__encoder.outputs[0].params[
            mmal.MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT] = 17
        self.__encoder.outputs[0].params[
            mmal.MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT] = 17
        self.__encoder.outputs[0].params[
            mmal.MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT] = 17

        # Stolen from picamera module, very clever stuff
        self.__mirror_parameter = {
            (False, False): mmal.MMAL_PARAM_MIRROR_NONE,
            (True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
            (False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
            (True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
        }

        # Set initial values
        self.vflip = self.hflip = False

        # Get actual config values from config
        if self.__config["camera"].getboolean("vflip"):
            self.set_vflip(True)
        if self.__config["camera"].getboolean("hflip"):
            self.set_hflip(True)
        self.__camera.control.params[
            mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = True
        mp = self.__camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
        print(mp)
        mp.value = mmal.MMAL_PARAM_EXPOSUREMODE_AUTO
        self.__camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
        mp = self.__camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
        mp.value = mmal.MMAL_PARAM_AWBMODE_HORIZON
        self.__camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
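set_vflip and set_hflip are not shown in this extract. A hedged sketch that combines the __mirror_parameter table above with the read-modify-write parameter pattern used elsewhere in this class (the method bodies are assumptions):

    def set_vflip(self, value):
        self.vflip = bool(value)
        self.__apply_mirror()

    def set_hflip(self, value):
        self.hflip = bool(value)
        self.__apply_mirror()

    def __apply_mirror(self):
        # Apply the combined flip state to every camera output port
        for port in self.__camera.outputs:
            mp = port.params[mmal.MMAL_PARAMETER_MIRROR]
            mp.value = self.__mirror_parameter[(self.vflip, self.hflip)]
            port.params[mmal.MMAL_PARAMETER_MIRROR] = mp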
Example #9
from picamera import mmalobj as mo, mmal
from signal import pause

# Define values for the camera windows
# my desktop: 1600x900 (800x450)
camResW = 200
camResH = 225
camFramR = 30

# starting corner of your windows (upper-left position)
cornerX = 500
cornerY = 600
windowSpacer = 10  # space between the 2 windows

camera = mo.MMALCamera()
splitter = mo.MMALSplitter()
render_l = mo.MMALRenderer()
render_r = mo.MMALRenderer()
# camResW x camResH is the capture resolution (originally 960x720 in this example); change it to fill your display properly. I believe I'm using 960x1080 on mine.
camera.outputs[0].framesize = (camResW, camResH)
# This was originally 30, but the latency was too high and it was nearly impossible to navigate while wearing a head-mounted display.
camera.outputs[0].framerate = camFramR
camera.outputs[0].commit()

p = render_l.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION]
p.set = mmal.MMAL_DISPLAY_SET_FULLSCREEN | mmal.MMAL_DISPLAY_SET_DEST_RECT
p.fullscreen = False
# These parameters control the X,Y and scale of the left image showing up on your display
p.dest_rect = mmal.MMAL_RECT_T(cornerX, cornerY, camResW, camResH)
render_l.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = p
# These control the X,Y and the scale of the right image showing up on your display.
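The extract cuts off here, before the right-hand window is positioned and the pipeline is wired up. A hedged sketch of the remaining steps, mirroring the left-renderer code above and the splitter/connection pattern from the other examples (the window offset and the wiring order are assumptions):

p = render_r.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION]
p.set = mmal.MMAL_DISPLAY_SET_FULLSCREEN | mmal.MMAL_DISPLAY_SET_DEST_RECT
p.fullscreen = False
# Place the right window next to the left one, separated by windowSpacer
p.dest_rect = mmal.MMAL_RECT_T(cornerX + camResW + windowSpacer, cornerY,
                               camResW, camResH)
render_r.inputs[0].params[mmal.MMAL_PARAMETER_DISPLAYREGION] = p

# Camera -> splitter -> two renderers, then enable everything
splitter.inputs[0].connect(camera.outputs[0])
render_l.inputs[0].connect(splitter.outputs[0])
render_r.inputs[0].connect(splitter.outputs[1])
splitter.connection.enable()
render_l.connection.enable()
render_r.connection.enable()
splitter.enable()
render_l.enable()
render_r.enable()
camera.enable()

# Keep the preview running until interrupted (pause is imported from signal above)
pause()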
Example #10
    def start(self):
        self.logger.info("Starting drivers")

        # General inits
        self.files_dir = None
        self.imu_data = None
        self.img_data = None
        self.img_counter = 0
        self.RECORD_MODE = False
        self.REPLAY_MODE = False

        self.data_window = {
            topic: list()
            for topic in
            ['accel_x', 'accel_y', 'accel_z', 'gyro_x', 'gyro_y', 'gyro_z']
        }

        # IMU INITS
        self.imu_next_sample_ms = self.get_time_ms()

        if RPI:
            # CAMERA INITS
            self.camera = mmalobj.MMALCamera()
            self.encoder = mmalobj.MMALImageEncoder()

            # Queues
            self.q_img = Queue()

            # CAMERA SETUP
            self.camera_pipeline_setup()
            self.camera_start()

            # IMU INITS
            self.bus = smbus.SMBus(1)

            # IMU SETUP
            self.bus.write_byte_data(ADDR, PWR_MGMT_1, 0)
            self.set_accel_range()
            self.set_gyro_range()

        # Get hardware configuration mode
        self.setup_hardware_configuration()

        while True:
            # Not replay mode, either normal or record mode
            if not self.REPLAY_MODE:
                # IMU gets sampled at a fixed frequency
                if self.get_time_ms() > self.imu_next_sample_ms:
                    timestamp = self.get_time_ms()

                    # Schedule the next sample time
                    self.imu_next_sample_ms = timestamp + IMU_SAMPLE_TIME_MS

                    # Dict of IMU data
                    data_dict = {
                        'accel_x': self.get_accel_x(),
                        'accel_y': self.get_accel_y(),
                        'accel_z': self.get_accel_z(),
                        'gyro_x': self.get_gyro_x(),
                        'gyro_y': self.get_gyro_y(),
                        'gyro_z': self.get_gyro_z(),
                        "timestamp": timestamp
                    }

                    # Track window for median filter
                    data_dict = self.track_val_median_filter(data_dict)

                    if self.RECORD_MODE:
                        # In record mode, we want to write data into the open file
                        self.imu_data.write(
                            f"{timestamp}: " +
                            f"accel_x: {data_dict['accel_x']}, " +
                            f"accel_y: {data_dict['accel_y']}, " +
                            f"accel_z: {data_dict['accel_z']}, " +
                            f"gyro_x: {data_dict['gyro_x']}, " +
                            f"gyro_y: {data_dict['gyro_y']}, " +
                            f"gyro_z: {data_dict['gyro_z']}\n")
                        self.imu_data.flush()
                    else:
                        # In normal mode, we just publish the data
                        self.publish("accelerations", data_dict,
                                     IMU_VALIDITY_MS)
                        self.publish("accelerations_vis", data_dict, -1)
            else:

                # We are in replay mode
                if not self.imu_timestamp:
                    # We read one line of data
                    imu_str = self.imu_data.readline()

                    # Look for data in the right format
                    out = re.search(IMU_RE_MASK, imu_str)
                    if out:
                        # Find timestamp of data
                        self.imu_timestamp = int(out.group(1))

                        if not self.imu_first_timestamp:
                            self.imu_first_timestamp = self.imu_timestamp

                        # Populate dict with data, as if it was sampled normally
                        self.imu_data_dict = {
                            'accel_x': float(out.group(2)),
                            'accel_y': float(out.group(3)),
                            'accel_z': float(out.group(4)),
                            'gyro_x': float(out.group(5)),
                            'gyro_y': float(out.group(6)),
                            'gyro_z': float(out.group(7)),
                            "timestamp": int(out.group(1))
                        }

                        # Track window for median filter
                        self.imu_data_dict = self.track_val_median_filter(
                            self.imu_data_dict)

                # If the relative time is correct, we publish the data

                if (self.imu_timestamp and
                        self.get_time_ms() - self.replay_start_timestamp >
                        self.imu_timestamp - self.imu_first_timestamp):
                    self.publish("accelerations", self.imu_data_dict,
                                 IMU_VALIDITY_MS)
                    self.publish("accelerations_vis", self.imu_data_dict, -1)

                    # Reset the timestamp so that a new dataset is read
                    self.imu_timestamp = None

            # We want to forward image data as fast and often as possible
            # Not replay mode, either normal or record mode
            if not self.REPLAY_MODE:
                if not self.q_img.empty():
                    # Get next img from queue
                    data_dict = self.q_img.get()
                    timestamp = data_dict['timestamp']

                    if self.RECORD_MODE:
                        self.img_counter += 1
                        # Keep track of image counter and timestamp
                        self.img_data.write(
                            f"{self.img_counter}: {timestamp}\n")
                        self.img_data.flush()

                        # Write image
                        img = self.files_dir / 'imgs' / f"img_{self.img_counter:04d}.jpg"
                        img_f = io.open(img, 'wb')
                        img_f.write(data_dict['data'])
                        img_f.close()
                    else:
                        # In normal mode we just publish the image
                        self.publish("images", data_dict, IMAGES_VALIDITY_MS)
            else:
                # We are in replay mode
                if not self.img_timestamp:
                    # Read from the file that keeps track of timestamps
                    img_str = self.img_data.readline()

                    if img_str:
                        out = re.search(r'([0-9]*): ([0-9]*)', img_str)
                        if out:
                            self.img_timestamp = int(out.group(2))

                            if not self.img_first_timestamp:
                                self.img_first_timestamp = self.img_timestamp

                            # Read the image corresponding to the counter and timestamp
                            img_filename = f"img_{int(out.group(1)):04d}.jpg"
                            img_file_path = self.files_dir / 'imgs' / img_filename

                            with open(img_file_path, 'rb') as fp:
                                img_data_file = fp.read()

                            # Decode image
                            self.img = cv2.imdecode(np.frombuffer(
                                img_data_file, dtype=np.int8),
                                                    flags=cv2.IMREAD_COLOR)

                            # Undistort image
                            if UNDISTORT_IMAGE:
                                self.img = cv2.undistort(
                                    self.img, self.intrinsic_matrix,
                                    self.distortion_coeffs)

                            # Resize image
                            if RESIZE_IMAGE:
                                self.img = cv2.resize(self.img, RESIZED_IMAGE)

                # If the relative time is correct, we publish the data

                if (self.img_timestamp and
                        self.get_time_ms() - self.replay_start_timestamp >
                        self.img_timestamp - self.img_first_timestamp):
                    self.publish("images", {
                        "data": self.img,
                        "timestamp": self.img_timestamp
                    }, -1)

                    # Reset the timestamp so that a new dataset is read
                    self.img_timestamp = None