Example #1
0
import datetime as dt
import logging as log
import sys
from time import sleep

import cv2
import picamera
from picamera.array import PiRGBArray


# Pi camera setup.  The original lines were garbled by a bad paste:
# "cascPcamera" and "ath =" were the two halves of `cascPath = "..."`,
# split by the camera-setup lines.  Reconstructed below.
camera = picamera.PiCamera()
camera.resolution = (160, 120)
camera.framerate = 16
# PiRGBArray lives in picamera.array (imported at the top of the file).
rawCapture = PiRGBArray(camera, size=(160, 120))
sleep(0.5)  # only `sleep` is imported (`from time import sleep`), not `time`

# Haar cascade for frontal-face detection (XML expected next to the script).
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
log.basicConfig(filename='webcam.log', level=log.INFO)

# USB/webcam capture device 0, used by the detection loop below.
video_capture = cv2.VideoCapture(0)
anterior = 0  # presumably the face count from the previous frame -- confirm

while True:
    if not video_capture.isOpened():
        print('Unable to load camera.')
        sleep(5)
        pass

    # Capture frame-by-frame
    ret, frame = video_capture.read()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
Example #2
0
    for filename in root.iter('filename'):
        filename.text = '-1'

    tree.write(xml_path)


def denied_callback():
    """Snapshot the denied visitor and flag the event in the status XML.

    Relies on module-level state defined elsewhere in the original file:
    ``currentTime``, ``img_out_format``, ``img_out_path``, ``root``,
    ``tree`` and ``xml_path``.
    """
    # Step 1: take a photo; the context manager releases the camera after.
    img_filename = "denied_" + currentTime + img_out_format
    with picamera.PiCamera() as camera:
        camera.capture(img_out_path + img_filename)

    # Step 2: record "access denied" plus the photo name in the XML tree.
    for node in root.iter('granted'):
        node.text = '0'
    for node in root.iter('filename'):
        node.text = img_filename

    tree.write(xml_path)


if __name__ == '__main__':
    # When run as a script: capture a single still image named after the
    # module-level timestamp (``currentTime`` / ``img_out_format`` are
    # presumably defined earlier in the original file -- not visible here).
    img_filename = "image_" + currentTime + img_out_format
    with picamera.PiCamera() as camera:
        camera.capture(img_filename)
Example #3
0
import picamera  # import the picamera module
from time import sleep  # kept from the original; no longer used directly

# Record 5 seconds of H.264 video.  Using wait_recording() instead of a
# plain sleep() lets picamera re-raise any error that occurs while
# recording, and the context manager guarantees the camera is released.
with picamera.PiCamera() as camera:
    camera.start_recording('video.h264')  # start recording
    camera.wait_recording(5)              # wait 5 s, surfacing capture errors
    camera.stop_recording()               # stop recording
Example #4
0
def capture_image(file_name):
    """Capture a single 1200x1200 still image to *file_name*.

    The camera is opened in a ``with`` block so it is released even when
    ``capture()`` raises; the original only reached ``close()`` on the
    success path and leaked the camera on error.
    """
    with picamera.PiCamera() as camera:
        camera.resolution = (1200, 1200)
        camera.capture(file_name)
Example #5
0
    def __init__(self):
        """Build the licence-plate-recognition GUI window.

        Relies on module-level globals defined elsewhere in the original
        file: ``strLicense``, ``hizLimiti`` (speed limit), ``WINDOWWIDTH``
        and ``WINDOWHEIGHT``.
        """
        Tk.__init__(self)

        self.title("Plaka Tanıma")
        self.geometry("{}x{}+100+100".format(WINDOWWIDTH, WINDOWHEIGHT))

        self.strLicense = strLicense
        self.speed = 0
        self.speedLimit = hizLimiti

        #Camera
        # NOTE(review): the camera is opened and immediately closed --
        # presumably just a startup probe that the camera is available;
        # confirm this is intentional.
        self.camera = picamera.PiCamera()
        self.camera.close()

        #Serial connection
        self.serial = serial.Serial('/dev/ttyUSB0', 9600, timeout=5)

        #Left frame
        self.frameLeft = Frame(self)
        self.frameLeft.place(x=0, y=0, height=486, width=324)
        self.frameRight = Frame(self)
        self.frameRight.place(x=WINDOWWIDTH / 3, y=0)
        #self.frameRight.pack(side = "right")

        ##Entry for License
        self.entryLicense = Entry(self.frameLeft,
                                  width=len(strLicense),
                                  font=("Calibri", 40))
        self.entryLicense.insert(0, strLicense)
        self.entryLicense.place(x=WINDOWWIDTH / 20, y=5 * WINDOWHEIGHT / 6)

        #Read images
        # These PNGs are presumably produced by the recognition pipeline
        # before the GUI starts; a missing file raises from Image.open.
        self.imgOriginalSceneGui = ImageTk.PhotoImage(
            Image.open("imgOriginalSceneGui.png"))
        self.imgLicense = ImageTk.PhotoImage(Image.open("imgLicenseGui.png"))
        self.imgLicenseChars = ImageTk.PhotoImage(
            Image.open("imgLicenseCharsGui.png"))

        #Entry of Speed
        self.entrySpeed = Entry(self.frameLeft, width=4, font=("Calibri", 40))
        self.entrySpeed.insert(0, self.speed)
        ##        print(dir(self.entrySpeed))
        self.entrySpeed.place(x=2 * WINDOWWIDTH / 20, y=1 * WINDOWHEIGHT / 6)
        #entryLicense.grid(column=1, row=3, padx=20, pady=20)

        #Buttons
        # Radio buttons choose the input source: radar, camera or directory.
        self.var = IntVar()
        ##        print(dir(self.var))
        self.Button1 = Radiobutton(self.frameLeft,
                                   text="Option radardan ",
                                   variable=self.var,
                                   value=1,
                                   command=self.fromRadar)
        self.Button2 = Radiobutton(self.frameLeft,
                                   text="Option kameradan",
                                   variable=self.var,
                                   value=2,
                                   command=self.fromCamera)
        self.Button3 = Radiobutton(self.frameLeft,
                                   text="Option klasörden",
                                   variable=self.var,
                                   value=3,
                                   command=self.fromDirectory)
        self.Button1.place(x=0, y=0 * WINDOWHEIGHT / 18)
        self.Button2.place(x=0, y=1 * WINDOWHEIGHT / 18)
        self.Button3.place(x=0, y=2 * WINDOWHEIGHT / 18)

        #When close the window, run exit function
        self.protocol('WM_DELETE_WINDOW', self.exitFunction)

        #Start main function
        self.main()
        ##        self.putImages()

        self.writeSpeed()
Example #6
0
File: camera.py  Project: studi0z/Mycodo
def camera_record(record_type, unique_id, duration_sec=None, tmp_filename=None):
    """
    Record still image or video from cameras

    :param record_type: 'photo', 'timelapse' or 'video'; anything else returns None
    :param unique_id: unique ID of the camera settings row in the database
    :param duration_sec: video recording duration, in seconds (video only)
    :param tmp_filename: optional override for the generated filename
    :return: (save_path, filename) on success, None otherwise
    """
    daemon_control = None
    settings = db_retrieve_table_daemon(Camera, unique_id=unique_id)
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    root_path = os.path.abspath(os.path.join(INSTALL_DIRECTORY, 'cameras'))
    assure_path_exists(root_path)
    camera_path = assure_path_exists(
        os.path.join(root_path, '{uid}'.format(uid=settings.unique_id)))
    if record_type == 'photo':
        save_path = assure_path_exists(os.path.join(camera_path, 'still'))
        filename = 'Still-{cam_id}-{cam}-{ts}.jpg'.format(
            cam_id=settings.id,
            cam=settings.name,
            ts=timestamp).replace(" ", "_")
    elif record_type == 'timelapse':
        save_path = assure_path_exists(os.path.join(camera_path, 'timelapse'))
        start = datetime.datetime.fromtimestamp(
            settings.timelapse_start_time).strftime("%Y-%m-%d_%H-%M-%S")
        filename = 'Timelapse-{cam_id}-{cam}-{st}-img-{cn:05d}.jpg'.format(
            cam_id=settings.id,
            cam=settings.name,
            st=start,
            cn=settings.timelapse_capture_number).replace(" ", "_")
    elif record_type == 'video':
        save_path = assure_path_exists(os.path.join(camera_path, 'video'))
        filename = 'Video-{cam}-{ts}.h264'.format(
            cam=settings.name,
            ts=timestamp).replace(" ", "_")
    else:
        return

    if tmp_filename:
        filename = tmp_filename

    path_file = os.path.join(save_path, filename)

    # Turn on output, if configured
    if settings.output_id:
        daemon_control = DaemonControl()
        daemon_control.output_on(settings.output_id)

    # Pause while the output remains on for the specified duration.
    # Used for instance to allow fluorescent lights to fully turn on before
    # capturing an image.
    if settings.output_duration:
        time.sleep(settings.output_duration)

    if settings.library == 'picamera':
        # Try 5 times to access the pi camera (in case another process is accessing it)
        for _ in range(5):
            try:
                with picamera.PiCamera() as camera:
                    camera.resolution = (settings.width, settings.height)
                    camera.hflip = settings.hflip
                    camera.vflip = settings.vflip
                    camera.rotation = settings.rotation
                    camera.brightness = int(settings.brightness)
                    camera.contrast = int(settings.contrast)
                    camera.exposure_compensation = int(settings.exposure)
                    camera.saturation = int(settings.saturation)
                    camera.start_preview()
                    time.sleep(2)  # Camera warm-up time

                    if record_type in ['photo', 'timelapse']:
                        camera.capture(path_file, use_video_port=False)
                    elif record_type == 'video':
                        camera.start_recording(path_file, format='h264', quality=20)
                        camera.wait_recording(duration_sec)
                        camera.stop_recording()
                    else:
                        return
                    break
            except picamera.exc.PiCameraMMALError:
                logger.error("The camera is already open by picamera. Retrying 4 times.")
            time.sleep(1)

    elif settings.library == 'fswebcam':
        cmd = "/usr/bin/fswebcam --device {dev} --resolution {w}x{h} --set brightness={bt}% " \
              "--no-banner --save {file}".format(dev=settings.device,
                                                 w=settings.width,
                                                 h=settings.height,
                                                 bt=settings.brightness,
                                                 file=path_file)
        if settings.hflip:
            cmd += " --flip h"
        if settings.vflip:
            # BUG FIX: was " --flip h", which flipped horizontally instead of
            # vertically when vflip was enabled.
            cmd += " --flip v"
        if settings.rotation:
            cmd += " --rotate {angle}".format(angle=settings.rotation)
        if settings.custom_options:
            cmd += " " + settings.custom_options

        out, err, status = cmd_output(cmd, stdout_pipe=False)
        # logger.error("TEST01: {}; {}; {}; {}".format(cmd, out, err, status))

    # Turn off output, if configured
    if settings.output_id and daemon_control:
        daemon_control.output_off(settings.output_id)

    try:
        set_user_grp(path_file, 'mycodo', 'mycodo')
        return save_path, filename
    except Exception as e:
        logger.exception(
            "Exception raised in 'camera_record' when setting user grp: "
            "{err}".format(err=e))
Example #7
0
                    self.wfile.write(b'--FRAME\r\n')
                    self.send_header('Content-Type', 'image/jpeg')
                    self.send_header('Content-Length', len(frame))
                    self.end_headers()
                    self.wfile.write(frame)
                    self.wfile.write(b'\r\n')
            except Exception as e:
                logging.warning('Removed streaming client %s: %s',
                                self.client_address, str(e))
        else:
            self.send_error(404)
            self.end_headers()


class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
    """Threaded HTTP server for the MJPEG camera stream.

    ``allow_reuse_address`` lets the port be rebound quickly after a
    restart; ``daemon_threads`` makes handler threads die with the main
    thread instead of keeping the process alive.
    """
    allow_reuse_address = True
    daemon_threads = True


# Stream the Pi camera as MJPEG over HTTP on port 8000.
# StreamingOutput / StreamingHandler are defined earlier in the original file.
with picamera.PiCamera(resolution='1280x960', framerate=24) as camera:
    output = StreamingOutput()
    #Uncomment the next line to change your Pi's Camera rotation (in degrees)
    #camera.rotation = 90
    camera.start_recording(output, format='mjpeg')
    try:
        address = ('', 8000)  # bind all interfaces, port 8000
        server = StreamingServer(address, StreamingHandler)
        server.serve_forever()
    finally:
        # Always stop the recording, even if serve_forever() is interrupted.
        camera.stop_recording()
Example #8
0
    def __init__(self, cell_size, option, init_position=[0.0, 0.0, 0.0]):
        """
        Initialise the robot's basic parameters and configure it according
        to the ports the sensors and motors are connected to.

        NOTE(review): the mutable default for *init_position* is shared
        between calls; it is only read here, but confirm callers never
        mutate it.
        """

        # Robot construction parameters

        self.r = 0.027  # Wheel radius, in metres
        self.L = 0.135  # Distance between wheel centres, in metres
        self.thd = 0.0
        self.thi = 0.0
        self.logFile = open("log.txt", "w+")  # Odometry log file
        self.img_count = 0
        ##################################################
        # Motors and sensors setup

        # Create an instance of the BrickPi3 class. BP will be the BrickPi3 object.
        self.BP = brickpi3.BrickPi3()

        # Configure sensors
        self.BP.set_sensor_type(self.BP.PORT_1,
                                self.BP.SENSOR_TYPE.EV3_ULTRASONIC_CM)
        time.sleep(5)  # presumably settling time for the sensor -- confirm

        self.distance_from_obstacles = 20.0  # In centimeters
        self.security_distance = 10.0  # In centimeters
        self.cell_size = cell_size  # In meters

        # reset encoder B and C (or all the motors you are using)
        self.BP.offset_motor_encoder(self.BP.PORT_B,
                                     self.BP.get_motor_encoder(self.BP.PORT_B))
        self.BP.offset_motor_encoder(self.BP.PORT_C,
                                     self.BP.get_motor_encoder(self.BP.PORT_C))
        self.BP.offset_motor_encoder(self.BP.PORT_A,
                                     self.BP.get_motor_encoder(self.BP.PORT_A))

        # CAMERA SETTINGS
        self.CAM_CENTER = 320 / 2.0  # horizontal centre of the 320px frame
        self.cam = picamera.PiCamera()
        self.cam.resolution = (320, 240)
        self.cam.framerate = 32
        self.cam_refresh = 1.0 / self.cam.framerate  # seconds per frame

        # EXIT SETTINGS

        if option == 'A':
            self.cmp_img = cv2.imread("img1.png", cv2.IMREAD_COLOR)
        else:
            self.cmp_img = cv2.imread("img2.png", cv2.IMREAD_COLOR)

        ##################################################
        # odometry shared memory values
        self.x = Value('d', init_position[0])
        self.y = Value('d', init_position[1])
        self.th = Value('d', init_position[2])
        self.finished = Value(
            'b', 1)  # boolean to show if odometry updates are finished

        # if we want to block several instructions to be run together, we may want to use an explicit Lock
        self.lock_odometry = Lock()

        # Original comment says "odometry update frequency, in updates/second";
        # NOTE(review): 1/50 looks like a period in seconds -- confirm.
        self.P = 1.0 / 50
Example #9
0
def run():
    """Continuously capture frames and display an NDVI visualisation.

    Relies on names defined elsewhere in the original file: ``cap`` (a
    cv2.VideoCapture), ``out`` (a cv2.VideoWriter), ``label``,
    ``disp_multiple``, ``contrast_stretch``, ``np`` and ``cv2``.
    """
    with picamera.PiCamera() as camera:
        # Set the camera resolution
        x = 400
        camera.resolution = (int(1.33 * x), x)
        # Various optional camera settings below:
        # camera.framerate = 5
        # camera.awb_mode = 'off'
        # camera.awb_gains = (0.5, 0.5)

        # Need to sleep to give the camera time to get set up properly
        time.sleep(1)

        with picamera.array.PiRGBArray(camera) as stream:
            # Loop constantly
            while True:
                # Grab data from the camera, in colour format
                # NOTE: This comes in BGR rather than RGB, which is important
                # for later!
                camera.capture(stream, format='bgr', use_video_port=True)
                image = stream.array

                # Get the individual colour components of the image
                b, g, r = cv2.split(image)

                #start video capture
                # NOTE(review): this read overwrites the `image` captured
                # above with a frame from `cap`; confirm that is intentional.
                ret, image = cap.read()
                # Calculate the NDVI

                # Bottom of fraction
                bottom = (r.astype(float) + g.astype(float))
                bottom[bottom ==
                       0] = 0.01  # Make sure we don't divide by zero!

                ndvi = (r.astype(float) - g) / bottom
                ndvi = contrast_stretch(ndvi)
                ndvi = ndvi.astype(np.uint8)
                ndvi = cv2.applyColorMap(ndvi, cv2.COLORMAP_JET)

                # Do the labelling
                label(b, 'Blue')
                label(g, 'Green')
                label(r, 'Red')
                label(ndvi, 'NDVI')

                # Combine ready for display
                combined = disp_multiple(b, g, r, ndvi)

                #              write video

                out.write(combined)

                # Display
                cv2.imshow('combined', combined)

                stream.truncate(0)

                #  press ESC to break
                c = cv2.waitKey(7) % 0x100
                if c == 27:
                    break

    # cleanup or things will get messy
    cv2.destroyAllWindows()
    cap.release()
    out.release()
Example #10
0
def capture_image(image_file):
    """Capture one vertically-flipped still image into *image_file*.

    The camera is opened in a ``with`` block so it is always released,
    even when ``capture()`` raises; the original only closed the camera
    on the success path.
    """
    import picamera
    with picamera.PiCamera() as camera:
        camera.vflip = True
        camera.capture(image_file)
Example #11
0
def main():
    """Camera-node main loop for a multi-Pi capture rig.

    Listens for multicast instructions ("quit" / "pic"), sends a heartbeat
    to the master computer roughly every 2 s, and forwards queued outbound
    UDP messages.  Python 2 code (print statements).  Relies on
    module-level names defined elsewhere in the original file:
    ``get_ip_address``, ``LOCAL_DIR``, ``instructQueue``, ``sendQ``,
    ``Message``, ``multicastRecieve``, ``takePic`` and ``udpSend``.
    """

    myIP = str(get_ip_address('eth0'))[10:13]
    masterIP = "192.168.0.100"  #Most likely address for master IP, this gets checked/confirmed upon recieving multicast messages

    #setup folder for pictures
    if not os.path.exists(LOCAL_DIR):
        os.makedirs(LOCAL_DIR)

    #init camera
    camera = picamera.PiCamera(
        resolution=(3280,
                    2464))  #this is the full resolution of the rpi camera 2
    camera.iso = 100
    time.sleep(
        2
    )  # this sleep is incredibly important!!!! otherwise pictures will all just come out black
    camera.shutter_speed = 2750
    camera.exposure_mode = 'off'
    # freeze the current auto-white-balance gains
    g = camera.awb_gains
    camera.awb_mode = 'off'
    camera.awb_gains = g
    # camera = cameraSetup()
    print "camera setup successful"  #useful for debugging, in certain cases camera won't start and device needs to be restarted

    #start thread for recieving instructions
    # TODO, figure out if multicast recieve can happen in main thread
    multicastThread = threading.Thread(target=multicastRecieve)
    multicastThread.setDaemon(True)
    multicastThread.start()

    #start tracking heartbeat, this is sent to master computer periodically
    beat = time.time()

    while True:
        ##check instruction queue (messages recieved over udp)
        if (not instructQueue.empty()):
            data = instructQueue.get()
            incomming = Message()
            incomming.jsonToMessage(data)
            masterIP = incomming.originIp  #keeps master IP up to date.

            # handle quit command
            if "quit" == incomming.messageType:
                break

            #handle take a picture instruction
            elif "pic" == incomming.messageType:
                if incomming.allCams:
                    takePic(incomming, camera, myIP)
                #TODO handle other camera instructions
                else:
                    pass

        #heartbeat that is sent to master computer
        #sent aprox every 2 seconds
        if ((time.time() - beat) >= 2):
            beat = time.time()  #reset heartbeat counter
            heartbeatMessage = Message("heartBeat", myIP)
            heartbeatMessage.timeStamp = beat
            heartbeatMessage.destinationIp = masterIP
            sendQ.put(heartbeatMessage)

        ##send any messages that need to be sent to the master computer
        if (not sendQ.empty()):
            outGoingMsg = sendQ.get()
            try:
                udpSend(outGoingMsg)
            except Exception as e:
                print e
                print "send error"

    camera.close()
Example #12
0
class PylapseApp(App):
    """PylapseApp : Kivy Application to create Timelapse videos on the Raspberry pi"""
    # NOTE(review): these are class attributes shared by all instances, and
    # the camera is opened at class-definition (import) time -- confirm this
    # is intended rather than creating it in on_start().
    cam = picamera.PiCamera()  # camera object
    preview_on = False  # is preview running
    rotation_angle = 0  # rotation angle for the camera (0, 90 180 270°)
    h_flip = False  # horizontal mirroring (boolean)
    v_flip = False  # vertical mirroring (boolean)
    resolution = '1080p'  # default resolution for timelapse and preview
    timelapse_event = None  # Timelapse event to take picture
    interval_time = 10  # s - interval between two pictures
    total_time = 60  # s - total time of timelapse
    frame_counter = 0  # frame counter for filenames
    timelapse_started = False  # flag set to True when timelapse is started
    confirm_popup = None  # popup to confirm timelapse stop
    progress_popup = None  # popup showing timelapse progress
    device_observer = None  # pyudev observer watching for USB drives
    pyudev_context = None  # pyudev context (created in on_start)
    usb_drive_device = None  # device node of the plugged USB drive
    usb_drive_path = '/media/pi/usb'  # mount point used for the USB drive
    transfer_popup = None  # popup shown while files are copied to USB

    def on_start(self):
        """Kivy hook: run once when the app starts."""
        # pyudev context used to watch for USB block devices
        self.pyudev_context = pyudev.Context()
        # start the camera preview overlay
        self.toggle_preview(on=True)
        # listening for usb drive plugged
        self.start_listening_usb()

    def start_listening_usb(self):
        """Start a pyudev observer that reports block-device plug/unplug."""
        monitor = pyudev.Monitor.from_netlink(self.pyudev_context)
        monitor.filter_by('block')
        # new_usb_drive() is called for every matching udev event
        self.device_observer = pyudev.MonitorObserver(monitor,
                                                      self.new_usb_drive)
        self.device_observer.start()

    def stop_listening_usb(self):
        """Stop the pyudev observer if one is currently running."""
        if self.device_observer is None:
            return
        self.device_observer.stop()
        self.device_observer = None

    @mainthread
    def new_usb_drive(self, action, device):
        """triggered when a usb drive is plugged (runs on the Kivy main thread)"""
        if action == 'add' and device.device_type == 'partition':
            self.usb_drive_device = device.device_node
            drive_label = device.get('ID_FS_LABEL')
            Logger.info('{0} usb key \"{1}\" ({2})'.format(
                action, drive_label, self.usb_drive_device))
            # show the USB icon and label in the UI
            self.root.ids['usb'].opacity = 1.0
            self.root.ids['usb_drive'].text = drive_label
            # NOTE(review): shell mount via sudo; the device node comes from
            # udev so injection risk is low, but confirm.
            system('sudo mount -o gid=1000,uid=1000 {0} {1}'.format(
                self.usb_drive_device, self.usb_drive_path))
            self.toggle_preview(on=False)
            Logger.info('{0} usb key \"{1}\" ({2} - > {3})'.format(
                action, drive_label, self.usb_drive_device,
                self.usb_drive_path))
            self.transfer_popup = Factory.DialogTransfer()
            self.transfer_popup.open()
        elif action == 'remove' and device.device_type == 'partition':
            self.usb_drive_device = None
            # dim the USB icon and clear the label
            self.root.ids['usb'].opacity = 0.1
            self.root.ids['usb_drive'].text = ''
            Logger.info('{0} usb key \"{1}\" ({2})'.format(
                action, device.get('ID_FS_LABEL'), device.device_node))
            if self.transfer_popup is not None:
                self.transfer_popup.dismiss()
                self.transfer_popup = None

    def eject_usb_drive(self):
        """Eject the currently mounted USB drive, if any."""
        drive = self.usb_drive_device
        if drive is not None:
            system('sudo eject {0}'.format(drive))
            Logger.info('eject usb key {0}'.format(drive))

    def transfer_files(self, transfer_images=True, transfer_video=False):
        """Copy captured images and/or the rendered video to the USB drive.

        On success the drive is ejected and a confirmation dialog is shown;
        on any failure an error dialog is shown instead.
        """
        try:
            if transfer_images or transfer_video:
                # recreate a clean timelapse/ folder on the drive
                system("rm -rvf {0}/timelapse && mkdir {0}/timelapse".format(
                    self.usb_drive_path))
            if transfer_images:
                files = [
                    f for f in listdir('../images')
                    if isfile(join('../images', f))
                ]
                for f in files:
                    shutil.copy2(join('../images', f),
                                 join(self.usb_drive_path, 'timelapse'))
            if transfer_video:
                shutil.copy2(join('../video', 'timelapse.mp4'),
                             join(self.usb_drive_path, 'timelapse'))
            self.eject_usb_drive()
            self.show_message('Transfer Success',
                              'Files are copied on USB drive')
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; Exception keeps the same error-dialog
            # behaviour for real failures.
            self.show_message('Transfer Error',
                              'Can\'t copy files on USB drive')

    def show_message(self, title, message):
        """Open a modal message dialog showing *title* and *message*."""
        popup = Factory.DialogMessage()
        popup.open()
        popup.ids['message_lbl'].text = message
        popup.title = title

    def close_message_soon(self, message_popup, when):
        """Schedule *message_popup* to be dismissed after *when* seconds."""
        Clock.schedule_once(lambda dt: message_popup.dismiss(), when)

    def set_preview_dims(self, pos, size):
        """set preview overlay coordinates : pos and size"""
        x, y = pos
        w, h = size
        # Kivy's vertical axis is reversed relative to the preview overlay,
        # so flip y against the window height using the raw float values,
        # then truncate everything to ints as picamera expects.
        flipped_y = int(self.root_window.size[1] - h - y)
        self.cam.preview_window = (int(x), flipped_y, int(w), int(h))

    def on_stop(self):
        """Kivy hook: run when the app stops -- release preview and udev observer."""
        self.toggle_preview(on=False)
        self.stop_listening_usb()

    def toggle_preview(self, on):
        """preview : Toggle display of the preview"""
        if not on:
            # stop preview
            self.preview_on = False
            self.cam.stop_preview()
            # bit of log
            Logger.info("Camera: preview disabled")
            return
        # display preview, mirrored/rotated per current settings
        self.preview_on = True
        self.cam.start_preview(fullscreen=False,
                               rotation=self.rotation_angle,
                               hflip=self.h_flip,
                               vflip=self.v_flip)
        self.set_preview_dims(self.root.ids['preview'].pos,
                              self.root.ids['preview'].size)
        self.cam.resolution = self.resolution
        # a bit of log
        Logger.info("Camera: preview enabled")

    def camera_angle_change(self, text):
        """camera_angle_change : change rotation angle"""
        # map the spinner label to an angle; unknown labels fall back to 0°
        angles = {
            'No rotation': 0,
            'rotate 90°': 90,
            'rotate 180°': 180,
            'rotate 270°': 270,
        }
        self.rotation_angle = angles.get(text, 0)
        self.cam.rotation = self.rotation_angle

    def add_time_toggle(self, state_active):
        """Enable or disable the elapsed-time annotation overlay."""
        # the foreground colour is white in both states
        self.cam.annotate_foreground = picamera.Color('white')
        if state_active:
            self.cam.annotate_background = picamera.Color('black')
            self.cam.annotate_text = 'Elapsed Time'
        else:
            self.cam.annotate_background = None
            self.cam.annotate_text = ''

    def consistent_image_toggle(self, state_active):
        """consistent_image_toggle : set or unset 50Hz flickering filter and consistent image settings across time"""
        if state_active:
            # Set ISO to the desired value
            # self.cam.iso = 10
            self.cam.exposure_mode = 'off'
            sleep(0.5)
            # Now fix the values
            # adjust speed as a multiple of 10ms to prevent 50Hz flickering (multiple of 100Hz -> 10000µs)
            # with a min of 10ms
            # BUG FIX: unit typo in the log message ("µts" -> "µs")
            Logger.info("Camera: Shutter speed is {:3.0f}µs".format(
                self.cam.exposure_speed))
            speed = max(
                int(round(self.cam.exposure_speed / 10000)) * 10000, 10000)
            self.cam.shutter_speed = speed
            Logger.info("Camera: Shutter speed set to {:3.0f}µs".format(speed))
            # freeze the current auto-white-balance gains
            g = self.cam.awb_gains
            self.cam.awb_mode = 'off'
            self.cam.awb_gains = g
        else:
            # restore automatic exposure and white balance
            self.cam.shutter_speed = 0
            self.cam.exposure_mode = 'auto'
            self.cam.awb_mode = 'auto'

    def resolution_change(self, text):
        """resolution_change : change resolution of the timelapse"""
        # spinner label -> picamera resolution string; unknown labels -> FHD
        table = {
            '640x480': '640x480',
            '800x600': '800x600',
            'HD 1280x720p': '720p',
            'FHD 1920x1080p': '1080p',
            'Max 2592x1944': '2592x1944',  # max value for 2.0 pi camera
        }
        self.resolution = table.get(text, '1080p')
        self.update_ui_time_info()
        self.cam.resolution = self.resolution

    def hflip_change(self, active):
        """hflip_change: set or unset horizontal flip"""
        # mirror the UI switch state onto the camera
        self.h_flip = active
        self.cam.hflip = self.h_flip

    def vflip_change(self, active):
        """vflip_change: set or unset vertical flip"""
        # mirror the UI switch state onto the camera
        self.v_flip = active
        self.cam.vflip = self.v_flip

    def interval_time_change(self):
        """interval_time_change: change interval time and reflect on ui"""
        # read value + unit from the UI and convert to seconds
        val = int(self.root.ids['interval_value'].text)
        unit = self.root.ids['interval_unit'].text
        self.interval_time = self.compute_time(val, unit)
        self.adapt_ui_to_interval_time()
        self.update_ui_time_info()

    def adapt_ui_to_interval_time(self):
        """adapt_ui: change ui widgets availability with interval_time"""
        # frame period <= 2s : disable image display (too fast to refresh)
        if self.interval_time <= 2:
            self.root.ids['show_images'].active = False
            self.root.ids['show_images'].disabled = True
        else:
            self.root.ids['show_images'].disabled = False

    def total_time_change(self):
        """total_time_change: change total time and reflect on ui"""
        # read value + unit from the UI and convert to seconds
        val = int(self.root.ids['total_value'].text)
        unit = self.root.ids['total_unit'].text
        self.total_time = self.compute_time(val, unit)
        self.update_ui_time_info()

    def update_ui_time_info(self):
        """update_ui_time_info: update time and size info according to settings"""
        every = self.interval_time
        total = self.total_time
        # compute number of frames
        # NOTE(review): every == 0 would raise ZeroDivisionError; presumably
        # the UI prevents a zero interval -- confirm.
        number_of_frames = int(total / every)
        # update infos label
        s = "Ready to take {:d} frames : 1 frame every {:d}s; total time {:d}s"
        self.root.ids['infos'].text = s.format(number_of_frames, every, total)

    @staticmethod
    def compute_time(value, unit):
        """compute_time : compute internal times in seconds according to units"""
        seconds_per_unit = {'s': 1, 'min': 60, 'h': 3600, 'd': 3600 * 24}
        factor = seconds_per_unit.get(unit)
        # unknown units fall through to None, like the original if-chain
        return None if factor is None else value * factor

    def timelapse_toggle(self):
        """timelapse_toggle : start or stop timelapse"""
        # start timelapse if not started
        if not self.timelapse_started:
            # dialog presumably also confirms deleting previous images
            self.confirm_popup = Factory.DialogConfirmDelete()
            self.confirm_popup.open()
        # or stop if it was running
        else:
            # check first if the user really wants to stop...
            self.confirm_popup = Factory.DialogConfirmStop()
            self.confirm_popup.open()

    def start_timelapse(self):
        """start_timelapse: starts the timelapse"""
        self.change_ui_for_timelapse(start_timelapse=True)
        if self.confirm_popup is not None:
            self.confirm_popup.dismiss()
            self.confirm_popup = None
        # delete_images is defined elsewhere in the original file
        self.delete_images()
        # schedule a capture every interval_time seconds...
        self.timelapse_event = Clock.schedule_interval(
            lambda dt: self.perform_capture(), self.interval_time)
        self.frame_counter = 0
        # ...and take frame 0 immediately
        self.perform_capture()
        self.timelapse_started = True
        Logger.info("Timelapse: Started every: {:d} s / total: {:d} s".format(
            self.interval_time, self.total_time))

    def stop_timelapse(self):
        """stop_timelapse: stops the timelapse"""
        if self.confirm_popup is not None:
            self.confirm_popup.dismiss()
            self.confirm_popup = None
        # NOTE(review): casing differs from add_time_toggle's 'Elapsed Time'
        self.cam.annotate_text = "Elapsed time"
        self.timelapse_event.cancel()
        self.timelapse_event = None
        Logger.info("Timelapse: Stopped ({:d} frames captured)".format(
            self.frame_counter))
        self.frame_counter = 0
        self.timelapse_started = False
        self.change_ui_for_timelapse(start_timelapse=False)

    def change_ui_for_timelapse(self, start_timelapse=True):
        """change_ui_for_timelapse: disable/enable everything possible for/after timelapse"""
        if start_timelapse:
            # lock every setting widget while the timelapse is running
            self.stop_listening_usb()
            self.toggle_preview(on=False)
            self.root.ids['timelapse_toggle'].text = 'Stop Timelapse'
            self.root.ids['resolution_timelapse'].disabled = True
            self.root.ids['rotation'].disabled = True
            self.root.ids['hflip'].disabled = True
            self.root.ids['vflip'].disabled = True
            self.root.ids['interval_value'].disabled = True
            self.root.ids['interval_unit'].disabled = True
            self.root.ids['total_value'].disabled = True
            self.root.ids['total_unit'].disabled = True
            self.root.ids['show_images'].disabled = True
            self.root.ids['add_time'].disabled = True
            self.root.ids['consistent_images'].disabled = True
            # self.root.ids['create_video'].disabled = True
            self.root.ids['quit'].disabled = True
        else:  # False -> stop timelapse
            # restore the UI once the timelapse has stopped
            self.start_listening_usb()
            self.toggle_preview(on=True)
            self.root.ids['timelapse_toggle'].text = 'Start Timelapse'
            self.root.ids['preview'].source = "1blackpixel.png"
            self.update_ui_time_info()
            self.root.ids['resolution_timelapse'].disabled = False
            self.root.ids['rotation'].disabled = False
            self.root.ids['hflip'].disabled = False
            self.root.ids['vflip'].disabled = False
            self.root.ids['interval_value'].disabled = False
            self.root.ids['interval_unit'].disabled = False
            self.root.ids['total_value'].disabled = False
            self.root.ids['total_unit'].disabled = False
            self.root.ids['show_images'].disabled = False
            self.root.ids['add_time'].disabled = False
            self.root.ids['consistent_images'].disabled = False
            # self.root.ids['create_video'].disabled = False
            self.root.ids['quit'].disabled = False

    def perform_capture(self):
        """Capture one timelapse frame to ../images and update the UI.

        Stops the timelapse once the configured total duration is reached.
        Fix: previously the log line and the "Timelapse Running" UI text
        were written AFTER stop_timelapse() had reset frame_counter to 0,
        so the last capture logged "frame num 0" and the UI claimed the
        timelapse was still running.
        """
        filename = "../images/img_{:05d}.jpg".format(self.frame_counter)
        if self.root.ids['add_time'].active:
            # Overlay the elapsed time on the frame ("jour" keeps the
            # annotation in French, matching the rest of the UI).
            elapsed_time = timedelta(seconds=self.interval_time *
                                     self.frame_counter)
            self.cam.annotate_text = str(elapsed_time).replace("day", "jour")
        self.cam.capture(filename)
        if self.root.ids['show_images'].active:
            self.root.ids['preview'].source = filename
        self.frame_counter += 1
        # Log before any possible stop: stop_timelapse() resets frame_counter.
        Logger.info("Timelapse: captured frame num {:d} : {:s}".format(
            self.frame_counter, filename))
        if self.interval_time * self.frame_counter > self.total_time:
            self.stop_timelapse()
            return  # do not overwrite the "stopped" UI state below
        self.root.ids[
            'infos'].text = "Timelapse Running (Captured frame n°{:d}.)".format(
                self.frame_counter)

    def create_video(self):
        """Hide the preview and open the popup that drives ffmpeg video creation."""
        # TODO: find a way to get ffmpeg stats and run asynchronously
        self.toggle_preview(on=False)
        popup = Factory.VideoProgress()
        self.progress_popup = popup
        popup.open()

    def launch_video_thread(self):
        """Placeholder: intended to run the ffmpeg video creation in a
        background thread. Not implemented yet (see the TODO in create_video).
        """
        pass

    def on_quit_button_released(self):
        """Hide the preview and ask the user to confirm quitting."""
        self.toggle_preview(on=False)
        confirm_dialog = Factory.DialogConfirmQuit()
        confirm_dialog.open()

    @staticmethod
    def poweroff():
        """Power off the Pi via the system 'poweroff' command."""
        Logger.info("App: Stopping the pi")
        system("sudo poweroff")
        # Fix: the original then called self.stop(), but 'self' does not
        # exist in a @staticmethod (NameError). The process dies with the
        # system shutdown anyway, so the call is removed.

    @staticmethod
    def delete_images():
        """delete_images: empty the images dir.

        Removes the whole ../images directory and recreates it empty.
        Uses shutil/os instead of shelling out to 'rm && mkdir': portable,
        no shell dependency, and immune to quoting issues.
        """
        import os
        import shutil
        Logger.info("Timelapse: deleting images in the \"images\" dir")
        # delete the directory and recreate it (ignore_errors mirrors rm -f:
        # a missing directory is not an error)
        shutil.rmtree("../images", ignore_errors=True)
        os.makedirs("../images", exist_ok=True)
예제 #13
0
    def start_streaming(self, rec_filename=False):
        """Start video capture on the configured device and block until stopped.

        For Pi cameras ('picam1'/'picam2') this starts an H.264 recording:
        optionally to ``rec_filename`` at full resolution, and always to
        ``self.stream`` (via splitter port 2, resized to the streaming size
        when also recording).  It then blocks while ``self.streaming`` is
        True.  For any other device it reads raw frames via the Frame
        helper and writes them to ``self.stream``.

        rec_filename -- output file for the recording, or False to stream only.
        """
        # Create the camera source
        if self.device == 'picam1' or self.device == 'picam2':
            if self.device == 'picam1':
                self.camera = picamera.PiCamera(camera_num=0)
            else:
                self.camera = picamera.PiCamera(camera_num=1)

            # The camera frame size has to be the larger of the streaming and recording size
            self.rec_width = max(self.width, self.rec_width)
            self.rec_height = max(self.height, self.rec_height)

            # Initialize the camera parameters
            self.camera.resolution = (self.rec_width, self.rec_height)
            self.camera.framerate = self.fps
            self.camera.awb_mode = 'sunlight'

            # Are we recording and streaming, or just streaming?
            self.streaming = True
            if rec_filename:
                self.recording = True
                # Full-size recording on the default splitter port...
                self.camera.start_recording(
                    rec_filename,
                    format='h264',
                    intra_period=self.rec_intra_period,
                    inline_headers=self.rec_inline_headers,
                    bitrate=self.rec_bitrate,
                    quality=self.rec_quality)
                # ...and a second, resized encode for the stream on port 2.
                self.camera.start_recording(self.stream,
                                            format='h264',
                                            intra_period=self.intra_period,
                                            inline_headers=self.inline_headers,
                                            bitrate=self.bitrate,
                                            quality=self.quality,
                                            splitter_port=2,
                                            resize=(self.width, self.height))
            else:
                self.camera.start_recording(self.stream,
                                            format='h264',
                                            intra_period=self.intra_period,
                                            inline_headers=self.inline_headers,
                                            bitrate=self.bitrate)

            # Block here; wait_streaming() presumably services the encoder
            # until another thread clears self.streaming -- confirm in class.
            while self.streaming:
                self.wait_streaming(1)

        else:

            # We can only read one stream, so we'll use the best
            self.width = max(self.width, self.rec_width)
            self.height = max(self.height, self.rec_height)

            # Open the device
            control = Control(self.device)
            # NOTE(review): 9963800 looks like a V4L2 control id -- confirm
            # which control (and what value 2 means) against the Control class.
            control.set_control_value(9963800, 2)
            control.close()

            # Start streaming frames
            frame = Frame(self.device, self.width, self.height)
            self.streaming = True
            while self.streaming:
                frame_data = frame.get_frame()
                self.stream.write(frame_data)
예제 #14
0
def main(argobj):
    """Camera-overlay renderer (Python 2 code: uses dict.itervalues()).

    Shows the Pi camera preview and repeatedly unpickles overlay "form"
    objects from a local socket connection, compositing them onto an RGBA
    pad that is pushed as a picamera overlay layer above the preview.
    Render timings are appended to render_times.csv.

    argobj -- parsed CLI arguments; only .overlay_only is read here.
    """
    render_times = open("render_times.csv", 'w+')
    pickled_overlays = open("pickled_overlays.txt", "w+")
    camera = None
    srv_sock, conn = None, None

    def gentle_shutdown(*args):
        # SIGINT handler / error path: release the camera and socket, exit.
        if camera:
            camera.close()
        if conn:
            conn.close()

        sys.exit(0)

    signal.signal(signal.SIGINT, gentle_shutdown)

    logging.basicConfig(format='%(asctime)s %(message)s',
                        stream=sys.stdout,
                        level=logging.DEBUG)

    # Debug mode: show only the overlay pad, no camera. create_pad() is
    # defined elsewhere in this file.
    if argobj.overlay_only:
        logging.info("Showing naked overlay")
        pad = create_pad()
        pad.show()
        sys.exit(1)

    try:
        last = None
        overlays = {}

        camera = picamera.PiCamera()
        camera.start_preview()
        logging.info('Preview started')

        srv_sock, conn = get_conn_comm()

        while True:

            try:
                #logging.info("trying to read from sock")
                #lines = conn.makefile().readlines()
                #data = "".join([line + '\n' for line in lines])
                #pickled_overlays.write(data)
                # SECURITY: pickle.load on data from a socket executes
                # arbitrary code if the peer is untrusted -- acceptable only
                # because the com server is local/trusted.
                overlays = pickle.load(conn.makefile())
                logging.info('Unpickled {} objects'.format(len(overlays)))
            except EOFError as e:
                # Peer closed the connection; drop overlays and reconnect.
                pickled_overlays.close()
                logging.info("Com server died?")
                overlays = {}

                try:
                    conn.close()
                except Exception as e:
                    logging.debug(
                        "Closing connection after EOF did not succeed")

                try:
                    srv_sock.close()
                except Exception as e:
                    logging.debug(
                        "Closing server connection after EOF did not succeed")

                # reconnect
                srv_sock, conn = get_conn_comm()

            # Create a new canvas for overlays

            pad = create_pad()
            if NO_GREEN_BAR or len(overlays) == 0:
                # Blank pad; dimensions rounded up to the 32x16 block size
                # that picamera overlays require.
                pad = Image.new('RGBA', (
                    ((1280 + 31) // 32) * 32,
                    ((720 + 15) // 16) * 16,
                ))

            inBuff = ""
            count = 0
            # Paste all our overlays onto the pad
            for form in overlays.itervalues():
                count += 1
                inBuff = form.inputBuff
                # Mixed .format() + % formatting: {} takes the buffer name,
                # %d takes the millisecond timestamp.
                render_times.write(
                    "bitmap render: {},%d\n".format(form.inputBuff) %
                    (int(time.time() * 1000)))
                pad.paste(form.img, (ORIGIN_X, ORIGIN_Y))
                pad.paste(form.img2, (INPUT_X, ORIGIN_Y))
                for field in form.fields:
                    count += 1
                    pad.paste(field.img, (field.x + 7, field.y - 3))
            print("total number of forms + inputs: %d" % (count))
            # Create a new overlay to add to the screen
            new_overlay = camera.add_overlay(pad.tobytes(), size=pad.size)
            render_times.write("bitmap display: {},%d\n".format(inBuff) %
                               (int(time.time() * 1000)))
            render_times.flush()
            # Renders the overlay above the camera preview layer
            new_overlay.layer = 3

            if last:
                # Push the last layer underneath the preview then remove it
                last.layer = 0
                camera.remove_overlay(last)
            last = new_overlay
    except Exception as e:
        pickled_overlays.close()
        logging.exception(e)
        gentle_shutdown()
예제 #15
0
def main():
    """Voice-driven kiosk flow for the "Genie Salon" demo.

    Waits for a spoken phrase containing the salon keyword, then either
    takes a photo and uploads it to the server for a hairstyle
    recommendation, or opens a page listing nearby salons.

    Fix: the original called t.stop() on a threading.Thread, which has no
    stop() method and raised AttributeError at runtime.
    """
    # environment / device setup
    img_src = '/home/pi/ai-makers-kit/python3/genieSalon/homepage/static/img/capture.jpg'
    url = 'http://211.254.215.243:18070'
    output_file = "testtts.wav"
    camera = picamera.PiCamera()
    camera.resolution = (720, 480)

    # Wait for the wake phrase containing the salon keyword.
    test()
    text = getVoice2Text()
    while text.find("미용실") == -1:
        test()
        text = getVoice2Text()
    getText2VoiceStream("지니살롱에 오신것을 환영합니다. 추천을 받으시려면 사진찍어줘를 말해주세요.", output_file)
    MS.play_file(output_file)
    if (text.find("지니 살롱") != -1 or text.find("미용실") != -1):
        print("goto geniesalon")
        test()
        text = getVoice2Text()
        # Wait until the user asks for a photo or for nearby salons.
        while text.find("사진") == -1 and text.find("주변") == -1 and text.find("근처") == -1:
            test()
            text = getVoice2Text()
        print(text)
        print("check keyword picture or gucnhar")
        if text.find("사진") != -1:
            getText2VoiceStream("네. 준비가되시면 지니야를 불러주세요.", output_file)
            MS.play_file(output_file)
            # Take a picture once the wake word is heard.
            camera.start_preview()
            text = getVoice2Text()
            try:
                while text.find("지니") == -1:
                    text = getVoice2Text()
            except:
                # Speech recognition failed; abort cleanly.
                camera.stop_preview()
                return
            camera.capture(img_src)
            camera.stop_preview()
            # Send the picture to the server ('with' guarantees the file
            # handle is closed; the original leaked it on error).
            print("start send img")
            with open(img_src, "rb") as img_send:
                img_tmp = img_send.read()
            client_conn.sendImage(img_tmp)
            print("send img")
            sleep(5)
            getText2VoiceStream("사진이 전송되는동안 잠시 기다려주세요", output_file)
            MS.play_file(output_file)
            webbrowser.open(url + "/recommendation")
        elif text.find("주변") != -1 or text.find("근처") != -1:
            # Nearby-salon info was requested.
            print("jubyeon")
            getText2VoiceStream("주변 미용실정보를 보여드릴께요", output_file)
            #MS.play_file(output_file)
            findsalon_url = (url + '/findSalon')
            t = threading.Thread(target=openbrowser, args=(findsalon_url,))
            t.start()
            sleep(2)
            # Fix: t.stop() removed -- threading.Thread has no stop();
            # join() alone waits for the browser-opening thread.
            t.join()
            text = getVoice2Text()
            while text.find("예약") == -1:
                text = getVoice2Text()
            print("ok.goto reservation")
예제 #16
0
import RPi.GPIO as GPIO
from picamera.array import PiRGBArray
import picamera
import time
import numpy as np
import cv2
import time
import threading
from sklearn.cluster import KMeans

# start) init variables #####################################################
# global variables ----------------------------------------------------------
BUZZER_PIN = 12  # BOARD-numbered pin driving the buzzer

# main variable -------------------------------------------------------------
cam = picamera.PiCamera()
rawCapture = PiRGBArray(cam)
# Capture one frame up front just to learn the camera's frame dimensions.
cam.capture(rawCapture, format='bgr')
img = rawCapture.array
height, width = img.shape[:2]
imgSet = []  # set of image for write video
imgCount = 0  # number of processed imgs
avgTime = 0  # to calculate this program's each image processing time
font = cv2.FONT_HERSHEY_SIMPLEX  # alarm safety or not
prevDist = 0  # previous frame's distance (NOTE(review): units unclear from here)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(BUZZER_PIN, GPIO.OUT)
buzzer = GPIO.PWM(BUZZER_PIN, 882)  # PWM tone at 882 Hz for the alarm buzzer

# shoes variable ------------------------------------------------------------
tracker = cv2.MultiTracker_create()  # init tracker for tracking shoes
예제 #17
0
                self.connection.flush()
                self.stream.seek(0)
                self.connection.write(self.stream.read(size))
                self.count += 1
                self.stream.seek(0)
        self.stream.write(buf)


# MJPEG streaming client: record 30 s from the Pi camera and push each
# frame to the server through a SplitFrames sink over a TCP socket.
my_server = '192.168.0.35'
#my_server='172.18.1.249'
client_socket = socket.socket()
client_socket.connect((my_server, 8000))  #my_server is the address
connection = client_socket.makefile('wb')
try:
    output = SplitFrames(connection)
    with picamera.PiCamera(resolution='VGA', framerate=30) as camera:
        time.sleep(2)  # let the sensor settle before recording
        start = time.time()
        camera.start_recording(output, format='mjpeg')
        camera.wait_recording(30)
        camera.stop_recording()
        # Write the terminating 0-length to the connection to let the
        # server know we're done
        connection.write(struct.pack('<L', 0))
finally:
    # Close the buffered wrapper AND the underlying socket; the original
    # leaked the socket by closing only the makefile() object.
    connection.close()
    client_socket.close()
예제 #18
0
    def frames():
        """Generator: yield JPEG frames from the Pi camera for live
        streaming, while recording hourly AVI files and running motion
        detection on every frame.

        NOTE(review): defined without 'self' -- presumably used as a static
        generator on the Camera class; confirm against callers.
        """
        # Initialize the camera
        with picamera.PiCamera() as camera:
            # Flip the camera image vertically and horizontally
            camera.vflip = True
            camera.hflip = True

            # Set the resolution
            camera.resolution = (640, 480)

            # Capture camera frames in real time
            with picamera.array.PiRGBArray(camera) as stream:
                # Open the recording video file (one file per hour)
                curstr = datetime.datetime.now().strftime("%Y%m%d_%H")
                fourcc = cv2.VideoWriter_fourcc(*'XVID')
                out = cv2.VideoWriter(
                    str(videopath) + '/video_' + curstr + '.avi', fourcc, 20.0,
                    (640, 480))

                # Wait for the camera exposure to settle
                time.sleep(2)

                while True:  # repeatedly grab a frame and write it to file
                    # Grab a frame from the camera
                    camera.capture(stream, 'bgr', use_video_port=True)

                    # Timestamp for the current hour
                    nowstr = datetime.datetime.now().strftime("%Y%m%d_%H")

                    # Switch to a new video file when the hour changes
                    if curstr != nowstr:
                        curstr = nowstr
                        out.release()
                        out = cv2.VideoWriter(
                            str(videopath) + '/video_' + curstr + '.avi',
                            fourcc, 20.0, (640, 480))

                    # Record the frame
                    out.write(stream.array)

                    # Run motion detection on this frame
                    Camera.moveDetect(stream.array)

                    # Yield the frame as JPEG for the live stream
                    yield cv2.imencode('.jpg', stream.array)[1].tobytes()

                    # Show the resulting image
                    #cv2.imshow('camera', stream.array)

                    # Exit when any key is pressed
                    if cv2.waitKey(1) < 255:
                        break

                    # Discard the frame read from the camera
                    stream.seek(0)
                    stream.truncate()

                # Release the writer and close any display windows
                out.release()
                cv2.destroyAllWindows()


# For standalone launch (uncomment to run directly):
#Camera.frames()
def main():
    """Entry point: run TFLite object detection on the Edge TPU over the
    Pi camera preview, track detections with a centroid tracker, and
    annotate object IDs, per-frame latency, and average FPS.

    NOTE(review): 'ct' (centroid tracker), CAMERA_WIDTH/HEIGHT and the
    helper functions are defined elsewhere in this file.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    parser.add_argument('--threshold',
                        help='Score threshold for detected objects.',
                        required=False,
                        type=float,
                        default=0.4)
    args = parser.parse_args()

    labels = load_labels(args.labels)
    interpreter = Interpreter(
        args.model,
        experimental_delegates=[load_delegate('libedgetpu.so.1.0')])  #coral
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']

    #initialize variables to calculate FPS
    # NOTE(review): this list grows without bound over a long run; the
    # average is deliberately over the whole session.
    instantaneous_frame_rates = []

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=10) as camera:
        camera.start_preview()  #alpha = 200
        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                stream.seek(0)
                start_time = time.monotonic(
                )  #start_time declaration moved to give a more accurate measurement to calculate FPS
                image = Image.open(stream).convert('RGB').resize(
                    (input_width, input_height), Image.ANTIALIAS)
                results = detect_objects(interpreter, image, args.threshold)

                # get the coordinates for all bounding boxes within frame
                rects = get_rects(results)

                # return active objects from the centroid tracker
                objects = ct.update(rects)

                annotator.clear()
                annotate_objects(annotator, results, labels)

                for (objectID, centroid) in objects.items():

                    text = "ID {}".format(objectID)
                    annotator.text([centroid[0], centroid[1]], text)

                elapsed_ms = (time.monotonic() - start_time) * 1000
                annotator.text([5, 0], '%.1f ms' % (elapsed_ms))
                frame_rate = 1 / ((time.monotonic() - start_time))
                #annotator.text([5, 15], '%.1f FPS' % (frame_rate))
                #print('%.1f FPS' % (frame_rate))
                #annotator.update()

                #calculate average FPS
                instantaneous_frame_rates.append(frame_rate)
                avg_frame_rate = sum(instantaneous_frame_rates) / len(
                    instantaneous_frame_rates)
                print("FPS: " + str(avg_frame_rate))
                annotator.text([5, 15], '%.1f FPS' % (avg_frame_rate))

                annotator.update()

                stream.seek(0)
                stream.truncate()

        finally:
            camera.stop_preview()
예제 #20
0
    def test(self):
        """Render a random pattern on the OLED, photograph it with the Pi
        camera, read the sampled pixels back via self.all_points_array, and
        count mismatches against the displayed pattern.

        Fix: np.zeros(64, 128) passed 128 as the dtype argument and raised
        TypeError; the shape must be a tuple: np.zeros((64, 128)).  Fixed
        in both the test pattern and the result array.
        """
        # Random test pattern: alternating pixels get a random 0/1 value.
        test_array = np.zeros((64, 128), dtype=int)
        for i in range(0, 64, 2):
            for j in range(0, 128, 2):
                test_array[i][j] = int(np.random.randint(0, 2, size=1))

        RST = 24
        # 128x64 display with hardware I2C:
        disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)

        # Initialize library.
        disp.begin()

        # Clear display.
        disp.clear()
        disp.display()

        # Create blank image for drawing.
        # Make sure to create image with mode '1' for 1-bit color.
        self.width = disp.width
        self.height = disp.height
        image_1 = Image.new('1', (self.width, self.height))

        # Get drawing object to draw on image.
        draw = ImageDraw.Draw(image_1)

        # Draw a black filled box to clear the image.
        draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)

        # Light the pixels selected in test_array.
        for i in range(0, self.height):
            for j in range(0, self.width):
                if int(test_array[i][j]) == 1:
                    draw.point([(j, i)], fill=255)  # x,y

        disp.image(image_1)
        disp.display()

        # Now get the pixel data

        camera = picamera.PiCamera()
        camera.resolution = (2592, 1944)
        camera.start_preview()
        camera.led = False
        time.sleep(2)
        camera.capture('test.jpg')
        camera.stop_preview()
        image = 'test.jpg'
        self.crop(image, (1020, 620, 1800, 1050), 'test_crop.jpg')
        image = 'test_crop.jpg'
        img = Image.open(image)
        pixels = list(img.getdata())
        # Average RGB into grayscale triples.
        pixels2 = []
        for pixel in pixels:
            total = 0
            for x in pixel:
                total += x
            total = int(total / 3)
            pixels2.append((total, total, total))

        # Threshold at 120 to separate lit pixels from background.
        filtered_list = self.filter(120, pixels2)
        img = Image.new('RGB', Image.open('test_crop.jpg').size)
        img.putdata(filtered_list)
        img.save('test_filter.jpg')
        img = cv2.imread('test_filter.jpg')

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.erode(gray, None, iterations=1)

        # Read back each display pixel at its calibrated photo coordinate.
        result_array = np.zeros((64, 128), dtype=int)

        for i in range(64):
            for j in range(128):
                if gray[int(self.all_points_array[i][j][0])][int(self.all_points_array[i][j][1])] == 0:
                    result_array[i][j] = 0
                else:
                    result_array[i][j] = 1

        # Compare only the sampled grid positions against the pattern.
        errors_list = []
        errors = 0
        for i in range(0, 64, 4):
            for j in range(0, 128, 2):
                if (abs(result_array[i][j] - test_array[i][j])) > 0.1:
                    errors += 1
                    errors_list.append((i, j))

        print('errors', errors)
        print(len(errors_list))

        camera.close()
예제 #21
0
def takephoto():
    """Capture a single still image to 'image.jpg'.

    Uses the camera as a context manager so the device is always released;
    the original leaked the PiCamera handle, which blocks any later open
    of the camera until the process exits.
    """
    with picamera.PiCamera() as camera:
        camera.capture('image.jpg')
예제 #22
0
    def test_1(self, num):
        """Light `num` randomly chosen pixels on the OLED, photograph the
        display, and print the read-back coordinates next to the chosen
        ones so they can be compared.

        num -- number of random bright pixels to draw (row in 0..63,
               column in 0..127; duplicates are possible).
        """
        test_list = []
        for r in range(num):
            test_list.append((int(np.random.randint(0, 64, size=1)), int(np.random.randint(0, 128, size=1))))

        RST = 24
        # 128x64 display with hardware I2C:
        disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)

        # Initialize library.
        disp.begin()

        # Clear display.
        disp.clear()
        disp.display()

        # Create blank image for drawing.
        # Make sure to create image with mode '1' for 1-bit color.
        self.width = disp.width
        self.height = disp.height
        image_1 = Image.new('1', (self.width, self.height))

        # Get drawing object to draw on image.
        draw = ImageDraw.Draw(image_1)

        # Draw a black filled box to clear the image.
        draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)

        # Points are stored (row, col) but drawn as (x, y) = (col, row).
        for point in test_list:
            draw.point([(point[1], point[0])], fill=255)

        disp.image(image_1)
        disp.display()

        # Now get the pixel data

        camera = picamera.PiCamera()
        camera.resolution = (2592, 1944)
        camera.start_preview()
        camera.led = False
        time.sleep(2)
        camera.capture('test.jpg')
        camera.stop_preview()
        image = 'test.jpg'
        self.crop(image, (1020, 620, 1800, 1050), 'test_crop.jpg')
        image = 'test_crop.jpg'
        img = Image.open(image)
        pixels = list(img.getdata())
        # Average RGB into grayscale triples.
        pixels2 = []
        for pixel in pixels:
            total = 0
            for x in pixel:
                total += x
            total = int(total / 3)
            pixels2.append((total, total, total))

        # Threshold at 120 to separate lit pixels from background.
        filtered_list = self.filter(120, pixels2)
        img = Image.new('RGB', Image.open('test_crop.jpg').size)
        img.putdata(filtered_list)
        img.save('test_filter.jpg')
        img = cv2.imread('test_filter.jpg')

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.erode(gray, None, iterations=1)
        # Every display position that reads back non-black is reported lit.
        result_list = []
        for i in range(64):
            for j in range(128):
                if not (gray[int(self.all_points_array[i][j][0])][int(self.all_points_array[i][j][1])] == 0):
                    result_list.append((i, j))

        print('test_value', test_list,'result_value', result_list)
        camera.close()
예제 #23
0
 def cameraOpen(self):
     """Re-open the Pi camera if it has been closed, pausing briefly for warm-up."""
     if not self.camera.closed:
         return
     self.camera = picamera.PiCamera()
     time.sleep(1)
예제 #24
0
    def start(self):
        """ In order to initialise the self.all_points_array that is to find the center coords of all the pixels on the adafruit display """
        # Raspberry Pi pin configuration:
        RST = 24
        # 128x64 display with hardware I2C:
        disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)

        disp.begin()
        disp.clear()
        disp.display()

        self.width = disp.width
        self.height = disp.height
        image_1 = Image.new('1', (self.width, self.height))

        # Get drawing object to draw on image.
        draw = ImageDraw.Draw(image_1)

        # Draw a black filled box to clear the image.
        draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)

        # using only these pixels so that i could later sort as per vertical axis values and then horizontal
        for i in range(0, self.height, 4):
            for j in range(0, self.width, 2):
                draw.point([(j, i)], fill=255)  # x,y

        disp.image(image_1)
        disp.display()

        # Now get the pixel data

        camera = picamera.PiCamera()
        camera.resolution = (2592, 1944)
        camera.start_preview()
        camera.led = False
        time.sleep(2)
        camera.capture('full_multi_point_2.jpg')
        camera.stop_preview()
        image = 'full_multi_point_2.jpg'
        self.crop(image, (1020, 620, 1800, 1050), 'full_multi_crop_2.jpg')
        image = 'full_multi_crop_2.jpg'
        img = Image.open(image)
        pixels = list(img.getdata())
        pixels2 = []
        for pixel in pixels:
            total = 0
            for x in pixel:
                total += x
            total = int(total / 3)
            pixels2.append((total, total, total))
        # averaging over the RGB spectrum
        filtered_list = self.filter(140, pixels2)
        img = Image.new('RGB', Image.open('full_multi_crop_2.jpg').size)
        img.putdata(filtered_list)
        img.save('full_multi_filter_2.jpg')

        start_time = time.time()

        image = cv2.imread('full_multi_filter_2.jpg')

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.erode(gray, None, iterations=1)

        new_image = Image.fromarray(gray)
        new_image.save('cv_proc_mult.png')

        labels = measure.label(gray, neighbors=8, background=0)
        mask = np.zeros(gray.shape, dtype="uint8")

        # loop over the unique components
        for label in np.unique(labels):
            # if this is the background label, ignore it
            if label == 0:
                continue
            # otherwise, construct the label mask and count the
            # number of pixels
            labelMask = np.zeros(gray.shape, dtype="uint8")
            labelMask[labels == label] = 255

            # if the number of pixels in the component is sufficiently
            # large, then add it to our mask of "large blobs"
            mask = cv2.add(mask, labelMask)

        # find the contours in the mask, then sort them from left to
        # right

        pixel_point = []
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        cnts = contours.sort_contours(cnts)[0]
        # loop over the contours
        for (i, c) in enumerate(cnts):
            # draw the bright spot on the image
            ((cX, cY), radius) = cv2.minEnclosingCircle(c)
            pixel_point.append((int(cY), int(cX)))
            cv2.circle(image, (int(cX), int(cY)), int(radius), (0, 0, 255), 2)

        # show the output image
        new_image = Image.fromarray(image)
        new_image.save('full_cv_proc_mult_2.jpg')

        # sorting the list as per vertical coordinate in order to get the rows
        pixel_point_sort = sorted(pixel_point, key=lambda y: y[0])
        new_pixel_point = []
        for i in range(16):
            new_pixel_point.append(sorted(pixel_point_sort[64 * i:64 * (i + 1)], key=lambda z: z[1]))
        # sorting all the rows as per the horizontal values
        new_pixel_array = np.array([y for x in new_pixel_point for y in x])
        new_pixel_array.shape = (16, 64, 2)

        # interpolating over the smaller regions
        self.all_points_array = np.array(np.zeros(64 * 128 * 2))
        self.all_points_array.shape = (64, 128, 2)
        for i in range(0, 16):
            for j in range(0, 64):
                self.all_points_array[60 - 4 * i][126 - 2 * j][0] = new_pixel_array[i][j][0]
                self.all_points_array[60 - 4 * i][126 - 2 * j][1] = new_pixel_array[i][j][1]

        for i in range(0, 61, 4):
            for j in range(1, 127, 2):
                self.all_points_array[i][j][0] = int((self.all_points_array[i][j - 1][0] + self.all_points_array[i][j + 1][0]) / 2)
                self.all_points_array[i][j][1] = int((self.all_points_array[i][j - 1][1] + self.all_points_array[i][j + 1][1]) / 2)

        for i in range(61):
            for j in range(127):
                if i % 4 == 1:
                    self.all_points_array[i][j][0] = int(
                        (3 * self.all_points_array[i - 1][j][0] + self.all_points_array[i + 3][j][0]) / 4)
                    self.all_points_array[i][j][1] = int(
                        (3 * self.all_points_array[i - 1][j][1] + self.all_points_array[i + 3][j][1]) / 4)
                elif i % 4 == 2:
                    self.all_points_array[i][j][0] = int(
                        (2 * self.all_points_array[i - 2][j][0] + 2 * self.all_points_array[i + 2][j][0]) / 4)
                    self.all_points_array[i][j][1] = int(
                        (2 * self.all_points_array[i - 2][j][1] + 2 * self.all_points_array[i + 2][j][1]) / 4)
                elif i % 4 == 3:
                    self.all_points_array[i][j][0] = int(
                        (self.all_points_array[i - 3][j][0] + 3 * self.all_points_array[i + 1][j][0]) / 4)
                    self.all_points_array[i][j][1] = int(
                        (self.all_points_array[i - 3][j][1] + 3 * self.all_points_array[i + 1][j][1]) / 4)

        # Draw a black filled box to clear the image.
        draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)

        # now to find the coords of the pixels on the last three rows by lighting up the
        # alternate points on the last row
        i = 63
        for j in range(0, self.width, 2):
            draw.point([(j, i)], fill=255)  # x,y

        disp.image(image_1)
        disp.display()

        camera.start_preview()
        camera.led = False
        time.sleep(2)
        camera.capture('last_row_disp.jpg')
        camera.stop_preview()

        image = 'last_row_disp.jpg'
        self.crop(image, (1020, 620, 1800, 1050), 'last_row_crop.jpg')

        image = 'last_row_crop.jpg'
        img = Image.open(image)
        pixels = list(img.getdata())
        pixels2 = []
        for pixel in pixels:
            total = 0
            for x in pixel:
                total += x
            total = int(total / 3)
            pixels2.append((total, total, total))

        filtered_list = self.filter(140, pixels2)
        img = Image.new('RGB', Image.open(image).size)
        img.putdata(filtered_list)
        img.save('last_row_filter.jpg')

        image = cv2.imread('last_row_filter.jpg')

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.erode(gray, None, iterations=1)

        labels = measure.label(gray, neighbors=8, background=0)
        mask = np.zeros(gray.shape, dtype="uint8")

        # loop over the unique components
        for label in np.unique(labels):
            # if this is the background label, ignore it
            if label == 0:
                continue

            # otherwise, construct the label mask and count the
            # number of pixels
            labelMask = np.zeros(gray.shape, dtype="uint8")
            labelMask[labels == label] = 255

            # if the number of pixels in the component is sufficiently
            # large, then add it to our mask of "large blobs"
            mask = cv2.add(mask, labelMask)

        # find the contours in the mask, then sort them from left to
        # right

        pixel_point = []
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        cnts = contours.sort_contours(cnts)[0]
        # loop over the contours
        for (i, c) in enumerate(cnts):
            # draw the bright spot on the image
            ((cX, cY), radius) = cv2.minEnclosingCircle(c)
            pixel_point.append((int(cY), int(cX)))
            cv2.circle(image, (int(cX), int(cY)), int(radius), (0, 0, 255), 2)

        # show the output image
        pixel_point_sort = sorted(pixel_point, key=lambda z: z[1])

        # missed the filling in the middle parts between the alternate ones code - fixed it in later programs
        for j in range(64):
            self.all_points_array[63][126 - 2 * j][0] = pixel_point_sort[j][0]
            self.all_points_array[63][126 - 2 * j][1] = pixel_point_sort[j][1]
            self.all_points_array[62][126 - 2 * j][0] = (2*pixel_point_sort[j][0] + self.all_points_array[60][126 - 2 * j][0])/3
            self.all_points_array[62][126 - 2 * j][1] = (2*pixel_point_sort[j][1] + self.all_points_array[60][126 - 2 * j][1])/3
            self.all_points_array[61][126 - 2 * j][0] = (pixel_point_sort[j][0] + 2*self.all_points_array[60][126 - 2 * j][0])/3
            self.all_points_array[61][126 - 2 * j][1] = (pixel_point_sort[j][1] + 2*self.all_points_array[60][126 - 2 * j][1])/3

        draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)

        j = 127
        for i in range(0, self.width, 3):
            draw.point([(j, i)], fill=255)  # x,y

        disp.image(image_1)
        disp.display()

        # Now get the pixel data

        camera.start_preview()
        time.sleep(2)
        camera.capture('last_col_disp.jpg')
        camera.stop_preview()
        image = 'last_col_disp.jpg'
        self.crop(image, (1020, 620, 1800, 1050), 'last_col_crop.jpg')
        image = 'last_col_crop.jpg'
        img = Image.open(image)
        pixels = list(img.getdata())
        pixels2 = []
        for pixel in pixels:
            total = 0
            for x in pixel:
                total += x
            total = int(total / 3)
            pixels2.append((total, total, total))

        filtered_list = self.filter(140, pixels2)
        img = Image.new('RGB', Image.open(image).size)
        img.putdata(filtered_list)
        img.save('last_col_filter.jpg')

        image = cv2.imread('last_col_filter.jpg')

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.erode(gray, None, iterations=1)

        labels = measure.label(gray, neighbors=8, background=0)
        mask = np.zeros(gray.shape, dtype="uint8")

        # loop over the unique components
        for label in np.unique(labels):
            # if this is the background label, ignore it
            if label == 0:
                continue

            # otherwise, construct the label mask and count the
            # number of pixels
            labelMask = np.zeros(gray.shape, dtype="uint8")
            labelMask[labels == label] = 255

            # if the number of pixels in the component is sufficiently
            # large, then add it to our mask of "large blobs"
            mask = cv2.add(mask, labelMask)

        # find the contours in the mask, then sort them from left to
        # right

        pixel_point = []
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        cnts = contours.sort_contours(cnts)[0]
        # loop over the contours
        for (i, c) in enumerate(cnts):
            # draw the bright spot on the image
            ((cX, cY), radius) = cv2.minEnclosingCircle(c)
            pixel_point.append((int(cY), int(cX)))
            cv2.circle(image, (int(cX), int(cY)), int(radius), (0, 0, 255), 2)

        # show the output image

        pixel_point_sort = sorted(pixel_point, key=lambda z: z[0])
        for j in range(64):
            if j % 3 == 0:
                self.all_points_array[j][127][0] = pixel_point_sort[int(21 - j/3)][0]
                self.all_points_array[j][127][1] = pixel_point_sort[int(21 - j/3)][1]
            elif j % 3 == 1:
                self.all_points_array[j][127][0] = (2*pixel_point_sort[21 - int((j-1)/3)][0] + pixel_point_sort[21 - int((j+2)/3)][0])/3
                self.all_points_array[j][127][1] = (2*pixel_point_sort[21 - int((j-1)/3)][0] + pixel_point_sort[21 - int((j+2)/3)][1])/3
            elif j % 3 == 2:
                self.all_points_array[j][127][0] = (2*pixel_point_sort[21 - int((j+1)/3)][0] + pixel_point_sort[21 - int((j-2)/3)][0])/3
                self.all_points_array[j][127][1] = (2*pixel_point_sort[21 - int((j+1)/3)][0] + pixel_point_sort[21 - int((j-2)/3)][1])/3

        print(time.time() - start_time)
        camera.close()
예제 #25
0
        camera.close()

        exit(0)


def run():
    """Announce readiness via flite text-to-speech, then idle forever.

    The process blocks in signal.pause(); all further activity is driven by
    the GPIO button callbacks registered at module level.
    """
    # The original built this constant command with an f-string around
    # 'System ready'.__repr__(); the quoting can simply be written inline.
    os.system("flite -voice rms -t 'System ready'")
    signal.pause()


# Load options based on argparser
args = init_parser()

# Set up PiCamera globally so main() doesn't create an instance each time
# (framerate=90 — presumably a high-speed video mode; confirm the chosen
# width/height supports 90 fps on this camera module).
camera = picamera.PiCamera(resolution=(args.width, args.height), framerate=90)

# Set up pins
# NOTE(review): Button looks like gpiozero.Button (pin numbers come from the
# CLI arguments) — confirm against the imports at the top of the file.
start_button = Button(args.start)
stop_button = Button(args.stop)
end_button = Button(args.kill)

# Wire each button press to its handler defined elsewhere in this file.
start_button.when_pressed = main
stop_button.when_pressed = stop
end_button.when_pressed = kill_process

# Log GPS Coordinates
logger = Logger()

print("System is ready.")
예제 #26
0
    def read_with_led(self, num):
        """Check that calibrated display points can still be read with an LED on.

        Randomly selects ``num`` even-numbered (row, col) points, lights them
        on the SSD1306 OLED, photographs the panel with the Pi camera while an
        LED on GPIO 21 is lit, then prints the drawn points next to the points
        recovered from the photo (via ``self.all_points_array``) so the two
        lists can be compared.

        Args:
            num: number of random even-coordinate test points to draw.
        """
        led = LED(21)
        camera = picamera.PiCamera()
        # Full still resolution of the camera — presumably a v1 module
        # (2592x1944); TODO confirm.
        camera.resolution = (2592, 1944)
        camera.start_preview()
        led.on()
        test_list = []

        # Random (row, col) pairs clamped to even coordinates:
        # rows in 0..62, cols in 0..126.
        for r in range(num):
            test_list.append((int(np.random.randint(0, 64, size=1)/2)*2, int(np.random.randint(0, 128, size=1)/2)*2))

        RST = 24  # display reset pin
        # 128x64 display with hardware I2C:
        disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)

        # Initialize library.
        disp.begin()

        # Clear display.
        disp.clear()
        disp.display()

        # Create blank image for drawing.
        # Make sure to create image with mode '1' for 1-bit color.
        self.width = disp.width
        self.height = disp.height
        image_1 = Image.new('1', (self.width, self.height))

        # Get drawing object to draw on image.
        draw = ImageDraw.Draw(image_1)

        # Draw a black filled box to clear the image.
        draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)

        # Light each test point; draw.point takes (x, y), so the (row, col)
        # tuple is swapped here.
        for point in test_list:
            draw.point([(point[1], point[0])], fill=255)

        disp.image(image_1)
        disp.display()

        # Now get the pixel data
        image = 'led.jpg'
        camera.capture(image)
        camera.stop_preview()

        # Crop the photo down to the region that contains the display
        # (hard-coded box matching the fixed camera/display geometry).
        self.crop(image, (1020, 620, 1800, 1050), 'led_crop.jpg')
        image = 'led_crop.jpg'
        img = Image.open(image)
        pixels = list(img.getdata())
        # Grayscale image built from the red channel only.
        red = []
        for point in pixels:
            red.append((point[0], point[0], point[0]))
        img = Image.new('RGB', Image.open('led_crop.jpg').size)
        img.putdata(red)
        img.save('led_crop_red.jpg')
        # led_crop_red is there in order to find whether the signal is 1 or 0
        # Grayscale image built from the blue channel only.
        blue = []
        for point in pixels:
            blue.append((point[2], point[2], point[2]))
        img = Image.new('RGB', Image.open('led_crop.jpg').size)
        img.putdata(blue)
        img.save('led_crop_blue.jpg')
        # using blue for reading the array
        image = 'led_crop_blue.jpg'
        img = Image.open(image)
        pixels = list(img.getdata())

        # Threshold at 120 so only lit pixels survive (self.filter defined
        # elsewhere in this class).
        filtered_list = self.filter(120, pixels)
        img = Image.new('RGB', Image.open(image).size)
        img.putdata(filtered_list)
        img.save('led_blue_filter.jpg')
        img = cv2.imread('led_blue_filter.jpg')

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Erode once to drop single-pixel noise left by thresholding.
        gray = cv2.erode(gray, None, iterations=1)
        # Recover every display point whose calibrated photo coordinate
        # (stored as (y, x) in all_points_array) is still lit.
        result_list = []
        for i in range(64):
            for j in range(128):
                if not (gray[int(self.all_points_array[i][j][0])][int(self.all_points_array[i][j][1])] == 0):
                    result_list.append((i, j))

        print('test_value', test_list,'result_value', result_list)
        camera.close()
예제 #27
0
def logGPSdata(fullpath,subdir,csvfilename,ndx,prev_loc,f,dtraveled,debug):
  """Poll gpsd for a fix; if we've moved far enough, log a CSV row and shoot
  a GPS-Exif-tagged photo.

  Args:
    fullpath: directory that receives the CSV file and the photos.
    subdir: run name, used as the photo filename prefix.
    csvfilename: name of the CSV log file inside fullpath.
    ndx: index of the last photo taken; incremented when a new one is shot.
    prev_loc: (lat, lon) of the previously logged point.
    f: unused here (immediately shadowed by the `with open` below); kept
       for interface compatibility with existing callers.
    dtraveled: minimum distance in feet before a new point is logged.
    debug: when true, echo the logged fields to stdout.

  Returns:
    (cur_loc, ndx): current (lat, lon) and the possibly-incremented index.
  """
  lat = 0.0
  lon = 0.0
  satfix = 0
  sats = 0

  # We must create a new gpsd object each time we call logGPSdata, to flush
  # the buffer and get the latest data.
  gpsd = gps(mode=WATCH_ENABLE|WATCH_NEWSTYLE)

  # Keep looping until we get valid latitude, longitude, and satellite
  # values from gpsd.
  noSats = noCoords = True
  while noSats or noCoords:
    report = gpsd.next()
    if report['class'] == 'SKY' and noSats:
      sats = len(report['satellites'])  # update num of satellites in view
      noSats = False
    elif report['class'] == 'TPV' and noCoords:
      (lat,lon,satfix) = latlonfix(report)
      # Yeah, this won't work if standing exactly on the prime meridian or
      # the equator (lat or lon exactly 0.0 is treated as "no coords").
      if lon and lat:
        saveReport = report
        noCoords = False
      elif not satfix:
        time.sleep(0.5)  # We don't have a satellite fix so slow our roll

  # Derive the human-readable fields from the saved TPV report.
  utc = datetime.strptime(str(getattr(saveReport,'time','')), '%Y-%m-%dT%H:%M:%S.%fZ').replace(tzinfo=timezone('UTC'))
  central = utc.astimezone(timezone('US/Central'))
  date_str = central.strftime("%b %d %Y")
  time_str = central.strftime("%I:%M:%S%p %Z")
  lat_ref = 'S' if lat < 0 else 'N'
  lon_ref = 'W' if lon < 0 else 'E'
  speed_mps = strtofloat(getattr(saveReport,'speed','0.0'))
  speed_mph = round(speed_mps*2.23694,1)   # meters/sec -> miles/hour
  alt_meters = strtofloat(getattr(saveReport,'alt','0.0'))
  alt_feet = round(alt_meters*3.28084,1)   # meters -> feet
  cur_loc = (lat, lon)
  temp = read_temp() # temp is an array containing both fahrenheit and celcius values
  temp_f = round(temp[1],1)

  # Only log a data point if we've traveled more than X feet
  if distance.distance(prev_loc, cur_loc).feet > dtraveled:
    ndx += 1
    picname = subdir + '-' + str(ndx) + '.jpg'

    # print some output to the screen if debug is on
    # (single formatted print; works under both Python 2 and 3)
    if debug:
      print('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (
          date_str, time_str, lat, lon, speed_mph, alt_feet, temp_f, sats, picname))

    # BUG FIX: the row previously had no trailing newline, so successive
    # CSV rows ran together on one line.
    with open(fullpath + '/' + csvfilename,'a') as f:
      f.write('%s,%s,%s,%s,%s,%s,%s,%s,%s\n' % (date_str, time_str, lat, lon, speed_mph, alt_feet, temp_f, sats, picname))

    # Fire up the Pi Camera then take a picture!
    with picamera.PiCamera() as camera:
      camera.resolution = (3280, 2464)
      camera.rotation = 270
      camera.start_preview()
      # Camera warm-up time
      time.sleep(2)

      # Apply GPS Exif tags (values are Exif rational "a/b" strings)
      camera.exif_tags['GPS.GPSLatitude'] = '%d/1,%d/1,%d/100' % dec2dms(lat)
      camera.exif_tags['GPS.GPSLatitudeRef'] = lat_ref
      camera.exif_tags['GPS.GPSLongitude'] = '%d/1,%d/1,%d/100' % dec2dms(lon)
      camera.exif_tags['GPS.GPSLongitudeRef'] = lon_ref
      camera.exif_tags['GPS.GPSAltitude'] = '%d/100' % int(100 * alt_meters)
      camera.exif_tags['GPS.GPSAltitudeRef'] = '0'
      camera.exif_tags['GPS.GPSSpeed'] = '%d/1000' % int(1000 * speed_mps)
      camera.exif_tags['GPS.GPSSpeedRef'] = 'M'
      camera.exif_tags['GPS.GPSSatellites'] = str(sats)
      camera.exif_tags['GPS.GPSTimeStamp'] = '%s/1,%s/1,%s/1' % (utc.strftime('%H'),utc.strftime('%M'),utc.strftime('%S'))
      camera.exif_tags['GPS.GPSDateStamp'] = utc.strftime('%Y:%m:%d')

      camera.capture(fullpath + '/' + picname)

  return (cur_loc,ndx)
예제 #28
0
        elif arg in ['-s', '--save-file']:
            if i + 1 >= len(arguments):
                print('No save file has been given.')
                print('Using the default one : ', save_folder)
            else:
                save_folder = arguments[i + 1]
            i += 1
        i += 1

    if init_imu:
        bno = initialize_imu(5)

    if init_motor:
        initialize_motor(xacc_threshold)

    # cam setup
    if controls != 'autopilot':
        print('Setting up the pi camera')
        cam = picamera.PiCamera(framerate=60)
        cam.resolution = (250, 150)
        cam_output = picamera.array.PiRGBArray(cam, size=(250, 150))
        stream = cam.capture_continuous(cam_output,
                                        format="rgb",
                                        use_video_port=True)
        print('Pi camera set up')

    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    main()
# TODO: test automatic reconnection
# tcp/h264://192.168.1.70:8000/ — URL for viewing the stream in VLC

import socket
import time
import picamera

# One global camera instance, shared by every client connection.
camera = picamera.PiCamera()
camera.resolution = (1280, 720)
camera.framerate = 6

# Listen on all interfaces; clients connect on port 8000 (e.g. via VLC).
server_socket = socket.socket()
server_socket.bind(('0.0.0.0', 8000))
server_socket.listen(0)

while True:
    conn = server_socket.accept()[0]
    print('conn')

    while True:
        try:
            camera.stop_recording()
            print('STOP')
            connection.close()
            print('CLOSE')
        except Exception:
            try:
                with conn.makefile('wb') as connection:
                    camera.start_recording(connection, format='h264')
                    print('start')
                    camera.wait_recording(60)
예제 #30
0
 def capture_frame():
     """Capture one still image named by the current local time.

     The frame is written to ``img_base + 'YYYY-MM-DD_HH_MM.jpg'``
     (``img_base`` is a module-level path prefix defined elsewhere).
     """
     # Format the timestamp in-process instead of shelling out to `date`
     # through the Python-2-only `commands` module; time.strftime uses
     # local time, matching the behavior of the `date` command.
     stamp = time.strftime('%Y-%m-%d_%H_%M')
     with picamera.PiCamera() as cam:
         time.sleep(2)  # camera warm-up before capture
         cam.capture(img_base + stamp + '.jpg')