Esempio n. 1
0
def snapshot():
    """Capture one frame from the camera, save it as 'test.jpg', and
    re-render the index page.

    Returns:
        The rendered 'index.html' template.
    """
    cam = Camera()
    photo = cam.get_frame()
    # Context manager guarantees the handle is closed even if write() raises
    # (the original leaked the file object on failure).
    with open('test.jpg', 'wb+') as file:
        file.write(photo)
    return render_template('index.html')
Esempio n. 2
0
def gamepad_data():
    """Handle one gamepad-state POST: drive the robot, move the camera,
    and trigger photo / video-record actions from the face buttons.

    Expects JSON like ``{'bottons': [...], 'axes': [...]}`` ('bottons' is
    the key the client actually sends).  Returns a plain-text Response
    naming whatever action was triggered (possibly empty).
    """
    data = request.get_json()
    bottons = data['bottons']
    axis = data['axes']
    # Axes 0-1 are the drive stick; invert Y so pushing up means forward.
    bot_move = axis[:2]
    bot_move[1] = -bot_move[1]
    # Axes 2-3 are the camera stick (read directly from axis[] below).
    camera_move = axis[2:]
    b, a, y, x = bottons[:4]
    # Rotate the stick vector 45 degrees into the two wheel axes (u, v),
    # normalising by sqrt(2) to keep magnitudes within [-1, 1].
    u = bot_move[0] + bot_move[1]
    v = -bot_move[0] + bot_move[1]
    u /= 2**0.5
    v /= 2**0.5
    print(axis)
    # Dead-zone: ignore stick deflections smaller than 0.05.
    if abs(u) < 0.05: u = 0
    if abs(v) < 0.05: v = 0
    if abs(axis[2]) < 0.05: axis[2] = 0
    if abs(axis[3]) < 0.05: axis[3] = 0
    # Publish the new axis values to the motor thread and wake it.
    bot.axis = [u, v, axis[2], -axis[3]]
    bot.event.set()
    global last
    msg = ""
    # 'a' button: take a photo, rate-limited to one per second via `last`.
    if a and time.time() - last > 1:
        last = time.time()
        Camera().shoot()
        msg = "Shoot"
    global record
    # 'x' starts recording, 'y' stops it; `record` tracks current state.
    if x and not record:
        msg = "Start Record"
        record = True
        Camera().start_record()
    elif y and record:
        msg = "End Record"
        Camera().end_record()
        record = False
    return Response(msg)
Esempio n. 3
0
def change_stream_method(option=''):
    """Switch the video stream implementation between PiCamera and OpenCV.

    Args:
        option: 'OpenCV' or 'PiCamera'.  When empty, the value is read
            from the request's 'stream_method' query argument
            (default 'PiCamera').
    """
    # The chosen driver module is imported below and rebound onto the
    # module-level Camera name, hence the global declaration.
    global Camera
    # BUG FIX: the original compared with "is ''" — identity, not equality —
    # which is implementation-dependent for strings; use == instead.
    if option == '':
        option = request.args.get('stream_method', 'PiCamera')

    if option == 'OpenCV':
        print('Change the stream method to OpenCV')
        # Only swap drivers when the current one differs.
        if Camera.stream_method == 'PiCamera':
            Camera.stop_stream()
            from camera_pi_cv import Camera
            Camera.stream_method = 'OpenCV'
            Camera.start_stream()
            time.sleep(0.1)  # give the new stream a moment to start

    elif option == 'PiCamera':
        print('Change the stream method to Picamera')
        # Only swap drivers when the current one differs.
        if Camera.stream_method == 'OpenCV':
            Camera.stop_stream()
            from camera_pi import Camera
            Camera.stream_method = 'PiCamera'
            Camera.start_stream()
            time.sleep(0.1)  # give the new stream a moment to start
Esempio n. 4
0
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    # gen() yields one multipart JPEG chunk per camera frame; the browser
    # replaces the displayed image at every boundary.
    frame_stream = gen(Camera())
    return Response(frame_stream,
                    mimetype='multipart/x-mixed-replace; boundary=frame')
Esempio n. 5
0
def video_feed():
    """Serve the MJPEG stream, but only to the authenticated user 'stan'."""
    is_stan = "user" in session and escape(session["user"]) == "stan"
    if not is_stan:
        # Unknown visitor: mask the session user and show the login page.
        session["user"] = "******"
        return render_template("login.html")
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def _get_float_pref(name, default=0.0):
    """Read preference *name* as a float; on any failure, reset the pref
    to *default* and return *default* (preserves the original best-effort
    behaviour, but without a bare except)."""
    try:
        return float(prefs.get_pref(name))
    except Exception:
        prefs.set_pref(name, default)
        return default


def autopilot_loop():
    """Run one autopilot step: grab a camera frame, feed telemetry through
    telemetry(), and store the resulting control values back into prefs.

    Does nothing when the decoded frame is all zeros.
    """
    frame = decodeImage(Camera().get_frame())
    if frame.any():
        telemetry_data = {
            "accel_val_auto": _get_float_pref("accel_val_auto"),
            "steering_angle_auto": _get_float_pref("steering_angle_auto"),
            "speed": _get_float_pref("speed"),
        }

        accel_val, steering_angle = telemetry(telemetry_data, frame)

        # NOTE(review): the steering angle returned by telemetry() is
        # deliberately discarded here (looks like a debug override kept
        # from the original) — confirm before removing.
        steering_angle = 0

        prefs.set_pref("accel_val_auto", accel_val)
        prefs.set_pref("steering_angle_auto", steering_angle)
Esempio n. 7
0
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    # Stream geometry and timing are all overridable via the query string.
    width = request.args.get('w', 320, int)
    height = request.args.get('h', 240, int)
    frame_rate = request.args.get('fps', 10, int)
    start_delay = request.args.get('delay', 5, int)
    camera = Camera(width, height, frame_rate, start_delay)
    return Response(gen(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
Esempio n. 8
0
def shoot():
    """Take a still photo, rate-limited to at most one per second."""
    global last
    now = time.time()
    if now - last <= 1:
        # Less than a second since the previous shot — refuse.
        return "too fast"
    last = now
    Camera().shoot()
    return "Complete"
Esempio n. 9
0
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    mjpeg_mimetype = 'multipart/x-mixed-replace; boundary=frame'
    return Response(gen(Camera()), mimetype=mjpeg_mimetype)


# if __name__ == '__main__':
#     app.run(host='0.0.0.0', port=8000, debug=True, threaded=True)
Esempio n. 10
0
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    # gen() is a generator yielding one multipart JPEG chunk per frame.
    # Flask's Response streams any iterator it is given, so the connection
    # stays open and the client replaces the image at every boundary —
    # that is what makes the <img> tag behave like video.
    frames = gen(Camera())
    return Response(frames,
                    mimetype="multipart/x-mixed-replace; boundary=frame")
Esempio n. 11
0
def gen(Camera):
    """Video streaming generator function.

    Pulls frames from *Camera* forever, wrapping each one as a part of a
    multipart/x-mixed-replace (MJPEG) HTTP body.
    """
    while True:
        frame = Camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
        # NOTE: the original also decoded every frame with cv2 into a
        # 1x115200 float array after the yield, but the result was never
        # used anywhere (and np.fromstring is deprecated), so that
        # expensive per-frame dead code has been removed.
def video_feed():
    """Stream the camera only while the arm is inactive.

    Reads the most recent arm status from the database; status 0 means
    the arm is idle and streaming is allowed.

    Returns:
        An MJPEG streaming Response, or ("ERROR", 500) when the arm is
        active or no status rows exist.
    """
    conn = sqlite3.connect(DB_NAME)
    try:
        curr = conn.cursor()
        curr.execute(
            "SELECT status FROM armstatus ORDER BY timestmp DESC LIMIT 1")
        # fetchone avoids the IndexError the original raised on an empty
        # table (fetchall()[0][0]).
        row = curr.fetchone()
    finally:
        # Always release the DB handle, even when the query fails.
        conn.close()
    if row is not None and row[0] == 0:
        return Response(gen(Camera()),
                        mimetype='multipart/x-mixed-replace; boundary=frame')
    return "ERROR", 500
Esempio n. 13
0
def auto_focus():
    """Start, finish, or report the camera auto-focus procedure.

    Query arg 'command': 'start' switches to the OpenCV stream and kicks
    off the focus thread; 'done' marks completion; anything else returns
    the current status as JSON.
    """
    # swap to opencv and then start the auto focusing
    # NOTE: Do we need to use the URL rather than calling the function directly
    command = request.args.get('command', '')
    # Lazily initialise the status attribute on first use.
    if not hasattr(Camera, 'auto_focus_status'):
        Camera.auto_focus_status = 'Waiting for auto focusing'

    if command == 'start':
        # The OpenCV stream is required for focus measurement.
        change_stream_method(option='OpenCV')
        initialise_serial_connection()
        Camera.start_auto_focus_thread()
        Camera.auto_focus_status = 'auto focusing...'
        return render_template('index.html')
    if command == 'done':
        Camera.auto_focus_status = 'auto focus completed'
        return render_template('index.html')
    # Any other command just reports the current status.
    return jsonify({'auto_focus_status': Camera.auto_focus_status})
Esempio n. 14
0
def settings_io():
    ''' swap between opencv and picamera for streaming

    Query args:
        zoom_value:    when non-empty, change the camera zoom.
        config_update: 'true' re-applies the camera settings.
        stop:          'true' stops the stream.

    Returns:
        JSON with the stream method, available Arduino boards and the
        default LED colour from config_picamera.yaml.
    '''
    # set default value for the stream_method on first use
    try:
        Camera.stream_method
    except AttributeError:
        Camera.stream_method = 'PiCamera'

    zoom_value = request.args.get('zoom_value', '')
    config_update = request.args.get('config_update', '')
    stop_flag = request.args.get('stop', '')

    # BUG FIX: the original used "is not ''" — identity, not equality —
    # which is implementation-dependent for strings.
    if zoom_value != '':
        # only change zoom when passing an arg
        Camera.change_zoom(zoom_value)
    if config_update == 'true':
        Camera.update_camera_setting()
    if stop_flag == 'true':
        Camera.stop_stream()

    # safe_load avoids arbitrary-object construction from the YAML file;
    # yaml.load without an explicit Loader is deprecated/unsafe.
    with open('config_picamera.yaml') as config_file:
        config = yaml.safe_load(config_file)
        default_LED_RGB = config['default_LED_RGB']
        print(default_LED_RGB)

    settings = {
        'stream_method': Camera.stream_method,
        'available_arduino_boards': Arduinos.available_arduino_boards,
        'default_LED_RGB': default_LED_RGB,
        }

    return jsonify(settings)
Esempio n. 15
0
def parse_filename_and_acquire_data(filename, method):
    """Expand timestamp placeholders in *filename*, then acquire an image.

    The filename consists of a user-defined value plus a time stamp:
    'arduino_time' (seconds since the Arduino booted, HH:MM:SS) or
    'raspberry_pi_time' (the Pi's absolute clock) is replaced by the
    corresponding time and a _T<temp> suffix.

    Args:
        filename: user-supplied stem, possibly containing a placeholder.
        method: 'normal', 'high_res', or 'waterscope'.
    """
    # synchronise the arduino_time
    if 'arduino_time' in filename:
        # HH:MM:SS format
        time_value_formatted, temp_value = parse_serial_time_temp()
        # allowing other appendix
        filename = str(time_value_formatted.time()) + '_T{}'.format(temp_value) + filename.replace('arduino_time', '')
    # synchronise the raspberry pi time
    # to set pi's time: sudo date -s '2017-02-05 15:30:00'
    elif 'raspberry_pi_time' in filename:
        time_value_formatted, temp_value = parse_serial_time_temp()
        now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
        # BUG FIX: this assignment previously sat OUTSIDE the elif, so it
        # ran for every filename and raised NameError whenever 'now' and
        # 'temp_value' had never been assigned (the DEBUG note flagged it).
        filename = now + '_T{}'.format(temp_value) + filename.replace('raspberry_pi_time', '')

    if method == 'normal':
        Camera.take_image(resolution='normal', filename=filename)
    elif method == 'high_res':
        Camera.take_image(resolution='high_res', filename=filename)
    elif method == 'waterscope':
        # waterscope method requires LED to be only on when taking images
        Arduinos.serial_controllers['waterscope'].serial_write('led_on', parser='waterscope')
        time.sleep(2)
        Camera.take_image(resolution='high_res', filename=filename)
        time.sleep(2)
        Arduinos.serial_controllers['waterscope'].serial_write('led_off', parser='waterscope')
Esempio n. 16
0
def path_finding():
    """Drive the robot along the precomputed map path.

    First walks map.path once, recording segment lengths and the turn
    direction at each intermediate node.  Then loops forever: reads
    camera frames, and each time a card (waypoint marker) is detected in
    'auto' mode, stops, turns toward the next segment, and continues.
    """
    global map, last_card_read, motor, heading, card_detected
    dist_rec = []
    direc_rec = []
    camera = Camera('auto')
    # Pre-walk the planned path from the start node, collecting edge
    # lengths and turn directions.
    node = map.find_node(map.path['header']['start'])
    while node.next_node_id:
        dist_rec.append(node.get_edge_len(node.next_node_id))
        if node.prev_node_id:
            direc_rec.append(node.get_direction())
            print(node.id)
        node = map.find_node(map.path[node.id]['next'])

    update_heading()
    current_node_idx = 0
    while True:
        frame, cmd = camera.get_frame()
        if mode == 'auto' and card_detected:
            print('card detected')
            # All recorded turns consumed -> destination reached.
            if current_node_idx == len(direc_rec):
                print('complete')
                return
            motor.stop()
            time.sleep(0.1)
            update_heading()
            # New target heading, wrapped back into [0, 360).
            goal_ang = heading + direc_rec[current_node_idx]
            if goal_ang > 360:
                goal_ang -= 360
            elif goal_ang < 0:
                goal_ang += 360
            print('goal ang:', goal_ang)
            heading = goal_ang
            card_detected = False
            current_node_idx += 1
            # presumably 'l' is the turn command — TODO confirm meaning
            cmd = 'l'
            last_card_read = datetime.now()
        elif mode == 'auto' or mode == 'debug':
            cmd = convert_cmd(cmd)
        if mode == 'auto':
            cmd_to_motor(cmd)
Esempio n. 17
0
def acquire_data():
    """Dispatch an image / timelapse / video acquisition from query args.

    Query args:
        filename: user-defined stem; a timestamp may be spliced in later.
        option: '' or 'normal' / 'high_res' for a single image;
            '<method>_timelapse_<seconds>' to start a timelapse thread;
            'stop_timelapse'; 'start_recording_video';
            'stop_recording_video'.

    Returns:
        The rendered 'index.html' template.
    """
    # the filename is consist of user defined value and the time stamp           
    filename = request.args.get('filename', '')

    # the option determines whether it is the video recording, high_resolution image, or timelapse
    option = request.args.get('option', '')
    # Note: args: image capture - option='normal' or 'high_res'
    if option == '' or option == 'normal':
        parse_filename_and_acquire_data(filename, 'normal')
    elif option == 'high_res':
        parse_filename_and_acquire_data(filename, 'high_res')

    elif 'timelapse_' in option:
        # this method allows the timelapse to be taken when the browser is closed, but the terminal needs to be open
        # NOTE: args: high_res_timelapse_10, normal_timelapse_10, waterscope_timelapse_10
        # parse the timelapse_interval and method from the option arg 
        print(option)
        timelapse_interval = int(option.split('timelapse_')[1])
        # the method "waterscope" or "normal" is string before _timelapse
        method = option.split('_timelapse_')[0]

        # NOTE: a flag that help to terminate the threading later
        Camera.stop_timelapse = False
        timelapse_thread = threading.Thread(target=take_timelapse, args=[timelapse_interval, method])
        # DEBUG: Check whether the daemon is needed
        timelapse_thread.daemon = True
        timelapse_thread.start()
    
    elif option == 'stop_timelapse':
        # a flag that will terminate all the timelapse
        Camera.stop_timelapse = True

    # video capture
    elif option == 'start_recording_video':
        Camera.video_recording_thread(filename=filename, recording_flag=True)
    elif option == 'stop_recording_video':
        Camera.video_recording_thread(recording_flag=False)

    return render_template('index.html')
Esempio n. 18
0
def stop_streams():
    """Stop the preview and live camera streams, then redirect home."""
    global cam
    # Lazily create the camera if no stream was ever started.
    cam = cam if cam is not None else Camera(app.config)
    for stop in (cam.stop_preview_stream, cam.stop_live_stream):
        stop()
    return render_template("redirect.html", redirect_to=url_for("home"))
def switch_camera( cam_new ):
  """Switch the active camera driver.

  Args:
      cam_new: 'none' for the stub CameraBase driver, 'rpi' for the
          Raspberry Pi Camera driver.

  Replaces the global ``cam`` with the requested driver and closes the
  previous one.  Unknown names are now ignored — previously they closed
  the active camera without installing a replacement.
  """
  global cam
  global cam_selected
  oldcam = cam

  if cam_new != cam_selected:
    if cam_new == 'none':
      cam = CameraBase( exit_event, socketio )
    elif cam_new == 'rpi':
      cam = Camera( exit_event, socketio )
    else:
      # BUG FIX: don't close (and lose) the running camera when the
      # requested driver name is unrecognised.
      return
    oldcam.close()

  cam_selected = cam_new
Esempio n. 20
0
class CountThread(Thread):
    """Background thread that pushes camera frames to socket clients."""

    def __init__(self):
        super(CountThread, self).__init__()
        # Target roughly 30 frames per second.
        self.delay = 1 / 30
        self.cam = Camera()

    def get_data(self):
        """
		Get data and emit to socket
		"""
        # Runs forever: grab a frame, emit it to the /client namespace,
        # then sleep until the next frame is due.
        while True:
            socketio.emit('imageSend',
                          {'buffer': self.cam.get_frame()},
                          namespace="/client")
            sleep(self.delay)

    def run(self):
        """Default run method"""
        self.get_data()
Esempio n. 21
0
    secs_rem = 2
    tmr = None
    requestId = None
    apiEndpoint = None
    apiAccessToken = None
    image = None


# import camera driver
##if os.environ.get('CAMERA'):
#    Camera = import_module('camera_' + os.environ['CAMERA']).Camera
#else:
#    from camera import Camera

# Raspberry Pi camera driver; a single shared instance serves all requests.
from camera_pi import Camera
camera = Camera()

# Path the captured still is written to before being served.
image_file = 'image_file.png'

# this sound is not guaranteed to be available.  replace sound.  use ffmpeg to translate to Alexa requirements
camera_sound_url = 'https://videotalker.blob.core.windows.net/videoblob/camera_sound_conv.mp3'

# Raspberry Pi camera module (requires picamera package)
# from camera_pi import Camera

# Flask app with the Alexa skill (Flask-Ask) mounted at the site root.
app = Flask(__name__)
ask = Ask(app, "/")

# Log everything to stderr at DEBUG level.
log = logging.getLogger()
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
Esempio n. 22
0
def gen(Camera):
    """Video streaming generator function.

    Pulls frames from *Camera* forever, wrapping each one as a part of a
    multipart/x-mixed-replace (MJPEG) HTTP body.
    """
    boundary = b'--frame\r\n'
    header = b'Content-Type: image/jpeg\r\n\r\n'
    while True:
        yield boundary + header + Camera.get_frame() + b'\r\n'
Esempio n. 23
0
from flask import Flask, render_template, Response

from labthings.server.quick import create_app
from labthings.server.find import find_component
from labthings.server.view import PropertyView

# import camera driver
from camera_pi import Camera
from views import MjpegStream, SnapshotStream

# Set root logger level
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

print("Creating Lab Thing...")
# Build the LabThings app wrapping Flask, advertising the camera as a
# "Thing" of type org.raspberrypi.camera.
# NOTE(review): 'Pi Camers' in the description looks like a typo for
# 'Pi Camera' — confirm before changing the published metadata.
app, labthing = create_app(__name__,
                           title="Pi Camera",
                           description="Thing for Pi Camers",
                           types=["org.raspberrypi.camera"],
                           version="0.1.0")

# Register the shared camera component plus MJPEG and snapshot endpoints.
labthing.add_component(Camera(), "org.raspberrypi.camera")

labthing.add_view(MjpegStream, "/mjpeg")
labthing.add_view(SnapshotStream, "/still")

if __name__ == "__main__":
    print("Starting server...")
    from labthings.server.wsgi import Server
    # host="::" binds IPv6 (and usually IPv4 too); zeroconf advertises
    # the service over mDNS.
    Server(app).run(host="::", port=5000, debug=False, zeroconf=True)
Esempio n. 24
0
def image():
    """Returns a single current image for the webcam"""
    single_frame = gen2(Camera())
    return Response(single_frame, mimetype='image/jpeg')
Esempio n. 25
0
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    mjpeg_type = 'multipart/x-mixed-replace; boundary=frame'
    return Response(gen(Camera()), mimetype=mjpeg_type)
Esempio n. 26
0
from flask import Flask, request, render_template, redirect, url_for, Response, current_app
from camera_pi import Camera
from multiprocessing import Process
import time
from threading import Thread
import sys

app = Flask(__name__)

# Watchdog / session state shared between the server thread and timers.
timeOut = 1200  # seconds before access is revoked
accessGranted = False
startTime = time.time();
killall = False  # set True to stop the background timer loop
camera = Camera()
userInputReceived = False
server = None

# Blocking Flask server; reloader disabled so it can run in a thread.
def run_server():
	app.run(host = '0.0.0.0',use_reloader = False, debug = False)

def accessTimer():
	global timeOut
	global accessGranted
	global startTime
	global killall
	print "Access Timer Started"
	while(not(killall)):
		time.sleep(0.5)
		elapsedTime = time.time()-startTime
		print 'Next Iteration, killall',killall, elapsedTime, accessGranted
		if (elapsedTime>timeOut) and accessGranted:
Esempio n. 27
0
def index(cmd=None):
    """Video streaming home page.

    With cmd='image', sends the current camera frame to the Microsoft
    Computer Vision 'analyze' API and shows a caption, plus any detected
    celebrities or landmarks.  With cmd='word', runs OCR on the frame
    and translates the recognised text to Chinese.  Any other cmd just
    renders the page with an empty result.
    """
    result = ''
    camera = Camera()
    if cmd == 'image':
        frame = camera.get_frame()
        # Describe the image via the Computer Vision 'analyze' endpoint.
        conn = http.client.HTTPSConnection(
            'eastasia.api.cognitive.microsoft.com')
        conn.request('POST', "/vision/v1.0/analyze?%s" % analyze_params, frame,
                     cv_headers)
        response = conn.getresponse()
        data = response.read()
        conn.close()
        dec_data = json.loads(data.decode('utf-8'))
        result_list = []
        # The first caption is the API's best description of the scene.
        caption = dec_data['description']['captions'][0]['text']
        result_list.append(caption)
        categories = dec_data['categories'] if 'categories' in dec_data else []
        c_detail = {}
        l_detail = {}
        # Pull celebrity details from people_ categories and landmark
        # details from outdoor_/building_ categories, when present.
        for cat in categories:
            if cat['name'] == 'people_':
                c_detail = cat['detail'] if 'detail' in cat else {}
            elif cat['name'] == 'outdoor_' or cat['name'] == 'building_':
                l_detail = cat['detail'] if 'detail' in cat else {}
        if c_detail:
            celebrities = []
            for cel in c_detail['celebrities']:
                celebrities.append(cel['name'])
            if celebrities:
                result_list.append(' '.join(celebrities))
        elif l_detail:
            landmarks = []
            for lan in l_detail['landmarks']:
                landmarks.append(lan['name'])
            if landmarks:
                result_list.append(' '.join(landmarks))

        # result = "{}".format(dec_data['description']['captions'][0]['text'])
        result = '\n'.join(result_list)
    elif cmd == 'word':
        frame = camera.get_frame()
        # Run OCR on the frame via the Computer Vision 'ocr' endpoint.
        conn = http.client.HTTPSConnection(
            'eastasia.api.cognitive.microsoft.com')
        conn.request('POST', "/vision/v1.0/ocr?%s" % ocr_params, frame,
                     cv_headers)
        response = conn.getresponse()
        data = response.read()
        conn.close()
        dec_data = json.loads(data.decode('utf-8'))
        words_list = []
        # OCR output is nested regions -> lines -> words; flatten each
        # line into one space-joined string.
        for big_box in dec_data['regions']:
            for small_box in big_box['lines']:
                tmp = []
                for words in small_box['words']:
                    tmp.append(words['text'])
                words_list.append(' '.join(tmp))
        result = '\n'.join(words_list) if len(
            words_list) != 0 else 'There are no words in the image.'
        # Translate the recognised text to Chinese ('zh').
        tl_params = urllib.parse.urlencode({
            # Request parameters
            'text': result,
            'to': 'zh',
        })
        conn = http.client.HTTPConnection('api.microsofttranslator.com')
        conn.request('GET',
                     "/V2/Http.svc/Translate?%s" % tl_params,
                     headers=tl_headers)
        response = conn.getresponse()
        tl_data = response.read()
        conn.close()
        # The translator returns XML; strip the wrapping <string> element
        # rather than parsing it.
        tl_data = tl_data.replace(
            b'<string xmlns="http://schemas.microsoft.com/2003/10/Serialization/">',
            b'')
        tl_data = tl_data.replace(b'</string>', b'')
        dec_tl_data = tl_data.decode('utf-8')
        result = dec_tl_data
    return render_template('index.html', result=result)
Esempio n. 28
0
def snapshot():
    """Stop the preview, take a still, and return it as JSON."""
    Camera.StopPreview()
    print("I AM GOING TO TAKE A PICTURE BE WARNED")
    picture = pfun.picture()
    return jsonify({'snapshot': picture})
Esempio n. 29
0
def index():
    """Video streaming home page."""
    # Make sure the camera stream and serial link are live before the
    # page (which embeds the stream) is rendered.
    Camera.start_stream()
    initialise_serial_connection()
    return render_template('index.html')
Esempio n. 30
0
def gen(camera):
    """Yield an endless MJPEG stream of frames from *camera*.

    Each yielded chunk is one part of a multipart/x-mixed-replace body.
    """
    prefix = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        yield prefix + camera.get_frame() + b'\r\n'


@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    # Kick the camera into streaming mode before handing frames to Flask.
    camera.stream()
    body = gen(camera)
    return Response(body, mimetype='multipart/x-mixed-replace; boundary=frame')


@app.route('/<cmd>')
def cmd(cmd=None):
    """Dispatch a URL path segment to the matching controller method.

    e.g. GET /forward calls controller.forward().  Unknown commands
    raise (AttributeError) and surface as a server error, matching the
    original's re-raise behaviour.
    """
    # SECURITY FIX: the original ran eval("controller.{}()".format(cmd))
    # on the raw URL segment, allowing arbitrary code execution.  getattr
    # restricts this to calling an existing attribute of the controller.
    getattr(controller, cmd)()

    return "{}: success".format(cmd), 200, {'Content-Type': 'text/plain'}

if __name__=='__main__':
    # Shared camera instance used by the routes above; threaded server so
    # the blocking MJPEG generator doesn't starve other requests.
    camera = Camera()
    app.run(host='0.0.0.0', threaded=True, port=5000)
    # Runs only after app.run() returns, i.e. on server shutdown.
    camera.shutdown()
Esempio n. 31
0
def getStatus():
    """Provide some information about this rover."""
    status = {'camera': Camera.status(), 'rover': rover.status()}
    return json.dumps(status), 200
Esempio n. 32
0
def video_feed():
    """Stream the camera as video by replacing one image with the next.

    The 'multipart/x-mixed-replace' mimetype keeps the connection open
    and makes the browser swap in each new JPEG frame so quickly that
    the sequence looks like video.
    """
    frames = gen(Camera())
    return Response(frames,
                    mimetype='multipart/x-mixed-replace; boundary=frame')
Esempio n. 33
0
def video_feed():
    """Stream MJPEG frames from the camera.

    NOTE: the original read request.args.get('videoState') into a local
    that was never used; the read has no side effects, so it was removed.
    """
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
import picamera
import RPi.GPIO as GPIO
from gpiozero import Robot
from distance_sensor import DistanceSensor
# Motor GPIO pin assignments (BCM numbering).
LEFT_FRONT_MOTOR = 5
LEFT_REAR_MOTOR = 26
RIGHT_FRONT_MOTOR = 13
RIGHT_REAR_MOTOR = 6
# NOTE(review): Robot(left=(13,6), right=(5,26)) wires the RIGHT_* pins
# as 'left' and vice versa — confirm against the hardware wiring.
robby = Robot(left=(13,6), right=(5,26))
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(LEFT_FRONT_MOTOR, GPIO.OUT)
GPIO.setup(LEFT_REAR_MOTOR, GPIO.OUT)
GPIO.setup(RIGHT_FRONT_MOTOR, GPIO.OUT)
GPIO.setup(RIGHT_REAR_MOTOR, GPIO.OUT)
# Camera, ultrasonic sensor, and the socket used to push frames and
# distance readings to the host at 192.168.137.1:8000.
camera = Camera()
camera.initialize()
frame = camera.take_frame()
distance_sensor = DistanceSensor()
client_socket = socket.socket()
client_socket.connect(('192.168.137.1', 8000))
connection = client_socket.makefile('rwb')

try:
    
    
    while True:
        frame = camera.take_frame()
        size = len(frame)
        distance = distance_sensor.distance()
        connection.write(struct.pack('<L', int(distance)))
Esempio n. 35
0
def video():
    """Video streaming route. Put this in the src attribute of an img tag."""
    # FIXES: the docstring previously followed a statement, so it was a
    # plain (discarded) string literal rather than the function's doc;
    # and request.args.get('random') was read into an unused local (a
    # side-effect-free cache-busting arg), so the read was removed.
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
Esempio n. 36
0
def snapshot():
    """Stream a single snapshot frame as a multipart response."""
    # The original had two string-literal statements AFTER the return —
    # unreachable leftovers of an older implementation:
    #   takeSnapshot()
    #   return send_file('snapshots/snapshot.jpg', mimetype='image/jpg')
    # They have been removed.
    return Response(snap(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
Esempio n. 37
0
def video_feed():
    """Serve the camera as a multipart MJPEG stream."""
    frames = gen(Camera())
    return Response(frames,
                    mimetype='multipart/x-mixed-replace; boundary=frame')
Esempio n. 38
0
def video_stream():
    """Serve camera frames as a continuous MJPEG stream."""
    mime = 'multipart/x-mixed-replace; boundary=frame'
    return Response(generate_frame(Camera()), mimetype=mime)