def gamepad_data():
    """Handle a gamepad state POST: drive the bot and map buttons to
    camera actions (A = snapshot, X = start recording, Y = stop recording).

    Expects JSON with keys 'bottons' (sic — client protocol, do not fix
    server-side only) and 'axes'. Returns a short status message
    describing any camera action taken (empty string otherwise).
    """
    global last, record

    data = request.get_json()
    buttons = data['bottons']
    axes = data['axes']

    # Left stick drives the bot; invert Y so pushing forward is positive.
    stick_x = axes[0]
    stick_y = -axes[1]
    b, a, y, x = buttons[:4]

    # Rotate the stick vector 45 degrees into the bot's wheel frame and
    # scale by 1/sqrt(2) so magnitudes stay within [-1, 1].
    u = (stick_x + stick_y) / 2 ** 0.5
    v = (-stick_x + stick_y) / 2 ** 0.5

    def _deadzone(value, threshold=0.05):
        # Suppress small stick noise around the neutral position.
        return 0 if abs(value) < threshold else value

    u = _deadzone(u)
    v = _deadzone(v)
    axes[2] = _deadzone(axes[2])
    axes[3] = _deadzone(axes[3])

    # Hand the new axis state to the bot thread and wake it up.
    bot.axis = [u, v, axes[2], -axes[3]]
    bot.event.set()

    msg = ""
    # Debounce the shutter button: at most one shot per second.
    if a and time.time() - last > 1:
        last = time.time()
        Camera().shoot()
        msg = "Shoot"

    if x and not record:
        msg = "Start Record"
        record = True
        Camera().start_record()
    elif y and record:
        msg = "End Record"
        Camera().end_record()
        record = False

    return Response(msg)
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    stream = gen(Camera())
    # Each yielded frame replaces the previous one in the browser.
    return Response(stream, mimetype='multipart/x-mixed-replace; boundary=frame')
def video_feed():
    """Stream video only for the authenticated session user 'stan';
    otherwise blank the session user and show the login page."""
    authorised = "user" in session and escape(session["user"]) == "stan"
    if not authorised:
        session["user"] = "******"
        return render_template("login.html")
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def autopilot_loop():
    """Run one autopilot step: grab a frame, feed it together with the
    persisted telemetry values to the model, and store the new control
    outputs back into the preferences store."""

    def _pref_float(key, default=0.0):
        # Preferences may be missing or non-numeric; fall back to the
        # default and repair the stored value.  (Original used a bare
        # `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
        try:
            return float(prefs.get_pref(key))
        except Exception:
            prefs.set_pref(key, default)
            return default

    frame = decodeImage(Camera().get_frame())
    # Skip entirely blank/black frames.
    if frame.any():
        telemetry_data = {
            "accel_val_auto": _pref_float("accel_val_auto"),
            "steering_angle_auto": _pref_float("steering_angle_auto"),
            "speed": _pref_float("speed"),
        }
        accel_val, steering_angle = telemetry(telemetry_data, frame)
        # NOTE(review): the model's steering output is deliberately forced
        # to 0 here (pre-existing behaviour), so only acceleration is
        # actually driven by the model — confirm this is still intended.
        steering_angle = 0
        prefs.set_pref("accel_val_auto", accel_val)
        prefs.set_pref("steering_angle_auto", steering_angle)
def snapshot():
    """Capture a single frame from the camera, save it as test.jpg,
    then re-render the index page."""
    photo = Camera().get_frame()
    # Context manager guarantees the handle is closed even if the write
    # fails (original leaked the file object on error); 'wb' also avoids
    # shadowing with a read mode that was never used.
    with open('test.jpg', 'wb') as fh:
        fh.write(photo)
    return render_template('index.html')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag.

    Optional integer query parameters: w, h (frame size), fps, and delay.
    """
    args = request.args
    width = args.get('w', 320, int)
    height = args.get('h', 240, int)
    frame_rate = args.get('fps', 10, int)
    startup_delay = args.get('delay', 5, int)
    camera = Camera(width, height, frame_rate, startup_delay)
    return Response(gen(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag.

    How this works (answering the old TODO): gen() is a generator that
    yields one multipart-wrapped JPEG frame at a time; Flask's Response
    streams each yielded chunk to the client as it is produced, and the
    'multipart/x-mixed-replace' mimetype tells the browser to replace the
    previously shown part with each new one — producing live video from a
    sequence of stills.
    """
    return Response(gen(Camera()),
                    mimetype="multipart/x-mixed-replace; boundary=frame")
def stop_streams():
    """Stop showing the camera streams, then bounce back to the home page."""
    global cam
    # Lazily create the camera singleton if no stream was ever started.
    cam = cam if cam is not None else Camera(app.config)
    cam.stop_preview_stream()
    cam.stop_live_stream()
    return render_template("redirect.html", redirect_to=url_for("home"))
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    multipart_mime = 'multipart/x-mixed-replace; boundary=frame'
    return Response(gen(Camera()), mimetype=multipart_mime)
def shoot():
    """Take a photo, rate-limited to at most one shot per second."""
    global last
    if time.time() - last <= 1:
        # Debounce: refuse shots fired within a second of the last one.
        return "too fast"
    last = time.time()
    Camera().shoot()
    return "Complete"
def video_feed():
    """Stream video only while the arm is disarmed (latest armstatus row
    is 0); otherwise answer HTTP 500.

    Fixes: the sqlite connection was never closed (leaked one per
    request), an empty armstatus table raised IndexError, and a debug
    print was left in.
    """
    conn = sqlite3.connect(DB_NAME)
    try:
        curr = conn.cursor()
        curr.execute(
            "SELECT status FROM armstatus ORDER BY timestmp DESC LIMIT 1")
        row = curr.fetchone()
    finally:
        conn.close()
    if row is None:
        # No status ever recorded: fail closed rather than crash.
        return "ERROR", 500
    if row[0] == 0:
        return Response(gen(Camera()),
                        mimetype='multipart/x-mixed-replace; boundary=frame')
    return "ERROR", 500
def switch_camera(cam_new):
    """Swap the active camera implementation.

    cam_new -- 'none' for the dummy base camera, 'rpi' for the Raspberry
    Pi camera. No-op when the requested camera is already selected or
    the name is unrecognised.
    """
    global cam, cam_selected
    if cam_new == cam_selected:
        return
    # Build the replacement before closing the old camera.  The original
    # closed the active camera and recorded the new name even for
    # unrecognised values, leaving `cam` pointing at a closed object.
    factories = {
        'none': CameraBase,
        'rpi': Camera,
    }
    factory = factories.get(cam_new)
    if factory is None:
        return
    old_cam = cam
    cam = factory(exit_event, socketio)
    old_cam.close()
    cam_selected = cam_new
def path_finding():
    """Drive the robot along the pre-planned path stored in `map`.

    First walks the path's node list to precompute edge lengths
    (dist_rec) and turn directions (direc_rec), then loops forever:
    on each detected card (waypoint marker) it stops, re-reads the
    heading, rotates toward the next leg, and resumes; otherwise it
    forwards camera-derived commands to the motors.

    NOTE(review): indentation reconstructed from a collapsed source —
    `update_heading()` and the index reset are assumed to sit after the
    planning loop, and `motor.stop()` after the completion check; confirm
    against the original file.
    """
    global map, last_card_read, motor, heading, card_detected
    dist_rec = []
    direc_rec = []
    camera = Camera('auto')

    # --- Planning pass: walk the path from its start node, recording
    # each edge length and (past the first node) the turn direction.
    node = map.find_node(map.path['header']['start'])
    while node.next_node_id:
        dist_rec.append(node.get_edge_len(node.next_node_id))
        if node.prev_node_id:
            direc_rec.append(node.get_direction())
        print(node.id)
        node = map.find_node(map.path[node.id]['next'])

    update_heading()
    current_node_idx = 0

    # --- Driving pass: react to frames until the final card is reached.
    while True:
        frame, cmd = camera.get_frame()
        if mode == 'auto' and card_detected:
            print('card detected')
            if current_node_idx == len(direc_rec):
                # All turns consumed: path complete.
                print('complete')
                return
            motor.stop()
            time.sleep(0.1)
            update_heading()
            # Target heading for the next leg, wrapped into [0, 360).
            goal_ang = heading + direc_rec[current_node_idx]
            if goal_ang > 360:
                goal_ang -= 360
            elif goal_ang < 0:
                goal_ang += 360
            print('goal ang:', goal_ang)
            heading = goal_ang
            card_detected = False
            current_node_idx += 1
            # Force a turn command after a card; presumably 'l' = rotate
            # toward the new heading — TODO confirm in convert_cmd/motor.
            cmd = 'l'
            last_card_read = datetime.now()
        elif mode == 'auto' or mode == 'debug':
            cmd = convert_cmd(cmd)
        if mode == 'auto':
            cmd_to_motor(cmd)
# Imports, grouped stdlib / third-party / local.  `logging` was used
# below but never imported, which raised NameError at startup.
import logging

from flask import Flask, render_template, Response
from labthings.server.quick import create_app
from labthings.server.find import find_component
from labthings.server.view import PropertyView

# import camera driver
from camera_pi import Camera
from views import MjpegStream, SnapshotStream

# Set root logger level
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

print("Creating Lab Thing...")
app, labthing = create_app(
    __name__,
    title="Pi Camera",
    # NOTE(review): "Camers" looks like a typo for "Camera"; left as-is
    # since it is published Thing metadata.
    description="Thing for Pi Camers",
    types=["org.raspberrypi.camera"],
    version="0.1.0",
)

# Expose the camera as a web Thing with an MJPEG stream and a snapshot view.
labthing.add_component(Camera(), "org.raspberrypi.camera")
labthing.add_view(MjpegStream, "/mjpeg")
labthing.add_view(SnapshotStream, "/still")

if __name__ == "__main__":
    print("Starting server...")
    from labthings.server.wsgi import Server

    Server(app).run(host="::", port=5000, debug=False, zeroconf=True)
def snapshot():
    """Return a single camera snapshot as a multipart response.

    Removed two dead bare-string-literal statements ("takeSnapshot()" and
    "return send_file(...)") left over from an older implementation —
    they were no-op expression statements at runtime.
    """
    return Response(snap(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def video_feed():
    """Stream camera frames to the client.

    Each new JPEG replaces the previous one in the browser thanks to the
    'multipart/x-mixed-replace' mimetype, so the rapid succession of
    still images appears as continuous video.
    """
    frames = gen(Camera())
    return Response(frames,
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def index(cmd=None):
    """Video streaming home page.

    cmd == 'image': send the current frame to the Azure Computer Vision
    /analyze endpoint and build a caption (plus celebrity or landmark
    names when present).
    cmd == 'word': send the frame to the /ocr endpoint and join the
    recognised words.
    In both cases (and also when cmd is None, with an empty string) the
    result text is then machine-translated to Chinese via the Microsoft
    Translator V2 API before rendering.

    NOTE(review): indentation reconstructed from a collapsed source;
    analyze_params/ocr_params/cv_headers/tl_headers are module globals
    defined elsewhere — confirm against the original file.
    """
    result = ''
    camera = Camera()
    if cmd == 'image':
        frame = camera.get_frame()
        # Describe the image with Azure Computer Vision.
        conn = http.client.HTTPSConnection(
            'eastasia.api.cognitive.microsoft.com')
        conn.request('POST', "/vision/v1.0/analyze?%s" % analyze_params,
                     frame, cv_headers)
        response = conn.getresponse()
        data = response.read()
        conn.close()
        dec_data = json.loads(data.decode('utf-8'))
        result_list = []
        # Primary caption is always included.
        caption = dec_data['description']['captions'][0]['text']
        result_list.append(caption)
        categories = dec_data['categories'] if 'categories' in dec_data else []
        c_detail = {}
        l_detail = {}
        # Pull optional celebrity / landmark detail blocks out of the
        # category list, when the service tagged the image with them.
        for cat in categories:
            if cat['name'] == 'people_':
                c_detail = cat['detail'] if 'detail' in cat else {}
            elif cat['name'] == 'outdoor_' or cat['name'] == 'building_':
                l_detail = cat['detail'] if 'detail' in cat else {}
        if c_detail:
            celebrities = []
            for cel in c_detail['celebrities']:
                celebrities.append(cel['name'])
            if celebrities:
                result_list.append(' '.join(celebrities))
        elif l_detail:
            landmarks = []
            for lan in l_detail['landmarks']:
                landmarks.append(lan['name'])
            if landmarks:
                result_list.append(' '.join(landmarks))
        # result = "{}".format(dec_data['description']['captions'][0]['text'])
        result = '\n'.join(result_list)
    elif cmd == 'word':
        frame = camera.get_frame()
        # OCR the image with Azure Computer Vision.
        conn = http.client.HTTPSConnection(
            'eastasia.api.cognitive.microsoft.com')
        conn.request('POST', "/vision/v1.0/ocr?%s" % ocr_params, frame,
                     cv_headers)
        response = conn.getresponse()
        data = response.read()
        conn.close()
        dec_data = json.loads(data.decode('utf-8'))
        words_list = []
        # Flatten regions -> lines -> words into one line per OCR line.
        for big_box in dec_data['regions']:
            for small_box in big_box['lines']:
                tmp = []
                for words in small_box['words']:
                    tmp.append(words['text'])
                words_list.append(' '.join(tmp))
        result = '\n'.join(words_list) if len(
            words_list) != 0 else 'There are no words in the image.'
    # Translate the accumulated result to Chinese ('zh').  This runs even
    # for cmd=None with an empty string.
    tl_params = urllib.parse.urlencode({
        # Request parameters
        'text': result,
        'to': 'zh',
    })
    conn = http.client.HTTPConnection('api.microsofttranslator.com')
    conn.request('GET', "/V2/Http.svc/Translate?%s" % tl_params,
                 headers=tl_headers)
    response = conn.getresponse()
    tl_data = response.read()
    conn.close()
    # The V2 API wraps the translation in an XML <string> element; strip
    # the envelope rather than parse it.
    tl_data = tl_data.replace(
        b'<string xmlns="http://schemas.microsoft.com/2003/10/Serialization/">',
        b'')
    tl_data = tl_data.replace(b'</string>', b'')
    dec_tl_data = tl_data.decode('utf-8')
    result = dec_tl_data
    return render_template('index.html', result=result)
if cam is None: cam = Camera(app.config) cam.start_preview_stream(app.config) cam.start_live_stream(app.config) return render_template("redirect.html", redirect_to=url_for("home")) @app.route('/stop/') def stop_streams(): """stop showing the camera streams""" global cam if cam is None: cam = Camera(app.config) cam.stop_preview_stream() cam.stop_live_stream() return render_template("redirect.html", redirect_to=url_for("home")) @app.route('/random/') def random_route(): template_data = { 'title': 'Random Number', 'content': '%f' % random.random() } return render_template("320simple.html", **template_data) if __name__ == '__main__': global cam with Camera(app.config) as cam: app.run(host='0.0.0.0', port=8080, debug=True, use_reloader=False)
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    boundary_type = 'multipart/x-mixed-replace; boundary=frame'
    camera = Camera()
    return Response(gen(camera), mimetype=boundary_type)
def photoFeed():
    """Serve the camera's photo-mode stream as a multipart response."""
    source = Camera()
    return Response(gen(source, genType="photo"),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def __init__(self):
    """Initialise the counting thread with a camera and a ~30 fps delay."""
    super(CountThread, self).__init__()
    # One frame every 1/30 of a second.
    self.delay = 1 / 30
    self.cam = Camera()
def get_camera():
    """Stream frames from the right camera to the client."""
    camera = Camera()
    return Response(frame_generator(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
# Module-level state shared by the Alexa request handlers.
# NOTE(review): field meanings inferred from names — confirm against the
# handlers that set them (not visible in this chunk).
secs_rem = 2
tmr = None
requestId = None
apiEndpoint = None
apiAccessToken = None
image = None

# import camera driver
# (Alternative env-driven driver selection, kept for reference:)
##if os.environ.get('CAMERA'):
#    Camera = import_module('camera_' + os.environ['CAMERA']).Camera
#else:
#    from camera import Camera
from camera_pi import Camera

# Single shared camera instance and the file the snapshot is written to.
camera = Camera()
image_file = 'image_file.png'

# this sound is not guaranteed to be available. replace sound. use ffmpeg to translate to Alexa requirements
camera_sound_url = 'https://videotalker.blob.core.windows.net/videoblob/camera_sound_conv.mp3'

# Raspberry Pi camera module (requires picamera package)
# from camera_pi import Camera

# Flask app with the Alexa Skills Kit blueprint mounted at the site root.
app = Flask(__name__)
ask = Ask(app, "/")

# Log everything to stderr at DEBUG level via the root logger.
log = logging.getLogger()
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
def video_stream():
    """Serve an endless multipart stream of camera frames."""
    return Response(
        generate_frame(Camera()),
        mimetype='multipart/x-mixed-replace; boundary=frame',
    )
def video_feed():
    """Video streaming route for the camera feed.

    The 'videoState' query parameter was read into a local that was never
    used; the dead assignment has been removed — the stream was (and is)
    served unconditionally.
    """
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def get_camera_with_meta():
    """Stream frames from the right camera to the client."""
    if auth.username() != ROBOT_USER:
        # Only the robot account may pull the metadata-annotated stream.
        return make_response(jsonify({'error': 'Unauthorized access'}), 401)
    return Response(frame_generator_with_meta(Camera()))
def video():
    """Video streaming route. Put this in the src attribute of an img tag.

    Removed the unused local `random` (which also shadowed the stdlib
    module name) and promoted the stray mid-function string literal —
    which was not a docstring in its original position — to a real one.
    """
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def video_feed():
    """Video streaming route; answers 503 when streaming is unavailable.

    The original fell off the end and returned None when `couldStream`
    was false, which made Flask raise a TypeError (an opaque 500).
    """
    if couldStream:
        return Response(genStream(Camera()),
                        mimetype='multipart/x-mixed-replace; boundary=frame')
    return "Camera stream unavailable", 503
def video_feed():
    """Relay the camera's frame generator as a multipart HTTP stream."""
    mime = 'multipart/x-mixed-replace; boundary=frame'
    return Response(gen(Camera()), mimetype=mime)
def image():
    """Returns a single current image for the webcam."""
    jpeg_stream = gen2(Camera())
    return Response(jpeg_stream, mimetype='image/jpeg')