def launcher(motionProxy, postureProxy, markProxy):
    """Bring the robot to its start pose and begin landmark tracking.

    Stiffens the joints, moves to the initial posture, switches the top
    camera on, and subscribes to landmark ('goal_pos') events.
    """
    init_robot.stiffness_on(motionProxy)
    init_robot.init_pos(postureProxy)
    video_proxy = camera.init_camera()
    camera.up_camera(video_proxy)
    # Landmark events polled every 500 ms.
    markProxy.subscribe('goal_pos', 500, 0.0)
def camp():
    """Capture a 3-D picture set and send it to the compute server (no display)."""
    #num = request.args.get('number')
    send_start()
    cam = init_camera()
    cam.resolution = (160, 160)
    #cam.framerate_range = (10, 10)
    init_3d_camera(CameraSettings(cam))
    size = request.args.get('size', None)
    if size:
        cam.resolution = (int(size), int(size))
    set_dias(False)
    set_flash(FLASH_LEVEL)
    warm_up()
    # First frame is grabbed from the video port straight into memory.
    first = BytesIO()
    cam.capture(first, format='jpeg', use_video_port=True, quality=JPEG_QUALITY)
    first.truncate()
    first.seek(0)
    second, third = get_picture_set(cam)
    send_picture([first, second, third], 1)
    cam.close()
    led_off()
    send_stop()
    return {"device": DEVICEID}
def p_picture():
    """Render the picture page with current exposure info and LED query args."""
    query = "?" + get_set_led()
    cam = init_camera()
    warm_up()
    exposure = get_exposure_info(cam)
    cam.close()
    return render_template('picture.html', name="", exposure=exposure, arg=query)
def u_picture():
    """Return a single still picture.

    Query parameters: ?quality=85&dias=0&type=jpeg/png, plus optional
    size= (square resolution) and compensation= (exposure compensation).
    """
    # BUG FIX: camera initialisation used to sit under `if False:`, so
    # `camera` was referenced below without ever being defined.
    camera = init_camera()
    if False:  # optionally force the full sensor resolution
        camera.resolution = (2592, 1944)
    pic_format = 'jpeg'
    pic_mime = 'image/jpeg'
    img_type = request.args.get('type', None)
    if img_type == 'png':
        pic_format = 'png'
        pic_mime = 'image/png'
    pic_quality = 85
    quality = request.args.get('quality', None)
    if quality:
        pic_quality = int(quality)
    size = request.args.get('size', None)
    if size:
        camera.resolution = (int(size), int(size))
    compensation = request.args.get('compensation', None)
    if compensation:
        camera.exposure_compensation = int(compensation)
    get_set_led()
    #warm_up()
    # if _DEBUG:
    #     print(get_exposure_info(camera))
    return send_file(get_picture(camera, format=pic_format, quality=pic_quality),
                     mimetype=pic_mime)
def info():
    """Show the current camera/picture settings as a simple HTML page."""
    cam = init_camera()
    get_set_led()
    warm_up()
    details = get_picture_info(cam)
    #pprint.pprint(details)
    cam.close()
    led_off()
    body = pprint.pformat(details).replace('\n', '<br />')
    return Response(body + '<br><a href="/">Back</a>')
def cam():
    """Stream 2-D pictures as an MJPEG multipart response."""
    set_flash(FLASH_LEVEL)
    send_api_request("start2d", url=COMPUTE_SERVER)
    mycam = init_camera()
    mycam.resolution = (640, 480)
    #mycam.framerate_range = (10, 25)
    requested = request.args.get('size', None)
    if requested:
        mycam.resolution = (int(requested), int(requested))
    warm_up()
    return Response(get_pictures(mycam),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def test():
    """Calibration page: capture a before/after pair of pictures.

    On POST, camera settings from the form are applied between the two
    captures; both images are embedded base64 into the rendered template.
    """
    mycamera = init_camera()
    mycamera.resolution = (640, 480)
    warm_up()
    settings = CameraSettings(mycamera)
    dias = None
    flash = None
    if request.method == 'POST':
        print(request.form)
        # (field, converter) pairs, applied in the original read order.
        for field, conv in (('contrast', int), ('brightness', int),
                            ('saturation', int), ('iso', int),
                            ('exposure_compensation', int),
                            ('exposure_mode', str), ('awb_mode', str),
                            ('sharpness', int), ('meter_mode', str),
                            ('drc_strength', str), ('resolution', str)):
            setattr(settings, field, conv(request.form[field]))
        settings.shutter_speed = int(request.form['shutter_speed']) * 1000
        dias = request.form.get('dias')
        flash = request.form.get('flash')
    #print("Flash", flash)
    #print("Dias", dias)
    before = capture_picture(mycamera)
    exposure1 = Markup(get_exposure_info(mycamera) + "<br>" +
                       get_white_balance(mycamera))
    img1 = base64.b64encode(before.getvalue()).decode()
    settings.set()
    mysettings = "Camera: " + settings.str()
    if flash:
        set_flash(True)
    if dias:
        set_dias(True)
    warm_up()
    after = capture_picture(mycamera)
    exposure2 = Markup(get_exposure_info(mycamera) + "<br>" +
                       get_white_balance(mycamera) + " " + mysettings)
    img2 = base64.b64encode(after.getvalue()).decode()
    mycamera.close()
    set_flash(False)
    set_dias(False)
    return render_template('calibrate.html', header="Calibrate",
                           img1=img1, img2=img2,
                           exposure1=exposure1, exposure2=exposure2)
def main():
    """Wait for the 'lower' flag, then nudge the robot until the ball is on target."""
    motionProxy = init_robot.start_motion()
    rospy.init_node('set_position', anonymous=True)
    # Block until another module sets the 'lower' parameter to True.
    while not rospy.get_param('lower'):
        time.sleep(0.5)
    video_proxy = camera.init_camera()
    camera.low_camera(video_proxy)
    ball = rospy.wait_for_message('pos', Point)
    while True:
        x_ok = inrange(ball.x, TARGET_X - ERROR_X, TARGET_X + ERROR_X)
        y_ok = inrange(ball.y, TARGET_Y - ERROR_Y, TARGET_Y + ERROR_Y)
        if x_ok and y_ok:
            break
        set_position(motionProxy, ball)
        ball = rospy.wait_for_message('pos', Point)
    rospy.set_param('kick_now', True)
def test():
    """Stream 3-D test pictures; fail fast when the compute server is down."""
    if not send_start():
        return '{"result": 0, "reason": "no connection to compute server"}'
    cam = init_camera()
    cam.resolution = (160, 160)
    cam.framerate_range = (10, 10)
    settings = CameraSettings(cam)
    mode = request.args.get('mode', None)
    init_3d_camera(settings)
    # start
    set_dias(False)
    set_flash(FLASH_LEVEL)
    warm_up()
    return Response(get_test_pictures(cam, mode=mode),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def cam():
    """Stream continuous scan pictures, honouring size/maxframerate/quality args."""
    get_set_led()
    mycam = init_camera()
    warm_up()
    if _DEBUG:
        print(get_exposure_info(mycam))
    size_arg = request.args.get('size', None)
    if size_arg:
        mycam.resolution = (int(size_arg), int(size_arg))
    max_frame = request.args.get('maxframerate', None)
    if max_frame:
        # Keep the existing lower bound; cap only the upper framerate.
        current = mycam.framerate_range
        mycam.framerate_range = (current.low, int(max_frame))
    quality = request.args.get('quality', None)
    if quality:
        quality = int(quality)
    return Response(scan_cont_pictures(mycam, quality=quality),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def picture():
    """Capture one picture and upload it to the API server."""
    mycam = init_camera()
    mycam.resolution = (640, 480)
    #mycam.framerate_range = (10, 25)
    size_arg = request.args.get('size', None)
    if size_arg:
        mycam.resolution = (int(size_arg), int(size_arg))
    warm_up()
    shot = capture_picture(mycam)
    expinfo = get_exposure_info(mycam)
    mycam.close()
    res = send_file_object(shot, "2d.jpg",
                           data={'folder': "testfolder", 'exposure': expinfo})
    print(res)
    return Response(res)
def cam():
    """Stream a 3-D picture set to the compute server."""
    no_pictures = request.args.get('no_pictures', NUMBER_PICTURES, type=int)
    picture_interval = request.args.get('picture_interval', PICTURE_INTERVAL,
                                        type=float)
    if not send_start():
        return '{"result": 0, "reason": "no connection to compute server"}'
    mycam = init_camera()
    mycam.resolution = (160, 160)
    mycam.framerate_range = (10, 10)
    init_3d_camera(CameraSettings(mycam))
    size_arg = request.args.get('size', None)
    if size_arg:
        mycam.resolution = (int(size_arg), int(size_arg))
    # start
    set_dias(False)
    set_flash(FLASH_LEVEL)
    warm_up()
    stream = get_pictures(mycam, no_pictures=no_pictures,
                          picture_interval=picture_interval)
    return Response(stream, mimetype='multipart/x-mixed-replace; boundary=frame')
def cam3dias():
    """Stream a series of dias pictures, logging exposure while warming up."""
    send_start()
    mycam = init_camera()
    mycam.resolution = (160, 160)
    mycam.framerate_range = (10, 10)
    init_3d_camera(CameraSettings(mycam))
    size_arg = request.args.get('size', None)
    if size_arg:
        mycam.resolution = (int(size_arg), int(size_arg))
    print(get_exposure_info(mycam))
    set_dias(False)
    set_flash(True)
    print(get_exposure_info(mycam))
    # Three warm-up rounds; exposure is printed after each to watch it settle.
    for _ in range(3):
        warm_up()
        print(get_exposure_info(mycam))
    return Response(get_dias(mycam, 10),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
#stream.truncate() #stream.seek(0) return stream def print_settings(camera): strg = "ExposureSpeed: {:5.3f} sec(max {:5.1f} pic/sec)<br>".format( camera.exposure_speed / 1000000, 1000000 / camera.exposure_speed) strg += "FrameRate: " + str(camera.framerate) + "<br>" strg += "FrameRateRange: " + str(camera.framerate_range) + "<br>" strg += "PictureSize: " + str(camera.resolution) + "<br>" return strg #camera = PiCamera() camera = init_camera() camera.iso = 800 camera.framerate_range = (5, 50) camera.framerate = 80 settings = CameraSettings(camera) print(get_camera_settings(camera)) sleep(5) antal = 200 start = datetime.now() scan_cont_mem_set(camera, antal=antal) s**t = datetime.now() print("Billeder pr sek", antal / (s**t - start).total_seconds()) strg = "ExposureSpeed: {:5.3f} sec(max {:5.1f} pic/sec)\r\n".format(
import camera
import sys
import os
import time

PREPATH = 'dataset/'

# Capture webcam frames forever into dataset/<argv[1]>/<timestamp>_<i>.jpg.
path = sys.argv[1]
now = str(int(time.time()))
try:
    os.makedirs(PREPATH + str(path))
except FileExistsError:
    print('appending to existing folder!')
webcam = camera.init_camera()
frame_no = 0
while True:
    camera.save_ss(webcam, f"{PREPATH}{path}/{now}_{frame_no}.jpg")
    frame_no += 1
def dummyServerWithCamera():
    """Run the remote-control loop in dummy mode with a live camera pipeline."""
    import camera
    pipeline = camera.init_camera()
    remote_control(None, None, camera_pipeline=pipeline, dummy=True)
left = int(left) if left >= -100 and left <= 100: cfg.left_motor = left hw.motor_two_speed(cfg.left_motor) if right: right = int(right) if right >= -100 and right <= 100: cfg.right_motor = right hw.motor_one_speed(cfg.right_motor) return 'record_ok' else: return 'no video' if __name__ == '__main__': cfg.camera_detected, cfg.camera_handle = cam.init_camera() #gen(cam.Camera()) #cfg.frame = gen(cam.Camera()) #cv2.imwrite(cfg.outputDir+cfg.currentDir+'/'+ 'test.jpg', cfg.frame) #cam.Camera() #print('camera started') #app.run(host='0.0.0.0', debug=False, threaded=True) http_server = WSGIServer(('', 5000), app) http_server.serve_forever() hw.motor_clean() cv2.destroyAllWindows()
key, val = ln.strip().split('=', 1) if val[0] == '\'' and val[-1] == '\'': val = val[1:-2] glparams[key.lower()] = val # define servr properties #host = '192.168.1.201' #host = '192.168.242.1' host = glparams['server'] port = int(glparams['port']) size = int(glparams['buf_size']) # configure server socket sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.bind((host, port)) print "server started %s:%s" % (host, port) cm = camera.init_camera(glparams['tmp_image_file']) print "camera initialized" ser = initserial() # wait for connections # terminate with try: while True: print "recving..." data, address = sock.recvfrom(size) message = data + "\0" print "datagram from %s:\t%s" % (address, message) if message.startswith("sensor"): # print "sensor:",data reply = "ack:sensor data received"
def process_message(message, fromid, remotejids): global bot, glparams resp = "" talkonly = -1 if fromid == remotejids['master_id']: if message.find("hello") >= 0 or message.find("hi") >= 0: resp = u'marvel,欢迎您' talkonly = 1 elif message.find(u"温度") >= 0 or message.find( u"多热") >= 0 or message.find(u"多冷") >= 0: code, temp = sensor.read_temperature() if code < 0: resp = temp talkonly = 1 else: resp = u'marvel,您家里当前温度是%s摄氏度' % temp elif message.find(u"亮度") >= 0 or message.find(u"多亮") >= 0: code, brightness = sensor.read_brightness() if code < 0: resp = brightness talkonly = 1 else: desc = sensor.get_brightness_description(brightness) resp = u'marvel,您家里当前亮度是%s,%s' % (brightness, desc) elif message.find(u"图片") >= 0: cm = camera.init_camera(glparams['tmp_image_file']) if camera.capture_camera(bot.cm) > 0: resp = u'已经照完' else: resp = u'照相失败' camera.release_camera(bot.cm) talkonly = 1 else: talkonly = 1 resp = u'marvel,我看不懂您的话,blush' if message.find(u"谢谢") >= 0: resp = resp + u',不客气//%s' % message else: resp = resp + "//%s" % message if message.find("talkonly") >= 0: talkonly = 1 sys.stdout.write("I said:%s\n" % resp) if glparams['rep_gtalk'] == 'true': bot.send_master_message(resp) sys.stdout.write("gtalk message sent\n") if talkonly < 0 and (glparams['rep_twitter'] == 'true' or message.find(u'推一下') >= 0): send_twitter_message(resp) sys.stdout.write("twitter message sent\n") if talkonly < 0 and glparams['rep_weibo'] == 'true': bot.send_weibo_message(resp) sys.stdout.write("weibo message sent\n") #os.write(sys.stdin.fileno(), resp) elif fromid == remotejids['weibo_id']: if message.find(u'[评论]') >= 0: print "new comment received" elif message.find(u'[私信]') >= 0: print "new mail received" return
def manual_control_loop(self):
    """Interactive command loop for the Spot robot.

    Honours config.norobot (run without a robot connection) and
    config.nopower (skip power-on). 'c' quits, 'r' enters remote-control
    mode, any other command is executed directly. Returns True on exit.
    """
    # Create robot object (or run clientless in norobot mode).
    if self.config.norobot:
        robot_command_client = None
        robot_state_client = None
    else:
        sdk = bosdyn.client.create_standard_sdk('RobotCommandMaster')
        sdk.load_app_token(self.config.app_token)
        robot = sdk.create_robot(self.config.hostname)
        robot.authenticate(self.config.username, self.config.password)
        # An estop must be connected before the robot accepts commands.
        verify_estop(robot)
        # Acquire the lease and keep it alive for the session.
        lease_client = robot.ensure_client(LeaseClient.default_service_name)
        lease = lease_client.acquire()
        robot.time_sync.wait_for_sync()
        # Binding keeps the keep-alive thread referenced (and thus alive).
        lk = bosdyn.client.lease.LeaseKeepAlive(lease_client)
        # Clients for the robot state and robot command services.
        robot_state_client = robot.ensure_client(
            RobotStateClient.default_service_name)
        robot_command_client = robot.ensure_client(
            RobotCommandClient.default_service_name)
        # Power on the robot and stand it up.
        if not self.config.nopower:
            robot.power_on()
            blocking_stand(robot_command_client)
    # Initialize camera
    camera_pipeline = camera.init_camera()
    # Print pose
    self.get_pose(robot_state_client)
    is_server_on = False
    # Control loop
    while True:
        cmd = input('Command (wasd/qe/c/r):')
        if cmd == 'c':
            break
        elif cmd == 'r':
            print("Remote control..")
            try:
                self.reset_last_frame()
                if is_server_on:
                    asyncio.get_event_loop().run_forever()
                else:
                    is_server_on = True
                    self.remote_control_loop(
                        robot_command_client, robot_state_client,
                        camera_pipeline=camera_pipeline,
                        dummy=(robot_command_client is None))
            except KeyboardInterrupt:
                print("ending remote control")
                pass
        else:
            self.reset_last_frame()
            pos = self.execute_command(robot_command_client,
                                       robot_state_client, cmd,
                                       wait_to_stabilize=False)
            # move_relative(robot_command_client, robot_state_client, timeout=5.0)
    # BUG FIX: in norobot mode robot_command_client is None; the final
    # safe-power-off used to dereference it unconditionally and crash.
    if robot_command_client is not None:
        robot_cmd = RobotCommandBuilder.safe_power_off_command()
        robot_command_client.robot_command(lease=None, command=robot_cmd,
                                           end_time_secs=time.time() + 2.)
        time.sleep(2.)
    return True
def process_message(message, fromid, remotejids): global bot,glparams resp="" talkonly = -1 if fromid == remotejids['master_id']: if message.find("hello")>=0 or message.find("hi")>=0: resp = u'marvel,欢迎您' talkonly = 1 elif message.find(u"温度")>=0 or message.find(u"多热")>=0 or message.find(u"多冷")>=0 : code, temp = sensor.read_temperature() if code < 0: resp = temp talkonly = 1 else: resp = u'marvel,您家里当前温度是%s摄氏度'%temp elif message.find(u"亮度")>=0 or message.find(u"多亮")>=0: code, brightness = sensor.read_brightness() if code < 0: resp = brightness talkonly = 1 else: desc = sensor.get_brightness_description(brightness) resp = u'marvel,您家里当前亮度是%s,%s'%(brightness, desc) elif message.find(u"图片")>=0: cm = camera.init_camera(glparams['tmp_image_file']) if camera.capture_camera(bot.cm) > 0: resp = u'已经照完' else: resp = u'照相失败' camera.release_camera(bot.cm) talkonly = 1 else: talkonly = 1 resp = u'marvel,我看不懂您的话,blush' if message.find(u"谢谢")>=0: resp = resp+u',不客气//%s'%message else: resp = resp+"//%s"%message if message.find("talkonly")>=0: talkonly = 1 sys.stdout.write("I said:%s\n"%resp) if glparams['rep_gtalk']=='true': bot.send_master_message(resp) sys.stdout.write("gtalk message sent\n") if talkonly < 0 and ( glparams['rep_twitter']=='true' or message.find(u'推一下')>=0): send_twitter_message(resp) sys.stdout.write("twitter message sent\n") if talkonly < 0 and glparams['rep_weibo']=='true': bot.send_weibo_message(resp) sys.stdout.write("weibo message sent\n") #os.write(sys.stdin.fileno(), resp) elif fromid == remotejids['weibo_id']: if message.find(u'[评论]')>=0: print "new comment received" elif message.find(u'[私信]')>=0: print "new mail received" return
def camera():
    """Capture a calibration picture set under several light settings and
    upload it to the compute server."""
    settle = 1.5  # seconds to let exposure settle after a light change
    os.makedirs("/tmp/calib", mode=0o777, exist_ok=True)
    mycamera = init_camera()
    #mycamera.resolution = (2592, 1944)
    set_dias(0)
    set_flash(0)
    warm_up()
    if _DEBUG:
        print(get_camera_settings(mycamera))

    def _grab(name, label):
        # Capture <name>.png and <name>.jpg plus a JSON dump of the settings.
        mycamera.capture('/tmp/calib/' + name + '.png', use_video_port=False)
        if _DEBUG:
            print(label, get_exposure_info(mycamera))
        mycamera.capture('/tmp/calib/' + name + '.jpg', use_video_port=False)
        write_picture_info('/tmp/calib/' + name + '.json',
                           get_picture_info_json(mycamera))

    # normal light
    _grab('color', 'Color')
    # dias light
    set_dias(1)
    sleep(settle)
    _grab('dias', 'Dias:')
    # full flash
    set_dias(0)
    set_flash(1)
    sleep(settle)
    _grab('flash', 'Flash')
    fix_exposure(mycamera)
    # dark
    set_flash(0)
    sleep(settle)
    _grab('nolight', 'NoLight')
    auto_exposure(mycamera)
    # low flash
    set_dias(0)
    set_flash(0.1)
    sleep(settle)
    _grab('flash01', 'Flash01')
    fix_exposure(mycamera)
    set_flash(0)
    sleep(settle)
    mycamera.capture('/tmp/calib/nolight01.png', use_video_port=False)
    mycamera.close()
    # NOTE(review): nolight01.jpg/.json are uploaded below but never written
    # by this function — confirm they are produced elsewhere.
    filelist = [
        '/tmp/calib/color.png', '/tmp/calib/color.jpg', '/tmp/calib/color.json',
        '/tmp/calib/dias.png', '/tmp/calib/dias.jpg', '/tmp/calib/dias.json',
        '/tmp/calib/flash.png', '/tmp/calib/flash.jpg', '/tmp/calib/flash.json',
        '/tmp/calib/nolight.png', '/tmp/calib/nolight.jpg',
        '/tmp/calib/nolight.json',
        '/tmp/calib/flash01.png', '/tmp/calib/flash01.jpg',
        '/tmp/calib/flash01.json',
        #'/tmp/calib/nolight01.png',
        '/tmp/calib/nolight01.jpg', '/tmp/calib/nolight01.json'
    ]
    # NOTE(review): "sice" looks like a typo for "size", but the server may
    # expect this exact key — verify server-side before renaming.
    res = send_files(filelist,
                     post_data={"cmd": "calcamera", "sice": 160, "zoom": ZOOM})
    if res:
        return res
    print("det gik skidt", res)
    return '{ "result": "false"}'
key,val=ln.strip().split('=',1) if val[0]=='\'' and val[-1]=='\'': val = val[1:-2] glparams[key.lower()]=val # define servr properties #host = '192.168.1.201' #host = '192.168.242.1' host = glparams['server'] port = int(glparams['port']) size = int(glparams['buf_size']) # configure server socket sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.bind((host, port)) print "server started %s:%s"%(host, port) cm = camera.init_camera(glparams['tmp_image_file']) print "camera initialized" ser=initserial() # wait for connections # terminate with try: while True: print "recving..." data, address = sock.recvfrom(size) message = data + "\0" print "datagram from %s:\t%s"% (address, message) if message.startswith("sensor"): # print "sensor:",data reply = "ack:sensor data received"