def run(self, app_config): self.led = machine.Pin(app_config['led'], machine.Pin.OUT) # Camera resilience - if we fail to init try to deinit and init again if app_config['camera'] == 'ESP32-CAM': camera.init(0, format=camera.JPEG) #ESP32-CAM elif app_config['camera'] == 'M5CAMERA': camera.init(0, d0=32, d1=35, d2=34, d3=5, d4=39, d5=18, d6=36, d7=19, href=26, vsync=25, reset=15, sioc=23, siod=22, xclk=27, pclk=21) #M5CAMERA camera.framesize(self.framesize) mws = MicroWebSrv(routeHandlers=self.routeHandlers, webPath="www/") mws.Start(threaded=True) gc.collect()
def index(req, resp):
    """picoweb handler: capture one JPEG frame and write it to the response.

    Query param 'flash=true' turns the LED on before capture.
    Responds 503 if the camera cannot be (re)initialised or capture fails.
    """
    # parse query string
    req.parse_qs()
    flash = req.form.get('flash', 'false')
    if flash == 'true':
        led.on()
    # Camera resilience - if we fail to init try to deinit and init again
    if (not camera.init()):
        camera.deinit()
        await asyncio.sleep(1)
        # If we fail to init, return a 503
        if (not camera.init()):
            yield from picoweb.start_response(resp, status=503)
            yield from resp.awrite('ERROR: Failed to initialise camera\r\n\r\n')
            return
    # wait for sensor to start and focus before capturing image
    await asyncio.sleep(2)
    buf = camera.capture()
    led.off()
    camera.deinit()
    # capture() returns False on failure, hence the explicit bytes check.
    if type(buf) is bytes and len(buf) > 0:
        # NOTE(review): the image/jpeg response header below is commented out,
        # so the body is sent without headers — looks unintentional; confirm.
        #yield from picoweb.start_response(resp, "image/jpeg")
        yield from resp.awrite(buf)
    else:
        picoweb.http_error(resp, 503)
def __init__(self, processFunction = None, forceOpenCv = False, displaySurface=None, show=True, **argd):
    """Set up the video-capture player: display surface, camera backend
    (pygame.camera, falling back to an OpenCV wrapper), and snapshot buffer.

    processFunction: optional per-frame callback applied elsewhere.
    forceOpenCv:     skip pygame.camera and use the local 'camera' module.
    displaySurface:  pre-made surface to draw on; created here if None.
    show:            if True, open a real window; else an offscreen surface.
    """
    self.__dict__.update(**argd)
    super(VideoCapturePlayer, self).__init__(**argd)
    logging.debug("Initializing Video Capture Class")
    self.processFunction = processFunction
    self.processRuns = 0
    self.show = show
    self.display = displaySurface
    if self.display is None:
        if show is True:
            # create a display surface. standard pygame stuff
            self.display = pygame.display.set_mode( self.size, 0 )
        else:
            # Headless mode: init the display subsystem but draw offscreen.
            pygame.display.init()
            #pygame.display.set_mode((0,0),0)
            self.display = pygame.surface.Surface(self.size)
    # 'camera' is bound to either the OpenCV wrapper module or pygame.camera;
    # the rest of the method uses the name interchangeably.
    if forceOpenCv:
        import camera
    else:
        import pygame.camera as camera
    camera.init()
    # gets a list of available cameras.
    self.clist = camera.list_cameras()
    if not self.clist:
        raise ValueError("Sorry, no cameras detected.")
    logging.info("Opening device %s, with video size (%s,%s)" % (self.clist[0],self.size[0],self.size[1]))
    # creates the camera of the specified size and in RGB colorspace
    if not forceOpenCv:
        try:
            self.camera = camera.Camera(self.clist[0], self.size, "RGB")
            # starts the camera
            self.camera.start()
        except:
            # Deliberate broad catch: any pygame failure triggers the OpenCV
            # fallback path below by rebinding 'camera' to the wrapper module.
            logging.debug("Ignoring that pygame camera failed - we will try opencv")
            forceOpenCv = True
            del camera
            import camera
            self.clist = camera.list_cameras()
    if forceOpenCv:
        logging.debug("Trying to open the OpenCV wrapped camera")
        self.camera = camera.Camera(self.clist[0], self.size, "RGB", imageType="pygame")
        self.camera.start()
    logging.info("Waiting for camera...")
    self.waitForCam()
    logging.info("Camera ready.")
    self.clock = pygame.time.Clock()
    self.processClock = pygame.time.Clock()
    # create a surface to capture to. for performance purposes, you want the
    # bit depth to be the same as that of the display surface.
    self.snapshot = pygame.surface.Surface(self.size, 0, self.display)
def init():
    """Initialise particles and cursor sprites, load map "2", and size the
    camera to the map bounds."""
    particle.init()
    render.add_screen_sprites(cursor, cursor.hover_text)
    map.load_map("2")
    # Camera viewport is clamped to the freshly loaded map's dimensions.
    camera.init(map.width(), map.height())
def capture4():
    """Capture 100 JPEG frames into webserver/frame<i>.jpg.

    Fixes vs. original: camera.deinit() was called INSIDE the loop, so the
    camera was torn down after the first frame and every later capture ran
    against a deinitialised camera; files were also opened in text mode
    even though capture() returns raw JPEG bytes.
    """
    os.chdir('webserver')
    camera.init(1)
    try:
        for i in range(100):
            buf = camera.capture()
            # 'wb': the capture buffer is binary JPEG data.
            with open('frame' + str(i) + '.jpg', 'wb') as f:
                f.write(buf)
    finally:
        # Deinit exactly once, after all frames are written.
        camera.deinit()
def envia_foto(Bot, mensaje=''):
    """Capture a photo with the ESP32-CAM flash LED and send it to Telegram.

    Bot:     bot object providing send_message / envia_archivo_multipart.
    mensaje: optional caption for the photo.

    Fix vs. original: the JPEG buffer was written with open(..., 'w')
    (text mode); it is binary data, so use 'wb'.
    """
    global camara
    # ------------------ photo-sending example ------------------
    # camera.init(0, format=camera.JPEG)
    try:
        camera.init(0, format=camera.JPEG)
    except OSError as exc:
        # Init failed (e.g. camera already in use): release it, report, bail.
        camera.deinit()
        Bot.send_message(Bot.canal, exc.args[0])
        print(exc.args)
        return
    Led = Pin(4, Pin.OUT)  # flash LED on GPIO 4
    Led.on()               # flash on
    # vertical flip
    camera.flip(0)
    # horizontal mirror
    camera.mirror(0)
    # Frame size constants: FRAME_240X240/4, FRAME_96X96/0, FRAME_CIF/6,
    # FRAME_FHD/14, FRAME_HD/11, FRAME_HQVGA/3, FRAME_HVGA/7, FRAME_P_3MP/16,
    # FRAME_P_FHD/20, FRAME_P_HD/15, FRAME_QCIF/2, FRAME_QHD/18, FRAME_QQVGA/1,
    # FRAME_QSXGA/21, FRAME_QVGA/5, FRAME_QXGA/17, FRAME_SVGA/9, FRAME_SXGA/12,
    # FRAME_UXGA/13, FRAME_VGA/8, FRAME_WQXGA/19, FRAME_XGA/10
    camera.framesize(camara.tamanyo_imagen)
    # Special effects: EFFECT_NONE (default) EFFECT_NEG EFFECT_BW EFFECT_RED
    # EFFECT_GREEN EFFECT_BLUE EFFECT_RETRO
    camera.speffect(camara.efecto_especial)
    # White balance: WB_NONE (default) WB_SUNNY WB_CLOUDY WB_OFFICE WB_HOME
    camera.whitebalance(camara.balance_blancos)
    # Saturation: -2..2 (default 0); -2 is grayscale.
    camera.saturation(camara.saturacion)
    # Brightness: -2..2 (default 0).
    camera.brightness(camara.brillo)
    # Contrast: -2..2 (default 0).
    camera.contrast(camara.contraste)
    # Quality: 10-63, lower number means higher quality.
    camera.quality(camara.calidad)
    bufer = camera.capture()
    print('captura realizada')
    Led.off()  # flash off
    # 'wb': the capture buffer is binary JPEG data (was 'w' in the original).
    with open('file.jpg', 'wb') as k:
        k.write(bufer)
    bufer = None  # release the frame buffer before the upload
    camera.deinit()
    Bot.envia_archivo_multipart(configuracion.Chat_Id, 'file.jpg', 'sendPhoto', 'photo', mensaje)
def run(tfile):
    """Capture a single frame with the flash LED lit and save it to *tfile*."""
    camera.init()
    led.on()
    time.sleep(0.5)  # let the sensor settle before capturing
    frame = camera.capture()
    led.off()
    camera.deinit()
    print(frame[0:8])  # show the leading bytes (JPEG magic) for a sanity check
    with open(tfile, 'wb') as out:
        out.write(frame)
def test():
    """Benchmark: count how many frames the camera can capture in ~3 seconds.

    Fix vs. original: elapsed time was computed with plain subtraction of
    ticks_ms() values, which breaks when the MicroPython tick counter wraps;
    utime.ticks_diff() handles the wraparound correctly.
    """
    begin = utime.ticks_ms()
    now = utime.ticks_ms()
    camera.init(1)
    i = 0
    while utime.ticks_diff(now, begin) < 3000:
        buf = camera.capture()
        i = i + 1
        now = utime.ticks_ms()
    camera.deinit()
    print('Took', i, ' pictures')
def __init__(self): print('Entering camera focus mode') #1) Init camera camera.init() camera.camera_handler.toggleShowOutput(True) #2) Init stepper stepper.init() stepper.setDirection(-1) #Counter clockwise #3) Init sensor photo_resistor.init()
def capture3():
    """Capture two frames into webserver/data.jpg and webserver/data1.jpg.

    Fixes vs. original: the second filename expression
    ('data'+str(1)'+.jpg') was a SyntaxError; the first file handle was
    leaked (rebound without close); files were opened in text mode for
    binary JPEG data.
    """
    os.chdir('webserver')
    camera.init(1)
    # 'wb': camera.capture() returns raw JPEG bytes.
    with open('data.jpg', 'wb') as f:
        f.write(camera.capture())
    with open('data' + str(1) + '.jpg', 'wb') as f:
        f.write(camera.capture())
    camera.deinit()
def __init__(self):
    """Enter calibration mode: reset state, then bring up camera preview,
    stepper, and light sensor."""
    print('Entering calibration mode')
    # Start by waiting for the sensor-on condition; collected angles go here.
    self.state = STATE_CALIBRATE_SENSOR_ON
    self.angles = []
    #1) Init camera
    camera.init()
    # Show the live camera output during calibration.
    camera.camera_handler.toggleShowOutput(True)
    #2) Init stepper
    stepper.init()
    stepper.setDirection(-1) #Counter clockwise
    #3) Init sensor
    photo_resistor.init()
def init():
    """Set up GL material/lighting state, HUD and camera, reset playback
    state, and queue this level's dialogue lines."""
    glEnable(GL_COLOR_MATERIAL)
    glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)
    glEnable(GL_NORMALIZE)
    lighting.init()
    hud.init()
    camera.init()
    global playing, alpha
    playing = True
    alpha = 0
    global lines
    # Dialogue for the current level, with the mission help appended last.
    lines = list(info.dialogue[settings.level]) + [info.missionhelp[settings.level]]
    # Pop and display the first line immediately.
    hud.dialoguebox.settext(lines.pop(0))
def init():
    """Set up GL material/lighting state, HUD and camera, reset level state,
    and start this level's music."""
    glEnable(GL_COLOR_MATERIAL)
    glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)
    glEnable(GL_NORMALIZE)
    lighting.init()
    hud.init()
    camera.init()
    # Clear any pending build selection on the cursor.
    cursor.tobuild = None
    global playing, ending, alpha, lastsave
    playing = True
    ending = False
    alpha = 0
    lastsave = 0
    sound.playmusic(info.music[settings.level])
def testThread():
    """Benchmark threaded captures for ~3 seconds.

    Fix vs. original: the loop tested an undefined name `n`, raising
    NameError on the first iteration. `n` is now defined and incremented,
    presumably to cap the number of spawned capture threads at 5 —
    TODO confirm that was the intent.
    """
    begin = utime.ticks_ms()
    now = utime.ticks_ms()
    camera.init(1)
    i = 0
    n = 0  # number of capture threads started so far
    while (now - begin < 3000):
        if (n < 5):
            _thread.start_new_thread(cap, ())
            n = n + 1
        i = i + 1
        now = utime.ticks_ms()
        machine.sleep(500)
    camera.deinit()
    print('Took', i, ' pictures')
def setup_cam(logger):
    """Initialise the camera, set its JPEG quality, and return the camera
    module. Sleeps briefly and resets the board if initialisation fails.

    logger: logging-like object with .debug() and .error().
    """
    quality = 40
    try:
        camera.init(0)
    except OSError as err:
        # TODO: there should be a safeguard here to prevent constant restart
        time.sleep(2)
        logger.error(
            "unable to initialize camera. restarting.\n err: {}".format(err))
        machine.reset()
    else:
        logger.debug("Camera initialized")
    camera.quality(quality)
    logger.debug("Camera quality set to {}".format(quality))
    return camera
def __init__(self): print('Scanner engaged') #1) Init camera camera.init() camera.camera_handler.toggleShowOutput(True) #2) Init stepper stepper.init() stepper.setDirection(-1) #Counter clockwise #3) Init sensor photo_resistor.init() #4) Init filters filters.init() #Init vars self.current_state = STATE_ROLL self.processed_count = 0 self.log = [] self.last_processed_data = []
def callbackSee(client, userdata, message):
    # MQTT message callback (Python 2 print syntax): on "see", capture an
    # image description plus a distance scan and speak it via Polly; on
    # "tweet", do the same and additionally tweet the message.
    # NOTE(review): the two branches duplicate the capture/speak sequence;
    # they differ only by the trailing camera.tweet() call.
    print "Topic=" + message.topic
    print "Message=" + message.payload
    cmd = message.payload
    if cmd == "see":
        polly, cam = camera.init()
        # Rebinds `message` from the MQTT message to the spoken text.
        message = camera.see(polly, cam)
        distMsg = scan()
        message = message + distMsg
        PollyApi.speak(polly, message)
    elif cmd == "tweet":
        polly, cam = camera.init()
        message = camera.see(polly, cam)
        distMsg = scan()
        message = message + distMsg
        PollyApi.speak(polly, message)
        camera.tweet(message)
    else:
        print "Wrong Command, Please Enter Again"
def capture2s():
    """Capture a single frame into webserver/frame1.jpg.

    NOTE(review): this looks like a timed capture loop that was disabled —
    the while/if lines are commented out, so exactly one frame is taken.
    """
    os.chdir('webserver')
    camera.init(1)
    begin = utime.ticks_ms()
    time = begin
    prev = time
    i=0
    machine.sleep(500)
    #while(time - begin < 2000):
    #if(time - prev > -1):
    prev = time
    i=i+1
    name = 'frame'+str(i)+'.jpg'
    print(name)
    # NOTE(review): 'w' is text mode but capture() returns bytes — works on
    # MicroPython, but 'wb' would be the correct mode; confirm before porting.
    f = open(name, 'w')
    buf = camera.capture()
    f.write(buf)
    f.close()
    #time = utime.ticks_ms()
    camera.deinit()
def start():
    """Try to bring the camera up, making at most 5 attempts with a 2 s
    pause after each failure. Returns the last camera.init() result."""
    ready = False
    for _attempt in range(5):
        ready = camera.init()
        print("Camera ready?: ", ready)
        if ready:
            break
        time.sleep(2)
    return ready
def main():
    """Main capture/display loop: show a grayscale preview until 'q' is
    pressed; 'c' triggers calibration.

    Fix vs. original: cv2.waitKey(1) was called twice per frame, so the
    'c' check examined a *different* key event than the 'q' check (and
    added an extra 1 ms wait). The key is now read once per frame.
    """
    cam.init()
    #dis.init(1920, 1080)
    while(True):
        # capture frame-by-frame
        img = cam.read()
        # our operations on the frame come here
        gray = vis.gray(img)
        # display the resulting frame
        dis.show(gray)
        # handle key input (read once per frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):  # quit
            break
        if key == ord('c'):  # calibrate
            calibrate()
    # clean up camera
    cam.destroy()
def index(req, resp):
    """picoweb handler: capture one JPEG frame and serve it as image/jpeg.

    Query param 'flash=true' turns the LED on before capture; responds 503
    when the capture buffer is empty.
    """
    # parse query string
    req.parse_qs()
    flash = req.form.get('flash', 'false')
    if flash == 'true':
        led.on()
    camera.init()
    # wait for sensor to start and focus before capturing image
    await asyncio.sleep(2)
    buf = camera.capture()
    led.off()
    camera.deinit()
    # NOTE(review): capture() can return False on failure, in which case
    # len(buf) would raise TypeError — confirm against the driver in use.
    if len(buf) > 0:
        yield from picoweb.start_response(resp, "image/jpeg")
        yield from resp.awrite(buf)
    else:
        picoweb.http_error(resp, 503)
def start():
    """Bring the camera up in JPEG mode, making at most 5 attempts with a
    2 s pause after each failure; set frame size 7 on success. Stores and
    returns the last camera.init() result in the global `cr`."""
    global cr
    for _attempt in range(5):
        cr = camera.init(0, format=camera.JPEG)
        print("Camera ready?: ", cr)
        if cr:
            camera.framesize(7)
            break
        time.sleep(2)
    return cr
def initCam(autoAdjust=True, retainCameraSettings=False, gain=-5, shutter=0.005):
    """Initialise the camera, configure exposure, and take a test capture.

    autoAdjust: If True, the function will attempt to automatically set the
        shutter and gain values by iteration. This will take a while.
    retainCameraSettings: If True, no camera settings will be changed. The
        Gain and Shutter values will be left as they are, but this function
        checks if the camera is saturated.
    gain, shutter: manual values applied only when autoAdjust is False and
        retainCameraSettings is False.

    Raises RuntimeWarning (as an exception) when the camera is saturated.
    """
    cam.printNumOfCam()
    cam.init()
    if autoAdjust:
        cam.autoAdjustShutter()
    elif retainCameraSettings == False:
        cam.setGain(gain)
        cam.setShutter(shutter)
    sat = cam.isSaturated()
    print("Camera saturation: ", sat)
    if sat:
        # NOTE(review): this *raises* a RuntimeWarning rather than issuing a
        # warnings.warn() — callers must catch it to continue.
        raise RuntimeWarning(
            "WARNING: Camera is saturated. Optimisation will not work properly."
        )
    cam.capture()
def main():
    """Capture frames in a loop and POST them (base64-encoded JSON) to a
    server; gives up and breaks out after 10 consecutive send failures.
    (Runtime messages are printed in Japanese.)
    """
    # Adjust the URL to match your environment.
    url = 'http://192.168.0.2:8080/save'
    errr_count = 0
    # Release any previous camera session before re-initialising.
    camera.deinit()
    try:
        print('カメラ初期化中...')
        camera.init()
        print('カメラ初期化完了')
        utime.sleep(1)
    except:
        # Init failed: report, wait 5 s, and reboot the board.
        print('カメラの初期化に失敗しました。5秒後に再起動します。')
        utime.sleep(5)
        machine.reset()
    while True:
        try:
            print('撮影中...')
            buf = camera.capture()
            print('撮影完了...')
            print('画像変換中...')
            img_byte = base64.b64encode(buf)
            utime.sleep(2)
            img_json = ujson.dumps({"image": img_byte})
            utime.sleep(2)
            print('サーバーへ送信中...')
            res = urequests.post(url, data=img_json)
            print(res.text)
            utime.sleep(2)
        except:
            # Any failure (capture, encode, network) counts toward the limit.
            errr_count += 1
            print(errr_count)
            if errr_count == 10:
                # machine.reset()
                break
def open():
    """
    Open the camera.

    Retries camera.init() up to 10 times (0.5 s apart); if all attempts
    fail, the for/else branch fires and the board is rebooted.
    NOTE(review): this module-level `open` shadows the builtin open().
    """
    if Camera.opened == False:
        for i in range(10):
            res = camera.init()
            if res == False:
                print("Camera not initialized")
                camera.deinit()
                time.sleep(0.5)
            else:
                break
        else:
            # for/else: runs only when the loop finished WITHOUT break,
            # i.e. all 10 init attempts failed.
            print("Failed to initialized camera reboot")
            machine.reset()
        # Photo on 800x600, motion detection / 8 (100x75), each square detection 8x8 (12.5 x 9.375)
        Camera.opened = True
def start():
    """Bring the camera up in JPEG mode, retrying up to 5 times (2 s apart);
    on success set VGA frame size and quality 10. Stores and returns the
    last init result in the global `cr`.

    Fixes vs. original: if the very first camera.init() raised, `cr` was
    never assigned and the following `if cr:` raised NameError; the bare
    `except:` is also narrowed to Exception.
    """
    global cr
    cr = False  # defined up front so a failed first init() cannot leave it unbound
    wc = 0
    while True:
        try:
            cr = camera.init(0, format=camera.JPEG)
        except Exception:
            # Best-effort retry loop: report and fall through to the retry logic.
            print('error initiating camera')
        print("Camera ready?: ", cr)
        if cr:
            camera.framesize(camera.FRAME_VGA)
            camera.quality(10)
            break
        time.sleep(2)
        wc += 1
        if wc >= 5:
            break
    return cr
@author: douglas """ #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Mar 25 19:47:48 2021 @author: douglas """ from time import sleep import camera camera.init(0, format=camera.JPEG, framesize=camera.FRAME_SVGA) camera.quality(40) def server(): import socket import utime loop_t = 100 header= 'HTTP/1.0 200 OK\n'\ 'Content-type: application/octet-stream\n' \ 'Cache-Control: no-cache\n' \ 'Connection: close\n\n\n' \ bund1 = "--boundarydncross\n" \
credentials_state, cred_ssid, cred_psw = wifi.get_credentials() print(tr_swap) gc.collect() file = open('/structure/index.html', 'r') chtml = file.read() chtml = chtml.replace('$tr_swap', tr_swap).replace('$cred_ssid', cred_ssid).replace( '$cred_psw', cred_psw) file.close() return chtml # setup networking wc = 0 while True: cr = camera.init(0, format=camera.JPEG) print("Camera ready?: ", cr) if cr: break time.sleep(2) wc += 1 if wc >= 5: break if not cr: print("Camera not ready. Can't continue!") else: file = open('/structure/logo.png', 'r') logo_img = file.read() file.close()
print(rq[0], rq[1], ca) except: ok = False else: cs.send(b'%s' % hdr['favicon']) # handle favicon request early clean_up(cs) ok = False yield if ok: # No soc.recv timeout or favicon request await ports[p](cs, rq) wc = 0 while True: cr = camera.init() print("Camera ready?: ", cr) if cr: break time.sleep(2) wc += 1 if wc >= 5: break if not cr: print("Camera not ready. Can't continue!") else: # reconfigure camera # camera.speffect(2) # black and white # camera.quality(10) # increase quality from 12 (default) to 10 # setup networking
def __init__(self, processFunction=None, forceOpenCv=False, displaySurface=None, show=True, **argd): self.__dict__.update(**argd) super(VideoCapturePlayer, self).__init__(**argd) logging.debug("Initializing Video Capture Class") self.processFunction = processFunction self.processRuns = 0 self.show = show self.display = displaySurface if self.display is None: if show is True: # create a display surface. standard pygame stuff self.display = pygame.display.set_mode(self.size, 0) else: pygame.display.init() #pygame.display.set_mode((0,0),0) self.display = pygame.surface.Surface(self.size) if forceOpenCv: import camera else: import pygame.camera as camera camera.init() # gets a list of available cameras. self.clist = camera.list_cameras() if not self.clist: raise ValueError("Sorry, no cameras detected.") logging.info("Opening device %s, with video size (%s,%s)" % (self.clist[0], self.size[0], self.size[1])) # creates the camera of the specified size and in RGB colorspace if not forceOpenCv: try: self.camera = camera.Camera(self.clist[0], self.size, "RGB") # starts the camera self.camera.start() except: logging.debug( "Ignoring that pygame camera failed - we will try opencv") forceOpenCv = True del camera import camera self.clist = camera.list_cameras() if forceOpenCv: logging.debug("Trying to open the OpenCV wrapped camera") self.camera = camera.Camera(self.clist[0], self.size, "RGB", imageType="pygame") self.camera.start() logging.info("Waiting for camera...") self.waitForCam() logging.info("Camera ready.") self.clock = pygame.time.Clock() self.processClock = pygame.time.Clock() # create a surface to capture to. for performance purposes, you want the # bit depth to be the same as that of the display surface. self.snapshot = pygame.surface.Surface(self.size, 0, self.display)
def main(): sound=True buzzdisp.init() buzzdisp.headline("Spider Buzz!") logo = buzzdisp.initLogo() players = [] wrong = [] host = 0 playerImages=[] scores=[] #initialize buzz controls devices=buzz.deviceInit() if sound: sons.init() sons.play("generique.mp3") buzz.lightRun(devices) buzz.lightRun(devices) buzzdisp.info("Maitre de jeu: Appuyez sur votre bouton rouge") while 1: click = buzz.getPress(devices) if click%5 == 0: ## buzz button was clicked host = click//5 buzz.light(devices,host,1) buzzdisp.info("Maitre de jeu: Appuyez sur votre bouton orange pour confirmer") if buzz.getPress(devices) == click+2: buzz.blink(devices,host) buzz.light(devices,host,1) break buzz.light(devices,host,0) buzzdisp.info("Maitre de jeu: Appuyez sur votre bouton rouge") mode=1 buzzdisp.headline("Inscriptions") webcam = camera.init() while len(players)<(len(devices)*4)-1: while 1: buzzdisp.info("Joueur " + str(len(players)+1) + ", mettez vous devant la camera et appuyez sur votre bouton rouge") buzzdisp.bigImage(logo) click = buzz.getPress(devices) if click == (host*5)+4: break if click % 5 == 0 and click != host*5: pNum = click // 5 if not pNum in players: if sound: sons.play("camera.aiff") img = buzzdisp.camImage(webcam) buzzdisp.info("Appuyez sur le bouton orange pour accepter ou le bouton vert pour continuer") while 1: confclick = buzz.getPress(devices) if confclick == pNum*5+2: playerImages.append(img) players.append(pNum) scores.append(0) buzz.light(devices,pNum,1) buzzdisp.addPlayer(img,len(players)) break if confclick == pNum*5+3: break if confclick == pNum*5+2: break if click == (host*5)+4: break buzz.lightRun(devices) buzzdisp.updateScores(playerImages, scores) buzzdisp.headline("Les inscriptions sont closes") buzzdisp.info("Maitre: Appuyez sur le bouton bleu pour demarrer le jeu") while 1: while buzz.getPress(devices) != host*5+1: test=1 if sound: sons.play("zero.mp3") buzzdisp.headline("Nouvelle question....") buzzdisp.info("") buzzdisp.bigImage(logo) buzz.lightRun(devices) 
clicker = 0 wrong = [] while 1: click = buzz.getPress(devices) if click%5 == 0 and click != host*5: #Player buzzed in clicker = click/5 #If Player has already answered wrong their Buzz is not accepted if not clicker in wrong and clicker in players: pIndex = players.index(clicker) if sound: sons.play("ringin.mp3") buzz.light(devices,clicker,1) buzzdisp.bigImage(playerImages[pIndex]) buzzdisp.headline("Joueur " + str(pIndex+1)) #Wait for right or wrong decision while 1: click = buzz.getPress(devices) if click == host*5+3: if sound: sons.play("wrong.mp3") buzzdisp.headline("MAUVAISE REPONSE") wrong.append(clicker) buzz.light(devices,clicker,0) break if click == host*5+2: if sound: sons.play("right.mp3") buzzdisp.headline("BONNE REPONSE!") scores[pIndex] = scores[pIndex] + 100 buzzdisp.updateScores(playerImages, scores) buzz.light(devices,clicker,0) break if click == host*5+1: break if click == host*5+1 or click == host*5+2: #Next Question button break while 1: for event in pygame.event.get(): if event.type == QUIT: buzz.release(devices) return pass
# Run the person detection model # This version reads the images from the ov2640 camera on the esp32-cam board # with minor changes this also works for the m5 timer camera import sys import microlite import camera from machine import Pin, PWM # initialize the camera to read 96x96 pixel gray scale images try: # uncomment for esp32-cam-mb with ov2640 sensor camera.init(0, format=camera.GRAYSCALE, framesize=camera.FRAME_96X96) # uncomment for the m5 timer camera with ov3660 sensor # camera.init(0,format=camera.GRAYSCALE,framesize=camera.FRAME_96X96, # sioc=23,siod=25,xclk=27,vsync=22,href=26,pclk=21, # d0=32,d1=35,d2=34,d3=5,d4=39,d5=18,d6=36,d7=19, # reset=15) except: print("Error when initializing the camera") sys.exit() # initialize the flash-light LED, it is connected to GPIO 4 flash_light = PWM(Pin(4)) # switch it off flash_light.duty(0) # change for m5 timer camera
def index(req, resp): # parse query string req.parse_qs() flash = req.form.get('flash', 'false') if flash == 'true': led.on() stream = req.form.get('stream', 'false') stream = True if stream == 'true' else False # Camera resilience - if we fail to init try to deinit and init again if app_config['camera'] == 'ESP32-CAM': if (not camera.init(0, format=camera.JPEG)): camera.deinit() await asyncio.sleep(1) # If we fail to init, return a 503 if (not camera.init(0, format=camera.JPEG)): yield from picoweb.start_response(resp, status=503) yield from resp.awrite('ERROR: Failed to initialise camera\r\n\r\n') return elif app_config['camera'] == 'M5CAMERA': if (not camera.init(0, d0=32, d1=35, d2=34, d3=5, d4=39, d5=18, d6=36, d7=19, href=26, vsync=25, reset=15, sioc=23, siod=22, xclk=27, pclk=21)): camera.deinit() await asyncio.sleep(1) # If we fail to init, return a 503 if (not camera.init(0, d0=32, d1=35, d2=34, d3=5, d4=39, d5=18, d6=36, d7=19, href=26, vsync=25, reset=15, sioc=23, siod=22, xclk=27, pclk=21)): #M5CAMERA yield from picoweb.start_response(resp, status=503) yield from resp.awrite('ERROR: Failed to initialise camera\r\n\r\n') return # wait for sensor to start and focus before capturing image await asyncio.sleep(2) n_frame = 0 while True: n_try = 0 buf = False while (n_try < 10 and buf == False): #{ # wait for sensor to start and focus before capturing image buf = camera.capture() if (buf == False): await asyncio.sleep(2) n_try = n_try + 1 if (not stream): led.off() camera.deinit() if (type(buf) is bytes and len(buf) > 0): try: if (not stream): yield from picoweb.start_response(resp, "image/jpeg") yield from resp.awrite(buf) print('JPEG: Output frame') break if (n_frame == 0): yield from picoweb.start_response(resp, "multipart/x-mixed-replace; boundary=myboundary") yield from resp.awrite('--myboundary\r\n') yield from resp.awrite('Content-Type: image/jpeg\r\n') yield from resp.awrite('Content-length: ' + str(len(buf)) + '\r\n\r\n') yield from resp.awrite(buf) 
except: # Connection gone? print('Connection closed by client') led.off() camera.deinit() return else: if (stream): led.off() camera.deinit() #picoweb.http_error(resp, 503) yield from picoweb.start_response(resp, status=503) if (stream and n_frame > 0): yield from resp.awrite('Content-Type: text/html; charset=utf-8\r\n\r\n') yield from resp.awrite('Issues:\r\n\r\n' + str(buf)) return print('MJPEG: Output frame ' + str(n_frame)) n_frame = n_frame + 1
import pygame, datetime, math from pygame.locals import * math.tau = 2 * math.pi import camera, settings, state, thing, grid, control pygame.init() camera.init() thing.init() pygame.display.set_caption("Prototype: pawn") clock = pygame.time.Clock() playing = True font = pygame.font.Font(None, 24) while playing: dt = 0.001 * clock.tick() mstate = { "pos": pygame.mouse.get_pos(), "ldown": False, "lup": False, "mdown": False, "mup": False, "rdown": False, "rup": False, "wheel": 0, } kstate = { "pressed": pygame.key.get_pressed(), "down": set(), } for event in pygame.event.get(): if event.type == QUIT:
import camera import movement import os import pygame os.environ['SDL_VIDEODRIVER'] = 'dummy' pygame.init() pygame.display.set_mode((1, 1)) condition = True st = 0.5 camera.init() #movement.car.init() print('HAJIMAE') while condition: for event in pygame.event.get(): #print(event) if event.type == pygame.KEYDOWN: print(event.key) if event.key == pygame.K_UP: print('UP') movement.forward(st) os.chdir('/home/pi/Desktop/New_beginning/w') camera.take_picture() elif event.key == pygame.K_DOWN: movement.reverse(st) print('DOWN') elif event.key == pygame.K_LEFT: os.chdir('/home/pi/Desktop/New_beginning/a') camera.take_picture() movement.left(st) print('LEFT')
def init():
    # Thin wrapper: delegate initialisation to the camera module.
    camera.init()
def init(): global marker_array_pub, marker_pub, tf_broadcaster, tf_listener global move_group, turntable global camera_mesh, camera_pos, camera_size, min_distance, max_distance, num_positions, num_spins, object_size, photobox_pos, photobox_size, reach, simulation, test, turntable_pos, working_dir rospy.init_node('acquisition') camera_mesh = rospy.get_param('~camera_mesh', None) camera_orientation = rospy.get_param('~camera_orientation', None) camera_pos = rospy.get_param('~camera_pos', [0.0, 0.0, 0.0]) camera_size = rospy.get_param('~camera_size', [0.1, 0.1, 0.1]) min_distance = rospy.get_param('~min_distance', 0.2) max_distance = rospy.get_param('~max_distance', min_distance) max_velocity = rospy.get_param('~max_velocity', 0.1) num_positions = rospy.get_param('~num_positions', 15) num_spins = rospy.get_param('~num_spins', 8) object_size = numpy.array(rospy.get_param('~object_size', [0.2, 0.2, 0.2])) photobox_pos = rospy.get_param('~photobox_pos', [0.0, -0.6, 0.0]) photobox_size = rospy.get_param('~photobox_size', [0.7, 0.7, 1.0]) ptpip = rospy.get_param('~ptpip', None) reach = rospy.get_param('~reach', 0.85) simulation = '/gazebo' in get_node_names() test = rospy.get_param('~test', True) turntable_pos = rospy.get_param( '~turntable_pos', photobox_pos[:2] + [photobox_pos[2] + 0.05]) turntable_radius = rospy.get_param('~turntable_radius', 0.2) wall_thickness = rospy.get_param('~wall_thickness', 0.04) marker_array_pub = rospy.Publisher('visualization_marker_array', MarkerArray, queue_size=1, latch=True) marker_pub = rospy.Publisher('visualization_marker', Marker, queue_size=1, latch=True) tf_broadcaster = tf.TransformBroadcaster() tf_listener = tf.TransformListener() move_group = MoveGroupCommander('manipulator') move_group.set_max_acceleration_scaling_factor( 1.0 if simulation else max_velocity) move_group.set_max_velocity_scaling_factor( 1.0 if simulation else max_velocity) robot = RobotCommander() scene = PlanningSceneInterface(synchronous=True) try: st_control = 
load_source( 'st_control', os.path.join(RosPack().get_path('iai_scanning_table'), 'scripts', 'iai_scanning_table', 'st_control.py')) turntable = st_control.ElmoUdp() if turntable.check_device(): turntable.configure() turntable.reset_encoder() turntable.start_controller() turntable.set_speed_deg(30) except Exception as e: print(e) sys.exit('Could not connect to turntable.') if simulation: move_home() elif not test: working_dir = rospy.get_param('~working_dir', None) if working_dir is None: sys.exit('Working directory not specified.') elif not camera.init( os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'out', working_dir, 'images'), ptpip): sys.exit('Could not initialize camera.') # add ground plane ps = PoseStamped() ps.header.frame_id = robot.get_planning_frame() scene.add_plane('ground', ps) # add photobox ps.pose.position.x = photobox_pos[ 0] + photobox_size[0] / 2 + wall_thickness / 2 ps.pose.position.y = photobox_pos[1] ps.pose.position.z = photobox_pos[2] + photobox_size[2] / 2 scene.add_box('box_wall_left', ps, (wall_thickness, photobox_size[1], photobox_size[2])) ps.pose.position.x = photobox_pos[ 0] - photobox_size[0] / 2 - wall_thickness / 2 ps.pose.position.y = photobox_pos[1] ps.pose.position.z = photobox_pos[2] + photobox_size[2] / 2 scene.add_box('box_wall_right', ps, (wall_thickness, photobox_size[1], photobox_size[2])) ps.pose.position.x = photobox_pos[0] ps.pose.position.y = photobox_pos[ 1] - photobox_size[1] / 2 - wall_thickness / 2 ps.pose.position.z = photobox_pos[2] + photobox_size[2] / 2 scene.add_box('box_wall_back', ps, (photobox_size[0], wall_thickness, photobox_size[2])) ps.pose.position.x = photobox_pos[0] ps.pose.position.y = photobox_pos[1] ps.pose.position.z = photobox_pos[2] - wall_thickness / 2 scene.add_box('box_ground', ps, (photobox_size[0], photobox_size[1], wall_thickness)) # add turntable turntable_height = turntable_pos[2] - photobox_pos[2] ps.pose.position.x = turntable_pos[0] ps.pose.position.y = 
turntable_pos[1] ps.pose.position.z = photobox_pos[2] + turntable_height / 2 scene.add_cylinder('turntable', ps, turntable_height, turntable_radius) # add object on turntable ps.pose.position.x = turntable_pos[0] ps.pose.position.y = turntable_pos[1] ps.pose.position.z = turntable_pos[2] + object_size[2] / 2 scene.add_cylinder('object', ps, object_size[2], max(object_size[:2]) / 2) # add cable mounts scene.remove_attached_object('upper_arm_link', 'upper_arm_cable_mount') scene.remove_attached_object('forearm_link', 'forearm_cable_mount') scene.remove_world_object('upper_arm_cable_mount') scene.remove_world_object('forearm_cable_mount') if ptpip is None and not simulation: size = [0.08, 0.08, 0.08] ps.header.frame_id = 'upper_arm_link' ps.pose.position.x = -0.13 ps.pose.position.y = -0.095 ps.pose.position.z = 0.135 scene.attach_box(ps.header.frame_id, 'upper_arm_cable_mount', ps, size) ps.header.frame_id = 'forearm_link' ps.pose.position.x = -0.275 ps.pose.position.y = -0.08 ps.pose.position.z = 0.02 scene.attach_box(ps.header.frame_id, 'forearm_cable_mount', ps, size) # add camera eef_link = move_group.get_end_effector_link() ps = PoseStamped() ps.header.frame_id = eef_link scene.remove_attached_object(eef_link, 'camera_0') scene.remove_attached_object(eef_link, 'camera_1') scene.remove_world_object('camera_0') scene.remove_world_object('camera_1') ps.pose.position.y = camera_pos[1] ps.pose.position.z = camera_pos[2] if camera_mesh: ps.pose.position.x = camera_pos[0] quaternion = tf.transformations.quaternion_from_euler( math.radians(camera_orientation[0]), math.radians(camera_orientation[1]), math.radians(camera_orientation[2])) ps.pose.orientation.x = quaternion[0] ps.pose.orientation.y = quaternion[1] ps.pose.orientation.z = quaternion[2] ps.pose.orientation.w = quaternion[3] scene.attach_mesh(eef_link, 'camera_0', ps, os.path.expanduser(camera_mesh), camera_size) if not simulation: scene.attach_mesh(eef_link, 'camera_1', ps, os.path.expanduser(camera_mesh), 
numpy.array(camera_size) * 1.5) vertices = scene.get_attached_objects( ['camera_0'])['camera_0'].object.meshes[0].vertices camera_size[0] = max(vertice.x for vertice in vertices) - min( vertice.x for vertice in vertices) camera_size[1] = max(vertice.y for vertice in vertices) - min( vertice.y for vertice in vertices) camera_size[2] = max(vertice.z for vertice in vertices) - min( vertice.z for vertice in vertices) else: ps.pose.position.x = camera_pos[0] + camera_size[0] / 2 scene.attach_box(eef_link, 'camera_0', ps, camera_size)
joueur.init() #On initialise le joueur... monstres.init()#...les monstres... boulesdefeu.init()#... et les boules de feu. bonus.init() niveaux = ["tutoriel", "niveau1", "niveau2", "niveau3", "fin du jeu"]#Liste des niveaux niveauActuel = 0 #Niveau dans lequel le joueur se trouve actuellement interface.init() #Initialisation de l'interface (compteurs de bonus) carte.init(niveaux[niveauActuel],True) #On charge le terrain du jeu depuis un fichier cache.init(largeur,hauteur,niveaux[niveauActuel]) #On initialise la camera, la centrant sur le joueur et lui mettant les dimensions de la fenêtre: camera.init(joueur.x,joueur.y,largeur,hauteur,carte.w*32,carte.h*32) #On initialise l' "afficheur", qui permettra le rendu des textures à l'écran afficheur.init(0,0) afficheur.mettreWindow(window) afficheur.update(camera.x, camera.y) musiquelancee = False end = False def reinitialiserNiveau():#Permet de réinitialiser tout sauf l'état de la carte joueur.init() monstres.init() boulesdefeu.init() bonus.init() interface.init()