def play():
    if loop:
        while True:
            if self.gameIsRunning:
                s = SoundPlayer(soundpath, self)
                s.play()
    else:
        s = SoundPlayer(soundpath, self)
        s.play()
def setAudioData(self, data, skip=0.0):
    if self.audio is not None:
        self.audio.terminate()
        self.audio = None
    self.audio_data = data
    self.audio_skip = float(skip)
    self.audio = SoundPlayer(input_data=data,
                             tmp_dir=jarvis.get_home(),
                             start=False,
                             loop_nb=0,
                             frames_per_buffer=1024,
                             blocking=False)
    if self.audio is not None:
        self.audio.set_loop_time(start_time=self.audio_skip,
                                 end_time=self.audio_skip + self.loopTime)
        self.audio.set_time(self.audio_skip + self.current_time,
                            compensate_buffer=False)
def setAudioData(self, data, skip=0.0):
    if self.audio is not None:
        self.audio.terminate()
        self.audio = None
    self.audio_data = data
    self.audio_skip = float(skip)
    self.audio = SoundPlayer(input_file_name=data,
                             tmp_dir=jarvis.get_home(),
                             start=False,
                             loop_nb=0,
                             frames_per_buffer=1024,
                             blocking=False)
    self.audio.set_loop_time(start_time=self.audio_skip,
                             end_time=self.audio_skip + self.loopTime)
    self.audio.set_time(self.audio_skip + self.current_time,
                        compensate_buffer=False)
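# A minimal sketch contrasting the two setAudioData variants above, assuming the same
# SoundPlayer wrapper (keyword arguments taken from the snippets; the path, tmp_dir and
# times are illustrative only): the first variant passes decoded audio via input_data,
# the second a file path via input_file_name; the loop window and seek handling are identical.
player = SoundPlayer(input_file_name="/tmp/example.wav",  # hypothetical path
                     tmp_dir="/tmp", start=False, loop_nb=0,
                     frames_per_buffer=1024, blocking=False)
player.set_loop_time(start_time=0.0, end_time=10.0)
player.set_time(0.0, compensate_buffer=False)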
def run(self):
    url = gral_url + "records"
    continue_reading = True
    # Create an object of the class MFRC522
    MIFAREReader = MFRC522.MFRC522()
    # Welcome message
    # This loop keeps checking for chips. If one is near it will get the UID and authenticate.
    while continue_reading:
        # Scan for cards
        (status, TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)
        # If a card is found
        if status == MIFAREReader.MI_OK:
            print("Card detected")
            # Get the UID of the card
            (status, uid) = MIFAREReader.MFRC522_Anticoll()
            # If we have the UID, continue
            if status == MIFAREReader.MI_OK:
                rfid = ''.join([
                    str(hex(i))[2:] if i > 16 else '0' + str(hex(i))[2:]
                    for i in uid
                ])[:-2]
                rfidreverse = self.rotate(rfid)
                rfid = rfid.upper()
                rfidreverse = rfidreverse.upper()
                print(rfid, rfidreverse)
                p = SoundPlayer("/home/pi/guiPythonLABFAB/sounds/BeepIn.mp3", 0)
                p.play(1)
                time.sleep(0.001)
                #try:
                req = requests.post(url, {'rfid': rfid, 'lab_id': lab_id},
                                    headers=credentials.totem_credential).json()
                if not req:
                    req = requests.post(url, {'rfid': rfidreverse, 'lab_id': lab_id},
                                        headers=credentials.totem_credential).json()
                    if not req:
                        req = rfid
                        self.sig2.emit(req)
                    else:
                        self.sig1.emit(req)
                else:
                    print('resource not found:', req)
                #except:
                #    req = 'No Internet Connection'
                #    self.sig2.emit(req)
                time.sleep(5)
    GPIO.cleanup()
# Sound1c.py
import time
from soundplayer import SoundPlayer

# Use device with ID 1 (mostly USB audio adapter)
#p = SoundPlayer("/home/pi/Music/jew1.mp3", 1)
p = SoundPlayer("/home/pi/Music/jew1.wav", 1)
print("play whole song")
p.play(1, True)  # blocking, volume = 1
print("done")
# Sound1a.py
import time
from soundplayer import SoundPlayer

# Use device with ID 1 (mostly USB audio adapter)
p = SoundPlayer("/home/pi/sound/salza1.wav", 1)
print("play for 10 s with volume 0.5")
p.play(0.5)  # non-blocking, volume = 0.5
time.sleep(10)
print("pause for 5 s")
p.pause()
time.sleep(5)
print("resume for 10 s")
p.resume()
time.sleep(10)
print("stop")
p.stop()
print("done")
async def read_gamepad_inputs():
    global head_light_flag
    print("Ready to drive!!")
    turn_sound = SoundPlayer("/home/pi/xbox-raspberrypi-rover/soundfiles/turn-signal.mp3", card)
    horn_sound = SoundPlayer("/home/pi/xbox-raspberrypi-rover/soundfiles/Horn.mp3", card)
    while not is_connected():
        time.sleep(2)  # Wait 2 seconds for controller to come up and try again
    while is_connected() and remote_control.button_b == False:
        #print(" trigger_right = ", round(remote_control.trigger_right, 2), end="\r")
        x = round(remote_control.joystick_left_x, 2)
        y = round(remote_control.joystick_left_y, 2)
        angle = get_angle_from_coords(x, y)
        if angle > 180:
            angle = 360 - angle
        #print("x:", x, " y:", y, " angle: ", angle, end="\r")
        turn_head(angle)
        direction = get_motor_direction(x, y)
        y = adjust_speed(y, angle)
        #print("x:", x, " y:", y, " direction: ", direction, end="\r")
        drive_motor(direction, y)
        if round(remote_control.trigger_right, 2) > 0.0:
            horn_sound.play(1.0)
            led.blue()
        elif round(remote_control.trigger_left, 2) > 0.0:
            led.cyan()
        elif remote_control.bump_left:
            turn_sound.play(1.0)
            led.turn_left(5)
        elif remote_control.bump_right:
            turn_sound.play(1.0)
            led.turn_right(5)
        elif remote_control.dpad_up:
            remote_control.dpad_up = False
        elif remote_control.dpad_left:
            remote_control.dpad_left = False
        elif remote_control.dpad_right:
            remote_control.dpad_right = False
        elif remote_control.button_a:
            remote_control.button_a = False
        elif head_light_flag == False:
            led.both_off()
            led_strip.colorWipe(strip, Color(0, 0, 0))
        if turn_sound.isPlaying():
            turn_sound.stop()
        await asyncio.sleep(100e-3)  # 100 ms
    return
GPIO.setup(P_TON1, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(P_TON2, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(P_TON3, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(P_TON4, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(P_TON5, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(P_TON6, GPIO.IN, GPIO.PUD_UP)

DoSound.playTone(440, 0.3, dev)
DoSound.playTone(550, 0.3, dev)
DoSound.playTone(660, 0.3, dev)
time.sleep(1)
DoSound.playTone([440, 550, 660], 3, dev)

setup()
print("Bereit...")
p = SoundPlayer("/home/pi/maja.mp3", 1)
sound1 = SoundPlayer("/home/pi/maja.mp3", 1)
sound2 = SoundPlayer("/home/pi/maja.mp3", 1)
sound3 = SoundPlayer("/home/pi/maja.mp3", 1)
sound4 = SoundPlayer("/home/pi/maja.mp3", 1)
sound5 = SoundPlayer("/home/pi/maja.mp3", 1)
sound6 = SoundPlayer("/home/pi/maja.mp3", 1)

while True:
    if GPIO.input(P_TON1) == GPIO.LOW:
        if not button1_pressed:
            print("Ton1...")
            sound1.play()
        else:
            sound1.stop()
        button1_pressed = True
# Sound1a.py
import time
from soundplayer import SoundPlayer

# Use device with ID 0 (mostly standard audio output)
#p = SoundPlayer("//home/pi/Desktop/Sonido/Beep1.mp3", 0)
#p = SoundPlayer("//home/pi/Desktop/Sonido/Beep2.mp3", 0)
#p = SoundPlayer("//home/pi/Desktop/Sonido/Alarm.mp3", 0)
p = SoundPlayer("//home/pi/Desktop/Sonido/John Cena - My Time Is Now.mp3", 0)
print("play for 10 s with volume 0.5")
p.play(0.5)  # non-blocking, volume = 0.5 (0 to 1)
##print("isPlaying:", p.isPlaying())
time.sleep(1)
##print("pause for 5 s")
##p.pause()
##print("isPlaying:", p.isPlaying())
##time.sleep(5)
##print("resume for 10 s")
##p.resume()
##time.sleep(10)
##print("stop")
##p.stop()
##print("isPlaying:", p.isPlaying())
##print("done")
def setup():
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(P_STOP, GPIO.IN, GPIO.PUD_UP)
    GPIO.setup(P_PAUSE, GPIO.IN, GPIO.PUD_UP)
    GPIO.setup(P_PLAY, GPIO.IN, GPIO.PUD_UP)
    GPIO.setup(P_SELECT, GPIO.IN, GPIO.PUD_UP)

setup()
nbSongs = 4
songID = 0
state = "STOPPED"
print("ready->stopped")
p = SoundPlayer("/home/pi/songs/song" + str(songID) + ".mp3", 1)
while True:
    if GPIO.input(P_PAUSE) == GPIO.LOW and state == "PLAYING":
        state = "PAUSED"
        p.pause()
        print("playing->paused")
    elif GPIO.input(P_PLAY) == GPIO.LOW and state == "STOPPED":
        state = "PLAYING"
        p.play()
        print("stopped->playing, song ID", songID)
    elif GPIO.input(P_PLAY) == GPIO.LOW and state == "PAUSED":
        state = "PLAYING"
        p.resume()
        print("paused->playing")
    elif GPIO.input(P_STOP) == GPIO.LOW and (state == "PAUSED"
]

# enter file name
name = raw_input('Enter name: ')
gender = raw_input('Enter Gender: ')
print(gender + ', name: ' + name)

# run down
while len(freqList) > 0:
    freqListLength = len(freqList)
    newTestFreq = random.randint(0, freqListLength - 1)
    testFreq = freqList[newTestFreq]
    freqList.pop(newTestFreq)
    # play Freq
    SoundPlayer.playTone(testFreq, 1, True, 0)
    # turn led on
    GPIO.output(27, GPIO.HIGH)
    while True:
        input_state1 = GPIO.input(23)
        if input_state1 == False:
            print('Yes ' + str(testFreq))
            x = 'yes'
            #time.sleep(0.4)
            break
        input_state2 = GPIO.input(17)
        if input_state2 == False:
            print('No ' + str(testFreq))
            x = 'no'
            #time.sleep(0.4)
            break
# Sound1a.py
import time
from soundplayer import SoundPlayer

# Use device with ID 1 (mostly USB audio adapter)
p = SoundPlayer("/home/pi/Music/song0.mp3", 1)
print("play for 10 s with volume 0.5")
p.play(0.5)  # non-blocking, volume = 0.5
print("isPlaying:", p.isPlaying())
time.sleep(10)
print("pause for 5 s")
p.pause()
print("isPlaying:", p.isPlaying())
time.sleep(5)
print("resume for 10 s")
p.resume()
time.sleep(10)
print("stop")
p.stop()
print("isPlaying:", p.isPlaying())
print("done")
import time
from soundplayer import SoundPlayer

# Use device with ID 1 (mostly USB audio adapter)
p = SoundPlayer("clappy.wav", 1)
print("play for 10 s with volume 0.5")
p.play(0.5)  # non-blocking, volume = 0.5
print("isPlaying:", p.isPlaying())
time.sleep(10)
print("pause for 5 s")
p.pause()
print("isPlaying:", p.isPlaying())
time.sleep(5)
print("resume for 10 s")
p.resume()
time.sleep(10)
print("stop")
p.stop()
print("isPlaying:", p.isPlaying())
print("done")
def playMusic(file):
    from soundplayer import SoundPlayer
    p = SoundPlayer(file, 1)
    print("playing " + file)
    p.play()  # non-blocking, default volume
    print("done")
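# Hypothetical usage of the playMusic helper above; the path is only an illustration,
# borrowed from the other snippets in this collection:
playMusic("/home/pi/Music/song0.mp3")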
GPIO.setup(P_POWSWITCH, GPIO.IN, GPIO.PUD_UP)  # read the power switch, on == LOW
GPIO.setwarnings(False)
GPIO.setup(P_POWER, GPIO.OUT)
GPIO.output(P_POWER, True)  # keep the power supply switched on

DoSound.playTone(440, 0.3, dev)
DoSound.playTone(550, 0.3, dev)
DoSound.playTone(660, 0.3, dev)
time.sleep(1)
DoSound.playTone([440, 550, 660], 3, dev)
time.sleep(2)

setup()
print("Bereit...")
p = SoundPlayer("/home/pi/mp3/Nanue.mp3", 1)
p.play(1)
while True:
    if GPIO.input(P_TON1) == GPIO.LOW:
        print("Ton1...")
        p.stop()
        DoSound.playTone(440, 0.3, dev)
    elif GPIO.input(P_TON2) == GPIO.LOW:
        print("Ton2...")
        p.stop()
        DoSound.playTone(550, 0.3, dev)
        p.play()
    elif GPIO.input(P_TON3) == GPIO.LOW:
        print("Ton3...")
        p.stop()
# Sound2a.py
from soundplayer import SoundPlayer
import time

# Sine tone during 0.1 s, blocking, device 0
dev = 0
SoundPlayer.playTone(900, 0.1, True, dev)  # 900 Hz
SoundPlayer.playTone(800, 0.1, True, dev)  # 800 Hz
SoundPlayer.playTone(600, 0.1, True, dev)  # 600 Hz
time.sleep(1)
SoundPlayer.playTone([900, 800, 600], 5, True, dev)  # 3 tones together
print("done")
# Sound2b.py
from soundplayer import SoundPlayer
import time

# Sine of 1000 Hz during 5 s, non-blocking, device 1
SoundPlayer.playTone(1000, 5, False, 1)
n = 0
while SoundPlayer.isPlaying():
    print("playing #", n)
    time.sleep(1)
    n += 1
print("done")
# Sound2b.py
from soundplayer import SoundPlayer

# Sine tone during 0.1 s, blocking, device 0
dev = 0
SoundPlayer.playTone(900, 0.1, True, dev)  # 900 Hz
SoundPlayer.playTone(800, 0.1, True, dev)  # 800 Hz
SoundPlayer.playTone(600, 0.1, True, dev)  # 600 Hz
print("done")
print("angle:", angle, " adjusted speed:", speed, "\r") return speed if __name__ == "__main__": loop = asyncio.get_event_loop() card = 1 #(default) strip = None signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT) for s in signals: loop.add_signal_handler( s, lambda s=s: asyncio.create_task(shutdown_signal(s, loop))) try: card = get_usb_sound_card() reverse_sound = SoundPlayer( "/home/pi/xbox-raspberrypi-rover/soundfiles/CensorBeep.mp3", card) init_sound = SoundPlayer( "/home/pi/xbox-raspberrypi-rover/soundfiles/Bleep.mp3", card) disconnect_sound = SoundPlayer( "/home/pi/xbox-raspberrypi-rover/soundfiles/Disconnected.mp3", card) siren_sound = SoundPlayer( "/home/pi/xbox-raspberrypi-rover/soundfiles/siren.mp3", card) setup() waiting_for_connect = True while waiting_for_connect: remote_control = connect() if (remote_control != None): waiting_for_connect = False
class PyQtOSGWidget(QtOpenGL.QGLWidget):
    def __init__(self, parent=0, name='', flags=0):
        """constructor"""
        QtOpenGL.QGLWidget.__init__(self, parent)
        self.parent = parent
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.timer = QtCore.QTimer()
        self.timer.setInterval(1000.0 / config.FPS_RENDERING)  # in milliseconds
        self.camera = None
        self.startTime = 0.0
        self.loopTime = 10.0
        self.is_paused = False
        self.still_frame = True
        self.current_time = 0
        self.audio = None
        self.viewer = None
        self.orginal_data = None
        self.aspect_ratio = None
        self.fps_calculator = FPSCalculator(start_time=self.startTime, smoothness=30)
        self.size = None

    def initializeGL(self):
        """initializeGL the context and create the osgViewer, also set manipulator and event handler"""
        self.gw = self.createContext()
        self.viewer = None
        QtCore.QObject.connect(self.timer, QtCore.SIGNAL("timeout ()"), self.updateGL)

    def setLoopTime(self, loopTime):
        self.loopTime = loopTime
        if self.audio is not None:
            self.audio.set_loop_time(start_time=self.audio_skip,
                                     end_time=self.audio_skip + self.loopTime)

    def embedInContext(self):
        """create an osg.GraphicsWindow for a Qt.QWidget window"""
        gw = osgViewer.GraphicsWindowEmbedded(0, 0, self.width(), self.height())
        return gw

    def createContext(self):
        """create an OpenGL context (currently WindowData classes are not wrapped
        so we can not inherit the windowdata)"""
        ds = osg.DisplaySettings_instance()
        if False:
            traits = osg.GraphicsContext.Traits()
            print(traits)
            traits.readDISPLAY()
            if (traits.displayNum < 0):
                traits.displayNum = 0
            traits.windowName = "osgViewerPyQt"
            traits.screenNum = 0
            traits.x = self.x()
            traits.y = self.y()
            traits.width = self.width()
            traits.height = self.height()
            traits.alpha = 8  # ds.getMinimumNumAlphaBits()
            traits.stencil = 8  # ds.getMinimumNumStencilBits()
            traits.windowDecoration = False
            traits.doubleBuffer = True
            traits.sampleBuffers = 4  # ds.getMultiSamples()
            traits.samples = 4  # ds.getNumMultiSamples()
        gw = osgViewer.GraphicsWindowEmbedded()
        return gw

    def createViewer(self):
        """create an osgViewer.Viewer and set the viewport, camera and
        previously created graphical context"""
        global viewerFactory
        viewer = viewerFactory()
        # init the default eventhandler
        # self.viewer.setCameraManipulator(osgGA.TrackballManipulator())
        viewer.addEventHandler(osgViewer.StatsHandler())
        viewer.addEventHandler(osgViewer.HelpHandler())
        viewer.getUpdateVisitor().setTraversalMask(UPDATE_MASK)
        self.resetCamera(viewer)
        return viewer

    def resetCamera(self, viewer):
        camera = viewer.getCamera()
        camera.setComputeNearFarMode(False)
        # camera = osg.Camera()
        camera.setViewport(osg.Viewport(0, 0, self.width(), self.height()))
        # camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
        CAMERA_ANGLE = 45.0
        CAMERA_Z_TRANSLATE = 2.4142135623730949  # 1.0 / math.tan(math.radians(CAMERA_ANGLE / 2.0))
        cameraPosition = [0.0, 0.0, CAMERA_Z_TRANSLATE]
        camera.setProjectionMatrixAsPerspective(
            CAMERA_ANGLE, float(self.width()) / float(self.height()), 0.1, 100.0)
        eye = osg.Vec3d(cameraPosition[0], cameraPosition[1], cameraPosition[2])
        center = osg.Vec3d(0, 0, 0)
        up = osg.Vec3d(0, 1, 0)
        camera.setViewMatrixAsLookAt(eye, center, up)
        camera.getOrCreateStateSet().setAttributeAndModes(
            osg.BlendFunc(GL.GL_ONE, GL.GL_ONE_MINUS_SRC_ALPHA))
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_TEST, False)
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_WRITEMASK, False)
        camera.getOrCreateStateSet().setMode(GL.GL_LIGHTING, False)
        material = osg.Material()
        color = osg.Vec4(1.0, 1.0, 1.0, 1.0)
        material.setDiffuse(osg.Material.FRONT_AND_BACK, color)
        material.setAmbient(osg.Material.FRONT_AND_BACK, color)
        camera.getOrCreateStateSet().setAttributeAndModes(material)
        camera.setClearColor(osg.Vec4(0, 0, 0, 0))
        camera.setClearMask(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        camera.setCullMask(VISIBLE_CULL_MASK)
        if not self.gw:
            raise Exception("GraphicsWindow not yet created")
        self.camera = camera
        # viewer.getCamera().setViewport(osg.Viewport(0,0, self.width(), self.height()))
        # viewer.getCamera().addChild(camera)
        camera.setGraphicsContext(self.gw)

    def texture_build(self):
        texture = osg.Texture2D()
        texture.setTextureSize(self.width(), self.height())
        texture.setInternalFormat(GL.GL_RGBA)
        texture.setResizeNonPowerOfTwoHint(False)
        # bug detected here, if I enable mipmap osg seems to use the view buffer to
        # do something. If I disable the mipmap it works.
        # you can view the issue with test_09_gaussian_filter.py
        #texture.setFilter(osg.Texture.MIN_FILTER, osg.Texture.LINEAR_MIPMAP_LINEAR)
        texture.setFilter(osg.Texture.MIN_FILTER, osg.Texture.LINEAR)
        texture.setFilter(osg.Texture.MAG_FILTER, osg.Texture.LINEAR)
        return texture

    def camera_build(self):
        texture = self.texture_build()
        camera = osg.Camera()
        camera.setViewport(osg.Viewport(0, 0, self.width(), self.height()))
        camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
        camera.setRenderOrder(osg.Camera.PRE_RENDER)
        camera.setRenderTargetImplementation(osg.Camera.FRAME_BUFFER_OBJECT)
        camera.attach(osg.Camera.COLOR_BUFFER, texture, 0, 0, False, 0, 0)
        CAMERA_ANGLE = 45.0
        CAMERA_Z_TRANSLATE = 2.4142135623730949  # 1.0 / math.tan(math.radians(CAMERA_ANGLE / 2.0))
        cameraPosition = [0.0, 0.0, CAMERA_Z_TRANSLATE]
        camera.setProjectionMatrixAsPerspective(
            CAMERA_ANGLE, float(self.width()) / float(self.height()), 0.1, 10000.0)
        eye = osg.Vec3d(cameraPosition[0], cameraPosition[1], cameraPosition[2])
        center = osg.Vec3d(0, 0, 0)
        up = osg.Vec3d(0, 1, 0)
        camera.setViewMatrixAsLookAt(eye, center, up)
        camera.getOrCreateStateSet().setAttributeAndModes(
            osg.BlendFunc(GL.GL_ONE, GL.GL_ONE_MINUS_SRC_ALPHA))
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_TEST, False)
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_WRITEMASK, False)
        camera.getOrCreateStateSet().setMode(GL.GL_LIGHTING, False)
        material = osg.Material()
        color = osg.Vec4(1.0, 1.0, 1.0, 1.0)
        material.setDiffuse(osg.Material.FRONT_AND_BACK, color)
        material.setAmbient(osg.Material.FRONT_AND_BACK, color)
        camera.getOrCreateStateSet().setAttributeAndModes(material)
        camera.setClearColor(osg.Vec4(0, 0, 0, 0))
        camera.setClearMask(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        return camera, texture

    def quad_create(self, texture):
        stateset = osg.StateSet()
        stateset.setTextureAttributeAndModes(0, texture)
        corner = osg.Vec3(-self.aspect_ratio, -1.0, 0)
        width = osg.Vec3(2 * self.aspect_ratio, 0, 0)
        height = osg.Vec3(0, 2 * 1.0, 0)
        geom = osg.createTexturedQuadGeometry(corner, width, height, 0.0, 0.0, 1.0, 1.0)
        geom.setStateSet(stateset)
        geode = osg.Geode()
        geode.addDrawable(geom)
        return geode

    def build_wrapping_node(self, data):
        grp = osg.Group()
        camera, texture = self.camera_build()
        grp.addChild(camera)
        camera.addChild(data)
        quad = self.quad_create(texture)
        grp.addChild(quad)
        grp.getOrCreateStateSet().setMode(GL.GL_LIGHTING, False)
        # ALPHA_TEST should be disabled by default by OpenGL but it's not.
        # That's why it is required to explicitly disable it.
        grp.getOrCreateStateSet().setMode(GL.GL_ALPHA_TEST, osg.StateAttribute.OFF)
        return grp

    def resetSceneData(self, data):
        self.timer.stop()
        if self.viewer is not None:
            self.viewer.setSceneData(None)
        if self.audio is not None:
            self.audio.terminate()
            self.audio = None

    def setSceneData(self, data):
        if data is not None:
            if self.viewer == None:
                self.viewer = self.createViewer()
            self.orginal_data = data
            data = self.build_wrapping_node(data)
            self.viewer.setSceneData(data)
            try:
                # when working with the conservative viewer,
                # to enable prefetch and release of image streams
                self.viewer.optimizeTimeRanges()
            except:
                pass
            # ready to render
            self.fps_calculator.reset(self.startTime)
            self.still_frame = False
            self.timer.start()

    def setAudioData(self, data, skip=0.0):
        if self.audio is not None:
            self.audio.terminate()
            self.audio = None
        self.audio_data = data
        self.audio_skip = float(skip)
        self.audio = SoundPlayer(input_data=data,
                                 tmp_dir=jarvis.get_home(),
                                 start=False,
                                 loop_nb=0,
                                 frames_per_buffer=1024,
                                 blocking=False)
        if self.audio is not None:
            self.audio.set_loop_time(start_time=self.audio_skip,
                                     end_time=self.audio_skip + self.loopTime)
            self.audio.set_time(self.audio_skip + self.current_time,
                                compensate_buffer=False)

    def getosgviewer(self):
        return self.viewer

    def size():
        def fget(self):
            return self._size
        def fset(self, value):
            if value is None:
                self._size = None
            else:
                width, height = value
                width = int(round(width))
                height = int(round(height))
                self._size = (width, height)
                self.setMinimumSize(width, height)
        return locals()
    size = property(**size())

    def resizeGL(self, w, h):
        self.aspect_ratio = w / float(h)
        self.parent.toolbar.aspect_ratio_btn_update()
        if self.viewer is not None:
            self.gw.resized(0, 0, w, h)
            self.setSceneData(self.orginal_data)
            self.resetCamera(self.viewer)

    def paintGL(self):
        if self.viewer == None:
            return
        frame_time = time.time()
        if self.audio is not None:
            if self.is_paused or self.still_frame:
                self.audio.pause()
            else:
                self.audio.play(blocking=False)
            audio_time = self.audio.get_time() - self.audio_skip
            # self.current_time = self.align_time(audio_time)
            self.current_time = audio_time
        else:
            if self.is_paused or self.still_frame:
                self.startTime = frame_time - self.current_time
            else:
                self.current_time = frame_time - self.startTime
            if self.current_time >= self.loopTime:
                self.startTime = frame_time
                self.current_time = 0.0
        fps = self.fps_calculator.get(frame_time)
        self.parent.toolbar.update_time_info(self.current_time, self.loopTime, fps)
        self.viewer.frameAtTime(self.align_time(self.current_time))

    def align_time(self, t):
        return min(self.loopTime, max(0.0, round(t * config.FPS_UI) / config.FPS_UI))

    def update_time(self, from_ratio=None, from_delta=None):
        if from_ratio is not None:
            self.current_time = self.align_time(self.loopTime * from_ratio)
            self.startTime = time.time() - self.current_time
        elif from_delta is not None:
            self.current_time = self.align_time(self.current_time + from_delta)
            self.startTime = time.time() - self.current_time
        if self.audio is not None:
            self.audio.set_time(self.audio_skip + self.current_time, compensate_buffer=False)

    def pause(self):
        self.is_paused = True

    def play(self):
        self.is_paused = False

    def mousePressEvent(self, event):
        """put the qt event in the osg event queue"""
        self.still_frame = True
        button = mouseButtonDictionary.get(event.button(), 0)
        self.update_time(from_ratio=float(event.x()) / float(self.width()))
        self.gw.getEventQueue().mouseButtonPress(event.x(), event.y(), button)

    def mouseMoveEvent(self, event):
        """put the qt event in the osg event queue"""
self.update_time(from_ratio=float(event.x()) / float(self.width())) self.gw.getEventQueue().mouseMotion(event.x(), event.y()) def mouseReleaseEvent( self, event ): """put the qt event in the osg event queue""" button = mouseButtonDictionary.get(event.button(), 0) self.gw.getEventQueue().mouseButtonRelease(event.x(), event.y(), button) self.still_frame = False def getGraphicsWindow(self): """returns the osg graphicswindow created by osgViewer """ return self.gw
# Sound1b.py
from soundplayer import SoundPlayer
import time

# Use device with ID 1 (mostly USB audio adapter)
# Sound resource in current folder
p = SoundPlayer("salza1.wav", 1)
print("play for 10 s")
p.play()  # non-blocking, volume = 1
n = 0
while True:
    if not p.isPlaying():
        break
    print("playing:", n)
    if n == 10:
        p.stop()
        print("stopped")
    n += 1
    time.sleep(1)
print("done")
import time
from soundplayer import SoundPlayer

# Use device with ID 1 (mostly USB audio adapter)
p = SoundPlayer("/home/pi/sound/salza1.wav", 1)
print("play for 10 s with volume 0.5")
p.play(0.5)  # non-blocking, volume = 0.5
print("isPlaying:", p.isPlaying())
time.sleep(10)
print("pause for 5 s")
p.pause()
print("isPlaying:", p.isPlaying())
time.sleep(5)
print("resume for 10 s")
p.resume()
time.sleep(10)
print("stop")
p.stop()
print("isPlaying:", p.isPlaying())
print("done")
def scope(cv, x, step_x, id):
    global cvjob
    global durchgang
    global linedrop

    def measure_point():
        while not GPIO.input(11):
            GPIO.output(9, 1)
        u1 = 0
        u2 = 0
        for i in range(10000):
            if GPIO.input(11):
                GPIO.output(9, 0)
                u1 = u1 + faktor
                #u1 = u1 + 1
            else:
                GPIO.output(9, 1)
                u2 = u2 + faktor
                #u2 = u2 + 1
        GPIO.output(9, 0)
        spannung = (nulloffset + u1 - u2) / 2000
        #print("Spannung: ", spannung, " u0: ", (((u1 - u2) * 1000 / 100000) + 100))
        return spannung
    # end of measure_point()

    def dbv_coordinate(V):
        Unull = 1
        if V <= 0:
            V = 0.0001
        dBV = 20 * math.log10(V / Unull)
        if dBV < -20:
            dBV = -19.9
        elif dBV > 20:
            dBV = 19.9
        #print("Spannung: ", V, " dBV: ", dBV, " Coord: ", (h / 2 - (dBV * 20)))
        return (h / 2 - (dBV * 20))
    # end of dbv_coordinate(dBV)

    def frequenz_koord(freq):
        # determine the decade
        lfreq = 0
        m = 0
        if freq < 100:
            m = 0
            lfreq = freq / 10
        elif freq < 1000:
            m = 1
            lfreq = freq / 100
        elif freq < 10000:
            m = 2
            lfreq = freq / 1000
        elif freq <= 20000:
            m = 3
            lfreq = freq / 10000
        try:
            logwert = math.log10(lfreq)
        except:
            print("freq: ", freq, " lfreq: ", lfreq, " m: ", m)
            raise
        xwert = wh * logwert + (wh * m)
        #print("freq: ", freq, " lfreq: ", lfreq, " m: ", m, " logwert: ", logwert, " xwert: ", xwert)
        return round(xwert)
    # end of frequenz_koord(freq)

    korr_faktor = 1.0
    try:
        korr_faktor = float(korr_var.get())
    except:
        print("Keine gültige Zahl: ", korr_var.get())
        korr_faktor = 1.0
        korr_var.set("1.0")
    if korr_faktor > 4 or korr_faktor <= 0:
        print("Keine gültige Zahl: ", korr_var.get())
        korr_faktor = 1.0
        korr_var.set("1.0")

    if x < len(freqliste):
        if id:
            last_y = cv.coords(id)[-1]
        else:
            if linedrop == 1:
                cv.delete("line_point" + str(durchgang))  # delete the line again
                linedrop = 0
            last_y = h / 2
        #x += step_x
        # Generate the tone here, then measure, then raise the frequency by step_x.
        # id = cv.create_line(x - step_x, last_y, x, measure_point() * 2, fill="black", tag="line_point", width=2)
        counter.set("Frequenz: " + str(freqliste[x]))
        old_x = 0
        try:
            SoundPlayer.playTone(freqliste[x], 2, False, dev)  # the duration can also be adjusted here
            #SoundPlayer.playTone(300, 2, False, dev)
            mp = 0
            messcounter = 0  # for the average
            mess_liste = []  # list of samples
            while SoundPlayer.isPlaying():
                #print("Warte:..", SoundPlayer.isPlaying())
                #mp += measure_point()  # average
                messcounter += 1  # average
                mess_liste.append(measure_point())
                #print("Sound ", freqliste[x], " Hz")
            #mp = mp / messcounter  # compute the average
            #mess_liste.remove(max(mess_liste))
            #mess_liste.remove(min(mess_liste))
            #messcounter -= 2
            mp = median(mess_liste)
            # DEBUG output
            #print("Listenlänge: ", len(mess_liste), " Max: ", max(mess_liste), " Min: ", min(mess_liste), "Mid: ", mp)
            old_x = x - step_x
            if old_x < 0:
                old_x = 0
            messwert.set("Messwert: " + str(round(mp, 2)) + "V")
            messpunkte.set("Messpunke: " + str(messcounter))
            id = cv.create_line(frequenz_koord(freqliste[old_x]), last_y,
                                frequenz_koord(freqliste[x]),
                                dbv_coordinate(mp * korr_faktor),
                                fill=linienfarbe[durchgang],
                                tag="line_point" + str(durchgang), width=2)
            x += step_x
        except:
            print("Exception x:", x, "old_x: ", old_x)
            raise
    else:
        # also reset the frequency here
        x = 0
        id = None
        durchgang += 1
    # cv.after(20, scope, cv, x, step_x, id)
    if startstop == 1 and durchgang < len(linienfarbe):
        cvjob = cv.after(20, scope, cv, x, step_x, id)
    else:
        print("Gestoppt!")
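# Worked example for dbv_coordinate() above (h and the 20-pixels-per-dB scale come from the
# enclosing script): a measured level of V = 0.5 V gives dBV = 20 * log10(0.5 / 1) ≈ -6.02,
# so the returned canvas y coordinate is h/2 - (-6.02 * 20) ≈ h/2 + 120.4.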
# Sound1a.py
import time
from soundplayer import SoundPlayer

# Use device with ID 1 (mostly USB audio adapter)
p = SoundPlayer("hello.mp3", 1)
print("playing")
p.play()  # non-blocking, default volume
print("done")
class PyQtOSGWidget(QtOpenGL.QGLWidget):
    def __init__(self, parent=0, name='', flags=0):
        """constructor"""
        QtOpenGL.QGLWidget.__init__(self, parent)
        self.parent = parent
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.timer = QtCore.QTimer()
        self.timer.setInterval(1000.0 / config.FPS_RENDERING)  # in milliseconds
        self.camera = None
        self.startTime = 0.0
        self.loopTime = 10.0
        self.is_paused = False
        self.still_frame = True
        self.current_time = 0
        self.audio = None
        self.viewer = None
        self.orginal_data = None
        self.aspect_ratio = None
        self.fps_calculator = FPSCalculator(start_time=self.startTime, smoothness=30)
        self.size = None

    def initializeGL(self):
        """initializeGL the context and create the osgViewer, also set manipulator and event handler"""
        self.gw = self.createContext()
        self.viewer = None
        QtCore.QObject.connect(self.timer, QtCore.SIGNAL("timeout ()"), self.updateGL)

    def setLoopTime(self, loopTime):
        self.loopTime = loopTime
        if self.audio is not None:
            self.audio.set_loop_time(start_time=self.audio_skip,
                                     end_time=self.audio_skip + self.loopTime)

    def embedInContext(self):
        """create an osg.GraphicsWindow for a Qt.QWidget window"""
        gw = osgViewer.GraphicsWindowEmbedded(0, 0, self.width(), self.height())
        return gw

    def createContext(self):
        """create an OpenGL context (currently WindowData classes are not wrapped
        so we can not inherit the windowdata)"""
        ds = osg.DisplaySettings_instance()
        if False:
            traits = osg.GraphicsContext.Traits()
            print(traits)
            traits.readDISPLAY()
            if (traits.displayNum < 0):
                traits.displayNum = 0
            traits.windowName = "osgViewerPyQt"
            traits.screenNum = 0
            traits.x = self.x()
            traits.y = self.y()
            traits.width = self.width()
            traits.height = self.height()
            traits.alpha = 8  # ds.getMinimumNumAlphaBits()
            traits.stencil = 8  # ds.getMinimumNumStencilBits()
            traits.windowDecoration = False
            traits.doubleBuffer = True
            traits.sampleBuffers = 4  # ds.getMultiSamples()
            traits.samples = 4  # ds.getNumMultiSamples()
        gw = osgViewer.GraphicsWindowEmbedded()
        return gw

    def createViewer(self):
        """create an osgViewer.Viewer and set the viewport, camera and
        previously created graphical context"""
        global viewerFactory
        viewer = viewerFactory()
        # init the default eventhandler
        # self.viewer.setCameraManipulator(osgGA.TrackballManipulator())
        viewer.addEventHandler(osgViewer.StatsHandler())
        viewer.addEventHandler(osgViewer.HelpHandler())
        viewer.getUpdateVisitor().setTraversalMask(UPDATE_MASK)
        self.resetCamera(viewer)
        return viewer

    def resetCamera(self, viewer):
        camera = viewer.getCamera()
        camera.setComputeNearFarMode(False)
        # camera = osg.Camera()
        camera.setViewport(osg.Viewport(0, 0, self.width(), self.height()))
        # camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
        CAMERA_ANGLE = 45.0
        CAMERA_Z_TRANSLATE = 2.4142135623730949  # 1.0 / math.tan(math.radians(CAMERA_ANGLE / 2.0))
        cameraPosition = [0.0, 0.0, CAMERA_Z_TRANSLATE]
        camera.setProjectionMatrixAsPerspective(
            CAMERA_ANGLE, float(self.width()) / float(self.height()), 0.1, 100.0)
        eye = osg.Vec3d(cameraPosition[0], cameraPosition[1], cameraPosition[2])
        center = osg.Vec3d(0, 0, 0)
        up = osg.Vec3d(0, 1, 0)
        camera.setViewMatrixAsLookAt(eye, center, up)
        camera.getOrCreateStateSet().setAttributeAndModes(
            osg.BlendFunc(GL.GL_ONE, GL.GL_ONE_MINUS_SRC_ALPHA))
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_TEST, False)
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_WRITEMASK, False)
        camera.getOrCreateStateSet().setMode(GL.GL_LIGHTING, False)
        material = osg.Material()
        color = osg.Vec4(1.0, 1.0, 1.0, 1.0)
        material.setDiffuse(osg.Material.FRONT_AND_BACK, color)
        material.setAmbient(osg.Material.FRONT_AND_BACK, color)
        camera.getOrCreateStateSet().setAttributeAndModes(material)
        camera.setClearColor(osg.Vec4(0, 0, 0, 0))
        camera.setClearMask(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        camera.setCullMask(VISIBLE_CULL_MASK)
        if not self.gw:
            raise Exception("GraphicsWindow not yet created")
        self.camera = camera
        # viewer.getCamera().setViewport(osg.Viewport(0,0, self.width(), self.height()))
        # viewer.getCamera().addChild(camera)
        camera.setGraphicsContext(self.gw)

    def texture_build(self):
        texture = osg.Texture2D()
        texture.setTextureSize(self.width(), self.height())
        texture.setInternalFormat(GL.GL_RGBA)
        texture.setResizeNonPowerOfTwoHint(False)
        # bug detected here, if I enable mipmap osg seems to use the view buffer to
        # do something. If I disable the mipmap it works.
        # you can view the issue with test_09_gaussian_filter.py
        #texture.setFilter(osg.Texture.MIN_FILTER, osg.Texture.LINEAR_MIPMAP_LINEAR)
        texture.setFilter(osg.Texture.MIN_FILTER, osg.Texture.LINEAR)
        texture.setFilter(osg.Texture.MAG_FILTER, osg.Texture.LINEAR)
        return texture

    def camera_build(self):
        texture = self.texture_build()
        camera = osg.Camera()
        camera.setViewport(osg.Viewport(0, 0, self.width(), self.height()))
        camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
        camera.setRenderOrder(osg.Camera.PRE_RENDER)
        camera.setRenderTargetImplementation(osg.Camera.FRAME_BUFFER_OBJECT)
        camera.attach(osg.Camera.COLOR_BUFFER, texture, 0, 0, False, 0, 0)
        CAMERA_ANGLE = 45.0
        CAMERA_Z_TRANSLATE = 2.4142135623730949  # 1.0 / math.tan(math.radians(CAMERA_ANGLE / 2.0))
        cameraPosition = [0.0, 0.0, CAMERA_Z_TRANSLATE]
        camera.setProjectionMatrixAsPerspective(
            CAMERA_ANGLE, float(self.width()) / float(self.height()), 0.1, 10000.0)
        eye = osg.Vec3d(cameraPosition[0], cameraPosition[1], cameraPosition[2])
        center = osg.Vec3d(0, 0, 0)
        up = osg.Vec3d(0, 1, 0)
        camera.setViewMatrixAsLookAt(eye, center, up)
        camera.getOrCreateStateSet().setAttributeAndModes(
            osg.BlendFunc(GL.GL_ONE, GL.GL_ONE_MINUS_SRC_ALPHA))
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_TEST, False)
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_WRITEMASK, False)
        camera.getOrCreateStateSet().setMode(GL.GL_LIGHTING, False)
        material = osg.Material()
        color = osg.Vec4(1.0, 1.0, 1.0, 1.0)
        material.setDiffuse(osg.Material.FRONT_AND_BACK, color)
        material.setAmbient(osg.Material.FRONT_AND_BACK, color)
        camera.getOrCreateStateSet().setAttributeAndModes(material)
        camera.setClearColor(osg.Vec4(0, 0, 0, 0))
        camera.setClearMask(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        return camera, texture

    def quad_create(self, texture):
        stateset = osg.StateSet()
        stateset.setTextureAttributeAndModes(0, texture)
        corner = osg.Vec3(-self.aspect_ratio, -1.0, 0)
        width = osg.Vec3(2 * self.aspect_ratio, 0, 0)
        height = osg.Vec3(0, 2 * 1.0, 0)
        geom = osg.createTexturedQuadGeometry(corner, width, height, 0.0, 0.0, 1.0, 1.0)
        geom.setStateSet(stateset)
        geode = osg.Geode()
        geode.addDrawable(geom)
        return geode

    def build_wrapping_node(self, data):
        grp = osg.Group()
        camera, texture = self.camera_build()
        grp.addChild(camera)
        camera.addChild(data)
        quad = self.quad_create(texture)
        grp.addChild(quad)
        grp.getOrCreateStateSet().setMode(GL.GL_LIGHTING, False)
        # ALPHA_TEST should be disabled by default by OpenGL but it's not.
        # That's why it is required to explicitly disable it.
        grp.getOrCreateStateSet().setMode(GL.GL_ALPHA_TEST, osg.StateAttribute.OFF)
        return grp

    def resetSceneData(self, data):
        self.timer.stop()
        if self.viewer is not None:
            self.viewer.setSceneData(None)
        if self.audio is not None:
            self.audio.terminate()
            self.audio = None

    def setSceneData(self, data):
        if data is not None:
            if self.viewer == None:
                self.viewer = self.createViewer()
            self.orginal_data = data
            data = self.build_wrapping_node(data)
            self.viewer.setSceneData(data)
            # ready to render
            self.fps_calculator.reset(self.startTime)
            self.still_frame = False
            self.timer.start()

    def setAudioData(self, data, skip=0.0):
        if self.audio is not None:
            self.audio.terminate()
            self.audio = None
        self.audio_data = data
        self.audio_skip = float(skip)
        self.audio = SoundPlayer(input_data=data,
                                 tmp_dir=jarvis.get_home(),
                                 start=False,
                                 loop_nb=0,
                                 frames_per_buffer=1024,
                                 blocking=False)
        if self.audio is not None:
            self.audio.set_loop_time(start_time=self.audio_skip,
                                     end_time=self.audio_skip + self.loopTime)
            self.audio.set_time(self.audio_skip + self.current_time,
                                compensate_buffer=False)

    def getosgviewer(self):
        return self.viewer

    def size():
        def fget(self):
            return self._size
        def fset(self, value):
            if value is None:
                self._size = None
            else:
                width, height = value
                width = int(round(width))
                height = int(round(height))
                self._size = (width, height)
                self.setMinimumSize(width, height)
        return locals()
    size = property(**size())

    def resizeGL(self, w, h):
        self.aspect_ratio = w / float(h)
        self.parent.toolbar.aspect_ratio_btn_update()
        if self.viewer is not None:
            self.gw.resized(0, 0, w, h)
            self.setSceneData(self.orginal_data)
            self.resetCamera(self.viewer)

    def paintGL(self):
        if self.viewer == None:
            return
        frame_time = time.time()
        if self.audio is not None:
            if self.is_paused or self.still_frame:
                self.audio.pause()
            else:
                self.audio.play(blocking=False)
            audio_time = self.audio.get_time() - self.audio_skip
            # self.current_time = self.align_time(audio_time)
            self.current_time = audio_time
        else:
            if self.is_paused or self.still_frame:
                self.startTime = frame_time - self.current_time
            else:
                self.current_time = frame_time - self.startTime
            if self.current_time >= self.loopTime:
                self.startTime = frame_time
                self.current_time = 0.0
        fps = self.fps_calculator.get(frame_time)
        self.parent.toolbar.update_time_info(self.current_time, self.loopTime, fps)
        self.viewer.frameAtTime(self.align_time(self.current_time))

    def align_time(self, t):
        return min(self.loopTime, max(0.0, round(t * config.FPS_UI) / config.FPS_UI))

    def update_time(self, from_ratio=None, from_delta=None):
        if from_ratio is not None:
            self.current_time = self.align_time(self.loopTime * from_ratio)
            self.startTime = time.time() - self.current_time
        elif from_delta is not None:
            self.current_time = self.align_time(self.current_time + from_delta)
            self.startTime = time.time() - self.current_time
        if self.audio is not None:
            self.audio.set_time(self.audio_skip + self.current_time, compensate_buffer=False)

    def pause(self):
        self.is_paused = True

    def play(self):
        self.is_paused = False

    def mousePressEvent(self, event):
        """put the qt event in the osg event queue"""
        self.still_frame = True
        button = mouseButtonDictionary.get(event.button(), 0)
        self.update_time(from_ratio=float(event.x()) / float(self.width()))
        self.gw.getEventQueue().mouseButtonPress(event.x(), event.y(), button)

    def mouseMoveEvent(self, event):
        """put the qt event in the osg event queue"""
        self.update_time(from_ratio=float(event.x()) / float(self.width()))
        self.gw.getEventQueue().mouseMotion(event.x(), event.y())

    def mouseReleaseEvent(self, event):
"""put the qt event in the osg event queue""" button = mouseButtonDictionary.get(event.button(), 0) self.gw.getEventQueue().mouseButtonRelease(event.x(), event.y(), button) self.still_frame = False def getGraphicsWindow(self): """returns the osg graphicswindow created by osgViewer """ return self.gw
if len(os.listdir('/media/PiSoundBoard')) > 0:
    os.system("sudo rm /home/pi/soundfiles/*")
    os.system("sudo cp -R /media/PiSoundBoard/*.mp3 /home/pi/soundfiles")

def readSwitches():
    global lastPushedSwitch
    for i in switches:
        if GPIO.input(i) == False and lastPushedSwitch != i:
            lastPushedSwitch = i
            return switches[i]["filename"]
    return False

GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
initSwitches()
initRelais()
copyMediaFiles()

if __name__ == '__main__':
    while True:
        if player != False and player.isPlaying() == False:
            lastPushedSwitch = False
        filename = readSwitches()
        if filename != False and os.path.isfile(SoundFilePath + filename):
            #print(SoundFilePath + filename)
            player = SoundPlayer(SoundFilePath + filename, 0)
            player.play(1.0)