async def read_gamepad_inputs():
    global head_light_flag
    print("Ready to drive!!")
    turn_sound = SoundPlayer(
        "/home/pi/xbox-raspberrypi-rover/soundfiles/turn-signal.mp3", card)
    horn_sound = SoundPlayer(
        "/home/pi/xbox-raspberrypi-rover/soundfiles/Horn.mp3", card)

    while not is_connected():
        time.sleep(2)  # Wait 2 seconds for controller to come up and try again

    while is_connected() and remote_control.button_b == False:
        #print(" trigger_right = ", round(remote_control.trigger_right,2),end="\r")
        x = round(remote_control.joystick_left_x, 2)
        y = round(remote_control.joystick_left_y, 2)
        angle = get_angle_from_coords(x, y)
        if angle > 180:
            angle = 360 - angle
        #print("x:", x, " y:", y, " angle: ",angle,end="\r")
        turn_head(angle)
        direction = get_motor_direction(x, y)
        y = adjust_speed(y, angle)
        #print("x:", x, " y:", y, " direction: ",direction,end="\r")
        drive_motor(direction, y)

        if round(remote_control.trigger_right, 2) > 0.0:
            horn_sound.play(1.0)
            led.blue()
        elif round(remote_control.trigger_left, 2) > 0.0:
            led.cyan()
        elif remote_control.bump_left:
            turn_sound.play(1.0)
            led.turn_left(5)
        elif remote_control.bump_right:
            turn_sound.play(1.0)
            led.turn_right(5)
        elif remote_control.dpad_up:
            remote_control.dpad_up = False
        elif remote_control.dpad_left:
            remote_control.dpad_left = False
        elif remote_control.dpad_right:
            remote_control.dpad_right = False
        elif remote_control.button_a:
            remote_control.button_a = False
        elif head_light_flag == False:
            led.both_off()
            led_strip.colorWipe(strip, Color(0, 0, 0))
            if turn_sound.isPlaying():
                turn_sound.stop()

        await asyncio.sleep(100e-3)  #100ms
    return
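The loop above folds the angle returned by get_angle_from_coords into the 0-180° range before steering the head. A minimal sketch of what such a helper could look like, given purely as an assumption for illustration (the rover project's own implementation may differ):

import math

def get_angle_from_coords(x, y):
    # hypothetical helper: 0° when the stick points straight up,
    # increasing clockwise, normalised to the range 0..360
    angle = math.degrees(math.atan2(x, y))
    return angle + 360 if angle < 0 else angle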
Example #2
class PyQtOSGWidget(QtOpenGL.QGLWidget):
    def __init__(self, parent=0, name='', flags=0):
        """constructor """
        QtOpenGL.QGLWidget.__init__(self, parent)
        self.parent = parent
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.timer = QtCore.QTimer()
        self.timer.setInterval(1000.0 /
                               config.FPS_RENDERING)  # in milliseconds
        self.camera = None
        self.startTime = 0.0
        self.loopTime = 10.0
        self.is_paused = False
        self.still_frame = True
        self.current_time = 0
        self.audio = None
        self.viewer = None
        self.orginal_data = None
        self.aspect_ratio = None
        self.fps_calculator = FPSCalculator(start_time=self.startTime,
                                            smoothness=30)
        self.size = None

    def initializeGL(self):
        """initializeGL the context and create the osgViewer, also set manipulator and event handler """
        self.gw = self.createContext()
        self.viewer = None

        QtCore.QObject.connect(self.timer, QtCore.SIGNAL("timeout ()"),
                               self.updateGL)

    def setLoopTime(self, loopTime):
        self.loopTime = loopTime
        if self.audio is not None:
            self.audio.set_loop_time(start_time=self.audio_skip,
                                     end_time=self.audio_skip + self.loopTime)

    def embedInContext(self):
        """create a osg.GraphicsWindow for a Qt.QWidget window"""
        gw = osgViewer.GraphicsWindowEmbedded(0, 0, self.width(),
                                              self.height())
        return gw

    def createContext(self):
        """create a opengl context (currently WindowData classes are not wrapped so we can not inherrit the windowdata) """
        ds = osg.DisplaySettings_instance()
        if False:
            traits = osg.GraphicsContext.Traits()
            print(traits)
            traits.readDISPLAY()
            if (traits.displayNum < 0): traits.displayNum = 0

            traits.windowName = "osgViewerPyQt"
            traits.screenNum = 0
            traits.x = self.x()
            traits.y = self.y()
            traits.width = self.width()
            traits.height = self.height()
            traits.alpha = 8  #ds.getMinimumNumAlphaBits()
            traits.stencil = 8  #ds.getMinimumNumStencilBits()
            traits.windowDecoration = False
            traits.doubleBuffer = True
            traits.sampleBuffers = 4  #ds.getMultiSamples()
            traits.samples = 4  #ds.getNumMultiSamples()
        gw = osgViewer.GraphicsWindowEmbedded()
        return gw

    def createViewer(self):
        """create a osgViewer.Viewer and set the viewport, camera and previously created graphical context """
        global viewerFactory
        viewer = viewerFactory()
        #init the default eventhandler
        #        self.viewer.setCameraManipulator(osgGA.TrackballManipulator())
        viewer.addEventHandler(osgViewer.StatsHandler())
        viewer.addEventHandler(osgViewer.HelpHandler())
        viewer.getUpdateVisitor().setTraversalMask(UPDATE_MASK)
        self.resetCamera(viewer)
        return viewer

    def resetCamera(self, viewer):
        camera = viewer.getCamera()
        camera.setComputeNearFarMode(False)
        #        camera = osg.Camera()
        camera.setViewport(osg.Viewport(0, 0, self.width(), self.height()))
        #        camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
        CAMERA_ANGLE = 45.0
        CAMERA_Z_TRANSLATE = 2.4142135623730949  #1.0 / math.tan(math.radians(CAMERA_ANGLE / 2.0))
        cameraPosition = [0.0, 0.0, CAMERA_Z_TRANSLATE]

        camera.setProjectionMatrixAsPerspective(
            CAMERA_ANGLE,
            float(self.width()) / float(self.height()), 0.1, 100.0)

        eye = osg.Vec3d(cameraPosition[0], cameraPosition[1],
                        cameraPosition[2])
        center = osg.Vec3d(0, 0, 0)
        up = osg.Vec3d(0, 1, 0)
        camera.setViewMatrixAsLookAt(eye, center, up)

        camera.getOrCreateStateSet().setAttributeAndModes(
            osg.BlendFunc(GL.GL_ONE, GL.GL_ONE_MINUS_SRC_ALPHA))
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_TEST, False)
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_WRITEMASK, False)
        camera.getOrCreateStateSet().setMode(GL.GL_LIGHTING, False)
        material = osg.Material()
        color = osg.Vec4(1.0, 1.0, 1.0, 1.0)
        material.setDiffuse(osg.Material.FRONT_AND_BACK, color)
        material.setAmbient(osg.Material.FRONT_AND_BACK, color)
        camera.getOrCreateStateSet().setAttributeAndModes(material)
        camera.setClearColor(osg.Vec4(0, 0, 0, 0))
        camera.setClearMask(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)

        camera.setCullMask(VISIBLE_CULL_MASK)

        if not self.gw:
            raise Exception("GraphicsWindow not yet created")

        self.camera = camera

        #        viewer.getCamera().setViewport(osg.Viewport(0,0, self.width(), self.height()))
        #        viewer.getCamera().addChild(camera)
        camera.setGraphicsContext(self.gw)

    def texture_build(self):
        texture = osg.Texture2D()
        texture.setTextureSize(self.width(), self.height())
        texture.setInternalFormat(GL.GL_RGBA)
        texture.setResizeNonPowerOfTwoHint(False)

        # bug detected here, if I enable mipmap osg seems to use the view buffer to
        # do something. If I disable the mipmap it works.
        # you can view the issue with test_09_gaussian_filter.py
        #texture.setFilter(osg.Texture.MIN_FILTER, osg.Texture.LINEAR_MIPMAP_LINEAR)
        texture.setFilter(osg.Texture.MIN_FILTER, osg.Texture.LINEAR)
        texture.setFilter(osg.Texture.MAG_FILTER, osg.Texture.LINEAR)
        return texture

    def camera_build(self):
        texture = self.texture_build()

        camera = osg.Camera()
        camera.setViewport(osg.Viewport(0, 0, self.width(), self.height()))
        camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
        camera.setRenderOrder(osg.Camera.PRE_RENDER)
        camera.setRenderTargetImplementation(osg.Camera.FRAME_BUFFER_OBJECT)
        camera.attach(osg.Camera.COLOR_BUFFER, texture, 0, 0, False, 0, 0)

        CAMERA_ANGLE = 45.0
        CAMERA_Z_TRANSLATE = 2.4142135623730949  #1.0 / math.tan(math.radians(CAMERA_ANGLE / 2.0))
        cameraPosition = [0.0, 0.0, CAMERA_Z_TRANSLATE]

        camera.setProjectionMatrixAsPerspective(
            CAMERA_ANGLE,
            float(self.width()) / float(self.height()), 0.1, 10000.0)

        eye = osg.Vec3d(cameraPosition[0], cameraPosition[1],
                        cameraPosition[2])
        center = osg.Vec3d(0, 0, 0)
        up = osg.Vec3d(0, 1, 0)
        camera.setViewMatrixAsLookAt(eye, center, up)

        camera.getOrCreateStateSet().setAttributeAndModes(
            osg.BlendFunc(GL.GL_ONE, GL.GL_ONE_MINUS_SRC_ALPHA))
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_TEST, False)
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_WRITEMASK, False)
        camera.getOrCreateStateSet().setMode(GL.GL_LIGHTING, False)
        material = osg.Material()
        color = osg.Vec4(1.0, 1.0, 1.0, 1.0)
        material.setDiffuse(osg.Material.FRONT_AND_BACK, color)
        material.setAmbient(osg.Material.FRONT_AND_BACK, color)
        camera.getOrCreateStateSet().setAttributeAndModes(material)
        camera.setClearColor(osg.Vec4(0, 0, 0, 0))
        camera.setClearMask(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)

        return camera, texture

    def quad_create(self, texture):
        stateset = osg.StateSet()
        stateset.setTextureAttributeAndModes(0, texture)
        corner = osg.Vec3(-self.aspect_ratio, -1.0, 0)
        width = osg.Vec3(2 * self.aspect_ratio, 0, 0)
        height = osg.Vec3(0, 2 * 1.0, 0)
        geom = osg.createTexturedQuadGeometry(corner, width, height, 0.0, 0.0,
                                              1.0, 1.0)
        geom.setStateSet(stateset)
        geode = osg.Geode()
        geode.addDrawable(geom)
        return geode

    def build_wrapping_node(self, data):
        grp = osg.Group()
        camera, texture = self.camera_build()

        grp.addChild(camera)
        camera.addChild(data)

        quad = self.quad_create(texture)
        grp.addChild(quad)

        grp.getOrCreateStateSet().setMode(GL.GL_LIGHTING, False)
        # ALPHA_TEST should be disabled by default by OpenGL but it's not.
        # That's why it is required to explicitly disable it.
        grp.getOrCreateStateSet().setMode(GL.GL_ALPHA_TEST,
                                          osg.StateAttribute.OFF)
        return grp

    def resetSceneData(self, data):
        self.timer.stop()
        if self.viewer is not None:
            self.viewer.setSceneData(None)
        if self.audio is not None:
            self.audio.terminate()
            self.audio = None

    def setSceneData(self, data):
        if data is not None:
            if self.viewer == None:
                self.viewer = self.createViewer()
            self.orginal_data = data
            data = self.build_wrapping_node(data)
            self.viewer.setSceneData(data)

        # ready to render
        self.fps_calculator.reset(self.startTime)
        self.still_frame = False
        self.timer.start()

    def setAudioData(self, data, skip=0.0):
        if self.audio is not None:
            self.audio.terminate()
            self.audio = None
        self.audio_data = data
        self.audio_skip = float(skip)
        self.audio = SoundPlayer(input_data=data,
                                 tmp_dir=jarvis.get_home(),
                                 start=False,
                                 loop_nb=0,
                                 frames_per_buffer=1024,
                                 blocking=False)
        if self.audio is not None:
            self.audio.set_loop_time(start_time=self.audio_skip,
                                     end_time=self.audio_skip + self.loopTime)
            self.audio.set_time(self.audio_skip + self.current_time,
                                compensate_buffer=False)

    def getosgviewer(self):
        return self.viewer

    def size():
        def fget(self):
            return self._size

        def fset(self, value):
            if value is None:
                self._size = None
            else:
                width, height = value
                width = int(round(width))
                height = int(round(height))
                self._size = (width, height)
                self.setMinimumSize(width, height)

        return locals()

    size = property(**size())

    def resizeGL(self, w, h):
        self.aspect_ratio = w / float(h)
        self.parent.toolbar.aspect_ratio_btn_update()
        if self.viewer is not None:
            self.gw.resized(0, 0, w, h)
            self.setSceneData(self.orginal_data)
            self.resetCamera(self.viewer)

    def paintGL(self):
        if self.viewer == None:
            return

        frame_time = time.time()

        if self.audio is not None:
            if self.is_paused or self.still_frame:
                self.audio.pause()
            else:
                self.audio.play(blocking=False)
            audio_time = self.audio.get_time() - self.audio_skip
            # self.current_time = self.align_time(audio_time)
            self.current_time = audio_time
        else:
            if self.is_paused or self.still_frame:
                self.startTime = frame_time - self.current_time
            else:
                self.current_time = frame_time - self.startTime
                if self.current_time >= self.loopTime:
                    self.startTime = frame_time
                    self.current_time = 0.0

        fps = self.fps_calculator.get(frame_time)
        self.parent.toolbar.update_time_info(self.current_time, self.loopTime,
                                             fps)

        self.viewer.frameAtTime(self.align_time(self.current_time))

    def align_time(self, t):
        return min(self.loopTime,
                   max(0.0,
                       round(t * config.FPS_UI) / config.FPS_UI))

    def update_time(self, from_ratio=None, from_delta=None):
        if from_ratio is not None:
            self.current_time = self.align_time(self.loopTime * from_ratio)
            self.startTime = time.time() - self.current_time

        elif from_delta is not None:
            self.current_time = self.align_time(self.current_time + from_delta)
            self.startTime = time.time() - self.current_time

        if self.audio is not None:
            self.audio.set_time(self.audio_skip + self.current_time,
                                compensate_buffer=False)

    def pause(self):
        self.is_paused = True

    def play(self):
        self.is_paused = False

    def mousePressEvent(self, event):
        """put the qt event in the osg event queue"""
        self.still_frame = True
        button = mouseButtonDictionary.get(event.button(), 0)
        self.update_time(from_ratio=float(event.x()) / float(self.width()))
        self.gw.getEventQueue().mouseButtonPress(event.x(), event.y(), button)

    def mouseMoveEvent(self, event):
        """put the qt event in the osg event queue"""
        self.update_time(from_ratio=float(event.x()) / float(self.width()))
        self.gw.getEventQueue().mouseMotion(event.x(), event.y())

    def mouseReleaseEvent(self, event):
        """put the qt event in the osg event queue"""
        button = mouseButtonDictionary.get(event.button(), 0)
        self.gw.getEventQueue().mouseButtonRelease(event.x(), event.y(),
                                                   button)
        self.still_frame = False

    def getGraphicsWindow(self):
        """returns the osg graphicswindow created by osgViewer """
        return self.gw
Example #3
# Sound1a.py

import time
from soundplayer import SoundPlayer

# Use device with ID 1  (mostly USB audio adapter)
p = SoundPlayer("/home/pi/sound/salza1.wav", 1)
print "play for 10 s with volume 0.5"
p.play(0.5)  # non-blocking, volume = 0.5
time.sleep(10)
print "pause for 5 s"
p.pause()
time.sleep(5)
print "resume for 10 s"
p.resume()
time.sleep(10)
print "stop"
p.stop()
print "done"
Example #4
# Sound1b.py

from soundplayer import SoundPlayer
import time

# Use device with ID 0  (mostly standard audio output)
# Sound resource in current folder
p = SoundPlayer("salza1.wav", 1)
print "play for 10 s"
p.play()  # non blocking, volume = 1
n = 0
while True:
    if not p.isPlaying():
        break
    print "playing:", n
    if n == 10:
        p.stop()
        print "stopped"
    n += 1
    time.sleep(1)
print "done"
Example #5
        disconnect_sound = SoundPlayer(
            "/home/pi/xbox-raspberrypi-rover/soundfiles/Disconnected.mp3",
            card)
        siren_sound = SoundPlayer(
            "/home/pi/xbox-raspberrypi-rover/soundfiles/siren.mp3", card)

        setup()

        waiting_for_connect = True
        while waiting_for_connect:
            remote_control = connect()
            if (remote_control != None):
                waiting_for_connect = False
            time.sleep(2)

        init_sound.play(1.0)

        strip = led_strip.setup_led()

        led_threading = threading.Thread(
            target=led_thread)  # thread driving the WS2812 LED strip
        led_threading.setDaemon(
            True)  # daemon thread: it is stopped automatically when the main program exits
        led_threading.start()  # start the thread

        tasks = [
            remote_control.read_gamepad_input(),
            remote_control.rumble(),
            read_gamepad_inputs()
        ]
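The fragment stops right after building the task list; a plausible continuation, given only as an assumption and not as the project's actual code, would hand the three coroutines to the asyncio event loop:

        # assumed continuation: run all three coroutines until button B ends the drive loop
        loop = asyncio.get_event_loop()
        loop.run_until_complete(asyncio.gather(*tasks))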
Example #6
    def run(self):
        url = gral_url + "records"
        continue_reading = True
        # Create an object of the class MFRC522
        MIFAREReader = MFRC522.MFRC522()
        # Welcome message

        # This loop keeps checking for chips. If one is near it will get the UID and authenticate
        while continue_reading:

            # Scan for cards
            (status,
             TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)

            # If a card is found
            if status == MIFAREReader.MI_OK:
                print("Card detected")
            # Get the UID of the card
            (status, uid) = MIFAREReader.MFRC522_Anticoll()

            # If we have the UID, continue
            if status == MIFAREReader.MI_OK:
                rfid = ''.join([
                    str(hex(i))[2:] if i >= 16 else '0' + str(hex(i))[2:]
                    for i in uid
                ])[:-2]
                rfidreverse = self.rotate(rfid)
                rfid = rfid.upper()
                rfidreverse = rfidreverse.upper()
                print(rfid, rfidreverse)
                p = SoundPlayer("/home/pi/guiPythonLABFAB/sounds/BeepIn.mp3",
                                0)
                p.play(1)
                time.sleep(0.001)
                #try:
                req = requests.post(
                    url, {
                        'rfid': rfid,
                        'lab_id': lab_id
                    },
                    headers=credentials.totem_credential).json()
                if not req:
                    req = requests.post(
                        url, {
                            'rfid': rfidreverse,
                            'lab_id': lab_id
                        },
                        headers=credentials.totem_credential).json()
                    if not req:
                        req = rfid
                        self.sig2.emit(req)
                    else:
                        self.sig1.emit(req)
                else:
                    print('resource not found:', req)

                    # except:
                    #     req = 'No Internet Connection'
                    #     self.sig2.emit(req)

                    time.sleep(5)
                GPIO.cleanup()
Example #7
setup()
print "Bereit..."
p = SoundPlayer("/home/pi/maja.mp3", 1)
sound1 = SoundPlayer("/home/pi/maja.mp3", 1)
sound2 = SoundPlayer("/home/pi/maja.mp3", 1)
sound3 = SoundPlayer("/home/pi/maja.mp3", 1)
sound4 = SoundPlayer("/home/pi/maja.mp3", 1)
sound5 = SoundPlayer("/home/pi/maja.mp3", 1)
sound6 = SoundPlayer("/home/pi/maja.mp3", 1)

while True:
    if GPIO.input(P_TON1) == GPIO.LOW:
        if not button1_pressed:
            print "Ton1..."
            sound1.play()
        else:
            sound1.stop()
        button1_pressed = True
    elif GPIO.input(P_TON2) == GPIO.LOW:
        if not button2_pressed:
            print "Ton2..."
            sound2.play()
        else:
            sound2.stop()
        button2_pressed = True
    elif GPIO.input(P_TON3) == GPIO.LOW:
        if not button3_pressed:
            print "Ton3..."
            sound3.play()
        else:
Example #8
File: osgqt.py  Project: madlag/jarvis
class PyQtOSGWidget(QtOpenGL.QGLWidget):
    def __init__(self, parent = 0, name = '' ,flags = 0):
        """constructor """
        QtOpenGL.QGLWidget.__init__(self, parent)
        self.parent = parent
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.timer = QtCore.QTimer()
        self.timer.setInterval(1000.0 / config.FPS_RENDERING) # in milliseconds
        self.camera = None
        self.startTime = 0.0
        self.loopTime = 10.0
        self.is_paused = False
        self.still_frame = True
        self.current_time = 0
        self.audio = None
        self.viewer = None    
        self.orginal_data = None
        self.aspect_ratio = None
        self.fps_calculator = FPSCalculator(start_time=self.startTime, smoothness=30)
        self.size = None

    def initializeGL (self):
        """initializeGL the context and create the osgViewer, also set manipulator and event handler """
        self.gw = self.createContext()
        self.viewer = None

        QtCore.QObject.connect(self.timer, QtCore.SIGNAL("timeout ()"), self.updateGL)

    def setLoopTime(self, loopTime):
        self.loopTime = loopTime
        if self.audio is not None:
            self.audio.set_loop_time(start_time=self.audio_skip, end_time=self.audio_skip + self.loopTime)

    def embedInContext (self):
        """create a osg.GraphicsWindow for a Qt.QWidget window"""
        gw = osgViewer.GraphicsWindowEmbedded(0,0,self.width(),self.height())
        return gw

    def createContext (self):
        """create a opengl context (currently WindowData classes are not wrapped so we can not inherrit the windowdata) """
        ds = osg.DisplaySettings_instance()
        if False:
            traits = osg.GraphicsContext.Traits()
            print(traits)
            traits.readDISPLAY()
            if (traits.displayNum<0): traits.displayNum = 0

            traits.windowName = "osgViewerPyQt"
            traits.screenNum = 0
            traits.x = self.x()
            traits.y = self.y()
            traits.width = self.width()
            traits.height = self.height()
            traits.alpha = 8 #ds.getMinimumNumAlphaBits()
            traits.stencil = 8 #ds.getMinimumNumStencilBits()
            traits.windowDecoration = False
            traits.doubleBuffer = True
            traits.sampleBuffers = 4 #ds.getMultiSamples()
            traits.samples = 4 #ds.getNumMultiSamples()
        gw = osgViewer.GraphicsWindowEmbedded()
        return gw

    def createViewer(self):
        """create a osgViewer.Viewer and set the viewport, camera and previously created graphical context """
        global viewerFactory
        viewer = viewerFactory()
        #init the default eventhandler
#        self.viewer.setCameraManipulator(osgGA.TrackballManipulator())
        viewer.addEventHandler(osgViewer.StatsHandler())
        viewer.addEventHandler(osgViewer.HelpHandler())
        viewer.getUpdateVisitor().setTraversalMask(UPDATE_MASK)
        self.resetCamera(viewer)
        return viewer

    def resetCamera(self, viewer):
        camera = viewer.getCamera()
        camera.setComputeNearFarMode(False)
#        camera = osg.Camera()
        camera.setViewport(osg.Viewport(0,0, self.width(), self.height()))
#        camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
        CAMERA_ANGLE = 45.0
        CAMERA_Z_TRANSLATE = 2.4142135623730949 #1.0 / math.tan(math.radians(CAMERA_ANGLE / 2.0))
        cameraPosition = [0.0, 0.0, CAMERA_Z_TRANSLATE]

        camera.setProjectionMatrixAsPerspective(CAMERA_ANGLE,float(self.width())/float(self.height()), 0.1, 100.0)

        eye = osg.Vec3d(cameraPosition[0], cameraPosition[1], cameraPosition[2])
        center = osg.Vec3d(0,0,0)
        up = osg.Vec3d(0,1,0)
        camera.setViewMatrixAsLookAt(eye, center, up)

        camera.getOrCreateStateSet().setAttributeAndModes(osg.BlendFunc(GL.GL_ONE, GL.GL_ONE_MINUS_SRC_ALPHA))
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_TEST, False)
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_WRITEMASK, False)
        camera.getOrCreateStateSet().setMode(GL.GL_LIGHTING, False)
        material = osg.Material()
        color = osg.Vec4(1.0,1.0,1.0,1.0)
        material.setDiffuse(osg.Material.FRONT_AND_BACK, color)
        material.setAmbient(osg.Material.FRONT_AND_BACK, color)
        camera.getOrCreateStateSet().setAttributeAndModes(material)
        camera.setClearColor(osg.Vec4(0,0,0,0))
        camera.setClearMask(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)

        camera.setCullMask(VISIBLE_CULL_MASK)

        if not self.gw:
            raise Exception("GraphicsWindow not yet created")

        self.camera = camera

#        viewer.getCamera().setViewport(osg.Viewport(0,0, self.width(), self.height()))
#        viewer.getCamera().addChild(camera)
        camera.setGraphicsContext(self.gw)

    def texture_build(self):
        texture = osg.Texture2D()
        texture.setTextureSize(self.width(), self.height())
        texture.setInternalFormat(GL.GL_RGBA)
        texture.setResizeNonPowerOfTwoHint(False)

        # bug detected here, if I enable mipmap osg seems to use the view buffer to
        # do something. If I disable the mipmap it works.
        # you can view the issue with test_09_gaussian_filter.py
        #texture.setFilter(osg.Texture.MIN_FILTER, osg.Texture.LINEAR_MIPMAP_LINEAR)
        texture.setFilter(osg.Texture.MIN_FILTER, osg.Texture.LINEAR)
        texture.setFilter(osg.Texture.MAG_FILTER, osg.Texture.LINEAR)
        return texture

    def camera_build(self):
        texture = self.texture_build()

        camera = osg.Camera()
        camera.setViewport(osg.Viewport(0,0, self.width(), self.height()))
        camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
        camera.setRenderOrder(osg.Camera.PRE_RENDER)
        camera.setRenderTargetImplementation(osg.Camera.FRAME_BUFFER_OBJECT)
        camera.attach(osg.Camera.COLOR_BUFFER, texture, 0, 0, False, 0, 0)

        CAMERA_ANGLE = 45.0
        CAMERA_Z_TRANSLATE = 2.4142135623730949 #1.0 / math.tan(math.radians(CAMERA_ANGLE / 2.0))
        cameraPosition = [0.0, 0.0, CAMERA_Z_TRANSLATE]

        camera.setProjectionMatrixAsPerspective(CAMERA_ANGLE,float(self.width())/float(self.height()), 0.1, 10000.0)

        eye = osg.Vec3d(cameraPosition[0], cameraPosition[1], cameraPosition[2])
        center = osg.Vec3d(0,0,0)
        up = osg.Vec3d(0,1,0)
        camera.setViewMatrixAsLookAt(eye, center, up)

        camera.getOrCreateStateSet().setAttributeAndModes(osg.BlendFunc(GL.GL_ONE, GL.GL_ONE_MINUS_SRC_ALPHA))
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_TEST, False)
        camera.getOrCreateStateSet().setMode(GL.GL_DEPTH_WRITEMASK, False)
        camera.getOrCreateStateSet().setMode(GL.GL_LIGHTING, False)
        material = osg.Material()
        color = osg.Vec4(1.0,1.0,1.0,1.0)
        material.setDiffuse(osg.Material.FRONT_AND_BACK, color)
        material.setAmbient(osg.Material.FRONT_AND_BACK, color)
        camera.getOrCreateStateSet().setAttributeAndModes(material)
        camera.setClearColor(osg.Vec4(0,0,0,0))
        camera.setClearMask(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)

        return camera, texture

    def quad_create(self, texture):
        stateset = osg.StateSet()
        stateset.setTextureAttributeAndModes(0, texture)
        corner = osg.Vec3(-self.aspect_ratio, -1.0, 0)
        width = osg.Vec3(2 * self.aspect_ratio, 0, 0)
        height = osg.Vec3(0, 2 * 1.0, 0)
        geom = osg.createTexturedQuadGeometry(corner, width, height, 0.0, 0.0, 1.0, 1.0)
        geom.setStateSet(stateset)
        geode = osg.Geode()
        geode.addDrawable(geom)
        return geode

    def build_wrapping_node(self, data):
        grp = osg.Group()
        camera,texture = self.camera_build()

        grp.addChild(camera)
        camera.addChild(data)

        quad = self.quad_create(texture)
        grp.addChild(quad)

        grp.getOrCreateStateSet().setMode(GL.GL_LIGHTING, False)
        # ALPHA_TEST should be disabled by default by OpenGL but it's not.
        # That's why it is required to explicitly disable it.
        grp.getOrCreateStateSet().setMode(GL.GL_ALPHA_TEST, osg.StateAttribute.OFF)
        return grp

    def resetSceneData(self, data):
        self.timer.stop()
        if self.viewer is not None:
            self.viewer.setSceneData(None)
        if self.audio is not None:
            self.audio.terminate()
            self.audio = None

    def setSceneData(self, data):
        if data is not None:
            if self.viewer == None:
                self.viewer = self.createViewer()
            self.orginal_data = data
            data = self.build_wrapping_node(data)
            self.viewer.setSceneData(data)
            try:
                # when working with conservative viewer
                # to enable prefetch an release of image streams
                self.viewer.optimizeTimeRanges()
            except:
                pass

        # ready to render
        self.fps_calculator.reset(self.startTime)
        self.still_frame = False
        self.timer.start()

    def setAudioData(self, data, skip=0.0):
        if self.audio is not None:
            self.audio.terminate()
            self.audio = None
        self.audio_data = data
        self.audio_skip = float(skip)
        self.audio = SoundPlayer(input_data=data, tmp_dir=jarvis.get_home(), start=False, loop_nb=0, frames_per_buffer=1024, blocking=False)
        if self.audio is not None:
            self.audio.set_loop_time(start_time=self.audio_skip, end_time=self.audio_skip + self.loopTime)        
            self.audio.set_time(self.audio_skip + self.current_time, compensate_buffer=False)

    def getosgviewer(self):
        return self.viewer

    def size():
        def fget(self):
            return self._size
        def fset(self, value):
            if value is None:
                self._size = None
            else:
                width, height = value
                width = int(round(width))
                height = int(round(height))
                self._size = (width, height)
                self.setMinimumSize(width, height)
        return locals()
    size = property(**size())

    def resizeGL(self, w, h):
        self.aspect_ratio = w / float(h)
        self.parent.toolbar.aspect_ratio_btn_update()
        if self.viewer is not None:
            self.gw.resized(0, 0, w, h)
            self.setSceneData(self.orginal_data)
            self.resetCamera(self.viewer)

    def paintGL(self):
        if self.viewer == None:
            return

        frame_time = time.time()

        if self.audio is not None :
            if self.is_paused or self.still_frame:
                self.audio.pause()
            else:
                self.audio.play(blocking=False)
            audio_time = self.audio.get_time() - self.audio_skip
            # self.current_time = self.align_time(audio_time)
            self.current_time = audio_time
        else:
            if self.is_paused or self.still_frame:
                self.startTime = frame_time - self.current_time
            else:                
                self.current_time = frame_time - self.startTime
                if self.current_time >= self.loopTime:
                    self.startTime = frame_time
                    self.current_time = 0.0

        fps = self.fps_calculator.get(frame_time)
        self.parent.toolbar.update_time_info(self.current_time, self.loopTime, fps)
        
        self.viewer.frameAtTime(self.align_time(self.current_time))

    def align_time(self, t):
        return min(self.loopTime, max(0.0, round(t * config.FPS_UI) / config.FPS_UI))

    def update_time(self, from_ratio=None, from_delta=None):
        if from_ratio is not None:
            self.current_time = self.align_time(self.loopTime * from_ratio)
            self.startTime = time.time() - self.current_time

        elif from_delta is not None:
            self.current_time = self.align_time(self.current_time + from_delta)
            self.startTime = time.time() - self.current_time

        if self.audio is not None:
            self.audio.set_time(self.audio_skip + self.current_time, compensate_buffer=False)

    def pause(self):
        self.is_paused = True

    def play(self):
        self.is_paused = False
        
    def mousePressEvent( self, event ):
        """put the qt event in the osg event queue"""
        self.still_frame = True
        button = mouseButtonDictionary.get(event.button(), 0)
        self.update_time(from_ratio=float(event.x()) / float(self.width()))
        self.gw.getEventQueue().mouseButtonPress(event.x(), event.y(), button)

    def mouseMoveEvent(self, event):
        """put the qt event in the osg event queue"""
        self.update_time(from_ratio=float(event.x()) / float(self.width()))
        self.gw.getEventQueue().mouseMotion(event.x(), event.y())

    def mouseReleaseEvent( self, event ):
        """put the qt event in the osg event queue"""
        button = mouseButtonDictionary.get(event.button(), 0)
        self.gw.getEventQueue().mouseButtonRelease(event.x(), event.y(), button)
        self.still_frame = False

    def getGraphicsWindow(self):
        """returns the osg graphicswindow created by osgViewer """
        return self.gw
Example #9
	if len(os.listdir('/media/PiSoundBoard')) > 0:
		os.system("sudo rm /home/pi/soundfiles/*")
		os.system("sudo cp -R /media/PiSoundBoard/*.mp3 /home/pi/soundfiles")


def readSwitches():
	global lastPushedSwitch
	for i in switches:
		if GPIO.input(i) == False and lastPushedSwitch != i:
			lastPushedSwitch = i
			return switches[i]["filename"]
	return False


GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
initSwitches()
initRelais()
copyMediaFiles()

if __name__ == '__main__':
	while True:
		if player != False and player.isPlaying() == False:
			lastPushedSwitch = False

		filename = readSwitches()
		if filename != False and os.path.isfile(SoundFilePath + filename):
			#print(SoundFilePath + filename)
			player = SoundPlayer(SoundFilePath + filename, 0)
			player.play(1.0)
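readSwitches() above looks up switches[i]["filename"] for each monitored pin, so the snippet assumes a mapping roughly like the sketch below (pin numbers and file names are placeholders, not the project's real configuration):

# assumed shape of the switches table consumed by readSwitches(); keys are BCM pin numbers
switches = {
    17: {"filename": "sound1.mp3"},
    27: {"filename": "sound2.mp3"},
}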
Example #10
# Sound1a.py

import time
from soundplayer import SoundPlayer

# Use device with ID 0  (mostly standard audio output)
#p = SoundPlayer("//home/pi/Desktop/Sonido/Beep1.mp3", 0)
#p = SoundPlayer("//home/pi/Desktop/Sonido/Beep2.mp3", 0)
#p = SoundPlayer("//home/pi/Desktop/Sonido/Alarm.mp3", 0)
p = SoundPlayer("//home/pi/Desktop/Sonido/John Cena - My Time Is Now.mp3", 0)
print("play for 10 s with volume 0.5")
p.play(0.5)  # non-blocking, volume = 0.5 (range 0 to 1)
##print ("isPlaying:", p.isPlaying())
time.sleep(1)
##print ("pause for 5 s")
##p.pause()
##print ("isPlaying:", p.isPlaying())
##time.sleep(5)
##print ("resume for 10 s")
##p.resume()
##time.sleep(10)
##print ("stop")
##p.stop()
##print ("isPlaying:", p.isPlaying())
##print ("done")
Example #11
setup()
nbSongs = 4
songID = 0
state = "STOPPED"
print "ready->stopped"
p = SoundPlayer("/home/pi/songs/song" + str(songID) + ".mp3", 1)

while True:
    if GPIO.input(P_PAUSE) == GPIO.LOW and state == "PLAYING":
        state = "PAUSED"
        p.pause()
        print "playing->paused"
    elif GPIO.input(P_PLAY) == GPIO.LOW and state == "STOPPED":
        state = "PLAYING"
        p.play()
        print "stopped->playing, song ID", songID
    elif GPIO.input(P_PLAY) == GPIO.LOW and state == "PAUSED":
        state = "PLAYING"
        p.resume()
        print "paused->playing"
    elif GPIO.input(P_STOP) == GPIO.LOW and (state == "PAUSED"
                                             or state == "PLAYING"):
        state = "STOPPED"
        p.stop()
        print "paused/playing->stopped"
    elif GPIO.input(P_SELECT) == GPIO.LOW and state == "STOPPED":
        songID += 1
        if songID == nbSongs:
            songID = 0
        p = SoundPlayer("/home/pi/songs/song" + str(songID) + ".mp3", 1)
Example #12
def playMusic(file):
    from soundplayer import SoundPlayer
    p = SoundPlayer(file, 1)
    print("playing" + file)
    p.play()  # non-blocking, volume = 0.5
    print("done")
Example #13
               GPIO.PUD_UP)  # poll the power-on switch, ON == LOW
    GPIO.setwarnings(False)
    GPIO.setup(P_POWER, GPIO.OUT)
    GPIO.output(P_POWER, True)  # keep the power supply switched on
    DoSound.playTone(440, 0.3, dev)
    DoSound.playTone(550, 0.3, dev)
    DoSound.playTone(660, 0.3, dev)
    time.sleep(1)
    DoSound.playTone([440, 550, 660], 3, dev)
    time.sleep(2)


setup()
print "Bereit..."
p = SoundPlayer("/home/pi/mp3/Nanue.mp3", 1)
p.play(1)

while True:
    if GPIO.input(P_TON1) == GPIO.LOW:
        print "Ton1..."
        p.stop()
        DoSound.playTone(440, 0.3, dev)
    elif GPIO.input(P_TON2) == GPIO.LOW:
        print "Ton2..."
        p.stop()
        DoSound.playTone(550, 0.3, dev)
        p.play()
    elif GPIO.input(P_TON3) == GPIO.LOW:
        print "Ton3..."
        p.stop()
        DoSound.playTone(660, 0.3, dev)
Example #14
# Sound1c.py

import time
from soundplayer import SoundPlayer

# Use device with ID 1  (mostly USB audio adapter)
#p = SoundPlayer("/home/pi/Music/jew1.mp3", 1)
p = SoundPlayer("/home/pi/Music/jew1.wav", 1)
print "play whole song"
p.play(1, True)  # non-blocking, volume = 1
print "done"