示例#1
0
def main():
    """Wake-word command loop for the ReSpeaker demo.

    Waits for the 'hey respeaker' keyword, plays a greeting, then listens
    for one spoken command before dropping back to the wake-word state.
    Runs until the module-level ``quit_event`` is set (or the wake-word
    recognizer returns falsy, which ends the loop).
    """
    global mic, quit_event

    bing = BingVoice(BING_KEY)  # NOTE(review): unused below; kept in case construction has side effects
    awake = False

    pa = pyaudio.PyAudio()
    mic = Microphone(pa)
    player = Player(pa)

    while not quit_event.is_set():
        if not awake:
            if mic.recognize(keyword='hey respeaker'):
                awake = True
                player.play(hi)
                continue
            else:
                # Wake-word recognition returned falsy: leave the loop.
                break

        # One-shot command capture: up to 6 s of speech, 6 s wait.
        command = mic.recognize(max_phrase_ms=6000, max_wait_ms=6000)
        if command:
            print('recognized: ' + command)
            # BUG FIX: str.find returns 0 when the phrase STARTS with the
            # keyword, so the original `> 0` test missed "play music ...";
            # `>= 0` means "found anywhere".
            if command.find('play music') >= 0:
                pass

        # Require the wake word again before the next command.
        awake = False

    mic.close()
 def __init__(self, lock_clients, list_client):
     """Thread that owns a server-side microphone capture stream.

     :param lock_clients: lock object stored for later use (presumably
         guards ``list_client`` -- confirm against the run() method).
     :param list_client: shared list of clients, stored as ``self.lclients``.
     """
     Thread.__init__(self)
     self.logger.debug("Initializing microphone thread")
     # NOTE(review): relies on self.logger existing before Thread.__init__
     # returns -- likely a class attribute of the enclosing class; confirm.
     self.lock = lock_clients
     self.lclients = list_client
     # Create the microphone and open its stream immediately, at
     # construction time rather than in run().
     self.mic = Microphone()
     self.mic.createMicroStream()
示例#3
0
def task(quit_event):
    """Loop until *quit_event* is set: on each 'respeaker' wake-up,
    capture one utterance and print its recognized text."""
    mic = Microphone(quit_event=quit_event)

    while not quit_event.is_set():
        # Guard clause: keep polling until the wake word fires.
        if not mic.wakeup('respeaker'):
            continue
        print('Wake up')
        audio = mic.listen()
        transcript = mic.recognize(audio)
        if transcript:
            print('Recognized %s' % transcript)
示例#4
0
 def __init__(self, server_link, elevator_num):
     """Assemble one elevator cabin and all of its attached devices.

     :param server_link: link object used to reach the dispatch server.
     :param elevator_num: numeric identifier of this elevator.
     """
     self.server_link = server_link
     self.elevator_num = elevator_num
     # Initial state: light off, doors closed. LIGHT / DOORS /
     # DOORS_CLOSED_STAGE are looked up on self, so they are presumably
     # class-level constants -- not visible in this chunk, confirm.
     self.light_state = self.LIGHT['OFF']
     self.doors_state = self.DOORS['CLOSED']
     # Each peripheral receives a back-reference to this cabin.
     self.doors_sensor = DoorsSensor(self)
     self.weight_sensor = WeightSensor(self)
     self.speaker = Speaker(self)
     self.microphone = Microphone(self)
     # The floor button talks straight to the server, not via the cabin.
     self.set_target_floor_button = SetTargetFloorButton(
         self.server_link, self.elevator_num)
     self.call_dispatcher_button = CallDispatcherButton(self)
     self.doors_closing_stage = self.DOORS_CLOSED_STAGE
     self.status = True
     # Worker threads are collected here; empty at construction.
     self._threads = []
示例#5
0
def main():
    """Wake-word voice loop backed by Microsoft Bing Voice Recognition.

    After 'hey respeaker' wakes the device it captures one utterance,
    sends it to Bing for recognition, echoes the text back via TTS, and
    handles the start/stop/play-recording voice commands. Runs until the
    module-level ``quit_event`` is set.
    """
    global mic, quit_event

    bing = BingVoice(BING_KEY)
    awake = False

    pa = pyaudio.PyAudio()
    mic = Microphone(pa)
    player = Player(pa)

    while not quit_event.is_set():
        if not awake:
            if mic.recognize(keyword='hey respeaker'):
                awake = True
                player.play(hi)
                continue
            else:
                # Wake-word recognition returned falsy: leave the loop.
                break

        data = b''.join(mic.listen())
        if data:
            # recognize speech using Microsoft Bing Voice Recognition
            try:
                text = bing.recognize(data, language='en-US')
                # BUG FIX: the original concatenated str + bytes
                # ('Bing:' + text.encode('utf-8')), a TypeError on
                # Python 3; print the text directly instead.
                print('Bing:' + text)
                tts_data = bing.synthesize('you said ' + text)
                player.play_raw(tts_data)

                if text.find('start recording') >= 0:
                    mic.record('record.wav')
                elif text.find('stop recording') >= 0:
                    mic.interrupt(stop_recording=True)
                elif text.find('play recording audio') >= 0:
                    player.play('record.wav')
            except UnknownValueError:
                print(
                    "Microsoft Bing Voice Recognition could not understand audio"
                )
            except RequestError as e:
                print(
                    "Could not request results from Microsoft Bing Voice Recognition service; {0}"
                    .format(e))
        else:
            print('no data')

        # Require the wake word again before the next utterance.
        awake = False

    mic.close()
示例#6
0
    def __init__(self):
        """Set up SIGINT handling, the motor GPIO channels, the sensors,
        and the robot's initial state."""
        # Install our graceful-exit handler; signal.signal returns the
        # previous handler, which is kept for reference.
        self.sig_hndlr = signal.signal(signal.SIGINT, self.exit_gracefully)

        #set up GPIO
        # One PWM speed channel per side (arguments per PWMOutputDevice:
        # pin, active_high, initial value 0, frequency 1000 -- confirm
        # against gpiozero's signature).
        self.driveLeft  = PWMOutputDevice(PWM_DRIVE_LEFT, True, 0, 1000)
        self.driveRight = PWMOutputDevice(PWM_DRIVE_RIGHT, True, 0, 1000)

        # Direction pins: a forward and a reverse line for each side.
        self.forwardLeft  = DigitalOutputDevice(FORWARD_LEFT_PIN)
        self.reverseLeft  = DigitalOutputDevice(REVERSE_LEFT_PIN)
        self.forwardRight = DigitalOutputDevice(FORWARD_RIGHT_PIN)
        self.reverseRight = DigitalOutputDevice(REVERSE_RIGHT_PIN)

        # Set up sensors
        self.camera = Camera()
        self.microphone = Microphone()

        # Start in the Idle state (state-machine pattern).
        self.state = Idle()
示例#7
0
def main(quit_event=None, lan='zh'):
    """Voice loop dispatching between Alexa and local MQTT commands.

    :param quit_event: threading.Event that stops the loop when set.
        BUG FIX: the original accepted the default ``None`` but then
        crashed on ``quit_event.is_set()``; a fresh, never-set Event is
        now substituted so the default call runs forever as intended.
    :param lan: language code passed through to Microphone ('zh' default).
    """
    if quit_event is None:
        from threading import Event
        quit_event = Event()

    mic = Microphone(quit_event=quit_event, language=lan)
    from broker import ControllerBroker
    localBroker = ControllerBroker()
    localBroker.client.loop_start()
    alexa = Alexa(mic)
    alexa.get_token()

    os.system('aplay {}/hello.wav'.format(resources_path))

    logging.debug('start')
    while not quit_event.is_set():
        keyword = mic.wakeup(keywords=['ALEXA', 'GREEBLE'])
        logging.debug('Recognized %s' % keyword)
        if keyword and ('HELLO' in keyword and 'ALEXA' in keyword):
            logging.debug('wakeup Alexa')
            os.system('aplay {}/alexayes.wav'.format(resources_path))
            data = mic.listen()
            try:
                alexa.recognize(data)
            except Exception as e:
                # BUG FIX: logging.warn is deprecated and e.message does
                # not exist on Python 3 exceptions; log the exception.
                logging.warning(e)
        elif keyword and ('HELLO' in keyword and 'GREEBLE' in keyword):
            logging.debug('wakeup Zhima')
            os.system('aplay {}/alexayes.wav'.format(resources_path))
            data = mic.listen()
            keyword = mic.recognize(data)
            #keyword = mic.detect()
            logging.debug('Get commands %s' % keyword)
            # localcommands returning 0 signals success.
            if keyword and localcommands(keyword, localBroker) == 0:
                os.system('aplay {}/alexaok.wav'.format(resources_path))
            else:
                os.system('aplay {}/error.wav'.format(resources_path))

    mic.close()
    localBroker.client.loop_stop()
    logging.debug('Mission completed')
示例#8
0
def runVisualizer(roomDimensions, microphoneSensitivity, signalPos,
                  sigStrength, micGrid, gridOffset, gridHeight, camPos):
    """Simulate a sound source in a room, trilaterate its position from a
    microphone grid, point the camera at the prediction, and visualize.

    Aborts early (after printing a message) if the microphones, camera,
    or signal fall outside the room boundaries.
    """
    # Build the room from its width / length / ceiling dimensions.
    room = Room(roomDimensions[0], roomDimensions[1], roomDimensions[2])

    # Lay out the microphone grid and validate it against the room.
    micPositions = helpers.generateMicArray(micGrid[0], micGrid[1],
                                            gridOffset, gridHeight)
    if not helpers.allInRoom(micPositions, room):
        print(
            "Some microphones fell outside the boundaries of the room, please re-enter data."
        )
        return

    # One Microphone per grid position, all sharing the same sensitivity.
    micObjects = [
        Microphone(position, microphoneSensitivity)
        for position in micPositions
    ]

    # DSP stage consuming the whole array.
    dsp = DSP(99, micObjects)

    # The camera must also sit inside the room.
    if not helpers.allInRoom([camPos], room):
        print(
            "The camera fell outside the boundaries of the room, please re-enter data."
        )
        return
    initialOrientation = [0, 0]  # pointing along x-axis in degrees
    camController = CameraController(camPos, initialOrientation, dsp, room)
    camController.setMicSensitivity(microphoneSensitivity)
    camController.setMicPositions(micPositions)

    # ... and so must the signal source.
    if not helpers.allInRoom([signalPos], room):
        print(
            "The signal fell outside the boundaries of the room, please re-enter data."
        )
        return

    # Broadcast the signal to every microphone in the array.
    for micObject in micObjects:
        micObject.sendSignal(signalPos, sigStrength)
    print(
        "-> Audio signal at position x: {0}, y: {1}, z: {2} with strength {3} broadcast to all microphones"
        .format(signalPos[0], signalPos[1], signalPos[2], sigStrength))

    # Predict the source location via sphere trilateration and report it.
    predictedLocation = camController.getSignalPosition(sigStrength)
    print("->>> Predicted Signal Location: {0}, Actual Signal Location: {1}".
          format(predictedLocation, signalPos))

    # Aim the camera at the predicted source and render the scene.
    camController.rePositionCamera(predictedLocation)

    visualize(camController, sigStrength, predictedLocation)
示例#9
0
# Pre-recorded prompt clips used by the assistant loop below.
hi = os.path.join(script_dir, 'audio/hi.wav')
thunder = os.path.join(script_dir, 'audio/thunder-01.wav')
startlearning = os.path.join(script_dir, 'audio/startlearning.wav')
stoplearning = os.path.join(script_dir, 'audio/stoplearning.wav')
sendir = os.path.join(script_dir, 'audio/sendir.wav')

# Report 'offline' over SPI while the audio stack is being set up
# (the SPI peer is not visible in this chunk -- confirm its protocol).
spi = SPI()
spi.write('offline\n')

bing = BingVoice(BING_KEY)

# Loop-control flags: mission_completed ends the main loop (set by the
# SIGINT handler); awake tracks whether the wake word has been heard.
mission_completed = False
awake = False

pa = pyaudio.PyAudio()
mic = Microphone(pa)
player = Player(pa)

# Audio stack is ready -- announce it over SPI.
spi.write('online\n')


def handle_int(sig, frame):
    """SIGINT handler: flag the main loop to stop and close the mic."""
    global mission_completed
    mission_completed = True
    mic.close()


signal.signal(signal.SIGINT, handle_int)

while not mission_completed:
    if not awake:
示例#10
0
    plt.xscale('log')
    plt.show()


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Plays a wave file.\n\n" +\
          "Usage: %s filename.wav" % sys.argv[0])
        sys.exit(-1)
    input_file = sys.argv[1]

    # speaker = Speaker()
    # speaker.play_file(input_file)

    if (input_file == 'record'):
        mic = Microphone()
        mic.record_to_file(1, 'output.wav')
        input_file = 'output.wav'
        rate, data = wavfile.read(input_file)
        plot_fourier(data)
    elif (input_file == 'compare'):
        mic = Microphone()
        datasets = []

        again = 'y'
        while (again.lower() == 'y'):
            mic.record_to_file(1, 'input.wav')
            rate, data = wavfile.read('input.wav')
            datasets.append({
                'title': 'sample {}'.format(len(datasets) + 1),
                'data': data
示例#11
0
# Wake-word support is optional: fall back gracefully when the snowboy
# hotword engine is not installed.
WW_ENABLED = False
INTERRUPTED = False

try:
	import snowboydecoder
	WW_ENABLED = True
except ImportError:
	WW_ENABLED = False

p = pyaudio.PyAudio()

# Lex bot session and a 16 kHz mono 16-bit playback stream for responses.
lex_session = LexSession("TestBot", "$LATEST", "mtavis")
slowStream = p.open(format=p.get_format_from_width(width=2), channels=1, rate=16000, output=True)

m = Microphone()

# prep audio sounds
micOpenWav = wave.open("resources/avs_small_system_state_active_start.wav", 'rb')
# BUG FIX: getsampwidth() returns bytes-per-sample (e.g. 2), NOT a PyAudio
# format constant; it must be translated with get_format_from_width, as the
# slowStream above already does.
fastStream = p.open(format=p.get_format_from_width(micOpenWav.getsampwidth()), channels=micOpenWav.getnchannels(), rate=micOpenWav.getframerate(), output=True)
micOpenWavAudio = micOpenWav.readframes(micOpenWav.getnframes())
micOpenWav.close()
micDoneWav = wave.open("resources/avs_small_system_state_user_speech_confirmed.wav", 'rb')
micDoneWavAudio = micDoneWav.readframes(micDoneWav.getnframes())
micDoneWav.close()


if WW_ENABLED:
	detector = snowboydecoder.HotwordDetector("resources/snowboy.umdl", sensitivity=0.5)

def signal_handler(signal, frame):
示例#12
0
import matplotlib.pyplot as plt
import matplotlib.animation as animation

# Figure with axes fixed to the visualizer's logical length/height.
fig, ax = plt.subplots()

ax.set_xlim([0, VISUALIZER_LENGTH])
ax.set_ylim([0, VISUALIZER_HEIGHT])

# Initial plot data: x runs 0..VISUALIZER_LENGTH-1, y starts flat at zero.
x_axis = np.arange(VISUALIZER_LENGTH)
y_axis = np.zeros(VISUALIZER_LENGTH)

# Keep the Line2D handle so the animation callbacks can update it in place.
line, = ax.plot(x_axis, y_axis)

# Capture device and DSP stage, configured from module-level constants
# (defined elsewhere in this file -- not visible in this chunk).
mic = Microphone(FORMAT=FORMAT,
                 CHANNELS=CHANNELS,
                 RATE=RATE,
                 FRAMES_PER_BUFFER=FRAMES_PER_BUFFER,
                 DEVICE_ID=DEVICE_ID)
sig_processor = DSP(ALPHA_SMOOTHING=ALPHA_SMOOTHING)


def init():
    """Blitting init callback: reset the line to a flat zero baseline
    and hand it back to FuncAnimation."""
    xs = np.arange(VISUALIZER_LENGTH)
    line.set_data(xs, np.zeros(xs.size))
    return line,


def animate(i):
    x_data = np.arange(VISUALIZER_LENGTH)
    y_data = np.zeros(len(x_data))
示例#13
0
 def createMicrophone(self):
     """Create the server's Microphone and open its capture stream,
     storing it on ``self.micro`` for later use."""
     self.logger.debug("The server is starting the microphone thread")
     self.micro = Microphone()
     self.micro.createMicroStream()
示例#14
0
 def __init__(self):
     """Create the audio (Microphone) and video (Camera) capture devices."""
     self.mic = Microphone()
     self.cam = Camera() 
示例#15
0
 def __init__(self, master):
     """Build the components this object coordinates and keep a reference
     to its owner.

     :param master: owning object (name suggests a Tk parent widget --
         confirm against the caller).
     """
     # Names suggest an audio-capture -> network-send pipeline with a
     # traffic printer; the classes are defined elsewhere in the project.
     self.microphone = Microphone()
     self.audioToNet = AudioToNet()
     self.trafficPrinter = TrafficPrinter()
     self.master = master