def main():
    """Wake-word loop: wait for 'hey respeaker', then recognize one command.

    Uses module globals `mic` and `quit_event`; closes the microphone on exit.
    """
    global mic, quit_event

    bing = BingVoice(BING_KEY)
    awake = False

    pa = pyaudio.PyAudio()
    mic = Microphone(pa)
    player = Player(pa)

    while not quit_event.is_set():
        if not awake:
            if mic.recognize(keyword='hey respeaker'):
                awake = True
                player.play(hi)  # audible acknowledgement of the wake word
                continue
            else:
                break  # keyword recognition failed; leave the loop entirely

        command = mic.recognize(max_phrase_ms=6000, max_wait_ms=6000)
        if command:
            print('recognized: ' + command)
            # BUG FIX: str.find() returns 0 when the phrase starts the command,
            # so the original `> 0` test missed "play music ..." at index 0.
            if 'play music' in command:
                pass  # TODO: start music playback

        awake = False  # require the wake word again before the next command

    mic.close()
Пример #2
0
    def __init__(self):
        """Set up signal handling, motor GPIO outputs, and sensors."""
        # Install a SIGINT handler so Ctrl-C can stop the robot cleanly.
        self.sig_hndlr = signal.signal(signal.SIGINT, self.exit_gracefully)

        #set up GPIO
        # PWM channels drive wheel speed (args: pin, active-high, initial 0,
        # 1000 -- presumably PWM frequency in Hz; confirm against the
        # PWMOutputDevice signature in use).
        self.driveLeft  = PWMOutputDevice(PWM_DRIVE_LEFT, True, 0, 1000)
        self.driveRight = PWMOutputDevice(PWM_DRIVE_RIGHT, True, 0, 1000)

        # One forward/reverse direction pin pair per wheel.
        self.forwardLeft  = DigitalOutputDevice(FORWARD_LEFT_PIN)
        self.reverseLeft  = DigitalOutputDevice(REVERSE_LEFT_PIN)
        self.forwardRight = DigitalOutputDevice(FORWARD_RIGHT_PIN)
        self.reverseRight = DigitalOutputDevice(REVERSE_RIGHT_PIN)
        
        # Set up sensors
        self.camera = Camera()
        self.microphone = Microphone()

        # Initial state of the state machine.
        self.state = Idle()
Пример #3
0
class AudioVideo:
    """
    Combined audio/video recorder: pairs motion detection with
    simultaneous audio and video capture, stitching the resulting
    .h264 video and .wav audio into a single .mp4 file.
    """
    def __init__(self):
        # One recorder per medium: microphone for audio, camera for video.
        self.mic = Microphone()
        self.cam = Camera()

    def startRecording(self):
        # Timestamp the start so stopRecording can report the duration.
        self.start_time = time.time()
        for recorder in (self.mic, self.cam):
            recorder.start()

    def stopRecording(self):
        for recorder in (self.mic, self.cam):
            recorder.stop()
        elapsed = time.time() - self.start_time
        print("Recording stops.")
        print("--- %s seconds ---" % elapsed)
Пример #4
0
def main():
    """Wake-word loop: on 'hey respeaker', capture an utterance, run it
    through Bing speech recognition, echo it back via TTS, and act on a
    few fixed voice commands.

    Uses module globals `mic` and `quit_event`; closes the microphone on exit.
    """
    global mic, quit_event

    bing = BingVoice(BING_KEY)
    awake = False

    pa = pyaudio.PyAudio()
    mic = Microphone(pa)
    player = Player(pa)

    while not quit_event.is_set():
        if not awake:
            if mic.recognize(keyword='hey respeaker'):
                awake = True
                player.play(hi)  # audible acknowledgement of the wake word
                continue
            else:
                break  # keyword recognition failed; leave the loop entirely

        data = b''.join(mic.listen())
        if data:
            # recognize speech using Microsoft Bing Voice Recognition
            try:
                text = bing.recognize(data, language='en-US')
                # BUG FIX: the original concatenated str with bytes
                # ('Bing:' + text.encode('utf-8')), a TypeError on Python 3.
                print('Bing:' + text)
                tts_data = bing.synthesize('you said ' + text)
                player.play_raw(tts_data)

                if text.find('start recording') >= 0:
                    mic.record('record.wav')
                elif text.find('stop recording') >= 0:
                    mic.interrupt(stop_recording=True)
                elif text.find('play recording audio') >= 0:
                    player.play('record.wav')
            except UnknownValueError:
                print("Microsoft Bing Voice Recognition could not understand audio")
            except RequestError as e:
                print("Could not request results from Microsoft Bing Voice Recognition service; {0}".format(e))
        else:
            print('no data')

        awake = False  # require the wake word again before the next utterance

    mic.close()
Пример #5
0
class Client():
  """UDP voice client: captures microphone audio and streams it to a peer."""
  def __init__(self, master):
    # Collaborators are project classes; `master` presumably coordinates the
    # shared socket via serveOn() -- TODO confirm against the Master class.
    self.microphone = Microphone()
    self.audioToNet = AudioToNet()
    self.trafficPrinter = TrafficPrinter()
    self.master = master

  def call(self, host, port):
    """Open the microphone, create a UDP socket, and stream to host:port."""
    self.microphone.init()
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.master.serveOn(s)
    # sendTo loops forever, so destroy() below is only reached on exception.
    self.sendTo(s, host, port)
    self.microphone.destroy()

  def sendTo(self, socket, host, port):
    """Read audio chunks and send them to (host, port) in an endless loop.

    NOTE(review): the `socket` parameter shadows the `socket` module here.
    """
    print "Sending to {0}, port {1}".format(host, port)
    while True:
      audio = self.microphone.nextAudioChunk()
      chunk = self.audioToNet.convert(audio)
      self.sendChunk(chunk, socket, host, port)
  
  def sendChunk(self, chunk, socket, host, port):
    """Send one datagram and record its size for traffic reporting."""
    socket.sendto(chunk, (host, port))
    self.trafficPrinter.addTraffic(len(chunk))
Пример #6
0
class Client():
    """UDP voice client: streams microphone audio to a remote host/port."""
    def __init__(self, master):
        # Collaborators are project classes; `master` presumably manages the
        # shared socket via serveOn() -- TODO confirm against the Master class.
        self.microphone = Microphone()
        self.audioToNet = AudioToNet()
        self.trafficPrinter = TrafficPrinter()
        self.master = master

    def call(self, host, port):
        """Open the microphone, create a UDP socket, and stream to host:port."""
        self.microphone.init()
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.master.serveOn(s)
        # sendTo loops forever; destroy() below only runs after an exception.
        self.sendTo(s, host, port)
        self.microphone.destroy()

    def sendTo(self, socket, host, port):
        """Read audio chunks and send them to (host, port) in an endless loop.

        NOTE(review): the `socket` parameter shadows the `socket` module here.
        """
        print "Sending to {0}, port {1}".format(host, port)
        while True:
            audio = self.microphone.nextAudioChunk()
            chunk = self.audioToNet.convert(audio)
            self.sendChunk(chunk, socket, host, port)

    def sendChunk(self, chunk, socket, host, port):
        """Send one datagram and record its size for traffic reporting."""
        socket.sendto(chunk, (host, port))
        self.trafficPrinter.addTraffic(len(chunk))
Пример #7
0
from worker import Worker
import time

# Resolve audio assets relative to this script rather than the CWD.
script_dir = os.path.dirname(os.path.realpath(__file__))

hi = os.path.join(script_dir, 'audio/hi.wav')

# The Bing recognizer and TTS share one authenticated base client.
bing = BingBase(BING_KEY)
recognizer = BingVoiceRecognizer(bing)
tts = BingTTS(bing)

mission_completed = False
awake = False

pa = pyaudio.PyAudio()
mic = Microphone(pa)
player = Player(pa)
# Presumably lets the microphone coordinate with playback (e.g. mute while
# the player is active) -- TODO confirm against the Microphone class.
mic.player = player


worker = Worker()
worker.set_tts(tts)
worker.set_player(player)

def handle_int(sig, frame):
    """SIGINT handler: flag completion and release the audio devices."""
    global mission_completed

    print "Terminating..."
    mission_completed = True
    mic.close()
    player.close()
Пример #8
0
from worker import Worker
import time

# Resolve audio assets relative to this script rather than the CWD.
script_dir = os.path.dirname(os.path.realpath(__file__))

hi = os.path.join(script_dir, 'audio/hi.wav')

# The Bing recognizer and TTS share one authenticated base client.
bing = BingBase(BING_KEY)
recognizer = BingVoiceRecognizer(bing)
tts = BingTTS(bing)

mission_completed = False
awake = False

pa = pyaudio.PyAudio()
mic = Microphone(pa)
player = Player(pa)

worker = Worker()
worker.set_tts(tts)
worker.set_player(player)

def handle_int(sig, frame):
    """SIGINT handler: flag completion, release audio devices, stop the
    worker, and tear down the PyAudio instance."""
    global mission_completed

    print "Terminating..."
    mission_completed = True
    mic.close()
    player.close()
    worker.stop()
    pa.terminate()
Пример #9
0
    sys.exit(-1)

# Resolve audio assets relative to this script rather than the CWD.
script_dir = os.path.dirname(os.path.realpath(__file__))

hi = os.path.join(script_dir, 'audio/hi.wav')

# Report initial "offline" status over the SPI link.
spi = SPI()
spi.write('offline\n')

bing = BingVoice(BING_KEY)

mission_completed = False
awake = False

pa = pyaudio.PyAudio()
mic = Microphone(pa)
player = Player(pa)


def check_internet(host="8.8.8.8", port=53, timeout=6):
    """Return True when a TCP connection to host:port succeeds, else False.

    Host: 8.8.8.8 (google-public-dns-a.google.com)
    OpenPort: 53/tcp
    Service: domain (DNS/TCP)
    """
    try:
        # NOTE: setdefaulttimeout is process-wide; kept for compatibility
        # with the original behaviour.
        socket.setdefaulttimeout(timeout)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((host, port))
            return True
        finally:
            # BUG FIX: the original leaked the socket on every call.
            sock.close()
    except Exception as ex:
        # BUG FIX: `ex.message` is deprecated in Python 2.6+ and gone in
        # Python 3, so the original raised AttributeError here.
        print(ex)
        # Explicit False instead of the original implicit None (both falsy).
        return False
Пример #10
0
 def __init__(self, master):
   """Store the collaborators used for capturing and streaming audio."""
   # All collaborators are project classes; `master` presumably coordinates
   # the shared socket -- TODO confirm against the caller.
   self.microphone = Microphone()
   self.audioToNet = AudioToNet()
   self.trafficPrinter = TrafficPrinter()
   self.master = master
Пример #11
0
 def __init__(self):
     """Create the audio (microphone) and video (camera) recorders."""
     self.mic = Microphone()
     self.cam = Camera() 
Пример #12
0
class Robot(State):
    def __init__(self):
        self.sig_hndlr = signal.signal(signal.SIGINT, self.exit_gracefully)

        #set up GPIO
        self.driveLeft  = PWMOutputDevice(PWM_DRIVE_LEFT, True, 0, 1000)
        self.driveRight = PWMOutputDevice(PWM_DRIVE_RIGHT, True, 0, 1000)

        self.forwardLeft  = DigitalOutputDevice(FORWARD_LEFT_PIN)
        self.reverseLeft  = DigitalOutputDevice(REVERSE_LEFT_PIN)
        self.forwardRight = DigitalOutputDevice(FORWARD_RIGHT_PIN)
        self.reverseRight = DigitalOutputDevice(REVERSE_RIGHT_PIN)
        
        # Set up sensors
        self.camera = Camera()
        self.microphone = Microphone()

        self.state = Idle()

    def on_event(self, event):
        self.state = self.state.on_event(event)

    def allStop(self):
        self.forwardLeft.value = False
        self.reverseLeft.value = False
        self.forwardRight.value = False
        self.reverseRight.value = False
        self.driveLeft.value = 0
        self.driveRight.value = 0
        
    def goForward(self):
        self.forwardLeft.value = True
        self.reverseLeft.value = False
        self.forwardRight.value = True
        self.reverseRight.value = False
        self.driveLeft.value = 1.0
        self.driveRight.value = 1.0

    def goBackward(self):
        self.forwardLeft.value = False
        self.everseLeft.value = True
        self.orwardRight.value = False
        self.reverseRight.value = True
        self.driveLeft.value = 1.0
        self.driveRight.value = 1.0

    def rotateRight(self):
        self.forwardLeft.value = True
        self.reverseLeft.value = False
        self.forwardRight.value = False
        self.reverseRight.value = True
        self.driveLeft.value = 0.8
        self.driveRight.value = 0.8

    def rotateLeft(self):
        self.forwardLeft.value = False
        self.reverseLeft.value = True
        self.forwardRight.value = True
        self.reverseRight.value = False
        self.driveLeft.value = 0.8
        self.driveRight.value = 0.8

    def bankRight(self):
        self.forwardLeft.value = True
        self.reverseLeft.value = False
        self.forwardRight.value = True
        self.reverseRight.value = False
        self.driveLeft.value = 0.8
        self.driveRight.value = 0.2

    def bankLeft(self):
        self.forwardLeft.value = True
        self.reverseLeft.value = False
        self.forwardRight.value = True
        self.reverseRight.value = False
        self.driveLeft.value = 0.2
        self.driveRight.value = 0.8
            
    def detectHuman(self):
        hog = cv2.HOGDescriptor()
        hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

        im = self.camera.read()
        im = imutils.resize(im, width=min(400, im.shape[1]))
        orig = im.copy()

        #detect peope in the image
        (rects, weights) = hog.detectMultiScale(im, winStride=(4, 4), padding=(8, 8), scale=1.05)
        
        # draw the original bounding boxes
        for (x, y, w, h) in rects:
            cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)

        # apply non-maxima suppression to the bounding boxes using a fiarly large overlap
        # threshold to try to maintain overlapping
        rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
        pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

        # draw final bounding boxes
        bounded_box = None
        for (xA, yA, xB, yB) in pick:
            # here we assume one person is in the frame
            bounded_box = (xA, yA, xB, yB)
            cv2.rectangle(im, (xA, yA), (xB, yB), (0, 255,0), 2)
            

        # show images
        #cv2.imshow("Before", orig)
        cv2.imshow("After", im)
        print bounded_box
        return bounded_box
        
        

    def followHuman(self):
        # Variables to Change
        binsize = 17
        turningTime = 0.2
        midrange_L = 7
        midrange_R = 11
        bounded_box = self.detectHuman()
        if bounded_box == None:
            return
        mid_pt = (bounded_box[0] + bounded_box[2]) / 2
        if (mid_pt > (400*midrange_L) / binsize and mid_pt < (400*midrange_R) / binsize):
            self.goForward()
            time.sleep(1.0)
            self.allStop()
            print "human in view"
        elif (mid_pt < (400*midrange_L) / binsize):
            print "left side"        
            self.rotateLeft()
            time.sleep(turningTime)
            self.allStop()
        else:
            print "right side"
            self.rotateRight()
            time.sleep(turningTime)
            self.allStop()
    
    def startupSensors(self):
        self.camera.startStream()
        self.microphone.startRecording()
        time.sleep(1)

    def quit(self):
        self.camera.close()
        self.microphone.close()
        self.allStop() 
    
    def runStateMachine(self):
        Thread(target=self.stateMachine, args=()).start()
   
    def runRobot(self):
        while True:
            print "Recording"
            
            if (self.microphone.startRecording()):
                print "Done Recording"
                self.on_event(self.microphone.read())
                print self.state.__str__()
                print self.state.__str__() == 'Idle' 
                if (self.state.__str__() == 'Idle'):
                    self.allStop()
                elif (self.state.__str__() == 'FollowHumanIdle'):
                    #self.camera.startStream()
                    self.allStop()
                elif (self.state.__str__() == 'FollowHuman'):
                    self.followHuman()
                elif (self.state.__str__() == 'VoiceControlIdle'):
                    print "VCIdle"
                    self.allStop()
                elif (self.state.__str__() == 'Forward'):
                    print "for"
                    self.goForward()
                elif (self.state.__str__() == 'RotateLeft'):
                    print "RLeft"
                    self.rotateLeft()
                elif (self.state.__str__() == 'RotateRight'):
                    self.rotateRight()
                elif (self.state.__str__() == 'QuitApp'):
                    self.quit()
                    return
    
                time.sleep(2.0)

    def followSpeech(self):
        while True:
            if (self.microphone.startRecording()):
                print "Done Recording"
                print self.microphone.read()
                self.state = self.microphone.read()
                if (self.state.__str__() == "go"):
                    self.goForward()
                elif(self.state.__str__() == "left"):
                    self.rotateLeft()
                elif(self.state.__str__() == "right"):
                    self.rotateRight()
                time.sleep(1.0)
                self.allStop()
                return
 
    def stateMachine(self):
        print "Starting recording"
        prevState = None 
        self.microphone.startRecording()
        time.sleep(1.0)
        while True:
            if prevState != self.microphone.read():
                prevState = self.microphone.read()
                self.on_event(self.microphone.read())
            time.sleep(1.0)
            if (self.state.__str__() == "Idle"):
                continue
                #self.allStop()
            elif (self.state.__str__() == "FollowHumanIdle"):
                self.camera.startStream()
                self.allStop()
            elif (self.state.__str__() == "FollowHuman"):
                self.followHuman()
            elif (self.state.__str__() == "VoiceControlIdle"):
                self.allStop()
            elif (self.state.__str__() == "RotateLeft"):
                self.rotateLeft()
            elif (self.state.__str__() == "RotateRight"):
                self.rotateRight()
            elif (self.state.__str__() == "QuitApp"):
                self.quit()
                return
             
    def exit_gracefully(self, signal, frame):
        print('Trying to exit gracefully...')
        self.quit()
        sys.exit(0)
Пример #13
0
 def __init__(self, master):
     """Store the collaborators used for capturing and streaming audio."""
     # All collaborators are project classes; `master` presumably coordinates
     # the shared socket -- TODO confirm against the caller.
     self.microphone = Microphone()
     self.audioToNet = AudioToNet()
     self.trafficPrinter = TrafficPrinter()
     self.master = master