コード例 #1
0
ファイル: processAudio.py プロジェクト: jugador87/ContextDemo
def record():
    """
    Record a word or words from the microphone and 
    return the data as an array of signed shorts.

    Normalizes the audio, trims silence from the 
    start and end, and pads with 0.5 seconds of 
    blank sound to make sure VLC et al can play 
    it without getting chopped off.
    """
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT, channels=1, rate=RATE,
        input=True, output=True,
        input_device_index=0,
        frames_per_buffer=CHUNK_SIZE)

    num_silent = 0
    snd_started = False

    r = array('h')

    while 1:
        # little endian, signed short
        #snd_data = array('h', stream.read(CHUNK_SIZE))
        try:
            raw_data = stream.read(CHUNK_SIZE)
            snd_data = numpy.fromstring(raw_data, dtype=numpy.int16)
        except:
            print "You ruined EVERYTHING!"
            sys.exit()

        if byteorder == 'big':
            snd_data.byteswap()
        r.extend(snd_data)

        silent = is_silent(snd_data)

        if silent and snd_started:
            num_silent += 1
        elif not silent and not snd_started:
            #print "TRIGGERED: ",analyse.loudness(snd_data), analyse.musical_detect_pitch(snd_data)
            snd_started = True

        if snd_started and num_silent > 30:
            break

        if not snd_started:
            print analyse.loudness(snd_data), analyse.musical_detect_pitch(snd_data)

    sample_width = p.get_sample_size(FORMAT)
    stream.stop_stream()
    stream.close()
    p.terminate()

    r = normalize(r)
    r = trim(r)
    #r = add_silence(r, 0.5)
    return sample_width, r
コード例 #2
0
ファイル: SpeechNetSVM.py プロジェクト: CodeR57/DeepSentiment
 def load_data_file(self):
     """Load loudness/pitch/emotion triples for every speaker directory.

     Returns a list of [loudness, pitch, emotion] rows, one per .wav
     file. The emotion label comes from the file-name stem: names
     starting with "s" use the first two characters, others the first.
     """
     base = "/media/vyassu/OS/Users/vyas/Documents/Assigments/BigData/AudioData/"
     outputdata = []
     # The four speaker folders (DC, JE, JK, KL) were previously handled
     # by four byte-identical copies of the same loop; iterate instead.
     for speaker in ("DC", "JE", "JK", "KL"):
         for f in gb.glob(base + speaker + "/*.wav"):
             frate, inputdata = sc.read(f)
             pitch = lp.getPitch(f)
             loudness = abs(an.loudness(inputdata))
             filename = f.split("/")[-1].split(".")[0]
             if filename[0] == "s":
                 emotion = filename[0:2]
             else:
                 emotion = filename[0]
             outputdata.append([loudness, pitch, emotion])
     return outputdata
コード例 #3
0
ファイル: processAudio.py プロジェクト: jugador87/ContextDemo
def sampleAudio(s):
    "stream and process s seconds from the microphone"
    
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT, channels=1, rate=RATE,
        input=True, output=True,
        input_device_index=0,
        frames_per_buffer=CHUNK_SIZE)

    # little endian, signed short
    #snd_data = array('h', stream.read(CHUNK_SIZE))
    try:
        raw_data = stream.read(s*RATE/CHUNK_SIZE)
        snd_data = numpy.fromstring(raw_data, dtype=numpy.int16)
    except:
        print "You ruined EVERYTHING!"
        sys.exit()

    if byteorder == 'big':
        snd_data.byteswap()

    sample_width = p.get_sample_size(FORMAT) # size in bytes
    stream.stop_stream()
    stream.close()
    p.terminate()

    return analyse.loudness(snd_data), analyse.musical_detect_pitch(snd_data)
コード例 #4
0
ファイル: control.py プロジェクト: mobify/shush-bot
def main():
    # Initial values.
    loudness = -40
    loop_count = 0

    # Main control loop.
    while True:
        loop_count += 1

        # Read raw microphone data
        rawsamps = INPUT_STREAM.read(1024)
        # Convert raw data to NumPy array
        samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
        # Show the volume and pitch
        loudness, pitch = (
            analyse.loudness(samps),
            analyse.musical_detect_pitch(samps)
        )

        # Poll for config changes.
        if loop_count % 100 == 0:
            print '\n\n Updating config...\n\n\n'
            # request new config and update.

        # Visualize the volume and pitch.
        print loudness, pitch
        show_loudness(loudness)

        if loudness > -7:
            INPUT_STREAM.stop_stream()
            shush()
            INPUT_STREAM.start_stream()
コード例 #5
0
ファイル: control.py プロジェクト: mobify/shush-bot
def main():
    # Initial values.
    loudness = -40
    loop_count = 0

    # Main control loop.
    while True:
        loop_count += 1

        # Read raw microphone data
        rawsamps = INPUT_STREAM.read(1024)
        # Convert raw data to NumPy array
        samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
        # Show the volume and pitch
        loudness, pitch = (analyse.loudness(samps),
                           analyse.musical_detect_pitch(samps))

        # Poll for config changes.
        if loop_count % 100 == 0:
            print '\n\n Updating config...\n\n\n'
            # request new config and update.

        # Visualize the volume and pitch.
        print loudness, pitch
        show_loudness(loudness)

        if loudness > -7:
            INPUT_STREAM.stop_stream()
            shush()
            INPUT_STREAM.start_stream()
コード例 #6
0
ファイル: detect.py プロジェクト: yinrong/notes
def getNote():
    global last_pitch, last_loudness
    while True:
        try:
            # Read raw microphone data
            rawsamps = stream.read(1024)
            # Convert raw data to NumPy array
            samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
            # Show the volume and pitch
            loudness = analyse.loudness(samps)
            #pitch = analyse.musical_detect_pitch(samps)
            pitch = analyse.musical_detect_pitch(samps)
        except:
            continue
        if not pitch:
            continue

        level = (pitch - 60.018)/ 1.0

        if pitch and last_pitch:
            pitch_diff = pitch - last_pitch
        else:
            pitch_diff = 100
        loudness_diff = loudness - last_loudness
        #print 'pitch:', math.floor(level), 'pitch_diff:', pitch_diff, 'loudness_diff:', loudness_diff

        last_pitch = pitch
        last_loudness = loudness
        if loudness_diff < 0 and pitch_diff > 2.0:
            continue

        print 'OK', round(level), pitch
        last_returned = level
        return level
コード例 #7
0
def pitch_detection(data):
    """Return the detected musical pitch of *data*, or -1.

    data: raw 16-bit PCM bytes. A pitch is only reported when the frame
    is loud enough (> -25 dB) and detection succeeded.
    """
    # frombuffer avoids the deprecated fromstring copy; the analyse
    # functions only read the array.
    samps = np.frombuffer(data, dtype=np.int16)
    pitch = analyse.musical_detect_pitch(samps)
    # "is not None": a pitch of 0.0 would be falsy but is still a value.
    if analyse.loudness(samps) > -25 and pitch is not None:
        return pitch
    else:
        return -1
コード例 #8
0
ファイル: live_data.py プロジェクト: shridattz/twodegrees
 def live_thread(self):
     """
     Generate a random number every 1 second and emit to a socketio instance (broadcast)
     Ideally to be run in a separate thread?
     """
     #infinite loop of magical random numbers
     print "Making random numbers"
     pyaud = pyaudio.PyAudio()
     stream = pyaud.open( format = pyaudio.paInt16, channels = 1, rate = 44100,  input = True, frames_per_buffer= 44100)
     db = shelve.open("day_data")
     while not thread_stop_event.isSet():
         #number = random.randint(0,50)
         # Read raw microphone data
         rawsamps = stream.read(44100)
         # Convert raw data to NumPy array
         samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
         # Show the volume and pitch
         current= round(abs(-1.15 + 47/abs(analyse.loudness(samps))), 2)
         print ("%f" % current)
         w = numpy.fft.fft(samps)
         freqs = numpy.fft.fftfreq(len(w))
         idx = numpy.argmax(numpy.abs(w))
         freq = freqs[idx]
         #print abs(freq*44100)
         voltage= 230
         if current < 0.5:
             power=0
         else :
             power = round(abs(current * voltage),1)
         print "Power %s " % power
         import time
         timestamp = int(time.time())
         socketio.emit('my response', {'value': power,'timestamp':timestamp*1000 }, namespace='/test')
         db['value'] = db['value'] + power
         db.sync()
コード例 #9
0
def detect():
    p = pyaudio.PyAudio()

    # Open input stream, 16-bit mono at 44100 Hz
    # On my system, device 2 is a USB microphone, your number may differ.
    stream = p.open(
        format = pyaudio.paInt16,
        channels = 1,
        rate = 44100,
        input = True)

    timer = 0
    state = "PEACE"
    doomclock = SECONDS_OF_WAR
    while True:
        # TODO: At certain times of day, skip even listening.

        # Read raw microphone data
        rawsamps = stream.read(1024*50) # 1024*50 is about one second of data.

        # Convert raw data to NumPy array
        samps = numpy.fromstring(rawsamps, dtype=numpy.int16)

        # Get the loudness.
        loudness = abs(analyse.loudness(samps))

        if loudness < MAXIMUM_LOUDNESS:
            print "Loudness over threshold: %.2f" % loudness

        # If it gets loud, and we were previously at peace or almost peace, restart the countdown.
        if loudness < MAXIMUM_LOUDNESS:
            if doomclock > 0:
                # Tick down the time til war
                doomclock -= 1
            else:
                # WAR! Reset the doom clock.
                doomclock = SECONDS_OF_WAR
                if state == "PEACE":
                    message("A skirmish has broken out!")
                elif state == "TENSIONS":
                    message("Someone fanned the flames of war!")
                timer = SECONDS_OF_PEACE
                state = "SKIRMISH"

        # When it's quiet and we were almost at peace or at war, tick down the countdown.
        if loudness > MAXIMUM_LOUDNESS and state in ("TENSIONS", "SKIRMISH"):
            if doomclock < SECONDS_OF_WAR:
                # Cool down the doomclock.
                doomclock += 1
            if state == "TENSIONS":
                timer -= 1
            state = "TENSIONS"

        # If the time has elapsed and we were almost at peace, make peace.
        if timer is 0 and state == "TENSIONS":
            state = "PEACE"
            message("Peace has spread across the land.")
コード例 #10
0
def read():
  INPUT_INDEX = int(sys.argv[1])

  # Open input stream, 16-bit mono at 44100 Hz
  # On my system, device 4 is a USB microphone
  stream = pyaud.open(
    format = pyaudio.paInt16,
    channels = 1,
    rate = 8000,
    input_device_index = INPUT_INDEX,
    input = True)

  while True:
    # Read raw microphone data
    rawsamps = stream.read(1024)
    # Convert raw data to NumPy array
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
    # Show the volume and pitch
    print analyse.loudness(samps), analyse.musical_detect_pitch(samps)
コード例 #11
0
def main():
    """Read BME680, TSL2561 and USB-mic sensors in a loop and print them."""
    bme = bme680.BME680(i2c_addr=0x77)

    # Initialize BME sensor
    bme.set_humidity_oversample(bme680.OS_2X)
    bme.set_pressure_oversample(bme680.OS_4X)
    bme.set_temperature_oversample(bme680.OS_8X)
    bme.set_filter(bme680.FILTER_SIZE_3)
    bme.set_gas_status(bme680.ENABLE_GAS_MEAS)

    # Initialize USB mic
    pyaud = pyaudio.PyAudio()
    stream = pyaud.open(format=pyaudio.paInt16,
                        channels=1,
                        rate=32000,
                        input_device_index=2,
                        input=True)

    # Main loop
    while True:
        bme.get_sensor_data()
        tempCelcius = float("{0:.2f}".format(bme.data.temperature))
        # 9.0/5.0: with int operands, Python 2 floor division made this
        # factor 1 and silently broke the Fahrenheit conversion.
        temperature = float(tempCelcius * (9.0 / 5.0) + 32)
        pressure = float("{0:.2f}".format(bme.data.pressure))
        humidity = float("{0:.2f}".format(bme.data.humidity))
        gas = float("{0:.2f}".format(bme.data.gas_resistance))

        # Read from lux sensor (constructed per-iteration as in the
        # original; debug=True may print during init).
        tsl = TSL2561(debug=True)
        luxVal = tsl.lux()

        # Read from USB mic (frombuffer: fromstring is deprecated).
        rawsamps = stream.read(2048, exception_on_overflow=False)
        samps = numpy.frombuffer(rawsamps, dtype=numpy.int16)
        dB = analyse.loudness(samps) + 60  # shift into a positive range

        print('\n')
        currentDT = datetime.datetime.now()
        print(str(currentDT))
        print("      -- BME680 --")
        print("Temperature: {}".format(temperature))
        print("Pressure: {}".format(pressure))
        print("Humidity: {}".format(humidity))
        print("Gas: {}".format(gas))
        print("      -- TSL2561 --")
        print("Lux: {}".format(luxVal))
        print("      -- USB Mic --")
        print("Sound in dB: {}".format(dB))
        print('\n')
        print("---------------------------")

        time.sleep(WAIT_PERIOD)
コード例 #12
0
def dataconverter(filename):
    """Extract [loudness, pitch, emotion-code] features from one wav file."""
    frate, inputdata = sc.read(filename=filename)
    pitch = lp.getPitch(filename, frate)
    loudness = abs(an.loudness(inputdata))

    # The emotion label is encoded in the leading character(s) of the stem.
    stem = filename.split("/")[-1].split(".")[0]
    if stem[0] == "s":
        # Two-letter labels: sum of the two character codes.
        code = ord(stem[0]) + ord(stem[1])
    else:
        # Single-letter labels map to ord/100.
        code = float(ord(stem[0])) / 100
    return [float(loudness), float(pitch), code]
コード例 #13
0
def dataconverter(filename):
    """Compute loudness, pitch and a numeric emotion code for *filename*."""
    frate, inputdata = sc.read(filename=filename)
    pitch = lp.getPitch(filename, frate)
    loudness = abs(an.loudness(inputdata))

    # Emotion code from the file-name stem.
    name = filename.split("/")[-1].split(".")[0]
    if name[0] == "s":
        emotion = ord(name[0]) + ord(name[1])      # two-letter label
    else:
        emotion = float(ord(name[0])) / 100        # single-letter label
    return [float(loudness), float(pitch), emotion]
コード例 #14
0
 def load_data(self, filename):
     """Collect [loudness, pitch, emotion] features for every file
     matching the *filename* glob pattern."""
     rows = []
     for path in gb.glob(filename):
         frate, samples = sc.read(path)
         feature_pitch = lp.getPitch(path, frate)
         feature_loudness = abs(an.loudness(samples))
         # Emotion label comes from the leading character(s) of the stem.
         stem = path.split("/")[-1].split(".")[0]
         label = stem[0:2] if stem[0] == "s" else stem[0]
         rows.append([feature_loudness, feature_pitch, label])
     return rows
コード例 #15
0
def getPitch(filename):
    """Estimate the dominant pitch (MIDI units) and loudness of a wav file.

    Runs aubio's "yin" detector over 1024-sample FFT windows with a
    256-sample hop, keeps only frames whose confidence exceeds the 0.8
    tolerance, and returns (maxValue, loudness): the highest confident
    pitch (0 when none qualified) and abs(loudness) of the raw samples.

    NOTE(review): `inputdata` is set by the last glob match; if the glob
    matches nothing, the loudness line below raises NameError — confirm
    callers always pass an existing path.
    """
    from aubio import pitch
    # This loop only reads the raw samples for the loudness computation;
    # aubio re-reads the file itself via source().
    for f in gb.glob(filename):
        frate, inputdata = sc.read(f)
    downsample = 1
    samplerate = frate / downsample
    window = 1024 / downsample  # fft size

    hopsize = 256 / downsample
    sound = source(filename, samplerate, hopsize)
    samplerate = sound.samplerate
    tolerance = 0.8

    # Setting the FFT Algorithm
    pitchlist = pitch("yin", window, hopsize, samplerate)
    pitchlist.set_unit("midi")

    # Setting the tolerance level to 80 percent
    pitchlist.set_tolerance(tolerance)
    total_frames = 0
    pitches = []
    confidences = []
    while True:
        samples, read = sound()
        # NOTE: this rebinding shadows the aubio `pitch` import above.
        pitch = pitchlist(samples)[0]
        confidence = pitchlist.get_confidence()
        confidences += [confidence]
        pitches += [pitch]
        total_frames += read
        if read < hopsize: break  # final (short) hop reached

    # getting the file list of pitch from various sound samples
    # (drop the first frame, which is typically transient)
    pitches = array(pitches[1:])
    confidences = array(confidences[1:])
    cleaned_pitches = pitches

    loudness = abs(an.loudness(inputdata))

    # EXtracting all those pitch levels that are above the confidence values
    cleaned_pitches = ma.masked_where(confidences < tolerance,
                                      cleaned_pitches,
                                      copy=False)
    cleaned_pitches = cleaned_pitches[~cleaned_pitches.mask]

    # condition to check whether there exists a fundamental frequency for the given sound signal
    if len(cleaned_pitches) == 0:
        maxValue = 0
    else:
        maxValue = max(cleaned_pitches)
    return maxValue, loudness
コード例 #16
0
 def load_data(self, filename):
     """Return a [loudness, pitch, emotion] row for each wav file matched
     by the glob pattern *filename*."""
     def _row(path):
         # One feature row for a single file.
         rate, data = sc.read(path)
         row_pitch = lp.getPitch(path, rate)
         row_loudness = abs(an.loudness(data))
         stem = path.split("/")[-1].split(".")[0]
         # Two-character label for "s*" stems, otherwise one character.
         label = stem[0:2] if stem[0] == "s" else stem[0]
         return [row_loudness, row_pitch, label]

     return [_row(f) for f in gb.glob(filename)]
コード例 #17
0
def getPitch(filename):
    """Estimate the dominant pitch (MIDI units) and loudness of a wav file.

    Same pipeline as the other getPitch copy: aubio "yin" over 1024-wide
    windows, 256-sample hop, 0.8 confidence tolerance; returns
    (maxValue, loudness) with maxValue 0 when no frame was confident.

    NOTE(review): `inputdata` comes from the last glob match; an empty
    glob leads to a NameError at the loudness line — verify callers.
    """
    from aubio import pitch
    # Reads raw samples only for the loudness computation below.
    for f in gb.glob(filename):
            frate, inputdata = sc.read(f)
    downsample = 1
    samplerate = frate / downsample
    window = 1024 / downsample # fft size

    hopsize = 256/ downsample
    sound = source(filename, samplerate, hopsize)
    samplerate = sound.samplerate
    tolerance = 0.8

    # Setting the FFT Algorithm
    pitchlist = pitch("yin", window, hopsize, samplerate)
    pitchlist.set_unit("midi")

    # Setting the tolerance level to 80 percent
    pitchlist.set_tolerance(tolerance)
    total_frames = 0
    pitches = []
    confidences=[]
    while True:
        samples, read = sound()
        # NOTE: rebinding `pitch` shadows the aubio import above.
        pitch = pitchlist(samples)[0]
        confidence = pitchlist.get_confidence()
        confidences+=[confidence]
        pitches += [pitch]
        total_frames += read
        if read < hopsize: break  # final (short) hop reached

    # getting the file list of pitch from various sound samples
    pitches = array(pitches[1:])
    confidences = array(confidences[1:])
    cleaned_pitches = pitches

    loudness = abs(an.loudness(inputdata))

    # EXtracting all those pitch levels that are above the confidence values
    cleaned_pitches = ma.masked_where(confidences < tolerance, cleaned_pitches,copy=False)
    cleaned_pitches = cleaned_pitches[~cleaned_pitches.mask]

    # condition to check whether there exists a fundamental frequency for the given sound signal
    if len(cleaned_pitches)==0:
        maxValue = 0
    else:
        maxValue = max(cleaned_pitches)
    return maxValue,loudness
コード例 #18
0
    def load_data(self):
        """Load feature rows from every .wav under <working_directory>/Data/.

        Returns a list of [loudness, pitch, emotion] entries.
        """
        results = []
        for wav in gb.glob(self.working_directory + "Data/" + "*.wav"):
            rate, audio = sc.read(wav)
            tone = lp.getPitch(wav, rate)
            volume = abs(an.loudness(audio))
            # "s*" stems encode a two-letter emotion, others one letter.
            stem = wav.split("/")[-1].split(".")[0]
            if stem[0] == "s":
                label = stem[0:2]
            else:
                label = stem[0]
            results.append([volume, tone, label])
        return results
コード例 #19
0
ファイル: oneplayer.py プロジェクト: mikehelland/song-pong
    def checkLevels(self):
        """Read one buffer from the first microphone and analyse it.

        Returns:
            (ok, pitch, loudness): ok is False when the read failed or
            yielded fewer than 2048 bytes; loudness is clamped to [-32, 0].
        """
        l = False

        try:
            l, data = self.mics[0].read()
        except Exception:
            # Narrowed from a bare except, which also swallowed
            # SystemExit/KeyboardInterrupt. l stays False, so the guard
            # below returns before `data` is ever touched.
            print "error"

        if not l or len(data) < 2048:
            return (False, 0, 0)

        samps = numpy.fromstring(data, dtype=numpy.int16)

        pitch = analyse.musical_detect_pitch(samps)
        loudness = min(0, max(-32, analyse.loudness(samps)))

        return (True, pitch, loudness)
コード例 #20
0
ファイル: SpeechNet.py プロジェクト: CodeR57/DeepSentiment
    def load_data_file(self):
        """Build [loudness, pitch, hashed-emotion] rows for the KL speaker set.

        The emotion label from the file-name stem is hashed to a float via
        md5 so it can be fed to a numeric model.
        """
        outputdata = []
        for f in gb.glob(
                "/media/vyassu/OS/Users/vyas/Documents/Assigments/BigData/AudioData/KL/*.wav"
        ):
            frate, inputdata = sc.read(f)
            pitch = lp.getPitch(f, frate)
            loudness = abs(an.loudness(inputdata))
            filename = f.split("/")[-1].split(".")[0]
            # "s"-prefixed stems carry two-letter labels, others one letter.
            emotion = filename[0:2] if filename[0] == "s" else filename[0]
            # Both branches hashed identically, so hash once here.
            emotion = float(int(hashlib.md5(emotion).hexdigest(), 16))
            outputdata.append([loudness, pitch, emotion])

        return outputdata
コード例 #21
0
 def live_thread(self):
     """
     Generate a random number every 1 second and emit to a socketio instance (broadcast)
     Ideally to be run in a separate thread?
     """
     #infinite loop of magical random numbers
     print "Making random numbers"
     pyaud = pyaudio.PyAudio()
     stream = pyaud.open(format=pyaudio.paInt16,
                         channels=1,
                         rate=44100,
                         input=True,
                         frames_per_buffer=44100)
     db = shelve.open("day_data")
     while not thread_stop_event.isSet():
         #number = random.randint(0,50)
         # Read raw microphone data
         rawsamps = stream.read(44100)
         # Convert raw data to NumPy array
         samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
         # Show the volume and pitch
         current = round(abs(-1.15 + 47 / abs(analyse.loudness(samps))), 2)
         print("%f" % current)
         w = numpy.fft.fft(samps)
         freqs = numpy.fft.fftfreq(len(w))
         idx = numpy.argmax(numpy.abs(w))
         freq = freqs[idx]
         #print abs(freq*44100)
         voltage = 230
         if current < 0.5:
             power = 0
         else:
             power = round(abs(current * voltage), 1)
         print "Power %s " % power
         import time
         timestamp = int(time.time())
         socketio.emit('my response', {
             'value': power,
             'timestamp': timestamp * 1000
         },
                       namespace='/test')
         db['value'] = db['value'] + power
         db.sync()
コード例 #22
0
ファイル: analyse_play.py プロジェクト: illume/eyestabs
def main():
    """Detect note onsets from a stereo 44.1 kHz input stream, printing a
    {'note': ..., 'time': ...} event dict (or '') for every frame.

    An event fires when the detected note changes, or when the volume
    jumps by more than ATTACK_THRESHOLD (a re-attack of the same note),
    provided the volume exceeds MINIMUM_VOLUME.
    """
    pyaud = pyaudio.PyAudio()

    stream = pyaud.open(format=pyaudio.paInt16,
                        channels=2,
                        rate=44100,
                        input_device_index=1,
                        input=True)

    # last_time is written but never read back in this function.
    last_note = last_vol = last_time = 0

    while True:
        t = timing.get_time()

        rawsamps = stream.read(SAMPLE_SIZE)
        samps = numpy.fromstring(rawsamps, dtype=numpy.int16)

        event = ''

        # min_note filters out very low (spurious) detections.
        midi_note = analyse.musical_detect_pitch(samps, min_note=28.0)

        if midi_note:
            midi_note += OCTAVE_CORRECTION

            latest_note = notes.midi_to_note(midi_note)
            latest_vol = analyse.loudness(samps)

            # A sharp volume rise counts as a new attack of the same note.
            attacked = latest_vol - last_vol > ATTACK_THRESHOLD

            if latest_note != last_note or attacked:
                if latest_vol > MINIMUM_VOLUME:
                    event = {'note': latest_note, 'time': t}
                    last_time = t

                last_note = latest_note
                last_vol = latest_vol

        elif last_note:
            # Pitch lost: clear so the next detection fires a fresh event.
            last_note = None

        print event
        sys.stdout.flush()
コード例 #23
0
def analyseChunk(CHUNK):
    """Return [volume, pitch] for the sample frame CHUNK.

    INPUT  = frame of samples (np.array)
    OUTPUT = list(volume, pitch)

    When no pitch is detected, the previous value (module-level
    ``prev_pitch``) is reused so the output never drops out.

    See Nathan Whitehead's GitHub for further details on the <analyse>
    module:
    "https://github.com/ExCiteS/SLMPi/tree/master/SoundAnalyse-0.1.1"
    """
    global prev_pitch
    volume = analyse.loudness(CHUNK)  # volume in dB

    pitch = analyse.detect_pitch(CHUNK)
    # "is None" rather than "== None": identity is the correct (and
    # PEP 8 mandated) test for the None singleton.
    if pitch is None:
        pitch = prev_pitch
    else:
        prev_pitch = pitch

    return [volume, pitch]
コード例 #24
0
    def load_data_file(self, audiodatapath):
        """Extract speech features from each wav file under *audiodatapath*.

        Every matched file contributes one [loudness, pitch, emotion] row;
        the emotion label is encoded in the file-name stem.
        """
        dataset = []
        for wav_path in gb.glob(audiodatapath):
            sample_rate, signal = sc.read(wav_path)
            # Pitch via the Aubio-backed helper, loudness via SoundAnalyse.
            wav_pitch = lp.getPitch(wav_path, sample_rate)
            wav_loudness = abs(an.loudness(signal))

            stem = wav_path.split("/")[-1].split(".")[0]
            # "s"-prefixed stems carry two-letter emotion codes.
            label = stem[0:2] if stem[0] == "s" else stem[0]
            dataset.append([wav_loudness, wav_pitch, label])
        return dataset
コード例 #25
0
    def load_data_file(self):
        """Gather [loudness, pitch, emotion] rows from every wav file
        under <working_directory>/AudioData/*/."""
        collected = []
        for entry in gb.glob(self.working_directory + "AudioData/*/*.wav"):
            rate, waveform = sc.read(entry)
            entry_pitch = lp.getPitch(entry, rate)
            entry_loudness = abs(an.loudness(waveform))

            # The emotion code is the stem prefix: two chars for "s*" labels.
            stem = entry.split("/")[-1].split(".")[0]
            if stem[0] == "s":
                code = stem[0:2]
            else:
                code = stem[0]
            collected.append([entry_loudness, entry_pitch, code])
        return collected
コード例 #26
0
ファイル: soundeffectfeq.py プロジェクト: cheetahray/Shanghai
def raypitch():
    """Read one 1024-sample frame from the global stream and return
    (pitch, amplitude) when a pitch is detected; otherwise falls through
    and implicitly returns None.

    NOTE(review): uses Python 2-only "except IOError, e" syntax.
    """
    global strm
    try:
        rawsamps = strm.read(1024) # Read raw microphone data
        samps = numpy.fromstring(rawsamps, dtype=numpy.int16) # Convert raw data to NumPy array
        rayfeq = analyse.musical_detect_pitch(samps)
        if rayfeq > 0:
            #strm.stop_stream()
            rayint = round(rayfeq,1)
            if True:#rayint <= 83:

                rayloud = analyse.loudness(samps)
                # Shift negative-dB loudness into a roughly 0..100 range.
                rayampval = rayloud + 100 #rayampval = raymap(rayloud, -127, 0, 0, 127)
                #print (rayfeq, rayampval)
                return rayint, rayampval

            #strm.start_stream()
    except IOError, e:
        # Input overflow is survivable: drop the frame; re-raise others.
        if e.args[1] == pyaudio.paInputOverflowed:
            rawsamps  = '\x00'
        else:
            raise
コード例 #27
0
def main():
    """Detect note onsets from a stereo 44.1 kHz input stream, printing a
    {"note": ..., "time": ...} event dict (or "") for every frame.

    An event fires when the detected note changes, or when the volume
    jumps by more than ATTACK_THRESHOLD, provided it exceeds
    MINIMUM_VOLUME.
    """
    pyaud = pyaudio.PyAudio()

    stream = pyaud.open(format=pyaudio.paInt16, channels=2, rate=44100, input_device_index=1, input=True)

    # last_time is written but never read back in this function.
    last_note = last_vol = last_time = 0

    while True:
        t = timing.get_time()

        rawsamps = stream.read(SAMPLE_SIZE)
        samps = numpy.fromstring(rawsamps, dtype=numpy.int16)

        event = ""

        # min_note filters out very low (spurious) detections.
        midi_note = analyse.musical_detect_pitch(samps, min_note=28.0)

        if midi_note:
            midi_note += OCTAVE_CORRECTION

            latest_note = notes.midi_to_note(midi_note)
            latest_vol = analyse.loudness(samps)

            # A sharp volume rise counts as a new attack of the same note.
            attacked = latest_vol - last_vol > ATTACK_THRESHOLD

            if latest_note != last_note or attacked:
                if latest_vol > MINIMUM_VOLUME:
                    event = {"note": latest_note, "time": t}
                    last_time = t

                last_note = latest_note
                last_vol = latest_vol

        elif last_note:
            # Pitch lost: clear so the next detection fires a fresh event.
            last_note = None

        print event
        sys.stdout.flush()
コード例 #28
0
ファイル: test.py プロジェクト: nbehdin/121proj1
# Minimal mic monitor: print loudness (dB) and detected pitch for each
# 1024-sample frame, forever. (Python 2 print statements.)
import numpy
import pyaudio
import analyse

# Initialize PyAudio
pyaud = pyaudio.PyAudio()

# Open input stream, 16-bit mono at 44100 Hz
# On my system, device 2 is a USB microphone, your number may differ.
stream = pyaud.open(
    format=pyaudio.paInt16, channels=1, rate=44100, input_device_index=0, input=True  # Initialize PyAudio
)

while True:
    # Read raw microphone data
    rawsamps = stream.read(1024)
    # Convert raw data to NumPy array
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
    # Show the volume and pitch
    print analyse.loudness(samps), analyse.detect_pitch(samps)
コード例 #29
0
def main():
    """Log BME680, TSL2561 and microphone readings into dataDT.db forever.

    Ctrl+C closes the connection and exits; any other exception is
    printed and the loop continues (best-effort logging).
    """
    bme = bme680.BME680(i2c_addr=0x77)

    # Initialize db
    con = sqlite3.connect('dataDT.db')
    c = con.cursor()
    c.execute(
        '''CREATE TABLE IF NOT EXISTS data(temp FLOAT, pres FLOAT, hum FLOAT, gas FLOAT, lux INTEGER, db FLOAT, dt DATETIME)'''
    )

    # Initialize sensor
    bme.set_humidity_oversample(bme680.OS_2X)
    bme.set_pressure_oversample(bme680.OS_4X)
    bme.set_temperature_oversample(bme680.OS_8X)
    bme.set_filter(bme680.FILTER_SIZE_3)
    bme.set_gas_status(bme680.ENABLE_GAS_MEAS)

    # Initialize USB mic
    pyaud = pyaudio.PyAudio()
    stream = pyaud.open(format=pyaudio.paInt16,
                        channels=1,
                        rate=32000,
                        input_device_index=2,
                        input=True)

    now = time.strftime('%Y-%m-%d %H:%M:%S')
    print("Readings began " + now)
    print("Press ctrl+c to end readings and close connection.")

    # Main loop
    while True:
        try:
            # Record time
            now = time.strftime('%Y-%m-%d %H:%M:%S')

            # Read from BME
            bme.get_sensor_data()
            tempCelcius = float("{0:.2f}".format(bme.data.temperature))

            # 9.0/5.0: with int operands, Python 2 floor division made
            # this factor 1 and broke the Fahrenheit conversion.
            temperature = float(tempCelcius * (9.0 / 5.0) + 32)
            pressure = float("{0:.2f}".format(bme.data.pressure))
            humidity = float("{0:.2f}".format(bme.data.humidity))
            gas = float("{0:.2f}".format(bme.data.gas_resistance))

            # Read from lux sensor
            tsl = TSL2561(debug=True)
            luxVal = tsl.lux()

            # Read from USB mic (frombuffer: fromstring is deprecated)
            rawsamps = stream.read(2048, exception_on_overflow=False)
            samps = numpy.frombuffer(rawsamps, dtype=numpy.int16)
            decib = analyse.loudness(samps) + 60  # shift into a positive range

            values = (temperature, pressure, humidity, gas, luxVal, decib, now)
            c.execute("INSERT INTO data VALUES(?, ?, ?, ?, ?, ?, ?)", values)
            con.commit()

            time.sleep(WAIT_PERIOD)

        except KeyboardInterrupt:
            con.close()
            break

        except Exception as e:
            # Best-effort: report the failure and keep sampling.
            # (A redundant `pass` before the print was removed.)
            print(e)
コード例 #30
0
ファイル: loudness.py プロジェクト: jugador87/ContextDemo
import thread

# Set True when the timer below expires; starts True so the first
# window is considered elapsed.
timeout=True

def timer(numSeconds, dummy=True):
    """Sleep numSeconds, then flip the module-level timeout flag.

    dummy exists only to satisfy thread.start_new_thread's args tuple.
    """
    global timeout
    time.sleep(numSeconds)
    timeout = True

# Initialize PyAudio
pyaud = pyaudio.PyAudio()

# Open input stream, 16-bit mono at 44100 Hz
# CSC: device 0 is built in mike?
# expected loudness output: -1dB for very loud; downto -36dB "typical silence"
chunk=1024
stream = pyaud.open(
    format = pyaudio.paInt16,
    channels = 1,
    rate = 48000,
    input_device_index = 0,
    input = True)

# Poll the mic forever, printing loudness and pitch per chunk.
# (Loop body is tab-indented in the original; preserved as-is.)
while True:
	# Read raw microphone data
	rawsamps = stream.read(chunk)
	# Convert raw data to NumPy array
	samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
	# Show the volume and pitch
	print analyse.loudness(samps), analyse.musical_detect_pitch(samps)
コード例 #31
0
ファイル: analyse_ocemp.py プロジェクト: illume/eyestabs
    # Fragment of an audio-event loop: wait until a full analysis window is
    # buffered, then convert it and emit note-on style events.
    if not available > sample_size:
        time.sleep(0.01)
        continue

    rawsamps = stream.read(available)
    # Drain everything available, but only analyse the first sample_size values.
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16, count=sample_size)

    event = ''

    # MIDI note number of the detected pitch, or falsy below min_note.
    midi_note = analyse.musical_detect_pitch(samps, min_note=28.0)

    if midi_note:
        # "paramaters" (sic) is a project config module -- name kept as-is.
        midi_note += paramaters.octave_correction * 12

        latest_note = notes.midi_to_note(midi_note)
        latest_vol = analyse.loudness(samps)

        # A sudden volume jump re-triggers even an unchanged note.
        attacked = latest_vol - last_vol > paramaters.attack_threshold

        if latest_note != last_note or attacked:
            if latest_vol > paramaters.minimum_volume:
                event = {'note': latest_note, 'time': t}
                last_time = t

            last_note = latest_note
            last_vol = latest_vol

    elif last_note:
        last_note = None

    print event
コード例 #32
0
# Capture 100 chunks of mic audio, then plot loudness (line) and pitch (dots).
import analyse
import matplotlib.pyplot as plt

# Initialize PyAudio
pyaud = pyaudio.PyAudio()

# Open input stream, 16-bit mono at 44100 Hz
stream = pyaud.open(format=pyaudio.paInt16, channels=1, rate=44100, input_device_index=1, input=True)


vols = []
pitches = []
i = 0
while i < 100:
    # Read raw microphone data
    rawsamps = stream.read(1024)
    # Convert raw data to NumPy array (fromstring on bytes is deprecated;
    # numpy.frombuffer is the modern equivalent)
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
    # Record the volume and pitch for plotting; pitch may be None -> 0.0

    vols.append(analyse.loudness(samps))
    pitches.append(analyse.musical_detect_pitch(samps) or 0.0)

    print vols[-1]

    i += 1

plt.plot(vols)
plt.plot(pitches, "ro")
plt.show()
コード例 #33
0
import numpy
import pyaudio
import analyse

pyaud = pyaudio.PyAudio()

# 16-bit mono input stream from device index 2 at 16 kHz.
stream = pyaud.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=16000,
                    input_device_index=2,
                    input=True)

while True:
    # One second of audio per read (16000 frames at 16 kHz).
    rawsamps = stream.read(16000)
    # frombuffer replaces the deprecated (removed in NumPy 2.0)
    # binary mode of numpy.fromstring.
    samps = numpy.frombuffer(rawsamps, dtype=numpy.int16)
    print(analyse.loudness(samps))
#    print(analyse.detect_pitch(samps))
コード例 #34
0
ファイル: Input.py プロジェクト: dasbavaria/echomesh
 def receive(self, frames):
   """Store one raw audio buffer and derive its loudness.

   Keeps the raw bytes in self.frames, the decoded samples (dtype taken
   from self.dtype) in self.numpy_frames, and the analyse loudness
   figure in self.level.
   """
   self.frames = frames
   # frombuffer replaces the deprecated (removed in NumPy 2.0) binary mode
   # of numpy.fromstring and avoids copying the buffer.
   self.numpy_frames = numpy.frombuffer(frames, dtype=self.dtype, count=-1)
   self.level = analyse.loudness(self.numpy_frames)
コード例 #35
0
import alsaaudio
import numpy
import analyse

# ALSA capture device (AK5370 USB mic), configured 8 kHz mono 16-bit LE below.
card = 'sysdefault:CARD=AK5370'
# NOTE(review): out.wav is opened but never written to or closed here.
f = open('out.wav', 'wb')
inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NORMAL, card)

inp.setchannels(1)
inp.setrate(8000)
inp.setperiodsize(160)
inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)

# Poll the capture device forever, printing loudness (Python 2 print).
while True:
    # read() returns (frame count, raw bytes).
    length, data = inp.read()

    samps = numpy.fromstring(data, dtype=numpy.int16, count=length)
    # Show the volume and pitch
    print analyse.loudness(samps)
コード例 #36
0
ファイル: sonicpi.py プロジェクト: cheetahray/Shanghai
# Listen to the mic; when a new stable pitch appears, trigger a Sonic Pi
# sample via the `sonic_pi` CLI, pausing capture while it plays.
CHUNK = 8192
lastfeq = 0

# Open input stream, 16-bit mono at 44100 Hz
# On my system, device 2 is a USB microphone, your number may differ.
stream = pyaud.open(
    format = pyaudio.paInt16,
    channels = 1,
    rate = 44100,
    input_device_index = 1,
    input = True,
    frames_per_buffer=CHUNK)

while True:
    # Read raw microphone data
    rawsamps = stream.read(CHUNK)
    # Convert raw data to NumPy array
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
    # Detected pitch; only react when it moved by more than 2 units.
    rayfeq = analyse.musical_detect_pitch(samps)
    if rayfeq > 0 and math.fabs(rayfeq-lastfeq) > 2:
        print(stream.stop_stream())
        # `commands` is Python 2 only (replaced by subprocess in Python 3).
        title=commands.getoutput("echo \"with_fx :reverb, mix: 0.9, phase: 0.25, room: 1 do sample :guit_em9, rate: 0.5 end\" | sonic_pi")
        time.sleep(30)
        title=commands.getoutput("echo \"stop\" | sonic_pi")
        print(stream.start_stream())
        lastfeq = rayfeq
        print (analyse.loudness(samps), rayfeq)
    else:
        lastfeq = 0
コード例 #37
0
ファイル: record.py プロジェクト: gmkey/raspberrypi
    # Tail of record_sample(): read RECORD_SECONDS worth of chunks, silently
    # ignoring read errors.  NOTE(review): the lines below deliberately keep
    # the original's tab/space mix -- the tab-indented lines belong to the
    # enclosing function body, not the for loop.
    for i in range(0, int(RATE / chunk * RECORD_SECONDS)):
        try:
            data = stream.read(chunk)
            raw_sample.append(data)
        except:
            pass
	stream.stop_stream()
	stream.close()
	p.terminate()
	return(raw_sample)


# Build a rolling loudness history from repeated recordings.
volume_array = []
for i in range(1000):
    raw_sample_data = record_sample()
    time.sleep(0.2)

    base_volume_established = False

    if raw_sample_data:
        for raw_sample in raw_sample_data:
            sample = numpy.fromstring(raw_sample, dtype=numpy.int16)
            volume_array.append(analyse.loudness(sample))

        # Ten readings are treated as enough to establish a baseline.
        if len(volume_array) > 10:
            base_volume_established = True

        # NOTE(review): snippet is truncated here -- the if body is missing.
        if base_volume_established:


コード例 #38
0
'''
Created on Mar 12, 2016

@author: manish_kelkar
'''

import numpy
import pyaudio
import analyse

pyaud = pyaudio.PyAudio()
#stream = pyaud.open( format = pyaudio.paInt16, channels = 1, rate = 44100, input_device_index = 1, input = True)
# One-second buffers of 16-bit mono audio from the default input device.
stream = pyaud.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=44100,
                    input=True,
                    frames_per_buffer=44100)

while True:
    # Read raw microphone data (one second at 44.1 kHz)
    rawsamps = stream.read(44100)
    # frombuffer replaces the deprecated (removed in NumPy 2.0)
    # binary mode of numpy.fromstring.
    samps = numpy.frombuffer(rawsamps, dtype=numpy.int16)
    # Print a rough normalized volume figure derived from loudness
    print("%.2f" % (-1.15 + 47 / abs(analyse.loudness(samps))))
    # Dominant FFT bin (computed but currently unused)
    w = numpy.fft.fft(samps)
    freqs = numpy.fft.fftfreq(len(w))
    idx = numpy.argmax(numpy.abs(w))
    freq = freqs[idx]
    #print abs(freq*44100)
コード例 #39
0
ファイル: current.py プロジェクト: shridattz/twodegrees
'''
Created on Mar 12, 2016

@author: manish_kelkar
'''

import numpy
import pyaudio
import analyse


pyaud = pyaudio.PyAudio()
#stream = pyaud.open( format = pyaudio.paInt16, channels = 1, rate = 44100, input_device_index = 1, input = True)
# One-second buffers of 16-bit mono audio from the default input device.
stream = pyaud.open( format = pyaudio.paInt16, channels = 1, rate = 44100,  input = True, frames_per_buffer= 44100)


while True:
        # Read raw microphone data
        rawsamps = stream.read(44100)
        # frombuffer replaces the deprecated (removed in NumPy 2.0)
        # binary mode of numpy.fromstring.
        samps = numpy.frombuffer(rawsamps, dtype=numpy.int16)
        # Print a rough normalized volume figure derived from loudness
        print ("%.2f" % (-1.15 + 47/abs(analyse.loudness(samps) ) ) )
        # Dominant FFT bin (computed but currently unused)
        w = numpy.fft.fft(samps)
        freqs = numpy.fft.fftfreq(len(w))
        idx = numpy.argmax(numpy.abs(w))
        freq = freqs[idx]
        #print abs(freq*44100)
コード例 #40
0
import alsaaudio
import numpy
import analyse

# ALSA capture device (AK5370 USB mic), configured 8 kHz mono 16-bit LE below.
card = 'sysdefault:CARD=AK5370'
# NOTE(review): out.wav is opened but never written to or closed here.
f = open('out.wav', 'wb')
inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NORMAL, card)

inp.setchannels(1)
inp.setrate(8000)
inp.setperiodsize(160)
inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)

# Poll the capture device forever, printing loudness (Python 2 print).
while True:
  # read() returns (frame count, raw bytes).
  length, data = inp.read()

  samps = numpy.fromstring(data, dtype=numpy.int16, count=length)
  # Show the volume and pitch
  print analyse.loudness(samps)
コード例 #41
0
ファイル: test_pyaudio.py プロジェクト: kimfeel/DBL-Project
#!/usr/bin/env python
# Minimal mic monitor: print loudness and detected pitch forever
# (Python 2 -- note the print statement).

import numpy
import pyaudio
import analyse

# Initialize PyAudio
pyaud = pyaudio.PyAudio()

# Open input stream, 16-bit mono at 44100 Hz
# On my system, device 4 is a USB microphone
# NOTE(review): comment says device 4, but index 2 is used -- confirm.
stream = pyaud.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=44100,
                    input_device_index=2,
                    input=True)

while True:
    # Read raw microphone data
    rawsamps = stream.read(1024)
    # Convert raw data to NumPy array (fromstring on bytes is deprecated;
    # numpy.frombuffer is the modern equivalent)
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
    # Show the volume and pitch
    print analyse.loudness(samps), analyse.musical_detect_pitch(samps)
コード例 #42
0
    # Tail of sendEmail(): finish the SMTP transaction.
    )  #logs into email address (entered above) using password (also entered above)
    mailServer.sendmail(gmailUser, recipient,
                        message.as_string())  #send the email to the recipient
    mailServer.close()  #Stop doing things with the mail server


print("Starting BarkTracker")

# Monitor the mic; email the owner when loudness exceeds the ambient
# baseline, rate-limited to one email per email_timer minutes.
while True:
    #read raw mic data
    rawsamps = stream.read(streamChunk)
    #convert to NumPy array
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)

    #send email if the loudness exceeds ambient noise
    if analyse.loudness(samps) >= ambient_db:
        #This is the time at which the sound was detected
        currentTime = datetime.datetime.now()

        #Check to see when the last email was sent
        if (emailSentAt != None):
            timeDifference = currentTime - emailSentAt
        else:
            # No email yet: fabricate a difference large enough to pass the gate.
            timeDifference = datetime.timedelta(minutes=email_timer + 1)

        #Only send an email to the user if one hasn't been sent recently
        if (timeDifference > datetime.timedelta(minutes=email_timer)):
            print("Sending email about dog!")
            emailSentAt = currentTime
            # NOTE(review): snippet truncated mid-call (Process(target=sendEmail ...)).
            p = Process(
                target=sendEmail
コード例 #43
0
ファイル: Loudness.py プロジェクト: soundslash/soundslash
    def _sink_chain(self, pad, buf):
        """GStreamer sink-pad chain function: meter the buffer's loudness and
        drive the downstream volume element from a 20-sample rolling average.

        The incoming buffer is always forwarded unchanged to self.srcpad.
        """
        # Interpret the raw buffer as 16-bit samples.
        samps = numpy.fromstring(buf, dtype=numpy.int16)
        # analyse.loudness output range (per original notes):
        # -40 - 0
        # -1 very loud
        # -36 silence
        volume = analyse.loudness(samps)
        # , analyse.musical_detect_pitch(samps)
        if volume < -40:
            volume = -40

        # Rescale -40..0 onto 0..100.
        volume = (volume + 40) * 2.5

        if len(self.queue) < 20:
            # Still filling the rolling window; no volume decision yet.
            self.queue.append(volume)
        else:
            # Average the window by hand.
            sum = 0
            count = 0
            for elem in self.queue:
                sum += elem
                count += 1
            avg = sum / count

            lvl = 0

            # Map the average into a level bucket and a target volume.
            # NOTE(review): the magic offsets (-30, +340, +230) look like
            # hand-tuned gain values -- confirm intent before changing.
            if avg > 70:
                avg -= 30
                lvl = 1
            elif avg < 10:
                avg = 0
                lvl = 2
            elif avg < 20:
                avg += 340
                lvl = 3
            elif avg < 40:
                avg += 230
                lvl = 4

            if lvl == 0:
                avg = 100

            # 0 volume, force new level
            if (self.lvls["last_lvl"][1] == 2):
                self.lvls["last_lvl"] = [time.time(), lvl, avg]

            # Only switch levels once the current one has held for >2 seconds.
            if (time.time() - self.lvls["last_lvl"][0] > 2
                    and self.lvls["last_lvl"][1] != lvl):
                self.lvls["last_lvl"] = [time.time(), lvl, avg]
            else:
                avg = self.lvls["last_lvl"][2]

            if (self.lvls["last_lvl"][1] == lvl):
                self.lvls["last_lvl"][0] = time.time()

            print avg
            self.__volume.set_property("volume", (avg) / 100)
            # Slide the window: drop the oldest reading, append the newest.
            self.queue = self.queue[1:]
            self.queue.append(volume)

        return self.srcpad.push(buf)
コード例 #44
0
ファイル: robot.py プロジェクト: thehack/RaspberryBot
def main():
    """Run the robot-face UI loop: two eyes and a mouth whose height tracks
    mic loudness while 't' is held; 'b' blinks the eyes; Esc/close quits.

    Relies on module-level `screen`, `stream` (PyAudio) and the `Boxes`
    sprite class defined elsewhere in the file.
    """
    background=pygame.Surface(screen.get_size())
    background=background.convert()
    background.fill((255, 248, 220))
    screen.blit(background,(0,0))
    boxes=[]

    right_eye = Boxes()
    right_eye.image=pygame.Surface((40, 40))
    right_eye.rect=right_eye.image.get_rect()
    right_eye.rect.center=(140, 100)
    boxes.append(right_eye)

    left_eye = Boxes()
    left_eye.image=pygame.Surface((40, 40))
    left_eye.rect=left_eye.image.get_rect()
    left_eye.rect.center=(500, 100)
    boxes.append(left_eye)

    mouth = Boxes()

    boxes.append(mouth)

    allSprites=pygame.sprite.Group(boxes)
    RESET = allSprites

    running = True
    while running:
        #Python event management
        for event in pygame.event.get ():
            # Quit if x is clicked or escaped is pushed
            if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
                stream.close()
                running = False

        # Press 't' to talk.
        # NOTE(review): `event` is referenced after the for loop; if the event
        # queue is empty on the first frame this raises NameError -- confirm.
        if (event.type == pygame.KEYDOWN and event.key == pygame.K_t):

            # Read raw microphone data, the try escape fixes an overflow bug.
            try:
                rawsamps = stream.read(1024)

                # Convert raw data to NumPy array
                samps = numpy.fromstring(rawsamps, dtype=numpy.int16)

                # Mouth height: 24px minimum, growing with loudness
                # (loudness is negative dB, so 60 + loudness is ~0..60).
                num = max(24, (60 + int(analyse.loudness(samps))))
                # print num
                # print analyse.musical_detect_pitch(samps)
                mouth.image=pygame.Surface((360, num))
            except:
                print "overflow"
        if (event.type == pygame.KEYDOWN and event.key == pygame.K_b):
            right_eye.image=pygame.Surface((40, 10))
            left_eye.image=pygame.Surface((40, 10))
        if event.type == pygame.KEYUP:
            right_eye.image=pygame.Surface((40, 40))
            left_eye.image=pygame.Surface((40, 40))
            mouth.image=pygame.Surface((360, 24))

        #following the CUD Rule (Clear,Update,Draw)
        allSprites.clear(screen,background)
        allSprites.update()
        allSprites.draw(screen)
        pygame.display.flip()
コード例 #45
0
def main():
    """Sample BME680/TSL2561/USB-mic sensors forever, logging each reading
    to a remote MySQL table and batching lines to a Corlysis/InfluxDB API.

    Setup failures are appended to an error log and abort the process;
    per-iteration failures are logged and the loop continues.
    """
    try:
        # Station identity: the Pi's wlan0 MAC address.
        mac_addr = open('/sys/class/net/wlan0/address').readline()

        # Connect to remote mySQL db
        con = MySQLdb.Connection(host=HOST,
                                 port=PORT,
                                 user=USER,
                                 passwd=PASSWORD,
                                 db=DB)
        c = con.cursor()
        c.execute(
            '''CREATE TABLE IF NOT EXISTS data(mac CHAR(17), temp FLOAT, pres FLOAT, hum FLOAT, gas FLOAT, lux INTEGER, db FLOAT, dt DATETIME)'''
        )

        # Initialize Corlysis db -- db name and token come from the CLI.
        parser = argparse.ArgumentParser()
        parser.add_argument("db", help="dataDB")
        parser.add_argument("token", help="35d4aa441b94cdbae7404050edd3fad6")
        args = parser.parse_args()
        corlysis_params = {
            "db": args.db,
            "u": "token",
            "p": args.token,
            "precision": "ms"
        }

        # Initialize sensor
        bme = bme680.BME680(i2c_addr=0x77)
        bme.set_humidity_oversample(bme680.OS_2X)
        bme.set_pressure_oversample(bme680.OS_4X)
        bme.set_temperature_oversample(bme680.OS_8X)
        bme.set_filter(bme680.FILTER_SIZE_3)
        bme.set_gas_status(bme680.ENABLE_GAS_MEAS)

        # Initialize USB mic
        pyaud = pyaudio.PyAudio()
        stream = pyaud.open(format=pyaudio.paInt16,
                            channels=1,
                            rate=32000,
                            input_device_index=2,
                            input=True)

        payload = ""
        counter = 1
        problem_counter = 0

        now = time.strftime('%Y-%m-%d %H:%M:%S')
        print("Readings began " + now)
        print("Press ctrl+c to end readings and close connection.")

        animation = "|/-\\"
        aniCount = 0

    except Exception as e:
        # Fatal setup error: record it and abort.
        # NOTE(review): mode "w" truncates any previous error log -- "a"
        # (append) is probably intended.
        print(e)
        f = open("/home/pi/smarterCampus/errors.txt", "w")
        f.write(str(e) + '\n')
        f.close()
        exit(1)

    # Main loop
    while (True):
        # Only have to write to log if an error occured
        ERROR = False
        try:
            # Get time for corlysis and db
            unix_time_ms = int(time.time() * 1000)
            now = time.strftime('%Y-%m-%d %H:%M:%S')

            # Read from BME
            bme.get_sensor_data()
            tempCelcius = float("{0:.2f}".format(bme.data.temperature))
            # Convert the above variable to fahrenheit
            temperature = float(tempCelcius * (9 / 5) + 32)
            pressure = float("{0:.2f}".format(bme.data.pressure))
            humidity = float("{0:.2f}".format(bme.data.humidity))
            # Convert ohms to kohms
            gas = float("{0:.2f}".format((bme.data.gas_resistance / 1000)))

            # Read from lux sensor
            tsl = TSL2561(debug=True)
            luxVal = tsl.lux()

            # Read from USB mic.  NOTE(review): numpy.fromstring on bytes is
            # deprecated (removed in NumPy 2.0); frombuffer is the modern
            # equivalent.  +65 is an empirical dB offset.
            rawsamps = stream.read(2048, exception_on_overflow=False)
            samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
            deciVal = analyse.loudness(samps) + 65

            # Queue one InfluxDB line-protocol record for the cloud batch.
            line = "sensors_data temperature={},pressure={},humidity={},gas={},luxVal={},decib={} {}\n".format(
                temperature, pressure, humidity, gas, luxVal, deciVal,
                unix_time_ms)
            payload += line

            if counter % SENDING_PERIOD == 0:
                try:
                    # try to send data to cloud
                    r = requests.post(URL,
                                      params=corlysis_params,
                                      data=payload)
                    if r.status_code != 204:
                        raise Exception("data not written")
                    payload = ""
                except:
                    # Keep the unsent batch, but cap it to avoid unbounded growth.
                    problem_counter += 1
                    print('cannot write to InfluxDB')
                    if problem_counter == MAX_LINES_HISTORY:
                        problem_counter = 0
                        payload = ""

            counter += 1

            # Print animation
            sys.stdout.write("\rCollecting data... " + animation[aniCount])
            sys.stdout.flush()
            aniCount += 1
            if (aniCount == 4):
                aniCount = 0

            # Sleep out the remainder of the sampling period.
            time_diff_ms = int(time.time() * 1000) - unix_time_ms
            # print(time_diff_ms)
            if time_diff_ms < READING_DATA_PERIOD_MS:
                time.sleep((READING_DATA_PERIOD_MS - time_diff_ms) / 1000.0)

            values = (mac_addr, temperature, pressure, humidity, gas, luxVal,
                      deciVal, now)
            add_val = ("INSERT INTO data "
                       "(mac, temp, pres, hum, gas, lux, db, dt)"
                       "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)")
            c.execute(add_val, values)
            con.commit()

        except KeyboardInterrupt:
            con.close()
            # NOTE(review): if ERROR is True, `f` refers to the handle opened
            # in the error branch below, which was never closed; if no error
            # ever occurred in this scope, `f` may be unbound -- confirm.
            if ERROR == True:
                f.write("End of log since " + now + "\n\n")
                f.close()
            exit(0)

        except Exception as e:
            # NOTE(review): the leading `pass` is dead code, mode "w"
            # truncates the previous error log, and `f` is never closed here
            # (it leaks one handle per failed iteration) -- candidates for a
            # follow-up fix.
            pass
            f = open("/home/pi/smarterCampus/Databases/Errors.txt", "w")
            print(e)
            f.write(str(e) + '\n')
            ERROR = True
コード例 #46
0
def main():
    """Continuously sample BME680, TSL2561 and a USB mic, logging to SQLite.

    One row is inserted into the local data.db per iteration; failed reads
    are printed and skipped.  The loop runs until interrupted (Ctrl+C), at
    which point the database connection is closed.
    """
    bme = bme680.BME680(i2c_addr=0x77)

    # Initialize db
    con = sqlite3.connect('data.db')
    c = con.cursor()
    c.execute(
        '''CREATE TABLE IF NOT EXISTS data(temp FLOAT, pres FLOAT, hum FLOAT, gas FLOAT, lux INTEGER, db FLOAT)'''
    )

    # Initialize sensor
    bme.set_humidity_oversample(bme680.OS_2X)
    bme.set_pressure_oversample(bme680.OS_4X)
    bme.set_temperature_oversample(bme680.OS_8X)
    bme.set_filter(bme680.FILTER_SIZE_3)
    bme.set_gas_status(bme680.ENABLE_GAS_MEAS)

    # Initialize USB mic
    pyaud = pyaudio.PyAudio()
    stream = pyaud.open(format=pyaudio.paInt16,
                        channels=1,
                        rate=32000,
                        input_device_index=2,
                        input=True)

    # Main loop.  The try/finally guarantees con.close() actually runs:
    # in the original it sat after `while True` and was unreachable, so
    # Ctrl+C left the connection open.
    try:
        while True:
            try:
                # Read from BME
                bme.get_sensor_data()
                tempCelcius = float("{0:.2f}".format(bme.data.temperature))
                # Convert the above variable to fahrenheit
                temperature = float(tempCelcius * (9 / 5) + 32)
                pressure = float("{0:.2f}".format(bme.data.pressure))
                humidity = float("{0:.2f}".format(bme.data.humidity))
                gas = float("{0:.2f}".format(bme.data.gas_resistance))

                # Read from lux sensor
                tsl = TSL2561(debug=True)
                luxVal = tsl.lux()

                # Read from USB mic.  frombuffer replaces the deprecated
                # (removed in NumPy 2.0) binary mode of numpy.fromstring;
                # +60 is an empirical offset onto an approximate dB scale.
                rawsamps = stream.read(2048, exception_on_overflow=False)
                samps = numpy.frombuffer(rawsamps, dtype=numpy.int16)
                deciVal = analyse.loudness(samps) + 60

                print("      BME680")
                print("Temperature: {}".format(temperature))
                print("Pressure: {}".format(pressure))
                print("Humidity: {}".format(humidity))
                print("Gas: {}".format(gas))
                print('\n')
                print("     TSL2561")
                print("Lux: {}".format(luxVal))
                print('\n')
                print("     USB Mic")
                print("------------------------")
                print("Sound in dB: {}".format(deciVal))

                values = (temperature, pressure, humidity, gas, luxVal, deciVal)
                c.execute("INSERT INTO data VALUES(?, ?, ?, ?, ?, ?)", values)

                con.commit()

                time.sleep(WAIT_PERIOD)
            except Exception as e:
                # Keep sampling after a bad read; just report it.
                # (The original had a dead `pass` before print(e).)
                print(e)
    finally:
        con.close()
コード例 #47
0
def main():
    """Take REPEAT sensor readings and insert them into a MySQL table.

    Each iteration reads BME680 (temp/pressure/humidity/gas), TSL2561 (lux)
    and a USB microphone (loudness), prints the readings, and queues an
    INSERT; all rows are committed together after the loop.
    """
    count = 0
    bme = bme680.BME680(i2c_addr=0x77)

    # Initialize db
    con = MySQLdb.Connection(host=HOST,
                             port=PORT,
                             user=USER,
                             passwd=PASSWORD,
                             db=DB)
    c = con.cursor()
    # c.execute(CREATE TABLE IF NOT EXISTS data (temp FLOAT, pres FLOAT, hum FLOAT, gas FLOAT, lux INT, dbs FLOAT))

    # Initialize sensor
    bme.set_humidity_oversample(bme680.OS_2X)
    bme.set_pressure_oversample(bme680.OS_4X)
    bme.set_temperature_oversample(bme680.OS_8X)
    bme.set_filter(bme680.FILTER_SIZE_3)
    bme.set_gas_status(bme680.ENABLE_GAS_MEAS)

    # Initialize USB mic
    pyaud = pyaudio.PyAudio()
    stream = pyaud.open(format=pyaudio.paInt16,
                        channels=1,
                        rate=32000,
                        input_device_index=2,
                        input=True)

    # Main loop
    while (count < REPEAT):
        # Record time
        now = time.strftime('%Y-%m-%d %H:%M:%S')

        # Read from BME
        bme.get_sensor_data()
        tempCelcius = float("{0:.2f}".format(bme.data.temperature))

        # Convert the above variable to fahrenheit
        temperature = float(tempCelcius * (9 / 5) + 32)
        pressure = float("{0:.2f}".format(bme.data.pressure))
        humidity = float("{0:.2f}".format(bme.data.humidity))
        gas = float("{0:.2f}".format(bme.data.gas_resistance))

        # Read from lux sensor
        tsl = TSL2561(debug=True)
        luxVal = tsl.lux()

        # Read from USB mic.  frombuffer replaces the deprecated (removed
        # in NumPy 2.0) binary mode of numpy.fromstring; +60 is an
        # empirical offset onto an approximate dB scale.
        rawsamps = stream.read(2048, exception_on_overflow=False)
        samps = numpy.frombuffer(rawsamps, dtype=numpy.int16)
        decib = analyse.loudness(samps) + 60

        print("      " + now)
        print("BME680--------------------------")
        print("Temperature: {}".format(temperature))
        print("Pressure: {}".format(pressure))
        print("Humidity: {}".format(humidity))
        print("Gas: {}".format(gas))
        print("TSL2561-------------------------")
        print("Lux: {}".format(luxVal))
        print("USB Mic-------------------------")
        print("Sound in dB: {}".format(decib))
        print("________________________________")

        values = (temperature, pressure, humidity, gas, luxVal, decib, now)
        add_val = ("INSERT INTO data "
                   "(temp, pres, hum, gas, lux, db, dt)"
                   "VALUES (%s, %s, %s, %s, %s, %s, %s)")
        c.execute(add_val, values)

        count += 1

        time.sleep(WAIT_PERIOD)
    # Single commit for the whole batch, then release the connection.
    con.commit()
    con.close()
コード例 #48
0
ファイル: test2.py プロジェクト: ExCiteS/SLMPi
 # Fragment of a pitch-visualisation loop: scroll an 8-second timeline,
 # mark the detected pitch, and keep a history of (x, y) points.
 tm += clk.tick()
 if tm > 8000: 
     tm = 0
     history = []
 # Read raw microphone data
 rawsamps = stream.read(1024)
 # Convert raw data to NumPy array
 samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
 # Clear the frame, then draw the moving time cursor
 screen.fill((0, 0, 0))
 tx = int((tm / 8000.0) * 1024.0)
 if tx > 1024: tx -= 1024
 pygame.draw.rect(screen, (0, 255, 0), (tx, 0, 3, 768))
 m, v = None, None
 m = analyse.musical_detect_pitch(samps, samplerate=samplerate)
 v = analyse.loudness(samps)
 if m is not None:
     # m is in range about 40-80
     ty = (m - 40.0) / 40.0
     # ty is in range 0-1
     ty = int(768 - ty * 768.0)
     # now ty is betwee 0 - 768
     pygame.draw.rect(screen, (0, 255, 255), (0, ty, 1024, 3))
     history.append((tx, ty))
 for (x, y) in history:
     pygame.draw.rect(screen, (255, 0, 0), (x, y, 3, 3))
 pygame.display.flip()
 # Keyboard quits; snippet is truncated inside the QUIT branch.
 for evt in pygame.event.get():
     if evt.type == pygame.KEYDOWN:
         sys.exit()
     if evt.type == pygame.QUIT:
コード例 #49
0
ファイル: BarkTracker.py プロジェクト: wdvr/BarkTracker
# listen for end of program
signal.signal(signal.SIGINT, signal_handler)

# Open a fresh stream per sample, take one chunk, close it, then decide
# whether a bark session has ended.
while True:
    stream = pyaud.open(format=audio_format,
                        channels=numChannels,
                        rate=sampleRate,
                        input_device_index=input_device_index,
                        input=True)

    rawsamps = stream.read(streamChunk)
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
    stream.close()

    current_loudness = analyse.loudness(samps)
    currentTime = datetime.datetime.now()

    timeDifference = currentTime - last_bark

    # Quiet again: if a bark alert was active and the dog has been calm for
    # reward_timer seconds, close out the current bark session.
    # NOTE(review): snippet is truncated after this branch.
    if current_loudness <= ambient_db:
        if bark_alert and timeDifference > datetime.timedelta(
                seconds=reward_timer):
            print("{0}: Bark stopped. Calm again.".format(
                currentTime.strftime("%H:%M:%S")))
            if session_email_sent:
                send_email_async("Bark alert lifted.", "All is calm again.")
            # Session end is backdated by the calm window.
            bark_sessions[-1][1] = currentTime - datetime.timedelta(
                seconds=reward_timer)
            session_email_sent = False
コード例 #50
0
    # Fragment of an audio-event loop (near-duplicate of an earlier snippet):
    # wait for a full window, then emit note events on pitch/attack changes.
    if not available > sample_size:
        time.sleep(0.01)
        continue

    rawsamps = stream.read(available)
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16, count=sample_size)

    event = ''

    midi_note = analyse.musical_detect_pitch(samps, min_note=28.0)

    if midi_note:
        # "paramaters" (sic) is a project config module -- name kept as-is.
        midi_note += paramaters.octave_correction * 12

        latest_note = notes.midi_to_note(midi_note)
        latest_vol = analyse.loudness(samps)

        # A sudden volume jump re-triggers even an unchanged note.
        attacked = latest_vol - last_vol > paramaters.attack_threshold

        if latest_note != last_note or attacked:
            if latest_vol > paramaters.minimum_volume:
                event = {'note':     latest_note,    'time':     t}
                last_time = t

            last_note = latest_note
            last_vol = latest_vol

    elif last_note:
        last_note = None

    print event
コード例 #51
0
ファイル: analyze.py プロジェクト: t-artistik/mhdproj2010
		format = pyaudio.paInt16,
		channels = 1,
		rate = 44100,
		input_device_index = 1,
		input = True)
	
	chr = 0
	i = 0
	while chr == 0:
		# Read raw microphone data
		rawsamps = AUDIO_INPUT.read(CHUNK_SIZE)
		# Convert raw data to NumPy array
		samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
		# Show the volume and pitch
		
		DATA.loudness.append(analyse.loudness(samps))
		DATA.pitch.append(Pitch(samps, mode = Modes.MINOR_PENTATONIC, key = Notes.C))
		# loop quit, only windows
		if msvcrt.kbhit():
			chr = msvcrt.getch()
		if  i <= 0:
			midi_player.set_instrument(115, channel = 1) # clicky instrument
			midi_player.play(60, 20, channel = 1)
			i = 35
		i -= 1
	AUDIO_INPUT.close()

else:
	# read data
	data = wf.readframes(CHUNK_SIZE)
	while data != '':
コード例 #52
0
def main():
    """Listen to the default audio input and log/push a notification every
    time loudness exceeds the configured ambient level.

    Installs a SIGINT handler so that CTRL+C prompts for confirmation and,
    on confirmation (or a second CTRL+C), closes the audio stream, logs the
    session end, sends a push notification and exits with status 1.
    """
    # Store the original SIGINT handler so it can be restored before we
    # block in raw_input (our handler is not re-entrant).
    original_sigint = signal.getsignal(signal.SIGINT)

    def _shutdown():
        """Close audio resources, log the session end, push, and exit.

        Factored out because the original duplicated this whole sequence in
        both the confirm branch and the KeyboardInterrupt branch.
        """
        # Restore the original signal handler as otherwise evil things will
        # happen in raw_input when CTRL+C is pressed, and our signal
        # handler is not re-entrant.
        signal.signal(signal.SIGINT, original_sigint)

        # Gracefully close stream and PyAudio
        stream.stop_stream()
        stream.close()
        p.terminate()

        # Log session end and send push
        helpers.logEvent(
            sessions_endpoint + session['name'],
            {'session_ended': datetime.datetime.now().isoformat()},
            id=True)
        helpers.sendPush('Session ended!')
        print('Session ended!')
        sys.exit(1)

    def end_session(*args):
        """SIGINT handler: ask for confirmation, then shut the session down."""
        try:
            if raw_input("\nReally quit? (y/n)> ").lower().startswith('y'):
                _shutdown()
        except KeyboardInterrupt:
            # A second CTRL+C at the prompt means "quit now".
            _shutdown()

    # Install the graceful-exit handler.
    signal.signal(signal.SIGINT, end_session)

    # Set up our PyAudio handler
    p = pyaudio.PyAudio()

    # Set up variables used by PyAudio
    chunk = 1024
    sample_rate = int(p.get_device_info_by_index(0)['defaultSampleRate'])
    num_channels = p.get_device_info_by_index(0)['maxInputChannels']
    audio_format = pyaudio.paInt16

    # Set up our variables (these will be moved to config)
    ambient_db = DefaultConfig.AMBIENT_DB  # the ambience noise level in db
    min_push_limit = DefaultConfig.PUSH_TIMER  # minutes between pushes
    last_sent_time = None

    # Log session start and send push
    session = helpers.logEvent(
        sessions_endpoint,
        {'session_started': datetime.datetime.now().isoformat()})
    helpers.sendPush('Session started!')
    print('Session started!')

    # Open input stream
    stream = p.open(format=audio_format,
                    channels=num_channels,
                    rate=sample_rate,
                    input=True)

    # Loop infinitely to process the data stream
    while True:
        raw_sample = stream.read(
            chunk, exception_on_overflow=False)  # Grab a raw chunk of data
        sample = numpy.fromstring(
            raw_sample, dtype=numpy.int16)  # Convert data to NumPy array
        loudness = analyse.loudness(sample)

        # If the loudness is greater than our ambient level, log the sound
        # and send a notification.
        if loudness > ambient_db:
            current_time = datetime.datetime.now()  # when the sound occurred

            # Log the noise whether we send a push or not.
            # BUG FIX: the original rebound `p` (the PyAudio instance that
            # the SIGINT handler terminates) to this Process, so shutdown
            # would have terminated the Process instead of PyAudio.  Use a
            # distinct name for worker processes.
            log_proc = Process(target=helpers.logEvent,
                               args=(
                                   barks_endpoint,
                                   {
                                       'loudness': loudness,
                                       'date': datetime.datetime.now().isoformat(),
                                       'session_id': session['name']
                                   },
                               ))
            log_proc.start()

            # Check to see when the last push was sent
            if last_sent_time is not None:
                time_delta = current_time - last_sent_time
            else:
                time_delta = datetime.timedelta(minutes=min_push_limit + 1)

            # Only send a push to the user if one hasn't been sent recently
            if time_delta > datetime.timedelta(minutes=min_push_limit):
                print("Loudness detected, sending push", str(current_time),
                      loudness)

                # Update last sent time
                last_sent_time = current_time

                # Sending the push runs in a process so that it won't block
                # (or crash) the capture loop.
                push_proc = Process(target=helpers.sendPush,
                                    args=(DefaultConfig.DOGNAME + " barked!", ))
                push_proc.start()
            else:
                print("Loudness detected, push already sent",
                      str(current_time), loudness)
コード例 #53
0
ファイル: realtime.py プロジェクト: t-artistik/mhdproj2010
	def __init__():
		# START	
		p = pyaudio.PyAudio()



		def countdown(secs):
			while secs > 0:
				print secs
				sleep(1)
				secs -= 1
						
		#
		# STAGE 1
		#
		# Run analyse on input stream
		#
		midi_player.play(60 + Notes.C)
		print "Sing!"

		# Open input stream, 16-bit mono at 44100 Hz
		AUDIO_INPUT = p.open(
			format = pyaudio.paInt16,
			channels = 1,
			rate = 44100,
			input_device_index = 1,
			input = True)


			
		chr = 0
		time = 0
		last_tone = None
		last_played = None
		last_played_time = 0
		while True:
			# Read raw microphone data
			rawsamps = AUDIO_INPUT.read(CHUNK_SIZE)
			# Convert raw data to NumPy array
			samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
			
			# get vol and pitch
			loudness = analyse.loudness(samps)
			pitch = Pitch(samps, mode = Modes.MINOR_PENTATONIC, key = Notes.C)
			
			# analyze pitch
			if loudness > VOLUME_THRESHOLD:
				tone = pitch.tone
				if tone != last_tone:
					last_time = time
					#
					# note changed
					#
					
					if last_played and tone and (abs(last_played - tone) > 11):
						# likely to be stray
						print "OVERLAP", (time - last_played_time)
						if (time - last_played_time) > 7:
							F_Stray = 0.8
						else:
						
							F_stray = 0.0	
					if last_played and tone and last_played == tone:
						print "REPEAT", (time - last_played_time)
						if (time - last_played_time) < 22:

							F_stray = 0.3
						else:
							F_stray = 1.0
					else:
						F_stray = 1.0
					

					D = F_stray
					print "%4s %3.f" % (pitch.to_tone_string() or "--", F_stray)
					
					
					if D > 0.6:
						#midi_player.play(tone)
						SendMidi.sendNote(tone)
						if tone is not None:
							last_played = tone
							last_played_time = time
					
					last_tone = tone
			
			# advance time
			time += 1
コード例 #54
0
ファイル: BarkTracker.py プロジェクト: FOSSRIT/BarkTracker
	mailServer.starttls()
	mailServer.ehlo()
	mailServer.login(gmailUser, gmailPassword)                              #logs into email address (entered above) using password (also entered above)
	mailServer.sendmail(gmailUser, recipient, message.as_string())          #send the email to the recipient
	mailServer.close()                                                      #Stop doing things with the mail server
	
print("Starting BarkTracker")
	
# Main monitoring loop: sample the microphone and email the owner when the
# loudness exceeds the ambient threshold, rate-limited by `email_timer`.
# Relies on module-level `stream`, `streamChunk`, `ambient_db`, `email_timer`,
# `emailSentAt` and `sendEmail` defined above.
while True:
	# Read one chunk of raw microphone data.
	rawsamps = stream.read(streamChunk)
	# Reinterpret the raw bytes as signed 16-bit samples.
	samps = numpy.fromstring(rawsamps, dtype = numpy.int16)

	# Send an email if the loudness exceeds the ambient noise level.
	if analyse.loudness(samps) >= ambient_db:
		# This is the time at which the sound was detected.
		currentTime = datetime.datetime.now()   

		# Check how long ago the last email was sent; if none was ever
		# sent, fabricate a delta just past the limit so one goes out now.
		if(emailSentAt != None):
			timeDifference = currentTime - emailSentAt 
		else:
			timeDifference = datetime.timedelta(minutes=email_timer + 1)

		# Only send an email to the user if one hasn't been sent recently.
		if(timeDifference > datetime.timedelta(minutes=email_timer)):
			print ("Sending email about dog!")      
			emailSentAt = currentTime
			p = Process(target=sendEmail)   # emailing runs in a separate process so a failure won't crash the loop
			p.start()                       # actually start the process
コード例 #55
0
ファイル: corlysis.py プロジェクト: gpadriga/smarterCampus
def main():
    """Sample environment sensors and microphone loudness once per period,
    persist each reading to a local SQLite database and batch-send them to
    a Corlysis/InfluxDB endpoint.

    Command line: <db> <token> — target database name and write token.
    Runs until interrupted with CTRL+C.
    """
    # Initialize local db (one row per reading)
    con = sqlite3.connect('corlysisData.db')
    c = con.cursor()
    c.execute(
        '''CREATE TABLE IF NOT EXISTS data(temp FLOAT, pres FLOAT, hum FLOAT, gas FLOAT, lux INTEGER, db FLOAT, dt DATETIME)'''
    )

    # Parse cloud-db connection parameters from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument("db", help="dataDB")
    parser.add_argument("token", help="35d4aa441b94cdbae7404050edd3fad6")
    args = parser.parse_args()
    corlysis_params = {
        "db": args.db,
        "u": "token",
        "p": args.token,
        "precision": "ms"
    }

    # Initialize BME680 sensor (temperature/pressure/humidity/gas).
    bme = bme680.BME680(i2c_addr=0x77)
    bme.set_humidity_oversample(bme680.OS_2X)
    bme.set_pressure_oversample(bme680.OS_4X)
    bme.set_temperature_oversample(bme680.OS_8X)
    bme.set_filter(bme680.FILTER_SIZE_3)
    bme.set_gas_status(bme680.ENABLE_GAS_MEAS)

    # Initialize USB mic (device index 2), 16-bit mono at 32 kHz.
    pyaud = pyaudio.PyAudio()
    stream = pyaud.open(format=pyaudio.paInt16,
                        channels=1,
                        rate=32000,
                        input_device_index=2,
                        input=True)

    payload = ""          # accumulated InfluxDB line-protocol lines
    counter = 1           # readings taken so far
    problem_counter = 0   # consecutive failed uploads

    now = time.strftime('%Y-%m-%d %H:%M:%S')
    print("Readings began " + now)
    print("Press ctrl+c to end readings and close connection.")

    # Spinner characters shown while collecting.
    animation = "|/-\\"
    aniCount = 0

    # Main loop
    while (True):
        try:
            # Get time for corlysis and db
            unix_time_ms = int(time.time() * 1000)
            now = time.strftime('%Y-%m-%d %H:%M:%S')

            # Read from BME
            bme.get_sensor_data()
            tempCelcius = float("{0:.2f}".format(bme.data.temperature))
            # BUG FIX: was `9 / 5`, which is integer division (== 1) under
            # Python 2 and silently produced Celsius + 32 instead of
            # Fahrenheit.  Float constants are correct on both 2 and 3.
            temperature = float(tempCelcius * (9.0 / 5.0) + 32)
            pressure = float("{0:.2f}".format(bme.data.pressure))
            humidity = float("{0:.2f}".format(bme.data.humidity))
            gas = float("{0:.2f}".format(bme.data.gas_resistance))

            # Read from lux sensor.
            # NOTE(review): a new TSL2561 is constructed on every iteration;
            # looks hoistable out of the loop, but confirm the driver does
            # not rely on per-read initialisation before moving it.
            tsl = TSL2561(debug=True)
            luxVal = tsl.lux()

            # Read from USB mic
            rawsamps = stream.read(2048, exception_on_overflow=False)
            # frombuffer is the non-deprecated equivalent of fromstring for
            # reinterpreting raw capture bytes as int16 samples (read-only
            # view is fine here — the data is only analysed).
            samps = numpy.frombuffer(rawsamps, dtype=numpy.int16)
            dB = analyse.loudness(samps) + 60

            line = "sensors_data temperature={},pressure={},humidity={},luxVal={},decib={} {}\n".format(
                temperature, pressure, humidity, luxVal, dB, unix_time_ms)
            payload += line

            if counter % SENDING_PERIOD == 0:
                try:
                    # try to send data to cloud
                    r = requests.post(URL,
                                      params=corlysis_params,
                                      data=payload)
                    if r.status_code != 204:
                        raise Exception("data not written")
                    payload = ""
                except Exception:
                    # BUG FIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit and made CTRL+C
                    # unreliable during uploads.
                    problem_counter += 1
                    print('cannot write to InfluxDB')
                    # Drop the backlog once it grows past the history cap so
                    # a long outage cannot grow the payload without bound.
                    if problem_counter == MAX_LINES_HISTORY:
                        problem_counter = 0
                        payload = ""

            counter += 1

            sys.stdout.write("\rCollecting data... " + animation[aniCount])
            sys.stdout.flush()
            aniCount = (aniCount + 1) % 4

            # Sleep out the remainder of the sampling period.
            time_diff_ms = int(time.time() * 1000) - unix_time_ms
            if time_diff_ms < READING_DATA_PERIOD_MS:
                time.sleep((READING_DATA_PERIOD_MS - time_diff_ms) / 1000.0)

            values = (temperature, pressure, humidity, gas, luxVal, dB, now)
            c.execute("INSERT INTO data VALUES(?, ?, ?, ?, ?, ?, ?)", values)

            con.commit()

        except KeyboardInterrupt:
            # Release audio resources (previously leaked) and the database
            # connection on exit.
            stream.stop_stream()
            stream.close()
            pyaud.terminate()
            con.close()
            break

        except Exception as e:
            # Keep sampling through transient sensor/IO errors, but report
            # them (the dead `pass` before the print was removed).
            print(e)
コード例 #56
0
import numpy
import pyaudio
import analyse

# Set up the PortAudio host via PyAudio.
pyaud = pyaudio.PyAudio()

# 16-bit mono capture at 16 kHz from input device 2 (the USB microphone),
# delivered in 1024-frame buffers.
stream = pyaud.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=16000,
                    input_device_index=2,
                    frames_per_buffer=1024,
                    input=True)

# Poll the microphone forever, printing the measured volume of each chunk.
while True:
    # Grab a raw 2048-frame chunk; overflows are ignored rather than raised.
    raw_block = stream.read(2048, exception_on_overflow=False)
    # Reinterpret the raw capture bytes as signed 16-bit samples.
    samples = numpy.fromstring(raw_block, dtype=numpy.int16)
    # Report the loudness of this chunk in decibels.
    print("Sound in dB: " + str(analyse.loudness(samples)))
コード例 #57
0
ファイル: record.py プロジェクト: gmkey/raspberrypi
def main():
    volume_array = []
    prev_state = "down"
    l = LED_controll()
    while True:
        raw_sample_data = record_sample()
        base_volume_established = False
        if raw_sample_data:
            for raw_sample in raw_sample_data:
                sample = numpy.fromstring(raw_sample, dtype=numpy.int16)
		vol = analyse.loudness(sample)
		if vol > -14:
                	volume_array.append(analyse.loudness(sample))
			print(vol)

            if len(volume_array) > 10:
                base_volume_established = True

            if base_volume_established:
                for i in range(0, len(volume_array) - 10):
                    volume_array.pop(0)
                current_input = [int(abs(i - sum(volume_array)/10)*10) for i in volume_array if i < 19]

                if current_input[0] > current_input[9] and current_input[0] > current_input[4] and not prev_state == "up" and current_input[0]-current_input[4] > 3:
                    print('up', current_input[4]-current_input[0])
                    prev_state = "up"
                    l.spaceship_min()
                    if current_input[0]-current_input[4] > 4:
                        l.flip_painting()
                    if current_input[0]-current_input[4] > 6:
                        l.flip_robot()
                        l.painting_min(2, rnd=0.9)
                        l.spaceship_min()

                if current_input[0] < current_input[9] and current_input[0] < current_input[4] and not prev_state == "down" and current_input[4]-current_input[0] > 3:
                    print('down', current_input[0]-current_input[4])
                    prev_state = "down"
                    l.spaceship_min()
                    if current_input[4]-current_input[0] > 4:
                        l.flip_painting()
                    if current_input[4]-current_input[0] > 6:
                        l.flip_robot()
                        l.painting_min(2, rnd=0.9)
                        l.spaceship_min()

                if current_input[0] < current_input[9] and current_input[0] < current_input[4]:
                    prev_state = "down"
                if current_input[0] > current_input[9] and current_input[0] > current_input[4]:
                    prev_state = "up"


                if abs(current_input[-1] - current_input[-2]) > 2:
                    if random.random() < 0.5:
                        l.spaceship_min(2, s=0.002, rnd=0.3)
                    else:
                        l.spaceship_max(2, s=0.002, rnd=0.9)
                if current_input[-1] > 20:
                    if random.random() > 0.5:
                        l.painting_max(rnd=0.8)
                    else:
                        l.spaceship_max(rnd=0.8)
                if current_input[-1] > 40:
                    l.painting_max()
                    l.spaceship_max()
                if current_input[-1] > 47:
                    for i in range(40):
                        l.flip_robot()