Example No. 1
def testcross(data):
    if verbose:
        print 'cross'
    if audioop.cross(data[0], 1) != 0 or \
              audioop.cross(data[1], 2) != 0 or \
              audioop.cross(data[2], 4) != 0:
        return 0
    return 1
Example No. 2
def testcross(data):
    if verbose:
        print 'cross'
    if audioop.cross(data[0], 1) <> 0 or \
              audioop.cross(data[1], 2) <> 0 or \
              audioop.cross(data[2], 4) <> 0:
        return 0
    return 1
Example No. 3
    def cross(self):
        """Return the number of zero crossings in frames.

        :returns: number of zero crossings

        """
        return audioop.cross(self._frames, self._sampwidth)
Example No. 4
    def cross(self):
        """ Return the number of zero crossings in frames.

        :returns: number of zero crossings

        """
        return audioop.cross(self._frames, self._sampwidth)
Example No. 5
    def cross(self):
        """
        Return the number of zero crossings in frames.

        @return number of zero crossings

        """
        return audioop.cross(self.frames, self.sampwidth)
Example No. 6
 def get_cross(self):
     """
     Return the number of zero crossings.
     
     @return number of zero crossings
     
     """
     # audioop.cross() requires a sample width argument; self.sampwidth is
     # assumed here, matching the similar method shown above.
     return audioop.cross(self.frames, self.sampwidth)
Example No. 7
 def test_cross(self):
     for w in 1, 2, 3, 4:
         self.assertEqual(audioop.cross(b'', w), -1)
         self.assertEqual(audioop.cross(bytearray(), w), -1)
         self.assertEqual(audioop.cross(memoryview(b''), w), -1)
         p = packs[w]
         self.assertEqual(audioop.cross(p(0, 1, 2), w), 0)
         self.assertEqual(audioop.cross(p(1, 2, -3, -4), w), 1)
         self.assertEqual(audioop.cross(p(-1, -2, 3, 4), w), 1)
         self.assertEqual(audioop.cross(p(0, minvalues[w]), w), 1)
         self.assertEqual(audioop.cross(p(minvalues[w], maxvalues[w]), w), 1)
Example No. 8
 def test_cross(self):
     for w in 1, 2, 3, 4:
         self.assertEqual(audioop.cross(b"", w), -1)
         self.assertEqual(audioop.cross(bytearray(), w), -1)
         self.assertEqual(audioop.cross(memoryview(b""), w), -1)
         p = packs[w]
         self.assertEqual(audioop.cross(p(0, 1, 2), w), 0)
         self.assertEqual(audioop.cross(p(1, 2, -3, -4), w), 1)
         self.assertEqual(audioop.cross(p(-1, -2, 3, 4), w), 1)
         self.assertEqual(audioop.cross(p(0, minvalues[w]), w), 1)
         self.assertEqual(audioop.cross(p(minvalues[w], maxvalues[w]), w), 1)
Example No. 9
def cross(fragment, sampwidth):
    """
    Return the number of zero crossings in the fragment passed as an argument.

    @param fragment (string) input frames.
    @param sampwidth (int) sample width of the frames.
    @return number of zero crossings

    """
    return audioop.cross(fragment, sampwidth)
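A minimal usage sketch for this wrapper, assuming a hypothetical fragment of 16-bit signed little-endian samples packed with struct: audioop.cross() counts transitions between negative and non-negative samples, and returns -1 for an empty fragment (as the tests above also check).

import audioop
import struct

# Hypothetical 16-bit mono fragment: 0, 1000, -1000, 2000, -2000.
samples = [0, 1000, -1000, 2000, -2000]
fragment = struct.pack('<5h', *samples)

# Three sign changes (+ to -, - to +, + to -), so three zero crossings.
print(audioop.cross(fragment, 2))   # 3
print(audioop.cross(b'', 2))        # -1 for an empty fragment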
Example No. 10
def speech_recognize(str_data):
    global recording, record_file, access_token
    if audioop.cross(str_data, SAM_WID) > CROSS_NUM:
        if not recording:
            recording = True
            record_file = ''  #start a new record
        record_file = record_file + str_data
        #store audio data in record_file
    else:
        if recording:
            record_file = record_file + str_data
            #upload record file to cloud server and get result
            call_cloud(record_file)
            recording = False
Example No. 11
 def test_cross(self):
     for w in 1, 2, 4:
         self.assertEqual(audioop.cross(b'', w), -1)
         p = packs[w]
         self.assertEqual(audioop.cross(p(0, 1, 2), w), 0)
         self.assertEqual(audioop.cross(p(1, 2, -3, -4), w), 1)
         self.assertEqual(audioop.cross(p(-1, -2, 3, 4), w), 1)
         self.assertEqual(audioop.cross(p(0, minvalues[w]), w), 1)
         self.assertEqual(audioop.cross(p(minvalues[w], maxvalues[w]), w), 1)
Example No. 12
def speech_recognize(str_data):
    global recording, record_file
    if audioop.cross(str_data, SAM_WID) > CROSS_NUM:
        if not recording:
            recording = True
            record_file = wave.open(TEMP_REC_FILE, 'wb')
            record_file.setnchannels(CH_NUM)
            record_file.setsampwidth(SAM_WID)
            record_file.setframerate(SAM_RAT)
        record_file.writeframes(str_data)
        #store audio data in record_file
    else:
        if recording:
            record_file.writeframes(str_data)
            record_file.close()
            recording = False
            #upload record file to cloud server and get result
            call_cloud()
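The speech_recognize snippets above rely on several module-level names that are not shown. A hypothetical setup might look like the following; the names come from the snippets, but every value here is illustrative only.

SAM_WID = 2                          # sample width in bytes (16-bit audio)
CH_NUM = 1                           # number of channels (mono)
SAM_RAT = 16000                      # sample rate in Hz
CROSS_NUM = 100                      # zero-crossing count above which recording starts
TEMP_REC_FILE = 'temp_record.wav'    # temporary file the recording is written to
recording = False                    # whether a recording is currently in progress
record_file = None                   # wave.Wave_write object while recording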
Example No. 13
 def test_cross(self):
     self.assertEqual(audioop.cross(data[0], 1), 0)
     self.assertEqual(audioop.cross(data[1], 2), 0)
     self.assertEqual(audioop.cross(data[2], 4), 0)
Example No. 14
def testcross(data):
	if audioop.cross(data[0], 1) <> 0 or \
		  audioop.cross(data[1], 2) <> 0 or \
		  audioop.cross(data[2], 4) <> 0:
		return 0
	return 1
Example No. 15
    def handle_audio(self, buf=None, frame_count=None, time_info=None, status=None, realtime=True):
        if not self.mic and realtime: buf = self.wf.readframes(frame_count)

        if not buf: return

        self.ichunk += 1


        meanRMS = 1.0*sum(self.deque_mean)/max(len(self.deque_mean),1)
        meanFreq = 1.0*sum(self.deque_freq)/max(len(self.deque_freq),1)


        mean_rms = audioop.rms(buf,self.sampwidth) 
        mean_freq = 0.5*audioop.cross(buf,self.sampwidth) * self.RATE / self.CHUNK

        fourier = np.fft.fft(self.framesToNumpy(buf))
        magnitudes = np.abs(fourier[self.freqs_indices])


        low_indices = ((50.0 < self.freqs) & (self.freqs < 500.0)).astype(bool)
        mid_indices = ((500.0 < self.freqs) & (self.freqs < 2000.0)).astype(bool)
        high_indices = (2000.0 < self.freqs).astype(bool)

        low_freqs = self.freqs[low_indices]
        low_mags = magnitudes[low_indices]
        low_weighted = np.dot(low_freqs, low_mags)/np.sum(low_mags)
        low_mag_sum = np.sum(low_mags)

        mid_freqs = self.freqs[mid_indices]
        mid_mags = magnitudes[mid_indices]
        mid_weighted = np.dot(mid_freqs, mid_mags)/np.sum(mid_mags)
        mid_mag_sum = np.sum(mid_mags)

        high_freqs = self.freqs[high_indices]
        high_mags = magnitudes[high_indices]
        high_weighted = np.dot(high_freqs, high_mags)/np.sum(high_mags)
        high_mag_sum = np.sum(high_mags)

        mag_sum = low_mag_sum + mid_mag_sum + high_mag_sum

        # print "%4.0f %4.0f %4.0f %4.0f %4.0f %4.0f" % (low_weighted, mid_weighted, high_weighted, \
        #                                                np.sum(low_mags), np.sum(mid_mags), np.sum(high_mags))


        
        fact = max(0.1,min(1.0,meanRMS / 5000.0))
        rgb = [fact*100.0*low_mag_sum/mag_sum, fact*100.0*mid_mag_sum/mag_sum, fact*100.0*high_mag_sum/mag_sum]
        rgb = rgb[::-1]
        try:
            if RPI and realtime:
                self.led.switch_color_rgb(rgb[0], rgb[1], rgb[2])
        except: pass

        # print peak_frequencies

        # now want to find frequencies ordered by magnitudes

        # try:
            # plt.cla()
        # except: pass

        # try:
        #     # plt.plot(self.freqs, magnitudes)
        #     print time.time()-self.t0, meanFreq
        #     plt.scatter(time.time()-self.t0, meanFreq)
        #     plt.show()
        # except: 
        #     pass

        self.deque_mean.append(mean_rms)
        self.deque_freq.append(mean_freq)
        self.deque_time.append(time.time())

        if realtime:
            hue = max(0.11, min(0.9,1.0*(meanFreq-500)/1500))
            lum = max(0.05, min(1.0,1.0*(meanRMS-1500)/8000))
            # self.led.switch_color(hue, lum)

            meas_chunk_time = max((self.deque_time[-1] - self.deque_time[0]) / max(len(self.deque_time)-1,1), 0.001)
            pred_chunk_time = 1.0*self.CHUNK/self.RATE
            perfpct = 100.0*pred_chunk_time/meas_chunk_time # < 100%, we're computing slowly, >100% we're time-traveling (good?). ~100% we're good
            t = int(1.0 * self.ichunk * self.CHUNK / self.RATE)
            line = self.draw_bar( meanRMS, 0,500, 30, extra="[%3.0f%% speed] [%4is] [%5.1f Hz] [%1.2f %1.2f]" % (perfpct, t, meanFreq, hue,lum) )
            sys.stdout.write("\r"+ " "*100) # clear the whole line
            sys.stdout.write("\r" + line + " ")
            sys.stdout.flush()

        return (buf, pyaudio.paContinue)
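The mean_freq line above turns the zero-crossing count into a rough pitch estimate: a tone crosses zero roughly twice per cycle, so half the crossing count divided by the chunk duration approximates its frequency. A minimal sketch of that estimate on a synthesized 440 Hz sine, using illustrative RATE and CHUNK values:

import math
import struct
import audioop

RATE, CHUNK, TONE = 44100, 4096, 440.0    # illustrative values
samples = [int(30000 * math.sin(2 * math.pi * TONE * n / RATE)) for n in range(CHUNK)]
buf = struct.pack('<%dh' % CHUNK, *samples)

crossings = audioop.cross(buf, 2)           # sign changes within the chunk
est_freq = 0.5 * crossings * RATE / CHUNK   # ~2 crossings per cycle
print(crossings, est_freq)                  # est_freq comes out close to 440 Hz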
Example No. 16
import time
import audioop
import alsaaudio
from pylab import *

sample_rate = 44100
inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK)

inp.setchannels(1)
inp.setrate(sample_rate)
inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)

inp.setperiodsize(1600)

DATA = []

for x in range(10000):
    l, data = inp.read()
    DATA.append(data)
    cross = audioop.cross(data, 2)  # zero crossings in this chunk
    print(cross, ' ', end='')
    if cross > 0:
        # rough frequency estimate from the crossing count
        print((44100.0 / 5.0) / cross)
    print()
    time.sleep(5.0 / sample_rate)

# Note: a PCM opened for capture cannot be played back through; playback would
# need a separate alsaaudio.PCM(alsaaudio.PCM_PLAYBACK) device.
for chunk in DATA:
    inp.write(chunk)

print(len(DATA))

#plot(DATA)
#show()
Example No. 17
import audioop
import wave

import boto3

# Assumed setup (not shown in the snippet): a boto3 S3 resource and the name
# of the bucket that holds the recording.
s3 = boto3.resource('s3')
input_bucket = 'my-input-bucket'  # hypothetical bucket name
bucket = s3.Bucket(input_bucket)

# Downloading an audio file from s3 to the local system
bucket.download_file('****{wave file URL on s3}****.wav',
                     r'../path_for_local_system/recording.wav')

wav = wave.open(r'../../recordings.wav')
string_wav = wav.readframes(wav.getnframes())

print('getsampwidth', wav.getsampwidth())
print('get n frames', wav.getnframes())
print('avg: ', audioop.avg(string_wav, wav.getsampwidth()))
print('rms: ', audioop.rms(string_wav, wav.getsampwidth()))
print('avgpp: ', audioop.avgpp(string_wav, wav.getsampwidth()))
print('zero_crossings: ', audioop.cross(string_wav, wav.getsampwidth()))
print('maxpp: ', audioop.maxpp(string_wav, wav.getsampwidth()))
print('max min: ', audioop.minmax(string_wav, wav.getsampwidth()))

# Append the same statistics as one row of a CSV file.
recording_name = 'recordings.wav'  # hypothetical label for the first column
with open(r'../../decibel_values.csv', 'a') as f:
    f.write(','.join(['recording_name', 'sample_width', 'n_frames', 'avg', 'rms',
                      'avgpp', 'zero_crossings', 'maxpp', 'min_max']) + '\n')
    f.write(','.join(str(v) for v in (
        recording_name, wav.getsampwidth(), wav.getnframes(),
        audioop.avg(string_wav, wav.getsampwidth()),
        audioop.rms(string_wav, wav.getsampwidth()),
        audioop.avgpp(string_wav, wav.getsampwidth()),
        audioop.cross(string_wav, wav.getsampwidth()),
        audioop.maxpp(string_wav, wav.getsampwidth()),
        audioop.minmax(string_wav, wav.getsampwidth()))) + '\n')