Example #1
0
 def test_maxpp(self):
     """audioop.maxpp(): 0 for empty and monotonic fragments, 10 for the
     9..1 wobble, and the full min/max span for the datas fixtures."""
     for w in 1, 2, 4:
         self.assertEqual(audioop.maxpp(b'', w), 0)
         # A monotonic ramp has no direction change, so peak-to-peak is 0.
         self.assertEqual(audioop.maxpp(packs[w](*range(100)), w), 0)
         self.assertEqual(audioop.maxpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
         self.assertEqual(audioop.maxpp(datas[w], w),
                          maxvalues[w] - minvalues[w])
Example #2
0
def get_swipe(dev='/dev/audio'):
    audio = ossaudiodev.open(dev, 'r')
    audio.setparameters(ossaudiodev.AFMT_S16_LE, 1, 44100)

    baselines = deque([2**15] * 4)
    bias = 0
    while 1:
        data, power = get_chunk(audio, bias)

        baseline = sum(baselines) / len(baselines) * THRESHOLD_FACTOR
        print power, baseline, power / (baseline or 1)

        chunks = []
        while power > baseline:
            print power, baseline, power / (baseline or 1), '*'
            chunks.append(data)
            data, power = get_chunk(audio, bias)

        if len(chunks) > 1:
            data = old_data + ''.join(chunks) + data
            while audioop.maxpp(data[:3000], 2) < baseline / 2:
                data = data[1000:]
            while audioop.maxpp(data[-3000:], 2) < baseline / 2:
                data = data[:-1000]

            return audioop.bias(data, 2, -audioop.avg(data, 2))

        old_data = data

        bias = -audioop.avg(data, 2)

        baselines.popleft()
        baselines.append(power)
 def test_maxpp(self):
     """maxpp() is 0 for silence/monotonic data, 10 for the wobble, and
     spans the full min/max range for the datas fixtures."""
     for width in 1, 2, 4:
         self.assertEqual(audioop.maxpp(b'', width), 0)
         monotonic = packs[width](*range(100))
         self.assertEqual(audioop.maxpp(monotonic, width), 0)
         wobble = packs[width](9, 10, 5, 5, 0, 1)
         self.assertEqual(audioop.maxpp(wobble, width), 10)
         full_span = maxvalues[width] - minvalues[width]
         self.assertEqual(audioop.maxpp(datas[width], width), full_span)
Example #4
0
def get_swipe(audio):
    """Capture one card swipe from *audio*.

    Waits for a burst of sound louder than a rolling noise baseline,
    trims quiet edges from the captured bytes and returns them with the
    DC offset removed (16-bit samples).
    """
    print("READY")
    noise_window = deque([2**15] * 4)
    dc_bias = 0
    previous = b""
    while True:
        data, power = get_chunk(audio, dc_bias)

        # Noise floor = average of the last few quiet chunks, scaled up.
        threshold = sum(noise_window) / len(noise_window) * THRESHOLD_FACTOR
        logging.debug((power, threshold, power / (threshold or 1)))

        burst = []
        while power > threshold:
            logging.debug((power, threshold, power / (threshold or 1), "*"))
            burst.append(data)
            data, power = get_chunk(audio, dc_bias)

        if len(burst) > 1:
            data = previous + b"".join(burst) + data
            # Strip leading/trailing silence in 1000-byte steps.
            while audioop.maxpp(data[:3000], 2) < threshold / 2:
                data = data[1000:]
            while audioop.maxpp(data[-3000:], 2) < threshold / 2:
                data = data[:-1000]

            return audioop.bias(data, 2, -audioop.avg(data, 2))

        previous = data

        dc_bias = -audioop.avg(data, 2)

        noise_window.popleft()
        noise_window.append(power)
Example #5
0
def get_swipe(dev='/dev/audio'):
    audio = ossaudiodev.open(dev, 'r')
    audio.setparameters(ossaudiodev.AFMT_S16_LE, 1, 44100)
    
    baselines = deque([2**15] * 4)
    bias = 0
    while 1:
        data, power = get_chunk(audio, bias)
        
        baseline = sum(baselines) / len(baselines) * THRESHOLD_FACTOR
        print power, baseline, power / (baseline or 1)
        
        chunks = []
        while power > baseline:
            print power, baseline, power / (baseline or 1), '*'
            chunks.append(data)
            data, power = get_chunk(audio, bias)

        if len(chunks) > 1:
            data = old_data + ''.join(chunks) + data
            while audioop.maxpp(data[:3000], 2) < baseline / 2:
                data = data[1000:]
            while audioop.maxpp(data[-3000:], 2) < baseline / 2:
                data = data[:-1000]
            
            return audioop.bias(data, 2, -audioop.avg(data, 2))

        old_data = data
        
        bias = -audioop.avg(data, 2)
        
        baselines.popleft()
        baselines.append(power)
Example #6
0
def testmaxpp(data):
    if verbose:
        print 'maxpp'
    if audioop.maxpp(data[0], 1) != 0 or \
              audioop.maxpp(data[1], 2) != 0 or \
              audioop.maxpp(data[2], 4) != 0:
        return 0
    return 1
def testmaxpp(data):
    if verbose:
        print 'maxpp'
    if audioop.maxpp(data[0], 1) <> 0 or \
              audioop.maxpp(data[1], 2) <> 0 or \
              audioop.maxpp(data[2], 4) <> 0:
        return 0
    return 1
Example #8
0
def segment_input_file(threshold, power_threshold, frequency_threshold, begin_threshold, WAVE_INPUT_FILE, WAVE_OUTPUT_FILENAME, WAVE_OUTPUT_FILE_EXTENSION):
    """Slice WAVE_INPUT_FILE into sliding windows and save every window that
    exceeds all three thresholds as its own wav file.

    threshold            -- minimum normalised peak-to-peak intensity
    power_threshold      -- minimum recording power (from get_recording_power)
    frequency_threshold  -- minimum loudest frequency (from get_loudest_freq)
    begin_threshold      -- max consecutive windows to save; negative values
                            instead delay saving by |begin_threshold| windows
    Relies on module globals RECORD_SECONDS, SLIDING_WINDOW_AMOUNT, FORMAT.
    """
    audioFrames = []

    wf = wave.open(WAVE_INPUT_FILE, 'rb')
    number_channels = wf.getnchannels()
    total_frames = wf.getnframes()
    frame_rate = wf.getframerate()
    frames_to_read = round( frame_rate * RECORD_SECONDS / SLIDING_WINDOW_AMOUNT )

    files_recorded = 0
    delay_threshold = 0
    # Negative begin_threshold means "skip the first N matching windows";
    # the save cap is then effectively unlimited (1000).
    if( begin_threshold < 0 ):
        delay_threshold = begin_threshold * -1
        begin_threshold = 1000

    audio = pyaudio.PyAudio()
    record_wave_file_count = 0
    index = 0
    while( wf.tell() < total_frames ):
        index = index + 1
        raw_wav = wf.readframes(frames_to_read * number_channels)

        # If our wav file is shorter than the amount of bytes ( assuming 16 bit ) times the frames, we discard it and assume we arrived at the end of the file
        if (len(raw_wav) != 2 * frames_to_read * number_channels ):
            break;
        else:
            audioFrames.append(raw_wav)
            if( len( audioFrames ) >= SLIDING_WINDOW_AMOUNT ):
                audioFrames = audioFrames[-SLIDING_WINDOW_AMOUNT:]
                # Normalised peak-to-peak of the two window halves
                # (maxpp width 4, scaled by the 16-bit max).
                intensity = [
                    audioop.maxpp( audioFrames[0], 4 ) / 32767,
                    audioop.maxpp( audioFrames[1], 4 ) / 32767
                ]
                highestintensity = np.amax( intensity )

                byteString = b''.join(audioFrames)
                fftData = np.frombuffer( byteString, dtype=np.int16 )
                frequency = get_loudest_freq( fftData, RECORD_SECONDS )
                power = get_recording_power( fftData, RECORD_SECONDS )

                print( "Segmenting file " + WAVE_INPUT_FILE + ": " + str( math.ceil(wf.tell() / total_frames * 100) ) + "%" , end="\r" )
                if( frequency > frequency_threshold and highestintensity > threshold and power > power_threshold ):
                    record_wave_file_count += 1
                    if( record_wave_file_count <= begin_threshold and record_wave_file_count > delay_threshold ):
                        files_recorded += 1
                        waveFile = wave.open(WAVE_OUTPUT_FILENAME + str(index) + WAVE_OUTPUT_FILE_EXTENSION, 'wb')
                        waveFile.setnchannels(number_channels)
                        waveFile.setsampwidth(audio.get_sample_size(FORMAT))
                        waveFile.setframerate(frame_rate)
                        waveFile.writeframes(byteString)
                        waveFile.close()
                else:
                    # A window below threshold resets the consecutive-match counter.
                    record_wave_file_count = 0

    print( "Extracted " + str(files_recorded) + " segmented files from " + WAVE_INPUT_FILE )
    wf.close()
Example #9
0
def classify_audioframes(audioQueue, audio_frames, classifier, high_speed):
    """Pull one frame from audioQueue and classify the latest two frames.

    Returns (probabilityDict, predicted, audio_frames, highestintensity,
    frequency, wavData), or (False, False, audio_frames, False, False, False)
    when no frame was available or fewer than two frames are buffered.
    """
    if (not audioQueue.empty()):
        audio_frames.append(audioQueue.get())

        # In case we are dealing with frames not being met and a buffer being built up,
        # Start skipping every other audio frame to maintain being up to date,
        # Trading being up to date over being 100% correct in sequence
        if (audioQueue.qsize() > 1):
            print("SKIP FRAME", audioQueue.qsize())
            audioQueue.get()

        if (len(audio_frames) >= 2):
            audio_frames = audio_frames[-2:]

            # Normalised peak-to-peak of the newest frame (width-4 samples).
            highestintensity = np.amax(
                audioop.maxpp(audio_frames[1], 4) / 32767)
            wavData = b''.join(audio_frames)

            # SKIP FEATURE ENGINEERING COMPLETELY WHEN DEALING WITH SILENCE
            if (high_speed
                    and highestintensity < SILENCE_INTENSITY_THRESHOLD):
                probabilityDict, predicted, frequency = create_empty_probability_dict(
                    classifier, {}, 0, highestintensity, 0)
            else:
                # BUG FIX: `power = fftData = np.frombuffer(...)` bound the
                # raw sample array to `power` only to overwrite it on the
                # next line; bind fftData alone.
                fftData = np.frombuffer(wavData, dtype=np.int16)
                power = get_recording_power(
                    fftData,
                    classifier.get_setting('RECORD_SECONDS', RECORD_SECONDS))
                probabilityDict, predicted, frequency = predict_raw_data(
                    wavData, classifier, highestintensity, power)

            return probabilityDict, predicted, audio_frames, highestintensity, frequency, wavData

    return False, False, audio_frames, False, False, False
def downsampleWav(src,
                  dst,
                  inrate=44100,
                  outrate=22050,
                  inchannels=1,
                  outchannels=1):
    """Convert the wav at *src* to mono at *outrate* and write it to *dst*.

    Returns True on success, False on any failure (errors are printed,
    never raised).
    """
    if not os.path.exists(src):
        print('Source not found!')
        return False

    if not os.path.exists(os.path.dirname(dst)):
        os.makedirs(os.path.dirname(dst))

    try:
        s_read = wave.open(src, 'r')
        s_write = wave.open(dst, 'w')
    except:
        print('Failed to open files!')
        return False

    n_frames = s_read.getnframes()

    data = s_read.readframes(n_frames)

    # NOTE(review): this mixes unconditionally as if the input were stereo;
    # for a genuinely mono source (the inchannels=1 default) tomono would
    # misinterpret the data — confirm callers only pass stereo files.
    data = audioop.tomono(data, 2, 0.5, 0.5)

    try:
        converted = audioop.ratecv(data, 2, inchannels, inrate, outrate, None)
        #if outchannels == 1 & inchannels != 1:
        #converted[0] = audioop.tomono(converted[0], 2, 1, 0)
    except:
        print('Failed to downsample wav')
        return False

    #divide by the max time domain values
    # BUG(review): the mul() result is bound to a misspelled name
    # (`coverted`) and never used, so the "normalisation" has no effect —
    # the unnormalised converted[0] is what gets written below.  Also, a
    # silent file gives max_val == 0, so 1 / max_val raises
    # ZeroDivisionError and this returns False.  Left as-is because
    # actually applying the scale would change the written output.
    try:
        max_val = audioop.maxpp(converted[0], 2)
        coverted = audioop.mul(converted[0], 2, 1 / max_val)
    except:
        print('FAILURE IN TIME DOMAIN CONVERSION')
        return False

    try:
        s_write.setparams((outchannels, 2, outrate, 0, 'NONE', 'Uncompressed'))
        s_write.writeframes(converted[0])
    except:
        print('Failed to write wav')
        return False

    try:
        s_read.close()
        s_write.close()
    except:
        print('Failed to close wav files')
        return False

    return True
def get_highest_intensity_of_wav_file( wav_file ):
    """Return the loudest normalised peak-to-peak value found while reading
    *wav_file* in CHUNK-sized pieces for RECORD_SECONDS seconds."""
    peaks = []
    with wave.open( wav_file ) as fd:
        params = fd.getparams()
        chunk_count = int(RATE / CHUNK * RECORD_SECONDS)
        for _ in range(chunk_count):
            frames = fd.readframes(CHUNK)
            peaks.append(audioop.maxpp(frames, 4) / 32767)

    return np.amax(peaks)
Example #12
0
def get_highest_intensity_of_wav_file(wav_file, record_seconds):
    """Return the normalised peak-to-peak intensity of the first
    *record_seconds* seconds of *wav_file* (width-4 samples assumed)."""
    peaks = []
    with wave.open(wav_file) as fd:
        number_channels = fd.getnchannels()
        total_frames = fd.getnframes()
        frame_rate = fd.getframerate()
        frames = fd.readframes(round(frame_rate * record_seconds))
        peaks.append(audioop.maxpp(frames, 4) / 32767)

    return np.amax(peaks)
 def test_maxpp(self):
     """maxpp() accepts bytes, bytearray and memoryview; returns 0 for
     empty or monotonic input, 10 for the wobble, and the full min/max
     span for the datas fixtures."""
     for w in 1, 2, 3, 4:
         self.assertEqual(audioop.maxpp(b"", w), 0)
         self.assertEqual(audioop.maxpp(bytearray(), w), 0)
         self.assertEqual(audioop.maxpp(memoryview(b""), w), 0)
         self.assertEqual(audioop.maxpp(packs[w](*range(100)), w), 0)
         self.assertEqual(audioop.maxpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
         self.assertEqual(audioop.maxpp(datas[w], w), maxvalues[w] - minvalues[w])
Example #14
0
def get_swipe():
    p = pyaudio.PyAudio()

    stream = p.open(format = FORMAT,
                channels = CHANNELS,
                rate = RATE,
                input = True,
                frames_per_buffer = CHUNK)
    
    baselines = deque([2**15] * 4)
    bias = 0
    while 1:
        data, power = get_chunk(stream, bias)
        
        baseline = sum(baselines) / len(baselines) * THRESHOLD_FACTOR
        print power, baseline, power / (baseline or 1)
        
        chunks = []
        while power > baseline:
            print power, baseline, power / (baseline or 1), '*'
            chunks.append(data)
            data, power = get_chunk(stream, bias)

        if len(chunks) > 1:
            data = old_data + ''.join(chunks) + data
            while audioop.maxpp(data[:3000], 2) < baseline / 2:
                data = data[1000:]
            while audioop.maxpp(data[-3000:], 2) < baseline / 2:
                data = data[:-1000]
            
            return audioop.bias(data, 2, -audioop.avg(data, 2))

        old_data = data
        
        bias = -audioop.avg(data, 2)
        
        baselines.popleft()
        baselines.append(power)
Example #15
0
def get_swipe():
    p = pyaudio.PyAudio()

    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)

    baselines = deque([2**15] * 4)
    bias = 0
    while 1:
        data, power = get_chunk(stream, bias)

        baseline = sum(baselines) / len(baselines) * THRESHOLD_FACTOR
        print power, baseline, power / (baseline or 1)

        chunks = []
        while power > baseline:
            print power, baseline, power / (baseline or 1), '*'
            chunks.append(data)
            data, power = get_chunk(stream, bias)

        if len(chunks) > 1:
            data = old_data + ''.join(chunks) + data
            while audioop.maxpp(data[:3000], 2) < baseline / 2:
                data = data[1000:]
            while audioop.maxpp(data[-3000:], 2) < baseline / 2:
                data = data[:-1000]

            return audioop.bias(data, 2, -audioop.avg(data, 2))

        old_data = data

        bias = -audioop.avg(data, 2)

        baselines.popleft()
        baselines.append(power)
Example #16
0
 def test_maxpp(self):
     """maxpp() accepts bytes, bytearray and memoryview; 0 for empty or
     monotonic fragments, full span for the datas fixtures."""
     for w in (1, 2, 3, 4):
         self.assertEqual(audioop.maxpp(b'', w), 0)
         self.assertEqual(audioop.maxpp(bytearray(), w), 0)
         self.assertEqual(audioop.maxpp(memoryview(b''), w), 0)
         self.assertEqual(audioop.maxpp(packs[w](*range(100)), w), 0)
         self.assertEqual(audioop.maxpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
         self.assertEqual(audioop.maxpp(datas[w], w),
                          maxvalues[w] - minvalues[w])
Example #17
0
def get_stream_wav_segment(stream, frames):
    """Top up *frames* with freshly read CHUNKs and return
    (frames, highest_normalised_peak_to_peak_of_the_new_chunks)."""
    stream.start_stream()
    range_length = int(RATE / CHUNK * RECORD_SECONDS)
    remove_half = int(range_length / 2)
    # Drop the oldest half of the window before refilling.
    frames = frames[remove_half:]
    frame_length = len(frames)

    intensity = []
    for i in range(frame_length, range_length):
        data = stream.read(CHUNK)
        peak = audioop.maxpp(data, 4) / 32767
        intensity.append(peak)
        frames.append(data)

    # BUG FIX: np.amax([]) raises ValueError when frames was already full
    # and no new chunk was read; report zero intensity instead.
    highestintensity = np.amax(intensity) if intensity else 0
    stream.stop_stream()
    return frames, highestintensity
Example #18
0
    def voice_detection(self, in_data):
        """Update the detector state from one raw audio buffer and return
        whether voice is currently considered active."""
        # look for high input level
        # BUG FIX: np.fromstring is deprecated for binary input and removed
        # in NumPy 2.0; np.frombuffer is the supported equivalent (maxpp
        # only reads the buffer, so a read-only view is fine).
        input_frames = np.frombuffer(in_data, dtype=np.uint16)
        level = audioop.maxpp(input_frames, 2)

        #print("length: %d, avg: %f" % (len(in_data), level))

        # Activate above the activation level; once active, stay active
        # down to the (lower) deactivation level.
        if level > VoiceDetector.THRESHOLD_ACTIVATION_LEVEL or \
            (level > VoiceDetector.THRESHOLD_DEACTIVATION_LEVEL and self.active):
            if not self.active:
                print("Voice detected")
                self.active = True
            self.active_ts = time.time()

        # Deactivate after MIN_DURATION seconds with no loud input.
        if self.active and (
            (time.time() - self.active_ts) > VoiceDetector.MIN_DURATION):
            if self.active:
                print("Voice stopped")
            self.active = False

        return self.active
Example #19
0
def get_peaks(data):
    """Yield the sample distances between successive alternating peaks in
    *data* (16-bit audio).  The threshold adapts: it starts from the
    peak-to-peak of the first 1000 bytes and then tracks each found peak."""
    peak_threshold = audioop.maxpp(data[:1000], 2) * FIRST_PEAK_FACTOR

    samples = get_samples(data)

    i = 0
    old_i = 0
    sign = 1
    while i < len(samples):
        peak = 0
        # BUG FIX: bound-check i — if the data ends in the middle of a
        # peak, the inner scan previously ran past the end of `samples`
        # and raised IndexError.
        while i < len(samples) and samples[i] * sign > peak_threshold:
            peak = max(samples[i] * sign, peak)
            i += 1

        if peak:
            if old_i:
                yield i - old_i
            old_i = i
            # Look for the opposite polarity next, relative to this peak.
            sign *= -1
            peak_threshold = peak * SECOND_PEAK_FACTOR

        i += 1
Example #20
0
def get_peaks(data):
    """Yield distances (in samples) between successive alternating peaks of
    the 16-bit audio in *data*, with an adaptive threshold."""
    peak_threshold = audioop.maxpp(data[:1000], 2) * FIRST_PEAK_FACTOR

    samples = get_samples(data)

    i = 0
    old_i = 0
    sign = 1
    while i < len(samples):
        peak = 0
        # BUG FIX: guard the index — the inner scan could run past the end
        # of `samples` (IndexError) when the data ended mid-peak.
        while i < len(samples) and samples[i] * sign > peak_threshold:
            peak = max(samples[i] * sign, peak)
            i += 1

        if peak:
            if old_i:
                yield i - old_i
            old_i = i
            sign *= -1
            peak_threshold = peak * SECOND_PEAK_FACTOR

        i += 1
Example #21
0
def Ignite():
    """Interactive drive loop: play set_frequency through the transducer,
    display the measured amplitude, and (in lock mode) rescan nearby
    frequencies / raise the volume to hold the amplitude.

    Runs until the module-global `button` becomes 'exit'.  Relies on module
    globals: top, stream, Note, ButtonBack, tkMessageBox, Tkinter, Label and
    the display_* widgets; arrow keys / space are read via the global `key`.
    """
    global button
    global key
    global set_frequency
    global initial_volume
    global set_volume
    if (set_frequency == 0):
        tkMessageBox.showwarning("Tune First",
                                 "You must run the tuning setup first.")
    else:
        e = Tkinter.Button(top,
                           text="Cancel",
                           command=lambda *args: ButtonBack('exit'))
        e.grid(row=1, column=4)
        b = Tkinter.Button(top,
                           text="Fine-tune",
                           command=lambda *args: ButtonBack('R'))
        b.grid(row=2, column=4)
        set_volume = initial_volume
        Note(set_frequency, set_volume).play(-1)
        tkMessageBox.showinfo("Inject bubble", "Inject a bubble now.")
        mode = False
        desired_amplitude = 0
        display_status = Label(top, text="")
        display_status.grid(row=0, column=1)
        display_status.configure(text="Status: Manual")
        while (button != 'exit'):
            Note(set_frequency).stop()
            Note(set_frequency, set_volume).play(-1)
            stream.start_stream()
            data = stream.read(2048)
            stream.stop_stream()
            # 16-bit peak-to-peak normalised by 2**16, scaled by 1.41.
            amplitude = (audioop.maxpp(data, 2) / 65536) * 1.41
            display_volume.configure(text="Volume: " + str(set_volume))
            display_amplitude.configure(text=("Amplitude: %.2f" % amplitude))
            display_frequency.configure(text="Frequency: " +
                                        str(set_frequency))
            top.update()
            top.update_idletasks()

            # Amplitude-lock mode: if the response dropped, rescan a small
            # band around the current frequency for the strongest response.
            if (amplitude < desired_amplitude and mode):
                max_amplitude_temp = 0
                display_status.configure(text="Status: Scanning")
                for i in range(set_frequency - 5, set_frequency + 15):
                    Note(i).stop()
                    Note(i, set_volume).play(-1)
                    stream.start_stream()
                    data = stream.read(2048)
                    stream.stop_stream()
                    amplitude = (audioop.maxpp(data, 2) / 65536) * 1.41
                    if (amplitude > max_amplitude_temp):
                        max_amplitude_temp = amplitude
                        set_frequency = i
                    display_frequency.configure(text="Frequency: " + str(i))
                # Still short of the target after the scan: nudge the volume.
                if (amplitude < desired_amplitude):
                    set_volume += 0.02
            elif (mode):
                display_status.configure(text="Status: Ampl. Lock")
            # Manual controls via the global `key`.
            if (key == 'Up'):
                key = ''
                set_volume = set_volume + 0.005
            if (key == 'Down'):
                key = ''
                set_volume = set_volume - 0.005
            if (key == 'Left'):
                key = ''
                set_frequency = set_frequency - 1
            if (key == 'Right'):
                key = ''
                set_frequency = set_frequency + 1
            if (key == 'space'):
                # Toggle amplitude-lock mode, targeting the current amplitude.
                mode = not mode
                desired_amplitude = amplitude
                if (mode):
                    display_status.configure(text="Status: Ampl. Lock")
                # key = ''
                # for frequency in range(set_frequency-20,set_frequency+20,1):
                #     Note(set_frequency).stop()
                #     Note(frequency,set_volume).play(-1)
                #     sleep(0.1)
                #     stream.start_stream()
                #     data=stream.read(2048)
                #     stream.stop_stream()
                #     amplitude = (audioop.maxpp(data,2)/65536)*1.41
                #     w.create_rectangle(int(((frequency-frequency_range[0])/float(frequency_range[1]-frequency_range[0]))*500.0), 50, int(((frequency-frequency_range[0])/float(frequency_range[1]-frequency_range[0]))*500.0)+1, 50-((amplitude/1.41)*50), fill="red")
                #     w.update()
                #     top.title("PyLuminescence Auto-Tun e Fine")
                #     display_percentage.configure(text=str(((frequency-(set_frequency-50))))+"% Fine")
                #     if(amplitude > temp_best_amplitude):
                #         temp_best_amplitude=amplitude
                #         temp_best_frequency=frequency
                #         display_amplitude.configure(text=("Best Amplitude: %.2f" % amplitude))
                #         display_frequency.configure(text=("Best Frequency: "+ str(frequency)))
                # set_frequency = temp_best_frequency
        b.destroy()
        e.destroy()
Example #22
0
# Simple sound detector: open a duplex PyAudio stream and print a message
# whenever the peak-to-peak level of a chunk exceeds a fixed threshold.
chunk = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100

p = pyaudio.PyAudio()

# open stream
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                output=True,
                frames_per_buffer=chunk)

# read data
while True:
    data = stream.read(chunk)
    #Return the maximum of the absolute value of all samples in a fragment.
    a = audioop.max(data, 2)
    #Return the maximum peak-peak value in the sound fragment.
    b = audioop.maxpp(data, 2)
    #if a>5000:
    #    print 'a=',a
    if b > 5000:
        print 'Sound Detected'
    time.sleep(0.01)

# NOTE(review): unreachable — the loop above never exits, so the stream is
# never closed cleanly.
stream.close()
p.terminate()
Example #23
0
 def test_maxpp(self):
     """Peak-to-peak of each constant test fragment is zero."""
     for index, width in enumerate((1, 2, 4)):
         self.assertEqual(audioop.maxpp(data[index], width), 0)
Example #24
0
 def test_maxpp(self):
     """Each constant test fragment has zero peak-to-peak amplitude."""
     self.assertEqual(audioop.maxpp(data[0], 1), 0)
     self.assertEqual(audioop.maxpp(data[1], 2), 0)
     self.assertEqual(audioop.maxpp(data[2], 4), 0)
Example #25
0
def Tune():
    """Two-pass frequency sweep (coarse 10 Hz steps over 21-25 kHz, then
    fine 1 Hz steps ±50 Hz) that sets the module-global set_frequency to
    the frequency with the largest measured amplitude.

    Relies on module globals: top, stream, Note, w (canvas), Label,
    frequency_range and the display_* widgets.
    """
    global initial_volume
    global set_frequency
    volume = initial_volume
    display_volume.configure(text="Volume: " + str(volume))
    temp_best_amplitude = 0.0
    temp_best_frequency = 0.0
    display_percentage = Label(top, text="")
    display_percentage.grid(row=1, column=4)
    display_percentage.configure(text="%")
    # Coarse pass: 10 Hz steps across the whole range.
    for frequency in range(21000, 25000, 10):
        Note(frequency, volume).play(-1)
        sleep(0.1)
        stream.start_stream()
        data = stream.read(2048)
        stream.stop_stream()
        # 16-bit peak-to-peak normalised by 2**16, scaled by 1.41.
        amplitude = (audioop.maxpp(data, 2) / 65536) * 1.41
        Note(frequency).stop()
        w.create_rectangle(
            int(((frequency - frequency_range[0]) /
                 float(frequency_range[1] - frequency_range[0])) * 500.0),
            50,
            int(((frequency - frequency_range[0]) /
                 float(frequency_range[1] - frequency_range[0])) * 500.0) + 1,
            50 - ((amplitude / 1.41) * 50),
            fill="blue")
        w.update()
        top.title("PyLuminescence Auto-Tune Coarse")
        #w.delete("all")
        display_percentage.configure(text=str(
            int(((frequency - frequency_range[0]) /
                 float(frequency_range[1] - frequency_range[0])) * 100.0)) +
                                     "% Coarse")
        if (amplitude > temp_best_amplitude):
            temp_best_amplitude = amplitude
            temp_best_frequency = frequency
            display_amplitude.configure(text=("Best Amplitude: %.2f" %
                                              amplitude))
            display_frequency.configure(text=("Best Frequency: " +
                                              str(frequency)))
    set_frequency = temp_best_frequency
    # Fine pass: 1 Hz steps around the coarse winner.
    for frequency in range(set_frequency - 50, set_frequency + 50, 1):
        Note(frequency, volume).play(-1)
        sleep(0.1)
        stream.start_stream()
        data = stream.read(2048)
        stream.stop_stream()
        amplitude = (audioop.maxpp(data, 2) / 65536) * 1.41
        Note(frequency).stop()
        w.create_rectangle(
            int(((frequency - frequency_range[0]) /
                 float(frequency_range[1] - frequency_range[0])) * 500.0),
            50,
            int(((frequency - frequency_range[0]) /
                 float(frequency_range[1] - frequency_range[0])) * 500.0) + 1,
            50 - ((amplitude / 1.41) * 50),
            fill="red")
        w.update()
        top.title("PyLuminescence Auto-Tune Fine")
        display_percentage.configure(
            text=str(((frequency - (set_frequency - 50)))) + "% Fine")
        if (amplitude > temp_best_amplitude):
            temp_best_amplitude = amplitude
            temp_best_frequency = frequency
            display_amplitude.configure(text=("Best Amplitude: %.2f" %
                                              amplitude))
            display_frequency.configure(text=("Best Frequency: " +
                                              str(frequency)))
    set_frequency = temp_best_frequency
    display_percentage.destroy()
Example #26
0
def get_chunk(src, bias):
    """Read up to 10000 bytes from *src*, remove the DC offset *bias* from
    the 16-bit samples, and return (data, peak_to_peak)."""
    raw = src.read(10000)
    data = audioop.bias(raw, 2, bias)
    return data, audioop.maxpp(data, 2)
Example #27
0
def audioPP(mode, fData):
    return min(audioop.maxpp(fData, 2), 100)
# Downloading an audio file from s3 to local system
bucket.download_file('****{wave file URL ob s3}****.wav',
                     r'../path_for_local_system/recording.wav')

# BUG FIX: the stats were previously written before the wav file was even
# opened (`wav` unbound -> NameError), and file.write()/str() were called
# with many positional arguments (TypeError).  Open and read the recording
# first, then emit one CSV header row and one CSV value row.
wav = wave.open(r'../../recordings.wav')
string_wav = wav.readframes(wav.getnframes())

with open(r'../../decibel_values.csv', 'a') as f:
    header = ('recording_name', 'sample_width', 'n_frames', 'avg', 'rms',
              'avgpp', 'zero_crossings', 'maxpp', 'min_max')
    f.write(','.join(header) + '\n')
    width = wav.getsampwidth()
    row = ('recordings.wav', width, wav.getnframes(),
           audioop.avg(string_wav, width),
           audioop.rms(string_wav, width),
           audioop.avgpp(string_wav, width),
           audioop.cross(string_wav, width),
           audioop.maxpp(string_wav, width),
           audioop.minmax(string_wav, width))
    f.write(','.join(str(value) for value in row) + '\n')

print('getsampwidth', wav.getsampwidth())

print('get n frmaes', wav.getnframes())
print('avg: ', audioop.avg(string_wav, wav.getsampwidth()))
print('rms: ', audioop.rms(string_wav, wav.getsampwidth()))
print('avgpp: ', audioop.avgpp(string_wav, wav.getsampwidth()))
print('zero_crossings: ', audioop.cross(string_wav, wav.getsampwidth()))
print('maxpp: ', audioop.maxpp(string_wav, wav.getsampwidth()))
print('max min: ', audioop.minmax(string_wav, wav.getsampwidth()))
Example #29
0
def record_consumer(threshold, power_threshold, frequency_threshold,
                    begin_threshold, WAVE_OUTPUT_FILENAME,
                    WAVE_OUTPUT_FILE_EXTENSION, FULL_WAVE_OUTPUT_FILENAME,
                    audio, stream):
    """Consume audio frames from the global recordQueue, saving every
    sliding window that passes the intensity / power / frequency thresholds
    as its own wav segment, plus a periodically refreshed full recording.

    Negative begin_threshold means "skip the first N matching windows".
    Relies on module globals: recordQueue, SLIDING_WINDOW_AMOUNT,
    RECORD_SECONDS, CHANNELS, RATE, FORMAT, KeyPoller and helpers.
    """
    global recordQueue

    files_recorded = 0
    j = 0
    record_wave_file_count = 0
    audioFrames = []

    # Set the proper thresholds for starting recordings
    delay_threshold = 0
    if (begin_threshold < 0):
        delay_threshold = begin_threshold * -1
        begin_threshold = 1000

    totalAudioFrames = []
    try:
        with KeyPoller() as key_poller:
            while (True):
                if (not recordQueue.empty()):
                    audioFrames.append(recordQueue.get())
                    totalAudioFrames.append(audioFrames[-1])
                    if (len(audioFrames) >= SLIDING_WINDOW_AMOUNT):
                        j += 1
                        audioFrames = audioFrames[-SLIDING_WINDOW_AMOUNT:]

                        # Normalised peak-to-peak of both window halves.
                        intensity = [
                            audioop.maxpp(audioFrames[0], 4) / 32767,
                            audioop.maxpp(audioFrames[1], 4) / 32767
                        ]
                        highestintensity = np.amax(intensity)

                        byteString = b''.join(audioFrames)
                        fftData = np.frombuffer(byteString, dtype=np.int16)
                        frequency = get_loudest_freq(fftData, RECORD_SECONDS)
                        power = get_recording_power(fftData, RECORD_SECONDS)

                        fileid = "%0.2f" % ((j) * RECORD_SECONDS)

                        if (record_controls(key_poller, recordQueue) == False):
                            stream.stop_stream()
                            break

                        if (frequency > frequency_threshold
                                and highestintensity > threshold
                                and power > power_threshold):
                            record_wave_file_count += 1
                            if (record_wave_file_count <= begin_threshold and
                                    record_wave_file_count > delay_threshold):
                                files_recorded += 1
                                print(
                                    "Files recorded: %0d - Power: %0d - Freq: %0d - Saving %s"
                                    %
                                    (files_recorded, power, frequency, fileid))
                                waveFile = wave.open(
                                    WAVE_OUTPUT_FILENAME + fileid +
                                    WAVE_OUTPUT_FILE_EXTENSION, 'wb')
                                waveFile.setnchannels(CHANNELS)
                                waveFile.setsampwidth(
                                    audio.get_sample_size(FORMAT))
                                waveFile.setframerate(RATE)
                                waveFile.writeframes(byteString)
                                waveFile.close()
                            else:
                                print(
                                    "Files recorded: %0d - Power: %0d - Freq: %0d"
                                    % (files_recorded, power, frequency))
                        else:
                            record_wave_file_count = 0
                            print(
                                "Files recorded: %0d - Power: %0d - Freq: %0d"
                                % (files_recorded, power, frequency))

                        # Persist the total wave only once every six frames
                        # BUG FIX: `% 6` was truthy for five of every six
                        # frames — the opposite of the stated intent; use
                        # `% 6 == 0` so the file is rewritten every sixth one.
                        if (len(totalAudioFrames) % 6 == 0):
                            byteString = b''.join(totalAudioFrames)
                            waveFile = wave.open(FULL_WAVE_OUTPUT_FILENAME,
                                                 'wb')
                            waveFile.setnchannels(CHANNELS)
                            waveFile.setsampwidth(
                                audio.get_sample_size(FORMAT))
                            waveFile.setframerate(RATE)
                            waveFile.writeframes(byteString)
                            waveFile.close()

    except Exception as e:
        print("----------- ERROR DURING RECORDING -------------- ")
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb)
        stream.stop_stream()
                    frames_per_buffer=chunk)
# Coarse sweep: find the drive frequency with the largest measured
# peak-to-peak response, then hold it while ramping the volume.
pre_init(96000, -16, 1, 1024)
pygame.init()
best_frequency = 0
amplitude = 0




#msgbox(msg="First, I need to tune.", title="", ok_button="Begin!")

for frequency in range(22700,23500,5):
    # NOTE(review): the chunk is read *before* the new note starts playing,
    # so each measurement lags one sweep step behind — confirm this is
    # intentional.  `stream` is defined outside this excerpt.
    data=stream.read(6048)
    Note(frequency).stop()
    Note(frequency).play(-1)
    sleep(0.05)
    print(audioop.maxpp(data,2))
    if(audioop.maxpp(data,2) > amplitude):
        amplitude = audioop.maxpp(data,2)
        best_frequency = frequency
frequency = best_frequency
Note(frequency,0.8).play(-1)
print(frequency)
msgbox(msg="Now, inject a bubble.", title="", ok_button="Done!")
# Ramp the volume indefinitely, prompting the operator each step.
volume = 0.1
while(True):
    Note(frequency,volume).play(-1)
    volume = volume+0.05
    sleep(0.1)
    msgbox('Lit')
Example #31
0
def get_chunk(src, bias):
    """Read one 10000-byte chunk of 16-bit audio from *src*, apply a DC
    *bias*, and return ``(data, peak_to_peak)``.

    src: any object with a ``read(n)`` method yielding 16-bit sample bytes.
    bias: integer offset added to every sample (DC correction).
    """
    shifted = audioop.bias(src.read(10000), 2, bias)
    peak = audioop.maxpp(shifted, 2)
    return shifted, peak
Example #32
0
def testmaxpp(data):
	if audioop.maxpp(data[0], 1) <> 0 or \
		  audioop.maxpp(data[1], 2) <> 0 or \
		  audioop.maxpp(data[2], 4) <> 0:
		return 0
	return 1
Example #33
0
            l[i], data[i] = mic[i].read()

        # greedy algorithm which selects most loud sound
        #rms = [aud.rms(data[i],1) for i in range(micnum)]
        #rmssorted = sorted(rms)
        '''
        L = 100
        mxix = [aud.findmax(data[i],L) for i in range(micnum)]
        prms = [aud.rms(data[i][mxix[i]*2:(mxix[i]+L)*2],1) for i in range(micnum)]
        prmssorted = sorted(prms)
        print prmssorted
        '''
        direction = 'UNDEFINED'
        ind = 0
        #if prmssorted[2] - prmssorted[0] > 20:
        maxpp = [aud.maxpp(data[i],1) for i in range(micnum)]
        print maxpp
        maxppsorted = sorted(maxpp)
        if maxppsorted[2] - maxppsorted[1] > 20:
            # possible value of dirscore: 4,5,6,8,9,10
            #dirscore = 4 * prms.index(prmssorted[2]) + 2 * prms.index(prmssorted[1]) +  prms.index(prmssorted[0])
            dirscore = 4 * maxpp.index(maxppsorted[2]) + 2 * maxpp.index(maxppsorted[1]) +  maxpp.index(maxppsorted[0])
            print dirscore
            dictionary = {'0':'UNDEFINED',
                          '1':'UNDEFINED',
                          '2':'UNDEFINED',
                          '3':'UNDEFINED',
                          '4':'LEFT',
                          '5':'BACKLEFT',
                          '6':'FRONTLEFT',
                          '7':'UNDEFINED',
Example #34
0
    def _callback(self, data, frame_count, time_info, status):
        """PyAudio stream callback: pick or mix microphone channels, keep a
        rolling audio buffer, track noise level and run VAD to toggle the
        active-listening state.

        Returns ``(None, pyaudio.paContinue)`` so the stream keeps running.
        """
        # While muted: drop buffered audio and remember to skip the next
        # frame after unmuting (it may contain stale data).
        if self.muted:
            self.buffer.clear()
            self.ignoreFirstFrame = True
            return None, pyaudio.paContinue
        # Ignore the first frame after unmute:
        if self.ignoreFirstFrame:
            self.ignoreFirstFrame = False
            return None, pyaudio.paContinue

        # Cap the buffer size: ~1 s while idle, ~3 s while actively listening.
        maxBufferSize = int(CHUNKS_PER_SECOND * (3 if self.active else 1))
        while len(self.buffer) > maxBufferSize:
            self.buffer.pop(0)

        # numpy.fromstring was deprecated and removed; frombuffer is the
        # supported equivalent (read-only view — fine, the array is only read).
        data = numpy.frombuffer(data, dtype='int16')

        # Resample to the voice sampling rate if the device rate differs.
        if self.sampleRate != VOICE_SAMPLING_RATE:
            data, self.__ratecvState = audioop.ratecv(
                data.tobytes(), 2, self.channels,
                self.sampleRate, VOICE_SAMPLING_RATE, self.__ratecvState)
            data = numpy.frombuffer(data, dtype='int16')

        # De-interleave the frame into per-channel byte strings.
        channels = [0] * self.channels
        for ch in range(self.channels):
            channels[ch] = data[ch::self.channels].tobytes()

        # "Optimal volume level" channel selection:
        if config.micSelection == "rms":

            # Variant 2: "highest RMS without clipping". Fall back to the
            # quietest channel if every channel is too loud or clipped.
            chBest = -1
            rmsBest = 0
            maxBest = 0
            chGood = -1
            rmsGood = 100000
            maxGood = 0

            for ch in config.microphones:
                __rms = audioop.rms(channels[ch], self.sampleSize)
                __maxpp = audioop.maxpp(channels[ch], self.sampleSize)

                if (__rms > rmsBest) and (__rms < 5000) and (__maxpp < 64000):
                    rmsBest = __rms
                    maxBest = __maxpp
                    chBest = ch
                if (chGood < 0) or (__rms < rmsGood):
                    rmsGood = __rms
                    # BUGFIX: this line previously read `rmsBest = __maxpp`,
                    # which clobbered the best-candidate RMS and left maxGood
                    # permanently 0 (mirrors `maxBest = __maxpp` above).
                    maxGood = __maxpp
                    chGood = ch

            if chBest >= 0:
                self.channel = chBest
                self.__rms = rmsBest
                self.__maxpp = maxBest
            else:
                self.channel = chGood
                self.__rms = rmsGood
                self.__maxpp = maxGood

            data = channels[self.channel]

        # "Average over the microphone channels":
        else:
            self.channel = "avg"
            factor = 1.0 / len(config.microphones)
            data = None
            for ch in config.microphones:
                if data is None:  # identity check — `bytes == None` is never True
                    data = audioop.mul(channels[ch], 2, factor)
                else:
                    data = audioop.add(data, audioop.mul(channels[ch], 2, factor), 2)

            self.__rms = audioop.rms(data, self.sampleSize)
            self.__maxpp = audioop.maxpp(data, self.sampleSize)

        # Store the fragment in the rolling buffer:
        self.buffer.append(data)

        if not self.active:
            # If the sound level is a bit below the background noise,
            # lower the noise-level estimate.
            if self.__rms + config.noiseThreshold < self.__noiseLevel:
                self.__noiseLevel = self.__rms + int(config.noiseThreshold / 4)

            # Compute the VAD index (percentage of voiced sub-frames).
            vadFrameSize = int(VOICE_SAMPLING_RATE * self.sampleSize / 1000 * VAD_FRAME)
            p = 0
            voiceFrames = 0
            totalFrames = 0
            while p + vadFrameSize <= len(data):
                totalFrames += 1
                if self.vad.is_speech(data[p:p + vadFrameSize], VOICE_SAMPLING_RATE):
                    voiceFrames += 1
                p += vadFrameSize

            # Guard the division: a frame shorter than one VAD sub-frame
            # previously raised ZeroDivisionError here.
            self.vadLevel = int(voiceFrames * 100 / totalFrames) if totalFrames else 0

            isVoice = (totalFrames > 0) and (self.vadLevel >= config.vadConfidence)

            # Switch to active listening once voiced audio exceeds the trigger.
            if isVoice and (self.rms > self.triggerLevel):
                self.active = True
        else:
            # While active, track the noise level upward.
            if self.__rms + config.noiseThreshold > self.__noiseLevel:
                self.__noiseLevel = self.__rms

        return None, pyaudio.paContinue