Example #1
0
 def test_findfit(self):
     # A fragment matched against itself fits at offset 0 with factor 1.0,
     # whether it is passed as bytes, bytearray or memoryview.
     sample = datas[2]
     for wrap in (lambda frag: frag, bytearray, memoryview):
         self.assertEqual(audioop.findfit(wrap(sample), wrap(sample)),
                          (0, 1.0))
     # A three-sample reference locates its best match one sample in.
     self.assertEqual(audioop.findfit(sample, packs[2](1, 2, 0)),
                      (1, 8038.8))
     # The reference is found at offset 30 inside the longer fragment.
     self.assertEqual(audioop.findfit(sample[:-2] * 5 + sample, sample),
                      (30, 1.0))
Example #2
0
def echocancel(outputdata, inputdata):
    """Try to identify an echo of *outputdata* in *inputdata* and remove it.

    Both arguments must be fragments of 2-byte (width 2) samples.
    Returns *inputdata* with a scaled, time-aligned copy of *outputdata*
    subtracted from it.
    """
    # Locate the loudest 800-sample stretch of the output (one tenth of a
    # second at 8 kHz); that part is the easiest to find back in the input.
    pos = audioop.findmax(outputdata, 800)
    out_test = outputdata[pos * 2:]
    in_test = inputdata[pos * 2:]
    # Where inside the input does the echo start, and at what amplitude?
    ipos, factor = audioop.findfit(in_test, out_test)
    # Zero padding so the shifted output lines up with the echo position
    # and covers exactly len(inputdata) bytes.  b'\0' (bytes, not str) so
    # concatenation with audioop's bytes results works on Python 3.
    prefill = b'\0' * (pos + ipos) * 2
    postfill = b'\0' * (len(inputdata) - len(prefill) - len(outputdata))
    # BUG FIX: the original called audioop.mul(outputdata, 2 - factor),
    # omitting the mandatory width argument (TypeError) and failing to
    # negate the factor; the echo must be *subtracted*, i.e. multiplied
    # by -factor with width 2, as in the audioop docs example.
    outputdata = prefill + audioop.mul(outputdata, 2, -factor) + postfill
    return audioop.add(inputdata, outputdata, 2)
def echocancel(outputdata, inputdata):
    """Remove an echo of *outputdata* from *inputdata* (2-byte samples)."""
    # Loudest tenth of a second (800 samples at 8 kHz) of the output.
    pos = audioop.findmax(outputdata, 800)
    out_test = outputdata[pos*2:]
    in_test = inputdata[pos*2:]
    # Offset (in samples) and amplitude ratio of the echo in the input.
    ipos, factor = audioop.findfit(in_test, out_test)
    # Byte fills so the shifted output spans exactly len(inputdata) bytes;
    # b'\0' (bytes) so this also works on Python 3.
    prefill = b'\0'*(pos+ipos)*2
    postfill = b'\0'*(len(inputdata)-len(prefill)-len(outputdata))
    # BUG FIX: was audioop.mul(outputdata, 2-factor) — the mandatory width
    # argument was missing and the factor must be negated so the echo is
    # subtracted (see the audioop documentation's echocancel example).
    outputdata = prefill + audioop.mul(outputdata, 2, -factor) + postfill
    return audioop.add(inputdata, outputdata, 2)
Example #4
0
 def echocancel(outputdata, inputdata):
     """Try to identify an echo and remove it.

     Both arguments should contain 2-byte samples; *inputdata* is the
     recording that may contain an echo of *outputdata*.  Returns the
     input with the time-aligned, scaled output subtracted from it."""
     # Loudest 800-sample stretch of the output: the easiest part to
     # locate again inside the echoed input.
     pos = audioop.findmax(outputdata, 800)
     out_test = outputdata[pos*2:]
     in_test = inputdata[pos*2:]
     # Offset (in samples) and rough amplitude ratio of the best match.
     ipos, factor = audioop.findfit(in_test, out_test)
     # Refine the amplitude ratio at exactly the matched position.
     factor = audioop.findfactor(in_test[ipos*2:ipos*2+len(out_test)], out_test)
     # NOTE(review): '\0' is a str literal; concatenating it with audioop's
     # bytes results only works on Python 2 — use b'\0' under Python 3.
     prefill = '\0'*(pos+ipos)*2
     postfill = '\0'*(len(inputdata) - len(prefill) - len(outputdata))
     # Subtract the scaled output (width 2, negated factor) from the input.
     outputdata = prefill + audioop.mul(outputdata, 2, -factor) + postfill
     return audioop.add(inputdata, outputdata, 2)
Example #5
0
def signal_analysis(bpm):
    """Record one block of audio and estimate the beat phase for *bpm*."""
    # Open a fresh capture stream on the module-level PyAudio handle,
    # using the module-level stream parameters.
    stream = pa.open(
        format=format,
        channels=channels,
        rate=rate,
        input=True,
        frames_per_buffer=input_frames_per_block,
    )
    recorded = stream.read(input_frames_per_block)
    # Best alignment of the recording against a reference sine at this tempo.
    offset, factor = audioop.findfit(recorded, sin(bpm))
    # Convert the sample offset into a fraction of a beat cycle.
    cycles = offset * bpm / (4 * rate * 60)
    phase = cycles - math.floor(cycles)
    print(offset, phase)
    return phase
Example #6
0
def findfit(scratch_frag, final_frag, sound_file):
  '''Calculates the offset (in seconds) between scratch_frag & final_frag.
     Both fragments are assumed to contain the same, loud "clapper" event.
     The SoundFile object is used for common stream parameters.

     Fragments are raw 16-bit PCM byte strings; the return value is the
     shift of scratch relative to final in seconds (positive when the
     clapper occurs later in the scratch take).  Python 2 code.'''
  import audioop
  nchannels = sound_file.stream.getnchannels()
  framerate = sound_file.stream.getframerate()
  width = sound_file.stream.getsampwidth()
  # Only 16-bit samples are supported (audioop width 2).
  assert(width==2)

  # Simplify the sound streams to make it quicker to find a match.
  # Left channel only.
  if nchannels > 1:
    scratch_frag_ = audioop.tomono(scratch_frag, width, 1, 0)
    final_frag_   = audioop.tomono(final_frag,   width, 1, 0)
  else:
    scratch_frag_ = scratch_frag
    final_frag_   = final_frag
  nchannels_ = 1

  # Downsample to 8000/sec, discarding the ratecv continuation state.
  framerate_ = 8000
  scratch_frag_,state =\
      audioop.ratecv(scratch_frag_, width, nchannels_, framerate, framerate_, None)
  final_frag_,state =\
      audioop.ratecv(final_frag_,   width, nchannels_, framerate, framerate_, None)
  bytes_per_second_ = nchannels_ * framerate_ * width

  # Find the clapper in final
  # NOTE(review): the trailing comment says "0.1 sec", but 0.001*framerate
  # is one *millisecond* at the ORIGINAL rate, and findmax runs over the
  # 8 kHz downsampled data — framerate_ (and 0.1) were probably intended.
  # Confirm before changing.
  length_samples = int(0.001 * framerate * nchannels_) # 0.1 sec
  final_off_samples = audioop.findmax(final_frag_, length_samples)

  # Search for a 2 second 'needle' centred on where we found the 'clapper'
  needle_bytes = 2 * bytes_per_second_
  b0 = max(0, final_off_samples * width - int(needle_bytes/2))
  print '"clapper" at final:', 1.0*b0/bytes_per_second_, 'sec'
  b1 = b0 + needle_bytes
  final_clapper_frag = final_frag_[b0:b1]
  # Slide the needle along the scratch take; findfit returns the best
  # offset (in samples) plus the amplitude ratio of the match.
  scratch_off_samples,factor = audioop.findfit(scratch_frag_, final_clapper_frag)
  scratch_off_bytes = scratch_off_samples * width
  print 'match at scratch:', 1.0*scratch_off_bytes/bytes_per_second_, 'sec', " factor =",factor

  # Calculate the offset (shift) between the two fragments.
  shift_sec = (scratch_off_bytes - b0) * 1.0 / bytes_per_second_
  print 'shift =', shift_sec, 'seconds'
  return shift_sec
Example #7
0
 def test_findfit(self):
     # Matching a fragment against itself yields offset 0, factor 1.0.
     expected = (0, 1.0)
     self.assertEqual(audioop.findfit(data[1], data[1]), expected)
Example #8
0
 def test_findfit(self):
     """Exercise audioop.findfit on self-matches and embedded references.

     BUG FIX: this block originally contained two methods both named
     test_findfit; the second definition shadowed the first, so the
     first assertion never ran.  They are merged so every check executes.
     """
     # Self-match: offset 0, amplitude factor 1.0.
     self.assertEqual(audioop.findfit(data[1], data[1]), (0, 1.0))
     self.assertEqual(audioop.findfit(datas[2], datas[2]), (0, 1.0))
     # A three-sample reference fits best one sample in.
     self.assertEqual(audioop.findfit(datas[2], packs[2](1, 2, 0)),
                      (1, 8038.8))
     # The reference appears at offset 30 inside the longer fragment.
     self.assertEqual(
         audioop.findfit(datas[2][:-2] * 5 + datas[2], datas[2]), (30, 1.0))
Example #10
0
def testfindfit(data):
	if audioop.findfit(data[1], data[1]) <> (0, 1.0):
		return 0
	return 1
Example #11
0
def testfindfit(data):
    if verbose:
        print 'findfit'
    if audioop.findfit(data[1], data[1]) <> (0, 1.0):
        return 0
    return 1
Example #12
0
def testfindfit(data):
    # Legacy (Python 2) regression check: print the sub-test name when the
    # module-level `verbose` flag is set, then verify that a fragment
    # matched against itself fits at offset 0 with amplitude factor 1.0.
    # Returns 1 on success, 0 on failure.
    if verbose:
        print 'findfit'
    if audioop.findfit(data[1], data[1]) != (0, 1.0):
        return 0
    return 1
Example #13
0
 def test_findfit(self):
     sample = datas[2]
     # Self-match: offset 0, amplitude factor 1.0.
     self.assertEqual(audioop.findfit(sample, sample), (0, 1.0))
     # A three-sample reference fits best one sample in.
     self.assertEqual(audioop.findfit(sample, packs[2](1, 2, 0)), (1, 8038.8))
     # The reference appears at offset 30 inside the longer fragment.
     self.assertEqual(audioop.findfit(sample[:-2] * 5 + sample, sample), (30, 1.0))
Example #14
0
File: test.py  Project: Pehat/sound
def decompose(swave,
              framerate,
              instruments=[tri_string, square_string, sin_string, saw_string]):
    """Greedily decompose *swave* (a string of 2-byte samples) into notes.

    Works block by block: finds the dominant FFT peak, refines its
    frequency from the phase drift into the next block, fits candidate
    instrument waveforms against the signal with audioop.findfit,
    subtracts the best-fitting pattern from *swave*, and records the match
    as (offset, amplitude, periods, instrument) per frequency.  Returns
    the silenced residual with all recorded matches re-synthesised on top.

    NOTE(review): Python 2 code (print statements, xrange, iteritems);
    several divisions rely on Python 2 integer-division semantics.  The
    mutable default `instruments` list is shared across calls.
    """
    # matches maps frequency -> list of (offset, amp, periods, instrument).
    matches = {}
    sample_width = 2
    total_samples = len(swave) / sample_width
    block_start = 0
    block_end = min(total_samples, block_start + BLOCK_SIZE)

    # Look-ahead block reused between iterations to avoid redundant FFTs.
    next_block = None

    # Only process full-sized blocks; a short tail block ends the loop.
    while block_end - block_start == BLOCK_SIZE:
        if next_block is None:
            lblock = swave[block_start * sample_width:block_end * sample_width]
        else:
            lblock = next_block

        if DEBUG_MODE:
            debug_show_and_play_sample(lblock)

        noisy_block = False
        # Extract up to PEAKS_IN_BLOCK spectral peaks from this block.
        for j in xrange(PEAKS_IN_BLOCK):
            if next_block is None:
                peaks, phases = get_peaks_and_phases(lblock)
            else:
                peaks = next_peaks
                phases = next_phases

            if DEBUG_MODE:
                debug_display_peaks(peaks)

            peak = max(peaks)
            #TODO: normal level detection
            if peak < 1:
                print "too quiet."
                noisy_block = False
                continue
            peak_index = peaks.index(peak)
            phase = phases[peak_index]
            # Zero the used bin so the next pass finds the next-loudest peak.
            peaks[peak_index] = 0

            # Refine the bin frequency using the phase advance into the
            # following block (phase-vocoder-style frequency estimate).
            next_block = swave[block_end *
                               sample_width:(2 * block_end - block_start) *
                               sample_width]
            next_peaks, next_phases = get_peaks_and_phases(next_block)
            next_phase = next_phases[peak_index]
            phase_delta = next_phase - phase

            freq_res = float(framerate) / FFT_BLOCK_SIZE
            base_freq = (peak_index + phase_delta / (2 * math.pi)) * freq_res

            # Fixed trial amplitude for generated patterns; findfit later
            # returns the scale factor relative to this level.
            peak = 1000

            periods = int(BLOCK_SIZE * base_freq / framerate)

            # Search window: the block padded by half a wavelength per side.
            wavelength = int(framerate / base_freq)
            window_left = max(0, block_start - wavelength / 2)
            window_right = min(total_samples, block_end + wavelength / 2)
            window = swave[window_left * sample_width:window_right *
                           sample_width]

            # Remove the DC component before fitting.
            dc_offset = sum(str_to_list(window)) * sample_width / len(window)
            window = audioop.bias(window, sample_width, -dc_offset)

            best_offset = None
            best_factor = None
            best_periods = 0
            best_freq = base_freq
            # Baseline residual: RMS of the window with nothing subtracted.
            best_rms = audioop.rms(window, sample_width)
            best_instrument = 0

            # Try every period count and instrument; keep whichever
            # subtraction leaves the lowest residual RMS.
            for p in xrange(1, periods):
                for inst, wavegen in enumerate(instruments):

                    freq = base_freq
                    pattern = wavegen(framerate, freq, peak, p)

                    offset, factor = audioop.findfit(window, pattern)
                    #window_pattern = window[offset * sample_width:offset * sample_width + len(pattern)]
                    window_pattern = window
                    # Pad the pattern with silence so it aligns with and
                    # spans the whole window.
                    pattern = complete_silence(pattern, offset,
                                               len(window) / sample_width)

                    # Subtract the fitted pattern (negated factor) and
                    # re-remove DC before measuring the residual.
                    fitted_pattern = audioop.mul(pattern, sample_width,
                                                 -factor)
                    applied_pattern = audioop.add(window_pattern,
                                                  fitted_pattern, sample_width)
                    dc_offset = sum(str_to_list(
                        applied_pattern)) * sample_width / len(applied_pattern)
                    applied_pattern = audioop.bias(applied_pattern,
                                                   sample_width, -dc_offset)

                    rms = audioop.rms(applied_pattern, sample_width)

                    # if DEBUG_MODE and p > 3:
                    # print "debug: ", p, freq, rms, best_rms, offset
                    # matplotlib.pyplot.plot(str_to_list(window_pattern), 'b')
                    # matplotlib.pyplot.plot(str_to_list(fitted_pattern), 'r')
                    # matplotlib.pyplot.plot(str_to_list(applied_pattern), 'g')

                    # matplotlib.pyplot.show()
                    # matplotlib.pyplot.close()

                    # Accept up to 2% worse RMS so longer fits can win over
                    # marginally better short ones.
                    if ((best_rms > 0) and
                        (rms < best_rms * 1.02)) or (best_rms == rms == 0):
                        best_rms = rms
                        best_periods = p
                        best_factor = factor
                        best_offset = offset + window_left
                        best_instrument = inst
                        best_freq = freq

            print "found: ", best_periods, best_freq, best_rms
            if not best_freq in matches:
                matches[best_freq] = []

            if best_periods < 3:

                #block_start = max(best_offset, block_start + 1)
                #block_end = min(total_samples, block_start + BLOCK_SIZE)
                # Too short to be a real note: skip ahead a whole block.
                block_start = block_end
                block_end = min(total_samples, block_start + BLOCK_SIZE)
                noisy_block = True

                print "too short period"
                break

            if not best_factor:
                print "no waveforms found."
                continue
            amp = best_factor * peak
            wavegen = instruments[best_instrument]
            print "%5.2f Hz at level %5.2f for %4i periods" % (best_freq, amp,
                                                               best_periods)
            matches[best_freq].append(
                (best_offset, amp, best_periods, best_instrument))
            # Synthesise the matched note inverted; adding it to swave below
            # cancels the note out of the remaining signal.
            pattern = wavegen(framerate, best_freq, -int(amp), best_periods)
            complement = complete_silence(pattern, best_offset, total_samples)

            if DEBUG_MODE:
                waveout = wave.open("tmp.wav", "wb")
                waveout.setparams(
                    (2, 2, 44100, 531788, 'NONE', 'not compressed'))
                outdata = join_stereo(pattern, pattern)
                waveout.writeframes(outdata)
                waveout.close()
                subprocess.Popen(
                    r"C:\Program Files (x86)\Winamp\winamp.exe tmp.wav")

                matplotlib.pyplot.plot(
                    str_to_list(swave[block_start * sample_width:block_end *
                                      sample_width]))
                matplotlib.pyplot.plot(
                    str_to_list(
                        complement[block_start * sample_width:block_end *
                                   sample_width]), 'r')
                matplotlib.pyplot.show(block=True)
                matplotlib.pyplot.close()

            swave = audioop.add(swave, complement, sample_width)

        if not noisy_block:
            block_start = block_end
            block_end = min(total_samples, block_start + BLOCK_SIZE)
        print block_start, block_end
        print "block processed."

    # Re-synthesise every recorded match over a silenced copy of the signal.
    result = audioop.mul(swave, sample_width, 0)
    for best_freq, notes in matches.iteritems():
        for note in notes:
            offset, amp, periods, best_instrument = note
            wavegen = instruments[best_instrument]
            pattern = wavegen(framerate, best_freq, amp, periods)
            prepared_sample = complete_silence(pattern, offset,
                                               len(result) / 2)
            result = audioop.add(result, prepared_sample, sample_width)
    return result
Example #15
0
  def run(self):
    """Worker loop: service queue commands, otherwise capture microphone
    audio and splice well-matching segments into the working WAV data.

    NOTE(review): Python 2 code (print statements); loops forever with no
    shutdown path visible in this block.
    """
    #print "Started input"
    while True:
      #print "loop"
      if not self.queue.empty():
        message = self.queue.get()
        # "writedata" command: dump the current work buffer to the player
        # file while holding the shared lock.
        if message == "writedata":
          print "Writing Data"
          print "Acquire Lock"
          self.lock.acquire()
          wfo = wave.open(self.filepath_player, 'wb')
          wfo.setparams(self.params)

          print "Writing "+str(len(self.workdata))+" BYTES"
          wfo.writeframes(self.workdata)
          wfo.writeframes('')

          print "Releasing Lock"
          wfo.close()
          self.lock.release()
          self.queue.task_done()
      
      # Main writing loop
      else:
        # Get New Mic Info
        try:
          # Capture half an input chunk and resample 44.1 kHz -> 8 kHz,
          # carrying the ratecv converter state across reads.
          mic_buff = self.audio_input.read(INPUT_CHUNK/2)
          #print str(len(mic_buff))
          (mic_sec, self.state) = audioop.ratecv(mic_buff,BYTES,CHANNELS,44100,8000,self.state)
          
          sys.stdout.write(str(len(mic_buff)))
	  #print "To: "+str(len(micdata))
          sys.stdout.write(' :To: ' + str(len(mic_sec))+'\n')
          
          #for x in xrange(0,len(mic_sec),800):
 	  micdata = mic_sec[0:800]
	  sys.stdout.write(' :To: ' + str(len(micdata))+'\n')
          sys.stdout.flush()
        # NOTE(review): bare except swallows every error, not just input
        # overflow; on failure `micdata` is stale or undefined below.
        except:
          print "***** Ignored oveflow *****" 
          pass

        # Current search segment inside the reference WAV, in bytes.
        in_pos  = self.OFFSET*BYTES
        out_pos = in_pos+(self.SEGMENT*RATE*BYTES)
        if out_pos > self.WAV_LENGTH:
          out_pos = self.WAV_LENGTH

        seg_wavdata = self.wavdata[in_pos:out_pos]
        seg_workdata = self.workdata[in_pos:out_pos]

        # Where does the mic audio best fit in the reference segment, and
        # how well does the existing work data already fit at that spot?
        point, factorM = audioop.findfit(seg_wavdata,micdata)
        seg_in_pos = point*2
        seg_out_pos = seg_in_pos+len(micdata)
        offst, factorW = audioop.findfit(seg_wavdata[seg_in_pos:seg_out_pos],seg_workdata[seg_in_pos:seg_out_pos])

        #print "Searched ("+str(in_pos)+", "+str(out_pos)+")" 
        #print "len "+str(len(self.wavdata))
        #print "format "+str(BYTES)

        # Splice the mic capture into the work buffer when it matches the
        # reference more closely (factor nearer 1) than the current data.
        if factorM > 0:
          if abs(1-factorM) < abs(1-factorW):
            print "Matched @ " + str((in_pos+seg_in_pos)) + ", with factor:" + str(factorM)

            #micpart = audioop.mul(micdata,2,(factorM*0.9))
            #workpart = audioop.mul(seg_workdata[seg_in_pos:seg_out_pos],2,0.1)
          #else:
            #print "WeakMat @ " + str((in_pos+seg_in_pos)) + ", with factor:" + str(factorM)

            #micpart = audioop.mul(micdata,2,(factorM*0.1))
            #workpart = audioop.mul(seg_workdata[seg_in_pos:seg_out_pos],2,0.9)

            # Rebuild workdata: untouched head, segment up to the match,
            # the mic data, remainder of the segment, untouched tail.
            msg = []
            msg.append(self.workdata[0:in_pos])
            msg.append(seg_workdata[0:seg_in_pos])
            msg.append(micdata)
            #msg.append(audioop.add(micpart,workpart,BYTES))
            msg.append(seg_workdata[seg_out_pos:len(seg_workdata)])
            msg.append(self.workdata[out_pos:self.WAV_LENGTH])

            self.workdata = b''.join(msg)
          #print "new work len "+str(len(self.workdata))

        #Increment Chunk
        self.TIMING += 1
        if self.TIMING > 100:
          # Then move offset
          self.OFFSET = self.OFFSET+(self.SEGMENT*RATE)
          if out_pos == self.WAV_LENGTH:
            self.OFFSET = 0
          # And Reset TIMING
          self.TIMING =0