def callback(in_data, frame_count, time_info, status):
    """PyAudio stream callback for file playback.

    Ignores the *in_data* capture buffer and instead pulls the next
    *frame_count* frames from the module-level wave reader ``wf``,
    splits them into analysis chunks, and feeds each chunk into the
    ``in_frames`` queue (dropping chunks when the queue is full so the
    audio thread never blocks).

    Returns the (audio, flag) tuple PyAudio expects. Fix over the
    original: once ``wf.readframes`` returns an empty buffer (end of
    file) we signal ``paComplete`` instead of looping forever with
    ``paContinue`` on empty data.
    """
    in_data = wf.readframes(frame_count)
    frames = list(quietnet.chunks(quietnet.unpack(in_data), chunk))
    for frame in frames:
        # Drop-on-full: never block inside the realtime audio callback.
        if not in_frames.full():
            in_frames.put(frame, False)
    # Empty read means the wave file is exhausted — tell PyAudio to stop.
    flag = pyaudio.paContinue if in_data else pyaudio.paComplete
    return (in_data, flag)
# --- Receiver configuration, taken from the parsed command-line options ---
frame_length = options.frame_length
chunk = options.chunk
search_freq = options.freq
rate = options.rate
# sigil digits parsed from options — marker semantics defined elsewhere in the project
sigil = [int(x) for x in options.sigil]
frames_per_buffer = chunk * 10
in_length = 4000
# Pipeline queues: raw audio frames -> carrier-detection points -> decoded bits
in_frames = Queue.Queue(in_length)
points = Queue.Queue(in_length)
bits = Queue.Queue(in_length / frame_length)
bottom_threshold = 8000
# Read the recorded transmission; wav.read returns (sample_rate, samples).
# NOTE(review): this overwrites the `rate` taken from options above — confirm intended.
rate, in_data = wav.read('Amogh.wav')
print(in_data)
# Data flow: raw wav samples -> frames -> points -> bits -> text output.
frames = list(quietnet.chunks(quietnet.unpack(in_data), chunk))
# Outermost loop: frame-wise division of the whole recording.
for frame in frames:
    # Buffer incoming frames until the queue is full.
    if not in_frames.full():
        in_frames.put(frame, False)
    # Once the queue is full, analyse one frame: FFT it and test whether
    # the carrier frequency `search_freq` is present.
    if in_frames.full():
        frame = in_frames.get(False)
        fft = quietnet.fft(frame)
        point = quietnet.has_freq(fft, search_freq, rate, chunk)
        points.put(point)
cur_points = []
# NOTE(review): the body of this loop is missing from the visible source —
# presumably it drains `points` into `cur_points` until a full frame of
# detection points is collected; confirm against the complete file.
while len(cur_points) < frame_length:
def callback(in_data, frame_count, time_info, status):
    """PyAudio stream callback for live capture.

    Splits the captured *in_data* buffer into analysis-sized chunks and
    enqueues each one on ``in_frames``; chunks arriving while the queue
    is full are silently discarded so the audio thread never blocks.
    Always hands the buffer back with ``paContinue``.
    """
    samples = quietnet.unpack(in_data)
    pieces = list(quietnet.chunks(samples, chunk))
    for piece in pieces:
        if in_frames.full():
            continue  # queue saturated — drop this chunk
        in_frames.put(piece, False)
    return (in_data, pyaudio.paContinue)