def laplacian_edges(image):
    """Filter *image* with both Laplacian kernels and show the results side by side.

    Displays the two edge maps in a 1x2 matplotlib figure (axes hidden) and
    returns them as a tuple ``(laplacian1_result, laplacian2_result)``.
    """
    edge_maps = [apply(image, kernel) for kernel in (k_laplacian1, k_laplacian2)]
    fig, axes = plt.subplots(1, 2)
    for axis, edge_map in zip(axes, edge_maps):
        axis.imshow(edge_map, cmap='gray')
        axis.axis('off')
    plt.show()
    return edge_maps[0], edge_maps[1]
def GetFrameStats(data, inRunningSum, inRunningVoiceSum, inRunningCount):
    """Compute per-frame amplitude statistics and updated running totals.

    Band-passes *data* to the voice range (80 Hz - 1000 Hz) via the project's
    ``filters`` module, then derives peak and average absolute amplitudes for
    both the raw and the voice-filtered signal.

    Parameters:
        data: sequence of numeric samples for one frame.
        inRunningSum: running sum of |sample| over all prior frames.
        inRunningVoiceSum: running sum of |voice sample| over all prior frames.
        inRunningCount: running total number of samples seen so far.

    Returns:
        Tuple ``(maxVal, voicedata, voiceMax, outRunningCount, outRunningSum,
        outRunningAvg, outRunningVoiceSum, outRunningVoiceAvg)``.

    Raises:
        ValueError: if *data* is empty (``max()`` of an empty sequence),
        matching the original behavior.
    """
    Ndata = len(data)
    # HPF at 80 Hz combined with LPF at 1000 Hz -> band-pass for the voice band.
    voicefilter = filters.mult(filters.hpf(Ndata, 80), filters.lpf(Ndata, 1000))
    (voicedata, voicefreq) = filters.apply(data, voicefilter)
    # Fix: the original built the abs() list over `data` twice (once for max,
    # once for sum) and over `voicedata` twice as well; do each pass once.
    absData = [abs(d) for d in data]
    absVoice = [abs(d) for d in voicedata]
    maxVal = max(absData)
    voiceMax = max(absVoice)
    outRunningCount = inRunningCount + Ndata
    outRunningSum = inRunningSum + sum(absData)
    outRunningVoiceSum = inRunningVoiceSum + sum(absVoice)
    outRunningAvg = outRunningSum / outRunningCount
    outRunningVoiceAvg = outRunningVoiceSum / outRunningCount
    return (maxVal, voicedata, voiceMax, outRunningCount, outRunningSum,
            outRunningAvg, outRunningVoiceSum, outRunningVoiceAvg)
def perframe( channels, lambdaOp=None, filt=None, numFrames=1 ):
    """Generator yielding ``(header, samples)`` for each frame read.

    Frames come from the external ``read_one_frame()``; only channels listed
    in *channels* are kept.  Optionally maps each channel's samples through
    *lambdaOp* and/or ``filters.apply(..., filt)``.  With ``numFrames > 1``,
    yields a sliding window: the samples of the last up-to-``numFrames``
    frames concatenated per channel.

    Parameters:
        channels: iterable of channel keys to extract from each frame.
        lambdaOp: optional callable applied to each channel's sample list.
        filt: optional filter passed to ``filters.apply`` per channel.
        numFrames: window size; 1 yields single frames (must be > 0).
    """
    assert( numFrames > 0 )
    if numFrames > 1:
        # Sliding window of the most recent per-frame sample dicts.
        queue = deque()
    while True:
        # Fresh empty sample list per requested channel for this frame.
        samples = dict( ( ( channel, [] ) for channel in channels ) )
        parsed = read_one_frame()
        if not parsed:
            # Falsy result marks end of input; stop the generator.
            break
        ( header, frameSamples ) = parsed
        for channel, data in frameSamples.items():
            # Drop channels the caller did not ask for.
            if channel not in channels:
                continue
            samples[channel].extend( data )
        if lambdaOp:
            samples = dict( ( ( c, lambdaOp( samples[c] ) ) for c in channels ) )
        if filt:
            samples = dict(( ( c, filters.apply( samples[c], filt ) ) for c in channels ))
        if numFrames == 1:
            yield ( header, samples )
        else:
            queue.append( samples )
            if len( queue ) > numFrames:
                queue.popleft()
            # NOTE(review): scales the *current* frame's header in place —
            # assumes every frame carries the same num_samples; TODO confirm.
            header.num_samples *= len( queue )
            # generate merged dictionary
            samples = dict( ( ( channel, [] ) for channel in channels ) )
            for queueItem in queue:
                for channel, data in queueItem.items():
                    samples[channel].extend( data )
            yield ( header, samples )
def h_play(self, key):
    """Plays currently selected track

    Also applies filters, if any are selected.

    Bound as a key handler (*key* is the pressed key, unused here).  Bails
    out with a notification when no track is selected; otherwise clears the
    played tracks from the track-list widget, runs the app's filters over
    the current track, updates the position slider's range, and starts
    playback via ``audio.play``.
    """
    app = self.parentApp
    if not app.current_track:
        # Guard: nothing selected, tell the user and do nothing else.
        app.notify('No track selected')
        return
    for filename in app.current_track_nos:
        # NOTE(review): track numbers appear to be 1-based positions in the
        # app's filename list — confirm against how `values` is populated.
        track_no = app._filenames_list.index(filename) + 1
        try:
            self.get_widget('track-list').values.remove(track_no)
        except ValueError:
            # Track number not present in the widget; best-effort removal.
            pass
    # Clear the widget's current selection.
    self.get_widget('track-list').value = []
    app.notify('Applying filters...')
    track = filters.apply(app.current_track, self.parentApp.filters)
    # Truncate to the configured track length before playback.
    track = track[:app._track_length]
    # Slider range in seconds — presumably len(track) is in milliseconds;
    # TODO confirm the unit against the audio module.
    self.get_widget('position').entry_widget.out_of = len(track) / 1000
    self.get_widget('position').display()
    audio.play(track, notifier=self.update_slider)
    app.notify('Playing!')
    self.set_status('Playing')
def preprocessing(image):
    """Smooth *image* with the Gaussian kernel, display the result, and return it."""
    smoothed = apply(image, k_gaussian)
    plt.imshow(smoothed, cmap='gray')
    plt.show()
    return smoothed