class SDRThread(QThread):
    signal = pyqtSignal(object)

    def __init__(self, sample_rate=2.4e6, center_freq=100.0e6,
                 freq_correction=60, gain=33.8, chunks=1024):
        QThread.__init__(self)
        # configure device
        try:
            self.sdr = RtlSdr()
        except LibUSBError:
            print("No Hardware Detected")
            self.isRunning = False
        else:
            self.sdr.sample_rate = sample_rate          # Hz
            self.sdr.center_freq = center_freq          # Hz
            self.sdr.freq_correction = freq_correction  # PPM
            self.sdr.gain = gain                        # dB
            self.isRunning = True
            self.CHUNK = chunks

    def __del__(self):
        if self.isRunning:
            self.wait()

    def stop_thread(self):
        self.isRunning = False
        self.sdr.cancel_read_async()
        self.sdr.close()

    def sdr_tune(self, cf):
        self.sdr.center_freq = cf  # Hz

    def sdr_gain(self, gain=33.8):
        self.sdr.gain = gain

    def run(self):
        if self.isRunning:
            self.sdr.read_samples_async(self.sdr_async_callback, self.CHUNK, None)

    def sdr_async_callback(self, iq, ctx):
        power, _ = mlab.psd(iq, NFFT=self.CHUNK, Fs=self.sdr.sample_rate,
                            scale_by_freq=False)
        self.signal.emit(np.sqrt(power))
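
# --- Usage sketch (added for illustration; not part of the original source) ---
# A minimal, hedged example of driving SDRThread from a Qt application: connect
# the `signal` to a slot, start the thread, and stop it after a few seconds.
# The PyQt5 import locations and the 99.5 MHz tuning are assumptions; the
# original module may use a different Qt binding, in which case only the
# imports change.

if __name__ == "__main__":
    import sys
    from PyQt5.QtCore import QTimer
    from PyQt5.QtWidgets import QApplication

    def on_spectrum(magnitudes):
        # `magnitudes` is sqrt(PSD) of one CHUNK of IQ samples (see sdr_async_callback)
        print("peak bin %d, level %.4f" % (magnitudes.argmax(), magnitudes.max()))

    app = QApplication(sys.argv)
    sdr_thread = SDRThread(center_freq=99.5e6)     # arbitrary example frequency
    sdr_thread.signal.connect(on_spectrum)         # emitted from the worker, delivered in the GUI thread
    sdr_thread.start()                             # QThread.start() invokes run() in its own thread

    QTimer.singleShot(5000, sdr_thread.stop_thread)   # cancel the async read and close the SDR
    QTimer.singleShot(5500, app.quit)
    sys.exit(app.exec_())
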
class FMRadio(QtGui.QMainWindow, Ui_MainWindow):

    sample_buffer = Queue.Queue(maxsize=10)
    base_spectrum = np.ones(5780)

    plotOverall = True
    plotChannel = False
    plotPlaying = False
    plotWaveform = False

    useStereo = False
    stereoWidth = 10

    useMedianFilt = True
    useLPFilt = True
    demodFiltSize = 100000
    useAudioFilter = True
    audioFilterSize = 16

    toDraw = True
    demodMain = True
    demodSub1 = False
    demodSub2 = False
    toDrawWaterfalls = True
    toDrawPlots = False

    prevCutoff = 0
    prevSpec1 = np.zeros(128)
    prevSpec2 = np.zeros(128)
    prevSpec3 = np.zeros(128)
    prevConvo1 = np.zeros(128)
    prevConvo2 = np.zeros(128, dtype='complex')
    limiterMax = np.zeros(32)

    toPlot = (np.cumsum(np.ones(5780)), np.cumsum(np.ones(5780)))

    def __init__(self, freq, N_samples):
        self.spectrogram = np.zeros((512, 400))
        self.chspectrogram = np.zeros((512, 400))
        self.plspectrogram = np.zeros((256, 400))
        self.cur_spectrogram = self.spectrogram

        QtGui.QMainWindow.__init__(self)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.createQtConnections()

        ftxt = "%.1f MHz" % (freq / 1e6)
        self.ui.curFreq.setText(ftxt)

        self.sample_rate = 2.4e5        # tried 1.024e6 - not so great
        self.decim_r1 = 1               # 1.024e6/2.4e5 # for wideband fm down from sample_rate
        self.decim_r2 = 2.4e5 / 48000   # for baseband recovery
        self.center_freq = freq         # +250e3
        self.gain = 16

        self.N_samples = N_samples
        self.is_sampling = False

        self.sdr = RtlSdr()
        # self.sdr.direct_sampling = 1
        self.sdr.sample_rate = self.sample_rate
        self.sdr.center_freq = self.center_freq
        self.sdr.gain = self.gain

        self.pa = pyaudio.PyAudio()
        self.stream = self.pa.open(format=pyaudio.paFloat32,
                                   channels=2,
                                   rate=48000,
                                   output=True)

        self.PLL = PhaseLockedLoop(self.N_samples, 19000, self.sample_rate)

        self.initOpenCV()

        self.noisefilt = np.ones(6554)

        b, a = signal.butter(1, 2122/48000*2*np.pi, btype='low')
        self.demph_zf = signal.lfilter_zi(b, a)

        adj = 0
        hamming = np.kaiser(self.N_samples/4 + adj, 1)
        lpf = np.append(np.zeros(self.N_samples*3/8), hamming)
        self.lpf = np.fft.fftshift(np.append(lpf, np.zeros(self.N_samples*3/8)))  # ,int(-.25*self.N_samples))

        hamming = 10 * signal.hamming(self.N_samples/16)
        lpf = np.append(np.zeros(self.N_samples*15/32), hamming)
        self.lpf_s1 = np.append(lpf, np.zeros(int(self.N_samples*15/32)))
        # self.lpf_s1 = np.roll(temp,int(.5*self.N_samples*67/120))
        # self.lpf_s1 += np.roll(temp,int(-.5*self.N_samples*67/120))
        self.lpf_s1 = np.fft.fftshift(self.lpf_s1)
        # self.lpf_s1 += np.fft.fftshift(self.lpf_s1)

        # fig = plt.figure()
        # ax = fig.add_subplot(111)
        # ax.plot(range(self.lpf_s1.size),self.lpf_s1)
        # fig.show()

        hamming = 10 * signal.hamming(self.N_samples/32)
        lpf = np.append(np.zeros(self.N_samples*31/64), hamming)
        self.lpf_s2 = np.append(lpf, np.zeros(int(self.N_samples*31/64)))
        # self.lpf_s2 = np.roll(temp,int(.5*self.N_samples*92/120))
        # self.lpf_s2 += np.roll(temp,int(-.5*self.N_samples*92/120))
        self.lpf_s2 = np.fft.fftshift(self.lpf_s2)

    # Not currently used
    def getSamples(self):
        return self.sdr.read_samples(self.N_samples)

    def getSamplesAsync(self):
        # Asynchronous call. Initiates a continuous loop with the callback fn
        self.is_sampling = True
        samples = self.sdr.read_samples_async(self.sampleCallback, self.N_samples, context=self)

    def sampleCallback(self, samples, sself):
        self.is_sampling = False
        self.sample_buffer.put(samples)
        # print 'put some samples in the jar'

        # recursive loop
        # sself.getSamplesAsync()

    def demodulate_th(self):
        # Initiates a loop to process all the incoming blocks of samples from the Queue
        # This should be run in its own thread, or the program will become unresponsive
        while(1):
            try:
                samples = self.sample_buffer.get()
                # samples2 = self.sample_buffer.get()
            except:
                # print "wtf idk no samples?"  # even though this can't happen... (although I'm not sure why not)
                # print 'gonna try to finish off the to-do list'
                # self.sample_buffer.join()
                break

            out1 = self.demodulate(samples)
            self.sample_buffer.task_done()
            # out2 = self.demodulate(samples2)
            # self.sample_buffer.task_done()

            audio_out = out1  # np.append(out1,out2)
            self.play(audio_out)

    def gen_spectrogram(self, x, m, prevSpec):
        itsreal = np.isreal(x[0])

        m = int(m)
        lx = x.size
        nt = (lx) // m
        # NT = (lx +m -1)//m
        # padsize = NT*m -lx
        cutsize = lx - nt*m
        padsize = int(cutsize + m/2)
        if not prevSpec.size == padsize:
            prevSpec = np.zeros(padsize)
        xp = np.append(x[cutsize:], prevSpec)
        prevSpec = x[:(padsize)]

        xh = np.zeros((m, nt*2), dtype='complex')
        for n in range(int(nt*2)):
            block = xp[m*n//2:m*(n+2)//2]
            xh[:, n] = block * np.hanning(block.size)

        # if self.prevSpec.size == padsize:
        #     xb = np.append(self.prevSpec[:prevSpec.size-m/2],x)
        #     xc = np.append(self.prevSpec,x[:lx-m/2])
        #     self.prevSpec = x[lx-m/2:]
        # else:
        #     xb = np.append(x,np.zeros(-lx+nt*m))
        #     xc = np.append(x[m/2:],np.zeros(nt*m - lx + m/2))
        # xr = np.reshape(xb, (m,nt), order='F') * np.outer(np.hanning(m),np.ones(nt))
        # xs = np.reshape(xc, (m,nt), order='F') * np.outer(np.hanning(m),np.ones(nt))
        # xm = np.zeros((m,2*nt),dtype='complex')
        # xm[:,::2] = xr
        # xm[:,1::2] = xs

        if itsreal:
            spec = np.fft.fft(xh, m, axis=0)
            spec = spec[:m//2, :]
        else:
            spec = np.fft.fftshift(np.fft.fft(xh, m, axis=0))
        # mx = np.max(spec)
        pwr = np.log(np.abs(spec) + 1e-6)

        return (np.real(pwr), prevSpec)

    def initOpenCV(self):
        cv2.namedWindow("Spectrogram")

    def demodulate(self, samples):
        # DEMODULATION CODE - And the core function
        # samples must be passed in by the caller
        self.count += 1

        # spectral_window = signal.hanning
        # spectrum = np.fft.fftshift(np.fft.fft(samples*spectral_window(samples.size)))

        self.spectrogram = np.roll(self.spectrogram, 16, axis=1)
        stft, self.prevSpec1 = self.gen_spectrogram(samples, samples.size//8, self.prevSpec1)
        self.spectrogram[:, :16] = stft[::8, :]  # np.log(np.abs(spectrum[::100]))

        if(self.plotOverall):  # and self.count % 10 == 9):
            # self.drawSpectrum()
            self.cur_spectrogram = self.spectrogram
            self.drawCurSpectrum()

        # cutoff = self.demodFiltSize
        # h = signal.firwin(128, cutoff,nyq=self.sample_rate/2)
        # lp = signal.fftconvolve(samples[::self.decim_r1],h,mode='full')
        # lps = lp.size
        # hs = h.size
        # prev = lp[lps-hs:]
        # lp[:hs/2] += self.prevConvo2[hs/2:]
        # lp = np.append(self.prevConvo2[:hs/2],lp)
        # self.prevConvo2 = prev
        # lp_samples = lp[:lps-hs+1]

        lp_samples = samples

        power = np.abs(self.mad(lp_samples))
        self.ui.signalMeter.setValue(20 * (np.log10(power)))

        # polar discriminator
        dphase = np.zeros(lp_samples.size, dtype='complex')

        A = lp_samples[1:lp_samples.size]
        B = lp_samples[0:lp_samples.size-1]

        dphase[1:] = (A * np.conj(B))
        dphase[0] = lp_samples[0] * np.conj(self.prevCutoff)  # dphase[dphase.size-2]
        self.prevCutoff = lp_samples[lp_samples.size-1]

        # limiting
        dphase /= np.abs(dphase)
        # if self.useMedianFilt:
        #     rebuilt = signal.medfilt(np.angle(dphase)/np.pi,self.demodFiltSize)  # np.cos(dphase)
        # else:
        #     rebuilt = self.lowpass(np.angle(dphase),self.demodFiltSize)
        rebuilt = np.real(np.angle(dphase) / np.pi)

        demodMain = False
        demodSub1 = False
        demodSub2 = False
        isStereo = False

        if self.demodMain:
            demodMain = True
            if self.useStereo:
                isStereo = True
        elif self.demodSub1:
            demodSub1 = True
        elif self.demodSub2:
            demodSub2 = True

        # spectrum = np.fft.fft(rebuilt* spectral_window(rebuilt.size))
        # print rebuilt.size/8
        self.chspectrogram = np.roll(self.chspectrogram, 16, axis=1)
        stft, self.prevSpec2 = self.gen_spectrogram(rebuilt, rebuilt.size/8, self.prevSpec2)
        self.chspectrogram[:, :16] = stft[::-4, :]  # np.log(np.abs(spectrum[spectrum.size/2:spectrum.size:50]))

        if(self.plotChannel):  # and self.count % 10 == 9):
            self.cur_spectrogram = self.chspectrogram
            self.drawCurSpectrum()
            # plotspectrum = np.abs(channel_spectrum[::100])
            # self.toPlot = (np.linspace(-np.pi,np.pi,plotspectrum.size),plotspectrum)
            # self.replot()

        n_z = rebuilt.size

        if demodMain:
            h = signal.firwin(128, 16000, nyq=1.2e5)
            output = signal.fftconvolve(rebuilt, h, mode='full')

            outputa = output  # could be done in place but I'm not concerned with memory
            outputa[:h.size/2] += self.prevConvo1[h.size/2:]          # add the latter half of the tail end of the previous convolution
            outputa = np.append(self.prevConvo1[:h.size/2], outputa)  # also delayed by half the size of h, so prepend the first half
            self.prevConvo1 = output[output.size-h.size:]             # save the tail for the next iteration
            output = outputa[:output.size-h.size:self.decim_r2]       # chop off the tail and decimate

            # stereo_spectrum = spectrum

            if isStereo:
                # pilot = rebuilt * np.cos(2*np.pi*19/240*(np.r_[0:rebuilt.size]))
                h = signal.firwin(512, [18000, 20000], pass_zero=False, nyq=1.2e5)
                pilot_actual = signal.fftconvolve(rebuilt, h, mode='same')
                self.PLL.adjust(pilot_actual)

                moddif = rebuilt * np.real(np.square(self.PLL.pll))  # np.cos(2*np.pi*38/240*(np.r_[0:ss] - phase_shift))

                h = signal.firwin(128, 16000, nyq=1.2e5)
                moddif = signal.fftconvolve(moddif, h, mode='same')

                h = signal.firwin(64, 16000, nyq=48000/2)
                diff = signal.fftconvolve(moddif[::self.decim_r2], h, mode='same')
                diff = np.real(diff)

                # rdbs = rebuilt * np.power(self.PLL.pll,3)
                # h = signal.hanning(1024)
                # rdbs = signal.fftconvolve(rdbs,h)
                # if np.mean(rdbs) > 0:
                #     # bit ONE
                # else:
                #     # bit ZERO

        elif demodSub1:
            demod = rebuilt * np.exp(-2j*np.pi*67650/2.4e5*np.r_[0:rebuilt.size])
            h = signal.firwin(128, 7500, nyq=2.4e5/2)
            lp_demod = signal.fftconvolve(demod, h, mode='same')
            decim = lp_demod[::self.decim_r2]

            dphase = np.zeros(decim.size, dtype='complex')
            A = decim[1:decim.size]
            B = decim[0:decim.size-1]
            dphase[1:] = np.real(np.angle(A * np.conj(B)))

            h = signal.firwin(128, 7500, nyq=24000)
            output = signal.fftconvolve(dphase, h, mode='same')

        elif demodSub2:
            demod = rebuilt * np.exp(-2j*np.pi*92000/2.4e5*np.r_[0:rebuilt.size])
            h = signal.firwin(128, 7500, nyq=2.4e5/2)
            lp_demod = signal.fftconvolve(demod, h, mode='same')
            decim = lp_demod[::self.decim_r2]

            dphase = np.zeros(decim.size, dtype='complex')
            A = decim[1:decim.size]
            B = decim[0:decim.size-1]
            dphase[1:] = np.real(np.angle(A * np.conj(B)))

            h = signal.firwin(128, 7500, nyq=24000)
            output = signal.fftconvolve(dphase, h, mode='same')

        # DC block filter
        output = np.real(output) - np.mean(np.real(output))

        if np.isnan(output[0]):
            # print "error"  # for some reason, output is NaN for the first 2 loops
            return np.zeros(6554)

        # deemphasis: 2122/samplerate*2*pi - single-pole butterworth filter
        b, a = signal.butter(1, 2122/48000*2*np.pi, btype='low')
        output, zf = signal.lfilter(b, a, output, zi=self.demph_zf)
        self.demph_zf = zf

        stereo = np.zeros(output.size*2)

        if (isStereo):
            diff = signal.lfilter(b, a, diff)
            w = self.stereoWidth  # adjust to change stereo wideness
            left = output + w/10 * diff
            right = output - w/10 * diff

            if(self.useAudioFilter):
                left = self.lowpass(left, self.audioFilterSize)
                right = self.lowpass(right, self.audioFilterSize)

            stereo[0:stereo.size:2] = left
            stereo[1:stereo.size:2] = right
        else:
            if self.useAudioFilter:
                output = self.lowpass(output, self.audioFilterSize)  # kills the 19k pilot

            stereo[0:stereo.size:2] = output
            stereo[1:stereo.size:2] = output

        # normalize to avoid any possible clipping when playing
        stereo /= 2*np.max(stereo)

        # spectrum = np.fft.fft(stereo[::2])
        output = .5*(stereo[::2] + stereo[1::2])
        # spectrum = np.fft.fft(.5*(stereo[::2]+stereo[1::2])*spectral_window(output.size))
        self.plspectrogram = np.roll(self.plspectrogram, 24, axis=1)
        stft, self.prevSpec3 = self.gen_spectrogram(output, 512, self.prevSpec3)
        self.plspectrogram[:, :24] = stft[::-1, :]  # np.log(np.abs(spectrum[spectrum.size/2:spectrum.size:20]))

        if(self.plotPlaying):  # and self.count % 2 == 0):
            # if self.toDrawWaterfalls:
            self.cur_spectrogram = self.plspectrogram
            self.drawCurSpectrum(invert=True)
            # self.drawPlspectrum()
            # else:
            #     sm = np.abs(np.fft.fftshift(spectrum[::20]))
            #     toPlot = (np.linspace(-2.4e4,2.4e4,sm.size),sm)
            #     self.replot(toPlot)

        # if(self.toDraw and self.plotWaveform):
        #     if self.toDrawWaterfalls:
        #         sm = np.real(output[::20])
        #         toPlot = (np.linspace(0,output.size/48000,sm.size),sm)
        #         self.replot(toPlot)
        #     else:
        #         sm = np.real(self.PLL.pll[::200])
        #         toPlot = (np.linspace(0,output.size/48000,sm.size),sm)
        #         self.replot(toPlot)

        return np.real(stereo)

    # Alternate demodulator. Not used, but extremely simple
    def demodulate2(self, samples):
        # DEMODULATION CODE

        # LIMITER goes here

        # low pass & down sampling
        h = signal.firwin(128, 80000, nyq=1.2e5)
        lp_samples = signal.fftconvolve(samples, h)

        # polar discriminator
        A = lp_samples[1:lp_samples.size]
        B = lp_samples[0:lp_samples.size-1]

        dphase = (A * np.conj(B)) / np.pi

        dphase.resize(dphase.size+1)
        dphase[dphase.size-1] = dphase[dphase.size-2]

        h = signal.firwin(128, 16000, nyq=1.2e5)
        rebuilt = signal.fftconvolve(dphase, h)

        output = rebuilt[::self.decim_r2]

        output = self.lowpass(output, self.audioFilterSize)

        return np.real(output)

    # utility functions #

    def lowpass(self, x, width):
        # wndw = np.sinc(np.r_[-15:16]/np.pi)/np.pi
        # wndw = np.kaiser(width,6)
        wndw = signal.firwin(16, width*999, nyq=24000)
        # wndw /= np.sum(wndw)
        new_array = signal.fftconvolve(x, wndw, mode='same')

        return new_array

    # calculate mean absolute deviation #
    def mad(self, samples):
        ave = np.mean(samples)
        return np.mean(np.abs(samples - ave))

    # calculate rms for power #
    def rms(self, samples):
        meansq = np.mean(np.square(samples))
        return np.sqrt(meansq)

    def play(self, samples):
        self.stream.write(samples.astype(np.float32).tostring())

    # starting point
    def start(self):
        # Initiates running things
        self.streamer = MakeDaemon(self.demodulate_th)  # run demodulation in the 'background'
        self.streamer.start()
        self.count = 0
        self.sampler_t = threading.Thread(target=self.getSamplesAsync)  # sampler loop
        self.sampler_t.start()

    def createQtConnections(self):
        QtCore.QObject.connect(self.ui.freqSelect, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.setFreq)
        QtCore.QObject.connect(self.ui.checkBox, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.setUseStereo)
        QtCore.QObject.connect(self.ui.mainchannel, QtCore.SIGNAL(_fromUtf8("clicked(bool)")),
                               self.setDemodMain)
        QtCore.QObject.connect(self.ui.subband1, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.setDemodSub1)
        QtCore.QObject.connect(self.ui.subband2, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.setDemodSub2)
        QtCore.QObject.connect(self.ui.stereoWidthSlider, QtCore.SIGNAL(_fromUtf8("sliderMoved(int)")), self.setStereoWidth)
        QtCore.QObject.connect(self.ui.spectrum_overall, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.setSpectrumOverall)
        QtCore.QObject.connect(self.ui.spectrum_channel, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.setSpectrumChannel)
        QtCore.QObject.connect(self.ui.spectrum_playing, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.setSpectrumPlaying)
        QtCore.QObject.connect(self.ui.spectrum_waveform, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.setSpectrumWaveform)
        QtCore.QObject.connect(self.ui.demodFiltMedian, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.setDemodFiltMedian)
        QtCore.QObject.connect(self.ui.demodFiltLP, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.setDemodFiltLP)
        QtCore.QObject.connect(self.ui.demodFilterSize, QtCore.SIGNAL(_fromUtf8("sliderMoved(int)")), self.setDemodFiltSize)
        QtCore.QObject.connect(self.ui.audioFilterActive, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.setAudioFiltUse)
        QtCore.QObject.connect(self.ui.audioFilterSizeSlider, QtCore.SIGNAL(_fromUtf8("sliderMoved(int)")), self.setAudioFiltSize)
        QtCore.QObject.connect(self.ui.exitButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.terminate)
        QtCore.QObject.connect(self.ui.drawPlot, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.setDrawSpec)
        QtCore.QObject.connect(self.ui.waterfallButton, QtCore.SIGNAL(_fromUtf8('toggled(bool)')), self.setDrawWaterfalls)
        # QtCore.QObject.connect(self.ui.plotButton, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.setDrawPlot)

        self.bindPlot()

    def bindPlot(self):
        self.dpi = 100
        self.fig = Figure((4.31, 2.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.ui.plotFrame)

        self.initplot()

    def initplot(self):
        self.axes = self.fig.add_subplot(111, aspect=200/431)
        self.axes.xaxis.set_major_locator(ticker.NullLocator())
        self.axes.yaxis.set_major_locator(ticker.NullLocator())
        self.fig.tight_layout()
        # self.axes.invert_yaxis()
        # self.anim = animation.FuncAnimation(self.fig,self.drawCurSpectrum,interval=750)

    def replot(self, toPlot):
        self.axes.clear()
        self.axes.plot(toPlot[0], toPlot[1])
        self.axes.set_aspect('auto', anchor='C')
        self.canvas.draw()

    def setDrawSpec(self, s):
        self.toDraw = s

    def drawCurSpectrum(self, invert=False):
        # self.axes.clear()
        # self.axes.imshow(self.cur_spectrogram, cmap='spectral')
        # self.axes.xaxis.set_major_locator(ticker.NullLocator())
        # self.axes.yaxis.set_major_locator(ticker.NullLocator())
        # self.axes.set_aspect('auto',adjustable='box',anchor='NW')
        # self.canvas.draw()
        mx = np.max(self.cur_spectrogram)
        mn = np.min(self.cur_spectrogram)
        if invert:
            self.cur_spectrogram = -cv2.convertScaleAbs(self.cur_spectrogram, alpha=255 / (mx))
        else:
            self.cur_spectrogram = cv2.convertScaleAbs(self.cur_spectrogram, alpha=255 / (mn))
        self.cur_spectrogram = cv2.GaussianBlur(self.cur_spectrogram, (5, 5), .6)
        cmapped = cv2.applyColorMap(self.cur_spectrogram, cv2.COLORMAP_JET)
        cv2.imshow('Spectrogram', cmapped)
        cv2.waitKey(1)

    def drawSpectrum(self):
        self.axes.clear()
        self.axes.imshow(self.spectrogram, cmap='spectral')
        self.axes.xaxis.set_major_locator(ticker.NullLocator())
        self.axes.yaxis.set_major_locator(ticker.NullLocator())
        self.axes.set_aspect('auto', adjustable='box', anchor='NW')
        self.canvas.draw()

    def drawChspectrum(self):
        self.axes.clear()
        self.axes.imshow(self.chspectrogram, cmap='spectral')
        self.axes.xaxis.set_major_locator(ticker.NullLocator())
        self.axes.yaxis.set_major_locator(ticker.NullLocator())
        self.axes.set_aspect('auto', adjustable='box', anchor='NW')
        self.canvas.draw()

    def drawPlspectrum(self):
        self.axes.clear()
        self.axes.imshow(self.plspectrogram, cmap='spectral')
        self.axes.xaxis.set_major_locator(ticker.NullLocator())
        self.axes.yaxis.set_major_locator(ticker.NullLocator())
        self.axes.set_aspect('auto', adjustable='box', anchor='NW')
        self.canvas.draw()

    def setDrawPlots(self, s):
        self.toDrawPlots = s
        self.toDrawWaterfalls = not s

    def setDrawWaterfalls(self, s):
        self.toDrawWaterfalls = s
        self.toDrawPlots = not s

    def setFreq(self, freq):
        if freq % 2 == 0:
            freq += 1
        freq /= 10.0
        text = "%.1f MHz" % freq
        self.ui.curFreq.setText(text)
        self.center_freq = freq*1e6  # + 250e3

        setf_t = threading.Thread(target=self.setF_th, args=[self.center_freq, ])
        setf_t.start()
        setf_t.join()

    # This function is what is used to adjust the tuner on the RTL
    # Currently, it causes the program to crash if used after an unspecified period of inactivity
    # commented lines are attempts that didn't work
    def setF_th(self, f):
        while(self.is_sampling == True):
            pass
        # self.sdr.cancel_read_async()
        time.sleep(.1)
        self.sdr.center_freq = f
        # self.getSamplesAsync()

    def setUseStereo(self, u):
        self.useStereo = u

    def setStereoWidth(self, w):
        self.stereoWidth = w/5

    def setDemodMain(self, s):
        self.demodMain = s
        self.demodSub1 = not s
        self.demodSub2 = not s
        # self.useStereo = True

    def setDemodSub1(self, s):
        self.demodMain = not s
        self.demodSub1 = s
        self.demodSub2 = not s
        # self.useStereo = False

    def setDemodSub2(self, s):
        self.demodMain = not s
        self.demodSub1 = not s
        self.demodSub2 = s
        # self.useStereo = False

    def setSpectrumOverall(self, s):
        # self.initplot()
        # self.cur_spectrogram = self.spectrogram
        self.plotOverall = s
        self.plotChannel = not s
        self.plotPlaying = not s
        self.plotWaveform = not s

    def setSpectrumChannel(self, s):
        # self.initplot()
        self.plotChannel = s
        self.plotOverall = not s
        self.plotPlaying = not s
        self.plotWaveform = not s

    def setSpectrumPlaying(self, s):
        # self.initplot()
        self.plotPlaying = s
        self.plotChannel = not s
        self.plotOverall = not s
        self.plotWaveform = not s

    def setSpectrumWaveform(self, s):
        self.plotWaveform = s
        self.plotPlaying = not s
        self.plotChannel = not s
        self.plotOverall = not s

    def setDemodFiltMedian(self, s):
        self.useMedianFilt = s
        self.useLPFilt = not s

    def setDemodFiltLP(self, s):
        self.useLPFilt = s
        self.useMedianFilt = not s

    def setDemodFiltSize(self, s):
        # if(s % 2 == 0):
        #     s += 1
        self.demodFiltSize = s

    def setAudioFiltUse(self, s):
        self.useAudioFilter = s

    def setAudioFiltSize(self, s):
        self.audioFilterSize = s

    def terminate(self):
        self.__del__()

    # Destructor - also used to exit the program when the user clicks "Quit"
    def __del__(self):
        # Program will continue running in the background unless the RTL is told to stop sampling
        self.sdr.cancel_read_async()
        print "sdr closed"
        self.sdr.close()
        print "pyaudio terminated"
        self.pa.terminate()
        cv2.destroyAllWindows()
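
# --- Illustration (added; not part of the original source) ---
# A self-contained sketch of the polar-discriminator FM demodulation that
# FMRadio.demodulate() performs on RTL-SDR samples, applied here to a synthetic
# FM baseband signal so it can run without hardware.  The 1 kHz tone, 75 kHz
# deviation and one-second duration are arbitrary example values; only numpy
# and scipy are required (the imports duplicate this module's own).

def fm_polar_discriminator_sketch():
    import numpy as np
    from scipy import signal

    fs = 2.4e5                                     # complex baseband rate, as used by FMRadio
    t = np.arange(int(fs)) / fs                    # one second of samples
    tone = np.sin(2 * np.pi * 1000 * t)            # 1 kHz test tone
    phase = 2 * np.pi * 75e3 * np.cumsum(tone) / fs
    iq = np.exp(1j * phase)                        # synthetic wideband-FM baseband

    # polar discriminator: the angle of x[n] * conj(x[n-1]) is the per-sample
    # phase step, i.e. the instantaneous frequency, which carries the audio
    dphase = iq[1:] * np.conj(iq[:-1])
    demod = np.angle(dphase) / np.pi

    # decimate 2.4e5 -> 48e3 and de-emphasize, mirroring demodulate() above
    audio = demod[::int(fs / 48000)]
    b, a = signal.butter(1, 2122.0 / 48000 * 2 * np.pi, btype='low')
    audio = signal.lfilter(b, a, audio)

    peak_hz = np.argmax(np.abs(np.fft.rfft(audio))) * 48000.0 / audio.size
    print("recovered tone peak: %.1f Hz" % peak_hz)
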
class RtlReader(object):
    def __init__(self, **kwargs):
        super(RtlReader, self).__init__()
        self.signal_buffer = []

        self.sdr = RtlSdr()
        self.sdr.sample_rate = modes_sample_rate
        self.sdr.center_freq = modes_frequency
        self.sdr.gain = "auto"
        # sdr.freq_correction = 75

        self.debug = kwargs.get("debug", False)
        self.raw_pipe_in = None
        self.stop_flag = False

    def _process_buffer(self):
        messages = []

        # signal_array = np.array(self.signal_buffer)
        # pulses_array = np.where(np.array(self.signal_buffer) < th_amp, 0, 1)
        # pulses = "".join(str(x) for x in pulses_array)

        buffer_length = len(self.signal_buffer)

        i = 0
        while i < buffer_length:
            if self.signal_buffer[i] < th_amp:
                i += 1
                continue

            # if pulses[i : i + pbits * 2] == preamble:
            if self._check_preamble(self.signal_buffer[i : i + pbits * 2]):
                frame_start = i + pbits * 2
                frame_end = i + pbits * 2 + (fbits + 1) * 2
                frame_length = (fbits + 1) * 2
                frame_pulses = self.signal_buffer[frame_start:frame_end]

                msgbin = ""
                for j in range(0, frame_length, 2):
                    p2 = frame_pulses[j : j + 2]
                    if len(p2) < 2:
                        break

                    if p2[0] < th_amp and p2[1] < th_amp:
                        break
                    elif p2[0] >= p2[1]:
                        c = "1"
                    elif p2[0] < p2[1]:
                        c = "0"
                    else:
                        msgbin = ""
                        break

                    msgbin += c

                # advance i with a jump
                i = frame_start + j

                if len(msgbin) > 0:
                    msghex = pms.bin2hex(msgbin)
                    if self._check_msg(msghex):
                        messages.append([msghex, time.time()])

                    if self.debug:
                        self._debug_msg(msghex)

            elif i > buffer_length - 500:
                # save some for next process
                break
            else:
                i += 1

        # keep remainder of buffer for next iteration
        self.signal_buffer = self.signal_buffer[i:]

        return messages

    def _check_preamble(self, pulses):
        if len(pulses) != 16:
            return False

        for i in range(16):
            if abs(pulses[i] - preamble[i]) > th_amp_diff:
                return False

        return True

    def _check_msg(self, msg):
        df = pms.df(msg)
        msglen = len(msg)
        if df == 17 and msglen == 28:
            if pms.crc(msg) == 0:
                return True
        elif df in [20, 21] and msglen == 28:
            return True
        elif df in [4, 5, 11] and msglen == 14:
            return True

    def _debug_msg(self, msg):
        df = pms.df(msg)
        msglen = len(msg)
        if df == 17 and msglen == 28:
            print(msg, pms.icao(msg), pms.crc(msg))
        elif df in [20, 21] and msglen == 28:
            print(msg, pms.icao(msg))
        elif df in [4, 5, 11] and msglen == 14:
            print(msg, pms.icao(msg))
        else:
            # print("[*]", msg)
            pass

    def _read_callback(self, data, rtlsdr_obj):
        # scaling signal (important)
        amp = np.absolute(data)
        amp_norm = np.interp(amp, (amp.min(), amp.max()), (0, 1))
        self.signal_buffer.extend(amp_norm.tolist())

        if len(self.signal_buffer) >= buffer_size:
            messages = self._process_buffer()
            self.handle_messages(messages)

    def handle_messages(self, messages):
        """re-implement this method to handle the messages"""
        for msg, t in messages:
            # print("%15.9f %s" % (t, msg))
            pass

    def stop(self, *args, **kwargs):
        self.sdr.cancel_read_async()

    def run(self, raw_pipe_in=None, stop_flag=None):
        self.raw_pipe_in = raw_pipe_in
        self.stop_flag = stop_flag

        self.sdr.read_samples_async(self._read_callback, read_size)
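
# --- Usage sketch (added; not part of the original source) ---
# RtlReader.handle_messages() is intended to be overridden.  A minimal, hedged
# example: a subclass (the name PrintingRtlReader is illustrative) that prints
# the timestamp, hex frame and ICAO address of every verified message, run until
# interrupted.  It assumes the module-level constants referenced above
# (modes_sample_rate, modes_frequency, th_amp, th_amp_diff, preamble, pbits,
# fbits, buffer_size, read_size) are defined elsewhere in this file.

class PrintingRtlReader(RtlReader):
    def handle_messages(self, messages):
        for msg, t in messages:
            print("%15.9f  %s  icao=%s" % (t, msg, pms.icao(msg)))


if __name__ == "__main__":
    reader = PrintingRtlReader(debug=False)
    try:
        reader.run()        # blocks inside read_samples_async
    except KeyboardInterrupt:
        reader.stop()       # cancels the async read
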
class RadioReceiver:
    def __init__(self, center_freq=446.1e6, sample_rate=250e3, freq_correction=0,
                 buffer_size=2048, device_index=0):
        self._registeredQueueList = list()
        self._registeredQueueListLock = threading.Lock()

        self._sdr = RtlSdr(device_index)
        self._sdr.sample_rate = sample_rate
        self._sdr.center_freq = center_freq
        if self._sdr.freq_correction != freq_correction:
            self._sdr.freq_correction = freq_correction

        self._thread = threading.Thread(target=self._read_thread, args=(buffer_size, ))
        self._thread.start()

    @property
    def sample_rate(self):
        return self._sdr.sample_rate

    @sample_rate.setter
    def sample_rate(self, value):
        self._sdr.sample_rate = value

    @property
    def center_freq(self):
        return self._sdr.center_freq

    @center_freq.setter
    def center_freq(self, value):
        self._sdr.center_freq = value

    @property
    def freq_correction(self):
        # return the PPM correction itself, not the sample rate
        return self._sdr.freq_correction

    @freq_correction.setter
    def freq_correction(self, value):
        if self._sdr.freq_correction != value:
            self._sdr.freq_correction = value

    def registerQueue(self, q: queue.Queue):
        with self._registeredQueueListLock:
            self._registeredQueueList.append(q)

    def unregisterQueue(self, q: queue.Queue):
        with self._registeredQueueListLock:
            if q in self._registeredQueueList:
                self._registeredQueueList.remove(q)

    @staticmethod
    def _read_samples_callback(buffer, self):
        with self._registeredQueueListLock:
            for q in self._registeredQueueList:
                if q.full():
                    q.get_nowait()
                q.put_nowait(buffer)

    def _read_thread(self, buffer_size):
        try:
            self._sdr.read_samples_async(RadioReceiver._read_samples_callback, buffer_size, self)
        except IOError:
            # IOError is raised when read_async is canceled
            pass

    def close(self):
        self._sdr.cancel_read_async()
        self._thread.join()
        with self._registeredQueueListLock:
            for q in self._registeredQueueList:
                if q.full():
                    q.get_nowait()
                q.put_nowait(None)
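
# --- Usage sketch (added; not part of the original source) ---
# A hedged example of consuming IQ blocks from RadioReceiver: register a bounded
# queue (the callback drops the oldest buffer when a queue is full), read a few
# blocks, then shut down.  The queue size of 16 and the 446.0 MHz tuning are
# arbitrary example values; numpy is imported only for the power print-out.

if __name__ == "__main__":
    import numpy as np

    receiver = RadioReceiver(center_freq=446.0e6)
    q = queue.Queue(maxsize=16)
    receiver.registerQueue(q)

    for _ in range(10):
        block = q.get()                 # blocks until the reader thread delivers samples
        if block is None:               # close() pushes None as a shutdown sentinel
            break
        print("got %d samples, mean power %.3e" % (len(block), np.mean(np.abs(block) ** 2)))

    receiver.unregisterQueue(q)
    receiver.close()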