def main():
    pars = parse_command_line_arguments()

    # Initialize the AIR-T receiver, set sample rate, gain, and frequency
    sdr = SoapySDR.Device()
    sdr.setSampleRate(SOAPY_SDR_RX, pars.channel, pars.samp_rate)
    if pars.rx_gain.lower() == 'agc':  # Turn on AGC
        sdr.setGainMode(SOAPY_SDR_RX, pars.channel, True)
    else:  # Set manual gain
        sdr.setGain(SOAPY_SDR_RX, pars.channel, float(pars.rx_gain))
    sdr.setFrequency(SOAPY_SDR_RX, pars.channel, pars.freq)

    # Initialize the AIR-T transmitter, set sample rate, gain, and frequency
    sdr.setSampleRate(SOAPY_SDR_TX, pars.channel, pars.samp_rate)
    sdr.setGain(SOAPY_SDR_TX, pars.channel, float(pars.tx_gain))
    sdr.setFrequency(SOAPY_SDR_TX, pars.channel, pars.freq)

    # Create SDR shared memory buffer and detector
    buff = cusignal.get_shared_mem(pars.buff_len, dtype=cp.complex64)
    detr = PowerDetector(buff, pars.threshold)

    # Turn on the transmitter
    tx_stream = sdr.setupStream(SOAPY_SDR_TX, SOAPY_SDR_CF32, [pars.channel])
    sdr.activateStream(tx_stream)

    # Set up a thread pool executor to asynchronously execute transmit requests
    tx_executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)

    # Turn on the receiver
    rx_stream = sdr.setupStream(SOAPY_SDR_RX, SOAPY_SDR_CF32, [pars.channel])
    sdr.activateStream(rx_stream)

    # Start processing data
    print('Looking for signals to repeat. Press ctrl-c to exit.')
    while True:
        try:
            sr = sdr.readStream(rx_stream, [buff], pars.buff_len)  # Read data
            if sr.ret == SOAPY_SDR_OVERFLOW:  # Data was dropped
                print('O', end='', flush=True)
                continue
            detected_sig = detr.detect(buff)
            if detected_sig is not None:
                # AIR-T transmitter currently only accepts numpy arrays or lists
                tx_sig = cp.asnumpy(detected_sig)
                tx_executor.submit(tx_task_fn, sdr, tx_stream, tx_sig, pars.buff_len)
                detr.plot_envelope(buff)  # Plot the signal and envelope
        except KeyboardInterrupt:
            break
    sdr.closeStream(rx_stream)
    sdr.closeStream(tx_stream)
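# The transmit callback `tx_task_fn` submitted to the executor above is not
# defined in this excerpt. Below is a minimal, hypothetical sketch of such a
# task, assuming it simply replays the detected signal over the already
# activated TX stream in buff_len-sized chunks; it is not the original
# implementation.
def tx_task_fn(sdr, tx_stream, tx_sig, buff_len):
    for i in range(0, len(tx_sig), buff_len):
        chunk = tx_sig[i:i + buff_len]
        sr = sdr.writeStream(tx_stream, [chunk], chunk.size)
        if sr.ret != chunk.size:  # Report a transmit error / underflow
            print('T', end='', flush=True)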
def main():
    pars = parse_command_line_arguments()

    # Initialize the AIR-T receiver, set sample rate, gain, and frequency
    sdr = Device()
    sdr.setSampleRate(SOAPY_SDR_RX, pars.channel, pars.samp_rate)
    if pars.gain == 'agc':
        sdr.setGainMode(SOAPY_SDR_RX, pars.channel, True)  # Set AGC
    else:
        sdr.setGain(SOAPY_SDR_RX, pars.channel, float(pars.gain))  # Set manual gain
    sdr.setFrequency(SOAPY_SDR_RX, pars.channel, pars.freq)

    # Create SDR shared memory buffer, detector, file writer, and plotter (if desired)
    buff = cusignal.get_shared_mem(pars.buff_len, dtype=cp.complex64)
    detr = PowerDetector(buff, pars.seg_len, pars.dec, pars.threshold)
    writer = PowerDetectorWriter(pars.output_path, pars.label, pars.num_files)
    if pars.visualization:
        plotter = PowerDetectorPlot(pars.buff_len, pars.dec, pars.samp_rate,
                                    pars.seg_len, pars.threshold)

    # Turn on radio
    rx_stream = sdr.setupStream(SOAPY_SDR_RX, SOAPY_SDR_CF32, [pars.channel])
    sdr.activateStream(rx_stream)

    print('Looking for signals to record. Press ctrl-c to exit.')
    while True:  # Start processing data
        try:
            sr = sdr.readStream(rx_stream, [buff], pars.buff_len)  # Read data to buffer
            if sr.ret == SOAPY_SDR_OVERFLOW:  # Data was dropped, i.e., overflow
                print('O', end='', flush=True)
            else:
                det_signal = detr.detect(buff)
                writer.tofile(det_signal)
                if pars.visualization:  # Display the detected data if desired
                    plotter.update(cp.asnumpy(buff), detr.det_index, detr.amp_sq)
        except KeyboardInterrupt:
            sdr.deactivateStream(rx_stream)
            sdr.closeStream(rx_stream)
            break
def __init__(self, sfs, afs, mult, cuda):
    super(AnalogTest, self).__init__()

    self.sfs = int(sfs)
    self.afs = int(afs)
    self.tau = 75e-6
    self.mult = int(mult)
    self.cuda = cuda

    self.number = 500
    self.sdr_buff = 1024
    self.dsp_buff = self.sdr_buff * self.mult

    self.wbfm = WBFM(self.tau, self.sfs, self.afs, cuda)
    self.mfm = MFM(self.tau, self.sfs, self.afs, cuda)

    if self.cuda:
        import cusignal as sig
        self.buff = sig.get_shared_mem(self.dsp_buff, dtype=np.complex64)
    else:
        self.buff = np.zeros([self.dsp_buff], dtype=np.complex64)
def process(self, inputs):
    infile = self.conf.get('wavefile')
    nsecs = self.conf.get('duration', 1)
    with wave.open(infile) as wf:
        wparams = wf.getparams()
        # buf = wf.readframes(nframes)
        # int2float = (2**15 - 1)
        # wdata = np.frombuffer(buf, dtype=np.int16)
        # wdata_float = wdata.astype(np.float64)/int2float
        # iq_data = wdata_float.view(dtype=np.complex128)
        nframes = min(int(wparams.framerate * nsecs), wparams.nframes)

    if sf is None:
        data = wave_reader(infile, nframes)
        framerate = wparams.framerate
    else:
        data, framerate = sf.read(infile, frames=nframes)

    # IQ data
    cpu_signal = data.view(dtype=np.complex128).reshape(nframes)
    if self.conf.get('use_cpu', False):
        out = {'signal': cpu_signal}
    else:
        # Create mapped, pinned memory for zero copy between CPU and GPU
        gpu_signal_buf = cusignal.get_shared_mem(nframes, dtype=np.complex128)
        gpu_signal_buf[:] = cpu_signal
        # Zero-copy conversion from Numba CUDA array to CuPy array
        gpu_signal = cp.asarray(gpu_signal_buf)
        out = {'signal': gpu_signal}
    out['framerate'] = float(framerate)
    return out
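# `wave_reader` is the fallback used above when the soundfile module (`sf`) is
# unavailable; it is not included in this excerpt. A minimal sketch following
# the conversion hinted at in the commented-out lines (int16 interleaved I/Q
# frames scaled to floats); assumed behavior, not the original helper.
def wave_reader(infile, nframes):
    import wave
    import numpy as np
    with wave.open(infile) as wf:
        buf = wf.readframes(nframes)
    int2float = 2**15 - 1
    wdata = np.frombuffer(buf, dtype=np.int16)
    return wdata.astype(np.float64) / int2float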
import cupy
import SoapySDR
import cusignal as signal
import polyphase_plot

buffer_size = 2**19  # Number of complex samples per transfer
t_test = 20          # Test time in seconds
freq = 1350e6        # Tune frequency in Hz
fs = 62.5e6          # Sample rate

# Create polyphase filter
fc = 1. / max(16, 25)  # Cutoff of FIR filter (relative to Nyquist)
nc = 10 * max(16, 25)  # Reasonable cutoff for our sinc-like function
win = signal.fir_filter_design.firwin(2 * nc + 1, fc, window=('kaiser', 0.5))
win = cupy.asarray(win, dtype=cupy.float32)

# Init buffer and polyphase filter
buff = signal.get_shared_mem(buffer_size, dtype=cupy.complex64)
s = signal.resample_poly(buff, 16, 25, window=win, use_numba=False)

# Initialize the AIR-T receiver using SoapyAIRT
sdr = SoapySDR.Device(dict(driver="SoapyAIRT"))   # Create AIR-T instance
sdr.setSampleRate(SoapySDR.SOAPY_SDR_RX, 1, fs)   # Set sample rate
sdr.setGainMode(SoapySDR.SOAPY_SDR_RX, 1, True)   # Set the gain mode
sdr.setFrequency(SoapySDR.SOAPY_SDR_RX, 1, freq)  # Tune the frequency
rx_stream = sdr.setupStream(SoapySDR.SOAPY_SDR_RX, SoapySDR.SOAPY_SDR_CF32, [1])
sdr.activateStream(rx_stream)

# Run test
n_reads = int(t_test * fs / buffer_size) + 1
drop_count = 0
for _ in range(n_reads):
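    # The loop body is not included in this excerpt. A plausible sketch
    # (assumption, not the original code): read into the zero-copy buffer,
    # count overflows, and run the GPU polyphase resampler on each transfer.
    sr = sdr.readStream(rx_stream, [buff], buffer_size)
    if sr.ret == SoapySDR.SOAPY_SDR_OVERFLOW:  # Samples were dropped
        drop_count += 1
        continue
    s = signal.resample_poly(buff, 16, 25, window=win, use_numba=False)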
""" Naive vs Precompiling CUDA kernels benchmark for cuSignal""" import time import cupy as cp import numpy as np import cusignal buff_len = 2**19 n_test = 1000 # Create noise signal to simulate received data noise = np.random.randn(buff_len) + 1j * np.random.randn(buff_len) noise = noise.astype(np.complex64) # Create shared memory buffer buff = cusignal.get_shared_mem(buff_len, dtype=np.complex64) buff[:] = noise # Run benchmark cases # No precompile of kernel ti = time.monotonic() for _ in range(n_test): buff[:] = noise sig_power1 = cp.power(cp.abs(buff), 2) rate_msps = buff_len * n_test / (time.monotonic() - ti) / 1e6 print('Method 1: Data Rate = {:1.2f} MSPS'.format(rate_msps)) # Execute before loop to precompile kernel sig_power2 = cp.power(cp.abs(buff), 2) ti = time.monotonic()
def run_gpu_spectrum_int(num_samp, nbins, gain, rate, fc, t_int):
    '''
    Inputs:
        num_samp: Number of elements to sample from the SDR IQ per call;
                  use powers of 2
        nbins:    Number of frequency bins in the resulting power spectrum;
                  powers of 2 are most efficient, and smaller numbers are
                  faster on CPU.
        gain:     Requested SDR gain (dB)
        rate:     SDR sample rate, intrinsically tied to bandwidth in SDRs (Hz)
        fc:       Base center frequency (Hz)
        t_int:    Total effective integration time (s)

    Returns:
        freqs:       Frequencies of the resulting spectrum, centered at fc (Hz),
                     numpy array
        p_avg_db_hz: Power spectral density (dB/Hz), numpy array
    '''
    import cupy as cp
    import cusignal

    # Force a choice of window to allow converting to PSD after averaging
    # power spectra
    WINDOW = 'hann'

    # Force a default nperseg for welch() because we need to get a window
    # of this size later. Use the scipy default 256, but enforce scipy
    # conditions on nbins vs. nperseg when nbins gets small.
    if nbins < 256:
        nperseg = nbins
    else:
        nperseg = 256

    print('Initializing rtl-sdr with pyrtlsdr:')
    sdr = RtlSdr()

    try:
        sdr.rs = rate  # Rate of sampling (intrinsically tied to bandwidth with SDR dongles)
        sdr.fc = fc
        sdr.gain = gain
        print('  sample rate: %0.6f MHz' % (sdr.rs / 1e6))
        print('  center frequency %0.6f MHz' % (sdr.fc / 1e6))
        print('  gain: %d dB' % sdr.gain)
        print('  num samples per call: {}'.format(num_samp))
        print('  PSD binning: {} bins'.format(nbins))
        print('  requested integration time: {}s'.format(t_int))

        N = int(sdr.rs * t_int)
        num_loops = int(N / num_samp) + 1
        print('  => num samples to collect: {}'.format(N))
        print('  => est. num of calls: {}'.format(num_loops - 1))

        # Set up arrays to store power spectrum calculated from I-Q samples
        freqs = cp.zeros(nbins)
        p_xx_tot = cp.zeros(nbins, dtype=complex)

        # Create mapped, pinned memory for zero copy between CPU and GPU
        gpu_iq = cusignal.get_shared_mem(num_samp, dtype=np.complex128)

        cnt = 0

        # Set the baseline time
        start_time = time.time()
        print('Integration began at {}'.format(
            time.strftime('%a, %d %b %Y %H:%M:%S', time.localtime(start_time))))

        # Time integration loop
        for cnt in range(num_loops):
            # Move USB-collected samples off CPU and onto GPU for calc
            gpu_iq[:] = sdr.read_samples(num_samp)
            freqs, p_xx = cusignal.welch(gpu_iq, fs=rate, nperseg=nperseg,
                                         nfft=nbins, noverlap=0,
                                         scaling='spectrum', window=WINDOW,
                                         detrend=False, return_onesided=False)
            p_xx_tot += p_xx

        end_time = time.time()
        print('Integration ended at {} after {} seconds.'.format(
            time.strftime('%a, %d %b %Y %H:%M:%S'), end_time - start_time))
        print('{} spectra were measured at {}.'.format(cnt, fc))
        print('for an effective integration time of {:.2f}s'.format(
            num_samp * cnt / rate))

        half_len = len(freqs) // 2

        # Swap frequencies:
        tmp_first = freqs[:half_len].copy()
        tmp_last = freqs[half_len:].copy()
        freqs[:half_len] = tmp_last
        freqs[half_len:] = tmp_first

        # Swap powers:
        tmp_first = p_xx_tot[:half_len].copy()
        tmp_last = p_xx_tot[half_len:].copy()
        p_xx_tot[:half_len] = tmp_last
        p_xx_tot[half_len:] = tmp_first

        # Compute the average power spectrum based on the number of spectra read
        p_avg = p_xx_tot / cnt

        # Convert to power spectral density
        # See the scipy docs for _spectral_helper().
        win = get_window(WINDOW, nperseg)
        p_avg_hz = p_avg * ((win.sum()**2) / (win * win).sum()) / rate
        p_avg_db_hz = 10. * cp.log10(p_avg_hz)

        # Shift frequency spectra back to the intended range
        freqs = freqs + fc

        # Nice and tidy
        sdr.close()
    except OSError as err:
        print("OS error: {0}".format(err))
        raise err
    except:
        print('Unexpected error:', sys.exc_info()[0])
        raise
    finally:
        sdr.close()

    return cp.asnumpy(freqs), cp.asnumpy(p_avg_db_hz)
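# Note: for the even-length arrays used here (nbins is typically a power of 2),
# the manual half-swaps of freqs and p_xx_tot above are equivalent to an FFT
# shift. A minimal self-contained check with CuPy:
import cupy as cp

x = cp.arange(8)
half = len(x) // 2
swapped = cp.concatenate((x[half:], x[:half]))  # Second half moved to the front
assert bool(cp.all(swapped == cp.fft.fftshift(x)))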
def __init__(self, run_time=1, bandwidth=2.4e6, frequency=1.4204e9,
             num_samp=2**18, nbins=2**12, gain=49.6, mode='SPECTRUM',
             loglevel='INFO'):
    # -------------------------------------------------------------------------
    # LOGGING
    # -------------------------------------------------------------------------
    _level = getattr(logging, loglevel)
    # Set up our logger:
    self.logger = logging.getLogger(__name__)
    self.logger.setLevel(_level)
    _fh = logging.FileHandler('log_effex.log')
    _fh.setLevel(_level)
    _ch = logging.StreamHandler()
    _ch.setLevel(_level)
    # Create formatter and add it to the handlers
    _formatter = logging.Formatter(
        '{asctime} - {name} - {levelname:<8} - {message}', style='{')
    _fh.setFormatter(_formatter)
    _ch.setFormatter(_formatter)
    # Add the handlers to the logger
    self.logger.addHandler(_fh)
    self.logger.addHandler(_ch)

    # Threadsafe queue for child threads to report exceptions
    self.exc_queue = multiprocessing.Queue()

    # -------------------------------------------------------------------------
    # SDR INIT
    # -------------------------------------------------------------------------
    # Dithering depends on evanmayer's fork of roger-'s pyrtlsdr and
    # keenerd's experimental fork of librtlsdr
    self.sdr0 = RtlSdr(device_index=0, dithering_enabled=False)
    self.sdr1 = RtlSdr(device_index=1, dithering_enabled=False)
    self.run_time = run_time
    self.bandwidth = bandwidth
    self.frequency = frequency
    self.num_samp = num_samp
    self.nbins = nbins
    self.gain = gain

    # -------------------------------------------------------------------------
    # STATE MACHINE INIT
    # -------------------------------------------------------------------------
    self._state = 'OFF'
    self.mode = mode
    assert (self._state in self._states), \
        f'State {self._state} not in allowed states {self._states}.'
    self.start_time = -1

    # -------------------------------------------------------------------------
    # CPU & GPU MEMORY SETUP
    # -------------------------------------------------------------------------
    # Store sample chunks in 2 queues
    self.buf0 = multiprocessing.Queue(Correlator._BUFFER_SIZE)
    self.buf1 = multiprocessing.Queue(Correlator._BUFFER_SIZE)

    # Create mapped, pinned memory for zero copy between CPU and GPU
    self.gpu_iq_0 = cusignal.get_shared_mem(self.num_samp, dtype=np.complex128)
    self.gpu_iq_1 = cusignal.get_shared_mem(self.num_samp, dtype=np.complex128)

    # -------------------------------------------------------------------------
    # SPECTROMETER SETUP
    # -------------------------------------------------------------------------
    self.ntaps = 4  # Number of taps in PFB
    # Constraint: input timeseries only affords us ntaps * n_int ffts
    # of length nbins in our PFB.
    n_int = len(self.gpu_iq_0) // self.ntaps // self.nbins
    assert (n_int >= 1), \
        'Assertion failed: there must be at least 1 window of ' \
        + 'length n_branches*ntaps in each input timeseries.\n' \
        + 'timeseries len: {}\n'.format(len(self.gpu_iq_0)) \
        + 'n_branches: {}\n'.format(self.nbins) \
        + 'ntaps: {}\n'.format(self.ntaps) \
        + 'n_branches*ntaps: {}'.format(self.nbins * self.ntaps)

    # Create window coefficients for spectrometer
    self.window = (cusignal.get_window("hamming", self.ntaps * nbins)
                   * cusignal.firwin(self.ntaps * self.nbins,
                                     cutoff=1.0 / self.nbins,
                                     window='rectangular'))

    # -------------------------------------------------------------------------
    # SCIENCE DATA
    # -------------------------------------------------------------------------
    self.calibrated_delay = 0  # seconds

    # Store off cross-correlated chunks of IQ samples
    self.vis_out = multiprocessing.Queue()
    # A file to archive data
    self.output_file = time.strftime('visibilities_%Y%m%d-%H%M%S') + '.csv'

    # -------------------------------------------------------------------------
    # USER INPUT
    # -------------------------------------------------------------------------
    self.kbd_queue = multiprocessing.Queue(1)

    # -------------------------------------------------------------------------
    # TEST MODE PARAMS
    # -------------------------------------------------------------------------
    # In test mode, the delay between the channels is calibrated out, and
    # then an artificial sweep in delay-space begins.
    # The goal is to reproduce the fringe pattern of an interferometer:
    # a sinusoid of period 1/fc modulated by something resembling a
    # sinc function, having first nulls at ~+/-1/bandwidth.
    crit_delay = 1 / self.frequency
    # Step through delay space with a balance between sampling fidelity and
    # sweep speed:
    self.test_delay_sweep_step = crit_delay / 10
    self.test_delay_offset = self.test_delay_sweep_step * 200
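# The spectrometer that consumes self.window is not part of this excerpt. A
# hypothetical sketch of how such polyphase filter bank coefficients are
# commonly applied (an assumption, not this project's actual implementation):
# reshape the timeseries into (n_int, ntaps, nbins) blocks, weight by the
# window, sum across taps, and FFT each weighted block.
def pfb_spectra_sketch(iq, window, ntaps, nbins):
    import cupy as cp
    n_int = len(iq) // ntaps // nbins                # Number of output spectra
    x = cp.asarray(iq[:n_int * ntaps * nbins]).reshape(n_int, ntaps, nbins)
    w = cp.asarray(window).reshape(ntaps, nbins)     # Window as taps x branches
    return cp.fft.fft((x * w).sum(axis=1), axis=-1)  # One spectrum per block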