def __init__(self, n_chans, n_filterbanks=1, taps=None, outchans=None,
             atten=100, bw=1.0, tb=0.2, ripple=0.1):
    """
    Polyphase filterbank channelizer hier block (complex in, complex out).

    Splits the input stream into n_chans equally spaced channels, filtering
    with a polyphase decomposition of a prototype lowpass filter and a final
    FFT.  The channels may be processed in n_filterbanks parallel filterbanks.

    Args:
        n_chans: number of channels to split the input into.
        n_filterbanks: number of parallel filterbanks (clamped to n_chans).
        taps: prototype lowpass taps; designed with optfir.low_pass if None.
        outchans: iterable of channel indices to output; all channels if None.
        atten: stopband attenuation (dB) used when designing taps.
        bw: passband edge used when designing taps.
        tb: transition bandwidth used when designing taps.
        ripple: passband ripple used when designing taps.
    """
    if n_filterbanks > n_chans:
        n_filterbanks = n_chans
    if outchans is None:
        # Materialize as a list: under Python 3 a bare range object never
        # compares equal to a caller-supplied list, which made the
        # "outchans != range(n_chans)" checks below misfire.
        outchans = list(range(n_chans))
    gr.hier_block2.__init__(
        self, "pfb_channelizer_hier_ccf",
        gr.io_signature(1, 1, gr.sizeof_gr_complex),
        gr.io_signature(len(outchans), len(outchans), gr.sizeof_gr_complex))
    if taps is None:
        taps = optfir.low_pass(1, n_chans, bw, bw + tb, ripple, atten)
    taps = list(taps)
    # Pad the taps so their count is an exact multiple of n_chans.
    extra_taps = int(math.ceil(1.0 * len(taps) / n_chans) * n_chans - len(taps))
    taps = taps + [0] * extra_taps
    # Make taps for each channel (polyphase decomposition; each branch is
    # reversed for convolution order).
    chantaps = [list(reversed(taps[i: len(taps): n_chans]))
                for i in range(0, n_chans)]
    # Convert the input stream into a stream of vectors.
    self.s2v = blocks.stream_to_vector(gr.sizeof_gr_complex, n_chans)
    # Create a mapping to separate out each filterbank (a group of channels
    # to be processed together) and a list of sets of taps for each
    # filterbank.
    low_cpp = int(n_chans / n_filterbanks)
    extra = n_chans - low_cpp * n_filterbanks
    # 'extra' filterbanks get one extra channel so all channels are covered.
    cpps = [low_cpp + 1] * extra + [low_cpp] * (n_filterbanks - extra)
    splitter_mapping = []
    filterbanktaps = []
    total = 0
    for cpp in cpps:
        splitter_mapping.append([(0, i) for i in range(total, total + cpp)])
        filterbanktaps.append(chantaps[total: total + cpp])
        total += cpp
    assert(total == n_chans)
    # Split the stream of vectors in n_filterbanks streams of vectors.
    self.splitter = blocks.vector_map(gr.sizeof_gr_complex, [n_chans],
                                      splitter_mapping)
    # Create the filterbanks.
    self.fbs = [filter.filterbank_vcvcf(taps) for taps in filterbanktaps]
    # Combine the streams of vectors back into a single stream of vectors.
    combiner_mapping = [[]]
    for i, cpp in enumerate(cpps):
        for j in range(cpp):
            combiner_mapping[0].append((i, j))
    self.combiner = blocks.vector_map(gr.sizeof_gr_complex, cpps,
                                      combiner_mapping)
    # Add the final FFT to the channelizer.
    self.fft = fft.fft_vcc(n_chans, forward=True, window=[1.0] * n_chans)
    # Select the desired channels (skipped when all channels are wanted).
    if outchans != list(range(n_chans)):
        selector_mapping = [[(0, i) for i in outchans]]
        self.selector = blocks.vector_map(gr.sizeof_gr_complex, [n_chans],
                                          selector_mapping)
    # Convert stream of vectors to a normal stream.
    self.v2ss = blocks.vector_to_streams(gr.sizeof_gr_complex, len(outchans))
    self.connect(self, self.s2v, self.splitter)
    for i in range(0, n_filterbanks):
        self.connect((self.splitter, i), self.fbs[i], (self.combiner, i))
    self.connect(self.combiner, self.fft)
    if outchans != list(range(n_chans)):
        self.connect(self.fft, self.selector, self.v2ss)
    else:
        self.connect(self.fft, self.v2ss)
    for i in range(0, len(outchans)):
        self.connect((self.v2ss, i), (self, i))
def __init__(self, n_chans, n_filterbanks=1, taps=None, outchans=None,
             atten=100, bw=1.0, tb=0.2, ripple=0.1):
    """
    Polyphase filterbank channelizer hier block (complex in, complex out).

    Splits the input stream into n_chans equally spaced channels using a
    polyphase decomposition of a prototype lowpass filter followed by an
    FFT.  The channels may be processed in n_filterbanks parallel
    filterbanks.

    Args:
        n_chans: number of channels to split the input into.
        n_filterbanks: number of parallel filterbanks (clamped to n_chans).
        taps: prototype lowpass taps; designed with optfir.low_pass if None.
        outchans: channel indices to output; all channels if None.
        atten: stopband attenuation (dB) used when designing taps.
        bw: passband edge used when designing taps.
        tb: transition bandwidth used when designing taps.
        ripple: passband ripple used when designing taps.
    """
    if n_filterbanks > n_chans:
        n_filterbanks = n_chans
    if outchans is None:
        # list() so the later "outchans != list(range(n_chans))" comparisons
        # work with caller-supplied lists under Python 3.
        outchans = list(range(n_chans))
    gr.hier_block2.__init__(
        self, "pfb_channelizer_hier_ccf",
        gr.io_signature(1, 1, gr.sizeof_gr_complex),
        gr.io_signature(len(outchans), len(outchans), gr.sizeof_gr_complex))
    if taps is None:
        taps = optfir.low_pass(1, n_chans, bw, bw+tb, ripple, atten)
    taps = list(taps)
    # Pad the taps so their count is an exact multiple of n_chans.
    extra_taps = int(math.ceil(1.0*len(taps)/n_chans)*n_chans - len(taps))
    taps = taps + [0] * extra_taps
    # Make taps for each channel (polyphase decomposition; each branch is
    # reversed for convolution order).
    chantaps = [list(reversed(taps[i: len(taps): n_chans]))
                for i in range(0, n_chans)]
    # Convert the input stream into a stream of vectors.
    self.s2v = blocks.stream_to_vector(gr.sizeof_gr_complex, n_chans)
    # Create a mapping to separate out each filterbank (a group of channels
    # to be processed together)
    # And a list of sets of taps for each filterbank.
    low_cpp = int(n_chans / n_filterbanks)
    extra = n_chans - low_cpp*n_filterbanks
    # The first 'extra' filterbanks carry one extra channel each so every
    # channel is assigned to exactly one filterbank.
    cpps = [low_cpp+1]*extra + [low_cpp]*(n_filterbanks-extra)
    splitter_mapping = []
    filterbanktaps = []
    total = 0
    for cpp in cpps:
        splitter_mapping.append([(0, i) for i in range(total, total+cpp)])
        filterbanktaps.append(chantaps[total: total+cpp])
        total += cpp
    assert(total == n_chans)
    # Split the stream of vectors in n_filterbanks streams of vectors.
    self.splitter = blocks.vector_map(gr.sizeof_gr_complex, [n_chans],
                                      splitter_mapping)
    # Create the filterbanks
    self.fbs = [filter.filterbank_vcvcf(taps) for taps in filterbanktaps]
    # Combine the streams of vectors back into a single stream of vectors.
    combiner_mapping = [[]]
    for i, cpp in enumerate(cpps):
        for j in range(cpp):
            combiner_mapping[0].append((i, j))
    self.combiner = blocks.vector_map(gr.sizeof_gr_complex, cpps,
                                      combiner_mapping)
    # Add the final FFT to the channelizer.
    self.fft = fft.fft_vcc(n_chans, forward=True, window=[1.0]*n_chans)
    # Select the desired channels (skipped when all channels are wanted).
    if outchans != list(range(n_chans)):
        selector_mapping = [[(0, i) for i in outchans]]
        self.selector = blocks.vector_map(gr.sizeof_gr_complex, [n_chans],
                                          selector_mapping)
    # Convert stream of vectors to a normal stream.
    self.v2ss = blocks.vector_to_streams(gr.sizeof_gr_complex, len(outchans))
    self.connect(self, self.s2v, self.splitter)
    for i in range(0, n_filterbanks):
        self.connect((self.splitter, i), self.fbs[i], (self.combiner, i))
    self.connect(self.combiner, self.fft)
    if outchans != list(range(n_chans)):
        self.connect(self.fft, self.selector, self.v2ss)
    else:
        self.connect(self.fft, self.v2ss)
    for i in range(0, len(outchans)):
        self.connect((self.v2ss, i), (self, i))
def test_interleaving(self):
    """Check vector_map interleaving of three float streams into two outputs.

    Takes 3 streams (a, b and c) and outputs 2 streams:
      - First (d) is interleaving of a and b.
      - Second (e) is interleaving of a and b and c, where c is taken in
        chunks of 2 which are reversed.
    """
    A = (1, 2, 3, 4, 5)
    B = (11, 12, 13, 14, 15)
    C = (99, 98, 97, 96, 95, 94, 93, 92, 91, 90)
    expected_D = (1, 11, 2, 12, 3, 13, 4, 14, 5, 15)
    expected_E = (1, 11, 98, 99, 2, 12, 96, 97, 3, 13, 94, 95,
                  4, 14, 92, 93, 5, 15, 90, 91)
    mapping = [[(0, 0), (1, 0)],                  # mapping to produce D
               [(0, 0), (1, 0), (2, 1), (2, 0)],  # mapping to produce E
               ]
    srcA = blocks.vector_source_f(A, False, 1)
    srcB = blocks.vector_source_f(B, False, 1)
    srcC = blocks.vector_source_f(C, False, 2)
    # The flowgraph carries floats, so use sizeof_float as the itemsize.
    # (The original sizeof_int only worked because both are 4 bytes.)
    vmap = blocks.vector_map(gr.sizeof_float, (1, 1, 2), mapping)
    dstD = blocks.vector_sink_f(2)
    dstE = blocks.vector_sink_f(4)
    self.tb.connect(srcA, (vmap, 0))
    self.tb.connect(srcB, (vmap, 1))
    self.tb.connect(srcC, (vmap, 2))
    self.tb.connect((vmap, 0), dstD)
    self.tb.connect((vmap, 1), dstE)
    self.tb.run()
    self.assertEqual(expected_D, dstD.data())
    self.assertEqual(expected_E, dstE.data())
def test_reversing(self):
    """Chunk data in blocks of N and reverse each block's contents."""
    N = 5
    # Materialize as a list of floats: under Python 3 a bare range is a
    # lazy object of ints, not the concrete float sequence the
    # vector_source_f binding expects.
    src_data = [float(x) for x in range(0, 20)]
    # Each block of N inputs appears reversed in the output.
    expected_result = []
    for i in range(N - 1, len(src_data), N):
        for j in range(0, N):
            expected_result.append(1.0 * (i - j))
    mapping = [list(reversed([(0, i) for i in range(0, N)]))]
    src = blocks.vector_source_f(src_data, False, N)
    vmap = blocks.vector_map(gr.sizeof_float, (N, ), mapping)
    dst = blocks.vector_sink_f(N)
    self.tb.connect(src, vmap, dst)
    self.tb.run()
    result_data = list(dst.data())
    self.assertEqual(expected_result, result_data)
def test_vector_to_streams(self):
    """Split an input vector into N single-item streams via vector_map."""
    N = 5
    M = 20
    # Materialize as a list of floats for the Python 3 bindings.
    src_data = [float(x) for x in range(0, M)]
    expected_results = []
    for n in range(0, N):
        # list(...) is required: under Python 3 assertEqual(range, list)
        # always fails because a range object never equals a list.
        expected_results.append(list(range(n, M, N)))
    mapping = [[(0, n)] for n in range(0, N)]
    src = blocks.vector_source_f(src_data, False, N)
    vmap = blocks.vector_map(gr.sizeof_float, (N, ), mapping)
    dsts = [blocks.vector_sink_f(1) for n in range(0, N)]
    self.tb.connect(src, vmap)
    for n in range(0, N):
        self.tb.connect((vmap, n), dsts[n])
    self.tb.run()
    for n in range(0, N):
        result_data = list(dsts[n].data())
        self.assertEqual(expected_results[n], result_data)