def test_003(self):
    """Over-long puncture pattern with delay equals the truncated pattern.

    A pattern with more bits than ``puncsize`` (0x5555 over puncsize 4)
    must behave identically to the pattern holding only the low
    ``puncsize`` bits (0x55 -> 0x5), even when a delay is applied.  The
    Python reference model doesn't handle the over-long case, so the two
    flowgraph outputs are compared against each other instead.
    """
    self.puncsize = 4
    self.puncpat0 = 0x5555  # more bits set than puncsize; block should truncate
    self.puncpat1 = 0x55    # num bits = puncsize; reference pattern
    self.delay = 1

    src = blocks.vector_source_b(self.src_data)
    op0 = fec.puncture_bb(self.puncsize, self.puncpat0, self.delay)
    op1 = fec.puncture_bb(self.puncsize, self.puncpat1, self.delay)
    dst0 = blocks.vector_sink_b()
    dst1 = blocks.vector_sink_b()

    self.tb.connect(src, op0, dst0)
    self.tb.connect(src, op1, dst1)
    self.tb.run()

    # Normalize sink output to plain ints.  The original in-place loop
    # used xrange(), which is Python-2-only and raises NameError on
    # Python 3; a comprehension is both portable and idiomatic.
    dst_data0 = [int(b) for b in dst0.data()]
    dst_data1 = [int(b) for b in dst1.data()]

    self.assertEqual(dst_data1, dst_data0)
def __init__(self, encoder_obj_list, puncpat=None, lentagname=None, mtu=1500):
    """Hierarchical block chaining an FEC encoder with optional puncturing.

    Args:
        encoder_obj_list: a single encoder object or a list containing
            exactly one encoder (parallelism > 1 is not supported).
        puncpat: puncture pattern as a bit-list string (e.g. '110');
            None or '11' disables puncturing.
        lentagname: length-tag key for the tagged-stream encoder; None
            (or the string 'none') selects the plain stream encoder.
        mtu: maximum transmission unit forwarded to fec.tagged_encoder.

    Raises:
        AttributeError: if encoder_obj_list is a list of lists
            (parallelism > 1).
    """
    gr.hier_block2.__init__(self, "extended_tagged_encoder",
                            gr.io_signature(1, 1, gr.sizeof_char),
                            gr.io_signature(1, 1, gr.sizeof_char))

    self.blocks = []
    self.puncpat = puncpat

    # If it's a list of encoders, take the first one, unless it's
    # a list of lists of encoders.
    if isinstance(encoder_obj_list, list):
        # This block doesn't handle parallelism of > 1.
        # We could just grab encoder [0][0], but we don't want to
        # encourage this.
        if isinstance(encoder_obj_list[0], list):
            gr.log.info(
                "fec.extended_tagged_encoder: Parallelism must be 0 or 1.")
            raise AttributeError
        encoder_obj = encoder_obj_list[0]
    # Otherwise, just take it as is
    else:
        encoder_obj = encoder_obj_list

    # If lentagname is the string 'none', fall back to using the
    # non-tagged stream version.
    if isinstance(lentagname, str):
        if lentagname.lower() == 'none':
            lentagname = None

    if fec.get_encoder_input_conversion(encoder_obj) == "pack":
        self.blocks.append(blocks.pack_k_bits_bb(8))

    if not lentagname:
        self.blocks.append(
            fec.encoder(encoder_obj, gr.sizeof_char, gr.sizeof_char))
    else:
        self.blocks.append(
            fec.tagged_encoder(encoder_obj, gr.sizeof_char,
                               gr.sizeof_char, lentagname, mtu))

    # Fix: with the default puncpat=None the original test
    # (self.puncpat != '11') was True and len(None) raised TypeError.
    # Only add the puncture block for a real, non-trivial pattern.
    if self.puncpat is not None and self.puncpat != '11':
        self.blocks.append(
            fec.puncture_bb(len(puncpat), read_bitlist(puncpat), 0))

    # Connect the input to the first block and the output from the last
    # block (the puncture block if used, otherwise the encoder).
    self.connect((self, 0), (self.blocks[0], 0))
    self.connect((self.blocks[-1], 0), (self, 0))

    # Chain all intermediate blocks together in order.
    for i in range(len(self.blocks) - 1):
        self.connect((self.blocks[i], 0), (self.blocks[i + 1], 0))
def __init__(self, encoder_obj_list, puncpat=None, lentagname=None):
    """Hierarchical block chaining an FEC encoder with optional puncturing.

    Args:
        encoder_obj_list: a single encoder object or a list containing
            exactly one encoder (parallelism > 1 is not supported).
        puncpat: puncture pattern as a bit-list string (e.g. '110');
            None or '11' disables puncturing.
        lentagname: length-tag key for the tagged-stream encoder; None
            (or the string 'none') selects the plain stream encoder.

    Raises:
        AttributeError: if encoder_obj_list is a list of lists
            (parallelism > 1).
    """
    gr.hier_block2.__init__(self, "extended_tagged_encoder",
                            gr.io_signature(1, 1, gr.sizeof_char),
                            gr.io_signature(1, 1, gr.sizeof_char))

    self.blocks = []
    self.puncpat = puncpat

    # If it's a list of encoders, take the first one, unless it's
    # a list of lists of encoders.
    if isinstance(encoder_obj_list, list):
        # This block doesn't handle parallelism of > 1.
        # We could just grab encoder [0][0], but we don't want to
        # encourage this.
        if isinstance(encoder_obj_list[0], list):
            gr.log.info(
                "fec.extended_tagged_encoder: Parallelism must be 0 or 1.")
            raise AttributeError
        encoder_obj = encoder_obj_list[0]
    # Otherwise, just take it as is
    else:
        encoder_obj = encoder_obj_list

    # If lentagname is the string 'none', fall back to using the
    # non-tagged stream version.
    if isinstance(lentagname, str):
        if lentagname.lower() == 'none':
            lentagname = None

    if fec.get_encoder_input_conversion(encoder_obj) == "pack":
        self.blocks.append(blocks.pack_k_bits_bb(8))

    if not lentagname:
        self.blocks.append(
            fec.encoder(encoder_obj, gr.sizeof_char, gr.sizeof_char))
    else:
        self.blocks.append(
            fec.tagged_encoder(encoder_obj, gr.sizeof_char,
                               gr.sizeof_char, lentagname))

    # Fix: with the default puncpat=None the original test
    # (self.puncpat != '11') was True and len(None) raised TypeError.
    # Only add the puncture block for a real, non-trivial pattern.
    if self.puncpat is not None and self.puncpat != '11':
        self.blocks.append(
            fec.puncture_bb(len(puncpat), read_bitlist(puncpat), 0))

    # Connect the input to the first block and the output from the last
    # block (the puncture block if used, otherwise the encoder).
    self.connect((self, 0), (self.blocks[0], 0))
    self.connect((self.blocks[-1], 0), (self, 0))

    # Chain all intermediate blocks together in order.
    for i in range(len(self.blocks) - 1):
        self.connect((self.blocks[i], 0), (self.blocks[i + 1], 0))
def __init__(self, encoder_obj_list, threading, puncpat=None):
    """Hierarchical block wrapping FEC encoder(s) with optional threading
    and puncturing.

    Args:
        encoder_obj_list: a single encoder object (parallelism 0) or a
            flat list of encoders; a list of lists is rejected.
        threading: 'capillary', 'ordinary', or anything else for a
            single plain fec.encoder.
        puncpat: puncture pattern as a bit-list string (e.g. '110');
            None or '11' disables puncturing.

    Raises:
        AttributeError: if encoder_obj_list is a list of lists
            (parallelism > 1).
    """
    gr.hier_block2.__init__(self, "extended_encoder",
                            gr.io_signature(1, 1, gr.sizeof_char),
                            gr.io_signature(1, 1, gr.sizeof_char))

    self.blocks = []
    self.puncpat = puncpat

    if isinstance(encoder_obj_list, list):
        if isinstance(encoder_obj_list[0], list):
            gr.log.info("fec.extended_encoder: Parallelism must be 1.")
            raise AttributeError
    else:
        # If it has parallelism of 0, force it into a list of 1
        encoder_obj_list = [encoder_obj_list, ]

    if fec.get_encoder_input_conversion(encoder_obj_list[0]) == "pack":
        self.blocks.append(blocks.pack_k_bits_bb(8))

    if threading == 'capillary':
        self.blocks.append(
            capillary_threaded_encoder(encoder_obj_list,
                                       gr.sizeof_char,
                                       gr.sizeof_char))
    elif threading == 'ordinary':
        self.blocks.append(
            threaded_encoder(encoder_obj_list,
                             gr.sizeof_char,
                             gr.sizeof_char))
    else:
        self.blocks.append(
            fec.encoder(encoder_obj_list[0],
                        gr.sizeof_char,
                        gr.sizeof_char))

    if fec.get_encoder_output_conversion(
            encoder_obj_list[0]) == "packed_bits":
        self.blocks.append(blocks.packed_to_unpacked_bb(
            1, gr.GR_MSB_FIRST))

    # Fix: with the default puncpat=None the original test
    # (self.puncpat != '11') was True and len(None) raised TypeError.
    # Only add the puncture block for a real, non-trivial pattern.
    if self.puncpat is not None and self.puncpat != '11':
        self.blocks.append(
            fec.puncture_bb(len(puncpat), read_bitlist(puncpat), 0))

    # Connect the input to the first block and the output from the last
    # block (the puncture block if used, otherwise the encoder).
    self.connect((self, 0), (self.blocks[0], 0))
    self.connect((self.blocks[-1], 0), (self, 0))

    # Chain all intermediate blocks together in order.
    for i in range(len(self.blocks) - 1):
        self.connect((self.blocks[i], 0), (self.blocks[i + 1], 0))
def __init__(self, encoder_obj_list, threading, puncpat=None):
    """Hierarchical block wrapping FEC encoder(s) with optional threading
    and puncturing.

    Args:
        encoder_obj_list: a single encoder object (parallelism 0) or a
            flat list of encoders; a list of lists is rejected.
        threading: 'capillary', 'ordinary', or anything else for a
            single plain fec.encoder.
        puncpat: puncture pattern as a bit-list string (e.g. '110');
            None or '11' disables puncturing.

    Raises:
        AttributeError: if encoder_obj_list is a list of lists
            (parallelism > 1).
    """
    gr.hier_block2.__init__(self, "extended_encoder",
                            gr.io_signature(1, 1, gr.sizeof_char),
                            gr.io_signature(1, 1, gr.sizeof_char))

    self.blocks = []
    self.puncpat = puncpat

    if isinstance(encoder_obj_list, list):
        if isinstance(encoder_obj_list[0], list):
            gr.log.info("fec.extended_encoder: Parallelism must be 1.")
            raise AttributeError
    else:
        # If it has parallelism of 0, force it into a list of 1
        encoder_obj_list = [encoder_obj_list, ]

    if fec.get_encoder_input_conversion(encoder_obj_list[0]) == "pack":
        self.blocks.append(blocks.pack_k_bits_bb(8))

    if threading == 'capillary':
        self.blocks.append(
            capillary_threaded_encoder(encoder_obj_list,
                                       gr.sizeof_char,
                                       gr.sizeof_char))
    elif threading == 'ordinary':
        self.blocks.append(
            threaded_encoder(encoder_obj_list,
                             gr.sizeof_char,
                             gr.sizeof_char))
    else:
        self.blocks.append(
            fec.encoder(encoder_obj_list[0],
                        gr.sizeof_char,
                        gr.sizeof_char))

    if fec.get_encoder_output_conversion(
            encoder_obj_list[0]) == "packed_bits":
        self.blocks.append(blocks.packed_to_unpacked_bb(
            1, gr.GR_MSB_FIRST))

    # Fix: with the default puncpat=None the original test
    # (self.puncpat != '11') was True and len(None) raised TypeError.
    # Only add the puncture block for a real, non-trivial pattern.
    if self.puncpat is not None and self.puncpat != '11':
        self.blocks.append(
            fec.puncture_bb(len(puncpat), read_bitlist(puncpat), 0))

    # Connect the input to the first block and the output from the last
    # block (the puncture block if used, otherwise the encoder).
    self.connect((self, 0), (self.blocks[0], 0))
    self.connect((self.blocks[-1], 0), (self, 0))

    # Chain all intermediate blocks together in order.
    for i in range(len(self.blocks) - 1):
        self.connect((self.blocks[i], 0), (self.blocks[i + 1], 0))
def test_000(self):
    """Normal operation of the puncture block (no delay).

    Punctures with an 8-bit pattern 0xEF and checks the flowgraph output
    against the Python reference model built by puncture_setup().
    """
    self.puncsize = 8
    self.puncpat = 0xEF
    self.delay = 0
    self.puncture_setup()

    src = blocks.vector_source_b(self.src_data)
    op = fec.puncture_bb(self.puncsize, self.puncpat, self.delay)
    dst = blocks.vector_sink_b()

    self.tb.connect(src, op, dst)
    self.tb.run()

    # Normalize sink output to plain ints.  The original in-place loop
    # used xrange(), which is Python-2-only and raises NameError on
    # Python 3; a comprehension is both portable and idiomatic.
    dst_data = [int(b) for b in dst.data()]

    self.assertEqual(self.expected, dst_data)
def test_002(self):
    """Puncture pattern with more bits than puncsize (no delay).

    Uses a 16-bit pattern 0x5555 with puncsize 4; the block should
    truncate the pattern, matching the reference model built by
    puncture_setup().
    """
    self.puncsize = 4
    self.puncpat = 0x5555
    self.delay = 0
    self.puncture_setup()

    src = blocks.vector_source_b(self.src_data)
    op = fec.puncture_bb(self.puncsize, self.puncpat, self.delay)
    dst = blocks.vector_sink_b()

    self.tb.connect(src, op, dst)
    self.tb.run()

    # Normalize sink output to plain ints.  The original in-place loop
    # used xrange(), which is Python-2-only and raises NameError on
    # Python 3; a comprehension is both portable and idiomatic.
    dst_data = [int(b) for b in dst.data()]

    self.assertEqual(self.expected, dst_data)