def __init__(self, decoder_list_0, input_size, output_size): gr.hier_block2.__init__( self, "Threaded Decoder", gr.io_signature(1, 1, input_size*1), gr.io_signature(1, 1, output_size*1)) self.decoder_list_0 = decoder_list_0 self.deinterleave_0 = blocks.deinterleave(input_size, fec.get_decoder_input_size(decoder_list_0[0])) self.generic_decoders_0 = [] for i in range(len(decoder_list_0)): self.generic_decoders_0.append(fec.decoder(decoder_list_0[i], input_size, output_size)) self.interleave_0 = blocks.interleave(output_size, fec.get_decoder_output_size(decoder_list_0[0])) for i in range(len(decoder_list_0)): self.connect((self.deinterleave_0, i), (self.generic_decoders_0[i], 0)) for i in range(len(decoder_list_0)): self.connect((self.generic_decoders_0[i], 0), (self.interleave_0, i)) self.connect((self, 0), (self.deinterleave_0, 0)) self.connect((self.interleave_0, 0), (self, 0))
def __init__(self, decoder_obj_list, threading, ann=None, puncpat='11',
             integration_period=10000, flush=None, rotator=None):
    gr.hier_block2.__init__(self, "extended_decoder",
                            gr.io_signature(1, 1, gr.sizeof_float),
                            gr.io_signature(1, 1, gr.sizeof_char))
    self.blocks = []
    self.ann = ann
    self.puncpat = puncpat
    self.flush = flush

    if isinstance(decoder_obj_list, list):
        # This block doesn't handle parallelism of > 1.
        if isinstance(decoder_obj_list[0], list):
            gr.log.info("fec.extended_decoder: Parallelism must be 1.")
            raise AttributeError
    else:
        # If it has parallelism of 0, force it into a list of 1.
        decoder_obj_list = [decoder_obj_list, ]

    message_collector_connected = False

    # Anything going through the annihilator needs shifted, uchar values.
    if fec.get_decoder_input_conversion(decoder_obj_list[0]) == "uchar" or \
       fec.get_decoder_input_conversion(decoder_obj_list[0]) == "packed_bits":
        self.blocks.append(blocks.multiply_const_ff(48.0))

    if fec.get_shift(decoder_obj_list[0]) != 0.0:
        self.blocks.append(blocks.add_const_ff(fec.get_shift(decoder_obj_list[0])))
    elif fec.get_decoder_input_conversion(decoder_obj_list[0]) == "packed_bits":
        self.blocks.append(blocks.add_const_ff(128.0))

    if fec.get_decoder_input_conversion(decoder_obj_list[0]) == "uchar" or \
       fec.get_decoder_input_conversion(decoder_obj_list[0]) == "packed_bits":
        self.blocks.append(blocks.float_to_uchar())

    const_index = 0  # index that corresponds to mod order for specinvert purposes

    if not self.flush:
        flush = 10000
    else:
        flush = self.flush

    if self.ann:  # ann and puncpat are strings of 0s and 1s
        cat = fec.ULLVector()
        for i in fec.read_big_bitlist(ann):
            cat.append(i)

        synd_garble = .49
        idx_list = sorted(self.garbletable.keys())
        for i in idx_list:
            if 1.0 / self.ann.count('1') >= i:
                synd_garble = self.garbletable[i]
        print('using syndrome garble threshold ' + str(synd_garble) +
              ' for conv_bit_corr_bb')
        print('ceiling: .0335 data garble rate')
        self.blocks.append(fec.conv_bit_corr_bb(
            cat, len(puncpat) - puncpat.count('0'), len(ann),
            integration_period, flush, synd_garble))

    if self.puncpat != '11':
        self.blocks.append(fec.depuncture_bb(len(puncpat), read_bitlist(puncpat), 0))

    if fec.get_decoder_input_conversion(decoder_obj_list[0]) == "packed_bits":
        self.blocks.append(blocks.uchar_to_float())
        self.blocks.append(blocks.add_const_ff(-128.0))
        self.blocks.append(digital.binary_slicer_fb())
        self.blocks.append(blocks.unpacked_to_packed_bb(1, 0))

    if len(decoder_obj_list) > 1:
        if fec.get_history(decoder_obj_list[0]) != 0:
            gr.log.info("fec.extended_decoder: Cannot use multi-threaded "
                        "parallelism on a decoder with history.")
            raise AttributeError

    if threading == 'capillary':
        self.blocks.append(capillary_threaded_decoder(
            decoder_obj_list,
            fec.get_decoder_input_item_size(decoder_obj_list[0]),
            fec.get_decoder_output_item_size(decoder_obj_list[0])))
    elif threading == 'ordinary':
        self.blocks.append(threaded_decoder(
            decoder_obj_list,
            fec.get_decoder_input_item_size(decoder_obj_list[0]),
            fec.get_decoder_output_item_size(decoder_obj_list[0])))
    else:
        self.blocks.append(fec.decoder(
            decoder_obj_list[0],
            fec.get_decoder_input_item_size(decoder_obj_list[0]),
            fec.get_decoder_output_item_size(decoder_obj_list[0])))

    if fec.get_decoder_output_conversion(decoder_obj_list[0]) == "unpack":
        self.blocks.append(blocks.packed_to_unpacked_bb(1, gr.GR_MSB_FIRST))

    # Chain everything that was appended to self.blocks, in order.
    self.connect((self, 0), (self.blocks[0], 0))
    self.connect((self.blocks[-1], 0), (self, 0))
    for i in range(len(self.blocks) - 1):
        self.connect((self.blocks[i], 0), (self.blocks[i + 1], 0))
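
# --- Usage sketch (illustrative, not part of the original module) -----------
# The extended decoder above is normally reached as fec.extended_decoder in
# GNU Radio.  This sketch wraps eight identical decoders with 'capillary'
# threading; the cc_decoder parameters are placeholders.
def _example_extended_decoder():
    from gnuradio import gr, blocks, fec

    decoders = [fec.cc_decoder.make(2048, 7, 2, [79, 109]) for _ in range(8)]
    dec_block = fec.extended_decoder(decoder_obj_list=decoders,
                                     threading='capillary',
                                     ann=None,
                                     puncpat='11')

    tb = gr.top_block()
    src = blocks.vector_source_f([0.0] * 16384, repeat=False)
    snk = blocks.vector_sink_b()
    tb.connect(src, dec_block, snk)
    return tb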
def __init__(self, decoder_obj_list, ann=None, puncpat='11',
             integration_period=10000, flush=None, rotator=None,
             lentagname=None):
    gr.hier_block2.__init__(self, "extended_decoder",
                            gr.io_signature(1, 1, gr.sizeof_float),
                            gr.io_signature(1, 1, gr.sizeof_char))
    self.blocks = []
    self.ann = ann
    self.puncpat = puncpat
    self.flush = flush

    if isinstance(decoder_obj_list, list):
        # This block doesn't handle parallelism of > 1.
        # We could just grab decoder [0][0], but we don't want to encourage this.
        if isinstance(decoder_obj_list[0], list):
            gr.log.info("fec.extended_tagged_decoder: Parallelism must be 1.")
            raise AttributeError
        decoder_obj = decoder_obj_list[0]
    # Otherwise, just take it as is.
    else:
        decoder_obj = decoder_obj_list

    # If lentagname is None, fall back to using the non-tagged stream version.
    if isinstance(lentagname, str):
        if lentagname.lower() == 'none':
            lentagname = None

    message_collector_connected = False

    # Anything going through the annihilator needs shifted, uchar values.
    if fec.get_decoder_input_conversion(decoder_obj) == "uchar" or \
       fec.get_decoder_input_conversion(decoder_obj) == "packed_bits":
        self.blocks.append(blocks.multiply_const_ff(48.0))

    if fec.get_shift(decoder_obj) != 0.0:
        self.blocks.append(blocks.add_const_ff(fec.get_shift(decoder_obj)))
    elif fec.get_decoder_input_conversion(decoder_obj) == "packed_bits":
        self.blocks.append(blocks.add_const_ff(128.0))

    if fec.get_decoder_input_conversion(decoder_obj) == "uchar" or \
       fec.get_decoder_input_conversion(decoder_obj) == "packed_bits":
        self.blocks.append(blocks.float_to_uchar())

    const_index = 0  # index that corresponds to mod order for specinvert purposes

    if not self.flush:
        flush = 10000
    else:
        flush = self.flush

    if self.ann:  # ann and puncpat are strings of 0s and 1s
        cat = fec.ULLVector()
        for i in fec.read_big_bitlist(ann):
            cat.append(i)

        synd_garble = .49
        idx_list = sorted(self.garbletable.keys())
        for i in idx_list:
            if 1.0 / self.ann.count('1') >= i:
                synd_garble = self.garbletable[i]
        print('using syndrome garble threshold ' + str(synd_garble) +
              ' for conv_bit_corr_bb')
        print('ceiling: .0335 data garble rate')
        self.blocks.append(fec.conv_bit_corr_bb(
            cat, len(puncpat) - puncpat.count('0'), len(ann),
            integration_period, flush, synd_garble))

    if self.puncpat != '11':
        self.blocks.append(fec.depuncture_bb(len(puncpat), read_bitlist(puncpat), 0))

    if fec.get_decoder_input_conversion(decoder_obj) == "packed_bits":
        self.blocks.append(blocks.uchar_to_float())
        self.blocks.append(blocks.add_const_ff(-128.0))
        self.blocks.append(digital.binary_slicer_fb())
        self.blocks.append(blocks.unpacked_to_packed_bb(1, 0))
    else:
        if not lentagname:
            self.blocks.append(fec.decoder(
                decoder_obj,
                fec.get_decoder_input_item_size(decoder_obj),
                fec.get_decoder_output_item_size(decoder_obj)))
        else:
            self.blocks.append(fec.tagged_decoder(
                decoder_obj,
                fec.get_decoder_input_item_size(decoder_obj),
                fec.get_decoder_output_item_size(decoder_obj),
                lentagname))

    if fec.get_decoder_output_conversion(decoder_obj) == "unpack":
        self.blocks.append(blocks.packed_to_unpacked_bb(1, gr.GR_MSB_FIRST))

    # Chain everything that was appended to self.blocks, in order.
    self.connect((self, 0), (self.blocks[0], 0))
    self.connect((self.blocks[-1], 0), (self, 0))
    for i in range(len(self.blocks) - 1):
        self.connect((self.blocks[i], 0), (self.blocks[i + 1], 0))
def __init__(self, decoder_list_0, input_size, output_size):
    gr.hier_block2.__init__(self, "Capillary Threaded Decoder",
                            gr.io_signature(1, 1, input_size * 1),
                            gr.io_signature(1, 1, output_size * 1))

    self.decoder_list_0 = decoder_list_0

    # The capillary structure is a binary tree of deinterleavers feeding the
    # decoders and a mirror-image tree of interleavers collecting their
    # output, so the decoder count must be a power of two.
    check = math.log10(len(self.decoder_list_0)) / math.log10(2.0)
    if abs(check - int(check)) > 0:
        gr.log.info("fec.capillary_threaded_decoder: number of decoders "
                    "must be a power of 2.")
        raise AttributeError

    self.deinterleaves_0 = []
    for i in range(int(math.log(len(decoder_list_0), 2))):
        for j in range(int(math.pow(2, i))):
            self.deinterleaves_0.append(blocks.deinterleave(
                input_size, fec.get_decoder_input_size(decoder_list_0[0])))

    self.generic_decoders_0 = []
    for i in range(len(decoder_list_0)):
        self.generic_decoders_0.append(
            fec.decoder(decoder_list_0[i], input_size, output_size))

    self.interleaves_0 = []
    for i in range(int(math.log(len(decoder_list_0), 2))):
        for j in range(int(math.pow(2, i))):
            self.interleaves_0.append(blocks.interleave(
                output_size, fec.get_decoder_output_size(decoder_list_0[0])))

    # Wire up the deinterleaver tree, root to leaves.
    rootcount = 0
    branchcount = 1
    for i in range(int(math.log(len(decoder_list_0), 2)) - 1):
        for j in range(int(math.pow(2, i))):
            self.connect((self.deinterleaves_0[rootcount], 0),
                         (self.deinterleaves_0[branchcount], 0))
            self.connect((self.deinterleaves_0[rootcount], 1),
                         (self.deinterleaves_0[branchcount + 1], 0))
            rootcount += 1
            branchcount += 2

    # Each leaf deinterleaver feeds a pair of decoders.
    codercount = 0
    for i in range(len(decoder_list_0) // 2):
        self.connect((self.deinterleaves_0[rootcount], 0),
                     (self.generic_decoders_0[codercount], 0))
        self.connect((self.deinterleaves_0[rootcount], 1),
                     (self.generic_decoders_0[codercount + 1], 0))
        rootcount += 1
        codercount += 2

    # Wire up the interleaver tree, leaves to root.
    rootcount = 0
    branchcount = 1
    for i in range(int(math.log(len(decoder_list_0), 2)) - 1):
        for j in range(int(math.pow(2, i))):
            self.connect((self.interleaves_0[branchcount], 0),
                         (self.interleaves_0[rootcount], 0))
            self.connect((self.interleaves_0[branchcount + 1], 0),
                         (self.interleaves_0[rootcount], 1))
            rootcount += 1
            branchcount += 2

    # Each pair of decoders feeds a leaf interleaver.
    codercount = 0
    for i in range(len(decoder_list_0) // 2):
        self.connect((self.generic_decoders_0[codercount], 0),
                     (self.interleaves_0[rootcount], 0))
        self.connect((self.generic_decoders_0[codercount + 1], 0),
                     (self.interleaves_0[rootcount], 1))
        rootcount += 1
        codercount += 2

    if len(self.decoder_list_0) > 1:
        self.connect((self, 0), (self.deinterleaves_0[0], 0))
        self.connect((self.interleaves_0[0], 0), (self, 0))
    else:
        self.connect((self, 0), (self.generic_decoders_0[0], 0))
        self.connect((self.generic_decoders_0[0], 0), (self, 0))
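
# --- Structure note (illustrative, not part of the original module) ---------
# For N = 2**m decoders the constructor above builds a binary tree with
# 1 + 2 + ... + 2**(m-1) = N - 1 deinterleavers, and a mirror-image tree of
# N - 1 interleavers on the output side.  A quick check of that count:
def _capillary_tree_sizes():
    import math
    for n_decoders in (2, 4, 8, 16):
        levels = int(math.log(n_decoders, 2))
        n_deinterleavers = sum(2 ** i for i in range(levels))
        assert n_deinterleavers == n_decoders - 1
        print(n_decoders, "decoders ->", n_deinterleavers,
              "deinterleavers (and as many interleavers)")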
def __init__(self, decoder_list_0, input_size, output_size): gr.hier_block2.__init__( self, "Capillary Threaded Decoder", gr.io_signature(1, 1, input_size*1), gr.io_signature(1, 1, output_size*1)) self.decoder_list_0 = decoder_list_0 check = math.log10(len(self.decoder_list_0)) / math.log10(2.0) if(abs(check - int(check)) > 0): gr.log.info("fec.capillary_threaded_decoder: number of decoders must be a power of 2.") raise AttributeError self.deinterleaves_0 = [] for i in range(int(math.log(len(decoder_list_0), 2))): for j in range(int(math.pow(2, i))): self.deinterleaves_0.append(blocks.deinterleave(input_size, fec.get_decoder_input_size(decoder_list_0[0]))) self.generic_decoders_0 = [] for i in range(len(decoder_list_0)): self.generic_decoders_0.append(fec.decoder(decoder_list_0[i], input_size, output_size)) self.interleaves_0 = [] for i in range(int(math.log(len(decoder_list_0), 2))): for j in range(int(math.pow(2, i))): self.interleaves_0.append(blocks.interleave(output_size, fec.get_decoder_output_size(decoder_list_0[0]))) rootcount = 0 branchcount = 1 for i in range(int(math.log(len(decoder_list_0), 2)) - 1): for j in range(int(math.pow(2, i))): self.connect((self.deinterleaves_0[rootcount], 0), (self.deinterleaves_0[branchcount], 0)) self.connect((self.deinterleaves_0[rootcount], 1), (self.deinterleaves_0[branchcount + 1], 0)) rootcount += 1 branchcount += 2 codercount = 0 for i in range(len(decoder_list_0)/2): self.connect((self.deinterleaves_0[rootcount], 0), (self.generic_decoders_0[codercount], 0)) self.connect((self.deinterleaves_0[rootcount], 1), (self.generic_decoders_0[codercount + 1], 0)) rootcount += 1 codercount += 2 rootcount = 0 branchcount = 1 for i in range(int(math.log(len(decoder_list_0), 2)) - 1): for j in range(int(math.pow(2, i))): self.connect((self.interleaves_0[branchcount], 0), (self.interleaves_0[rootcount], 0)) self.connect((self.interleaves_0[branchcount + 1], 0), (self.interleaves_0[rootcount], 1)) rootcount += 1 branchcount += 2 codercount = 0 for i in range(len(decoder_list_0)/2): self.connect((self.generic_decoders_0[codercount], 0), (self.interleaves_0[rootcount], 0)) self.connect((self.generic_decoders_0[codercount + 1], 0), (self.interleaves_0[rootcount], 1)) rootcount += 1 codercount += 2 if ((len(self.decoder_list_0)) > 1): self.connect((self, 0), (self.deinterleaves_0[0], 0)) self.connect((self.interleaves_0[0], 0), (self, 0)) else: self.connect((self, 0), (self.generic_decoders_0[0], 0)) self.connect((self.generic_decoders_0[0], 0), (self, 0))
def __init__(
    self,
    decoder_obj_list,
    ann=None,
    puncpat="11",
    integration_period=10000,
    flush=None,
    rotator=None,
    lentagname=None,
    mtu=1500,
):
    gr.hier_block2.__init__(
        self,
        "extended_decoder",
        gr.io_signature(1, 1, gr.sizeof_float),
        gr.io_signature(1, 1, gr.sizeof_char),
    )
    self.blocks = []
    self.ann = ann
    self.puncpat = puncpat
    self.flush = flush

    if isinstance(decoder_obj_list, list):
        # This block doesn't handle parallelism of > 1.
        # We could just grab decoder [0][0], but we don't want to encourage this.
        if isinstance(decoder_obj_list[0], list):
            gr.log.info("fec.extended_tagged_decoder: Parallelism must be 1.")
            raise AttributeError
        decoder_obj = decoder_obj_list[0]
    # Otherwise, just take it as is.
    else:
        decoder_obj = decoder_obj_list

    # If lentagname is None, fall back to using the non-tagged stream version.
    if isinstance(lentagname, str):
        if lentagname.lower() == "none":
            lentagname = None

    message_collector_connected = False

    # Anything going through the annihilator needs shifted, uchar values.
    if (
        fec.get_decoder_input_conversion(decoder_obj) == "uchar"
        or fec.get_decoder_input_conversion(decoder_obj) == "packed_bits"
    ):
        self.blocks.append(blocks.multiply_const_ff(48.0))

    if fec.get_shift(decoder_obj) != 0.0:
        self.blocks.append(blocks.add_const_ff(fec.get_shift(decoder_obj)))
    elif fec.get_decoder_input_conversion(decoder_obj) == "packed_bits":
        self.blocks.append(blocks.add_const_ff(128.0))

    if (
        fec.get_decoder_input_conversion(decoder_obj) == "uchar"
        or fec.get_decoder_input_conversion(decoder_obj) == "packed_bits"
    ):
        self.blocks.append(blocks.float_to_uchar())

    const_index = 0  # index that corresponds to mod order for specinvert purposes

    if not self.flush:
        flush = 10000
    else:
        flush = self.flush

    if self.ann:  # ann and puncpat are strings of 0s and 1s
        cat = fec.ULLVector()
        for i in fec.read_big_bitlist(ann):
            cat.append(i)

        synd_garble = 0.49
        idx_list = sorted(self.garbletable.keys())
        for i in idx_list:
            if 1.0 / self.ann.count("1") >= i:
                synd_garble = self.garbletable[i]
        print("using syndrome garble threshold " + str(synd_garble) + " for conv_bit_corr_bb")
        print("ceiling: .0335 data garble rate")
        self.blocks.append(
            fec.conv_bit_corr_bb(
                cat, len(puncpat) - puncpat.count("0"), len(ann), integration_period, flush, synd_garble
            )
        )

    if self.puncpat != "11":
        self.blocks.append(fec.depuncture_bb(len(puncpat), read_bitlist(puncpat), 0))

    if fec.get_decoder_input_conversion(decoder_obj) == "packed_bits":
        self.blocks.append(blocks.uchar_to_float())
        self.blocks.append(blocks.add_const_ff(-128.0))
        self.blocks.append(digital.binary_slicer_fb())
        self.blocks.append(blocks.unpacked_to_packed_bb(1, 0))
    else:
        if not lentagname:
            self.blocks.append(
                fec.decoder(
                    decoder_obj,
                    fec.get_decoder_input_item_size(decoder_obj),
                    fec.get_decoder_output_item_size(decoder_obj),
                )
            )
        else:
            self.blocks.append(
                fec.tagged_decoder(
                    decoder_obj,
                    fec.get_decoder_input_item_size(decoder_obj),
                    fec.get_decoder_output_item_size(decoder_obj),
                    lentagname,
                    mtu,
                )
            )

    if fec.get_decoder_output_conversion(decoder_obj) == "unpack":
        self.blocks.append(blocks.packed_to_unpacked_bb(1, gr.GR_MSB_FIRST))

    # Chain everything that was appended to self.blocks, in order.
    self.connect((self, 0), (self.blocks[0], 0))
    self.connect((self.blocks[-1], 0), (self, 0))
    for i in range(len(self.blocks) - 1):
        self.connect((self.blocks[i], 0), (self.blocks[i + 1], 0))
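
# --- Usage sketch (illustrative, not part of the original module) -----------
# The tagged variant above is normally reached as fec.extended_tagged_decoder
# in GNU Radio.  This sketch decodes one 'packet_len'-tagged frame with a
# terminated convolutional decoder; the decoder parameters, tag name, and
# frame length are placeholders.
def _example_extended_tagged_decoder():
    import pmt
    from gnuradio import gr, blocks, fec

    dec = fec.cc_decoder.make(2048, 7, 2, [79, 109], 0, -1, fec.CC_TERMINATED)
    dec_block = fec.extended_tagged_decoder(decoder_obj_list=dec,
                                            ann=None, puncpat="11",
                                            lentagname="packet_len", mtu=1500)

    tb = gr.top_block()
    tag = gr.tag_utils.python_to_tag((0, pmt.intern("packet_len"),
                                      pmt.from_long(4096), pmt.PMT_NIL))
    src = blocks.vector_source_f([0.0] * 4096, False, 1, [tag])
    snk = blocks.vector_sink_b()
    tb.connect(src, dec_block, snk)
    return tb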