def __init__(self, args, p_array1, p_array2):
    """Iterative CNN turbo decoder wired to two distinct interleaving permutations.

    Args:
        args: configuration namespace (num_iteration, dec_num_layer,
            dec_num_unit, dec_kernel_size, num_iter_ft, no_cuda, ...).
        p_array1: permutation for the first (de)interleaver pair.
        p_array2: permutation for the second (de)interleaver pair.
    """
    super(DEC_LargeCNN2Int, self).__init__()
    self.args = args

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    self.this_device = torch.device("cuda" if use_cuda else "cpu")

    # One interleaver/deinterleaver pair per permutation.
    self.interleaver1 = Interleaver(args, p_array1)
    self.deinterleaver1 = DeInterleaver(args, p_array1)
    self.interleaver2 = Interleaver(args, p_array2)
    self.deinterleaver2 = DeInterleaver(args, p_array2)

    self.dec1_cnns = torch.nn.ModuleList()
    self.dec2_cnns = torch.nn.ModuleList()
    self.dec1_outputs = torch.nn.ModuleList()
    self.dec2_outputs = torch.nn.ModuleList()

    def _decoder_cnn():
        # Shared shape for both decoder CNN stages of every iteration.
        return SameShapeConv1d(num_layer=args.dec_num_layer,
                               in_channels=2 + args.num_iter_ft,
                               out_channels=args.dec_num_unit,
                               kernel_size=args.dec_kernel_size)

    for it in range(args.num_iteration):
        self.dec1_cnns.append(_decoder_cnn())
        self.dec2_cnns.append(_decoder_cnn())
        self.dec1_outputs.append(
            torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))
        # Final iteration emits a single estimate; earlier iterations pass
        # num_iter_ft features forward to the next iteration.
        last = (it == args.num_iteration - 1)
        self.dec2_outputs.append(
            torch.nn.Linear(args.dec_num_unit, 1 if last else args.num_iter_ft))
def __init__(self, args, p_array1, p_array2):
    # turbofy only for code rate 1/3
    """Rate-1/3 CNN encoder: three identical conv branches and two interleavers.

    Args:
        args: configuration namespace.
        p_array1: permutation for the first interleaver.
        p_array2: permutation for the second interleaver.
    """
    super(ENC_interCNN2Int, self).__init__(args)
    self.args = args

    # Three encoder branches, each a conv stack followed by a 1-unit head.
    # NOTE(review): kernel_size comes from args.dec_kernel_size here, while
    # ENC_interCNN uses args.enc_kernel_size -- confirm this is intentional.
    for branch in (1, 2, 3):
        conv = SameShapeConv1d(num_layer=args.enc_num_layer,
                               in_channels=args.code_rate_k,
                               out_channels=args.enc_num_unit,
                               kernel_size=args.dec_kernel_size)
        setattr(self, 'enc_cnn_%d' % branch, conv)
        setattr(self, 'enc_linear_%d' % branch,
                torch.nn.Linear(args.enc_num_unit, 1))

    self.interleaver1 = Interleaver(args, p_array1)
    self.interleaver2 = Interleaver(args, p_array2)
def __init__(self, args, p_array):
    # turbofy only for code rate 1/3
    """Rate-1/3 CNN encoder that derives its second permutation internally.

    Args:
        args: configuration namespace (block_len sizes the derived permutation).
        p_array: permutation for the first interleaver.
    """
    # NOTE(review): super() names ENC_interCNN2Int -- verify this matches the
    # actual enclosing class name.
    super(ENC_interCNN2Int, self).__init__(args)
    self.args = args

    # Three encoder branches, each a conv stack followed by a 1-unit head.
    # NOTE(review): kernel_size is args.dec_kernel_size (not enc_kernel_size)
    # -- confirm intended.
    for branch in (1, 2, 3):
        conv = SameShapeConv1d(num_layer=args.enc_num_layer,
                               in_channels=args.code_rate_k,
                               out_channels=args.enc_num_unit,
                               kernel_size=args.dec_kernel_size)
        setattr(self, 'enc_cnn_%d' % branch, conv)
        setattr(self, 'enc_linear_%d' % branch,
                torch.nn.Linear(args.enc_num_unit, 1))

    self.interleaver1 = Interleaver(args, p_array)

    # Second permutation drawn from a fixed seed so runs are reproducible.
    rand_gen2 = mtrand.RandomState(1000)
    p_array2 = rand_gen2.permutation(arange(args.block_len))

    print('p_array1', p_array)
    print('p_array2', p_array2)

    self.interleaver2 = Interleaver(args, p_array2)
def __init__(self, args, p_array):
    """Iterative 1D turbo decoder plus 2D CNN feature-transform front/back ends.

    Args:
        args: configuration namespace (num_iteration, dec_*, num_iter_ft,
            code_rate_k, img_channels, ...).
        p_array: interleaving permutation shared by both directions.
    """
    super(TurboAE_decoder1D, self).__init__()
    self.args = args

    cuda = torch.cuda.is_available()
    self.this_device = torch.device("cuda" if cuda else "cpu")

    self.interleaver = Interleaver(args, p_array)
    self.deinterleaver = DeInterleaver(args, p_array)

    self.dec1_cnns = torch.nn.ModuleList()
    self.dec2_cnns = torch.nn.ModuleList()
    self.dec1_outputs = torch.nn.ModuleList()
    self.dec2_outputs = torch.nn.ModuleList()

    def _decoder_cnn():
        # Same geometry for both CNN stages of every iteration.
        return SameShapeConv1d(num_layer=args.dec_num_layer,
                               in_channels=2 + args.num_iter_ft,
                               out_channels=args.dec_num_unit,
                               kernel_size=args.dec_kernel_size)

    for it in range(args.num_iteration):
        self.dec1_cnns.append(_decoder_cnn())
        self.dec2_cnns.append(_decoder_cnn())
        self.dec1_outputs.append(
            torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))
        # Last iteration emits code_rate_k symbols; earlier ones pass
        # num_iter_ft features to the next iteration.
        last = (it == args.num_iteration - 1)
        self.dec2_outputs.append(torch.nn.Linear(
            args.dec_num_unit,
            args.code_rate_k if last else args.num_iter_ft))

    # 2D CNNs mapping image channels <-> decoder feature space
    # ("also need some CNN for f" in the original).
    self.ftstart = SameShapeConv2d(num_layer=args.dec_num_layer,
                                   in_channels=args.img_channels,
                                   out_channels=args.dec_num_unit,
                                   kernel_size=args.dec_kernel_size)
    self.ftend = SameShapeConv2d(num_layer=1,
                                 in_channels=args.dec_num_unit,
                                 out_channels=args.img_channels,
                                 kernel_size=args.dec_kernel_size)
def __init__(self, args, p_array):
    """Systematic interleaved GRU encoder: one RNN branch for the straight
    stream and one for the interleaved stream.

    Args:
        args: configuration namespace (enc_num_unit, enc_num_layer, ...).
        p_array: interleaving permutation.
    """
    super(ENC_interRNN_sys, self).__init__(args)
    self.args = args

    def _bi_gru():
        # Bidirectional, so downstream linears see 2 * enc_num_unit features.
        return torch.nn.GRU(1, args.enc_num_unit,
                            num_layers=args.enc_num_layer,
                            bias=True, batch_first=True,
                            dropout=0, bidirectional=True)

    self.enc_rnn = _bi_gru()
    self.enc_linear = torch.nn.Linear(2 * args.enc_num_unit, 1)

    self.enc_rnn_int = _bi_gru()
    self.enc_linear_int = torch.nn.Linear(2 * args.enc_num_unit, 1)

    self.interleaver = Interleaver(args, p_array)
def __init__(self, args, p_array):
    # turbofy only for code rate 1/3
    """Two-branch GRU encoder with a single interleaver.

    Args:
        args: configuration namespace (enc_num_unit, enc_num_layer, ...).
        p_array: interleaving permutation.
    """
    super(ENC_turbofy_rate2, self).__init__(args)
    self.args = args

    def _bi_gru():
        # Bidirectional, so the linear heads see 2 * enc_num_unit features.
        return torch.nn.GRU(1, args.enc_num_unit,
                            num_layers=args.enc_num_layer,
                            bias=True, batch_first=True,
                            dropout=0, bidirectional=True)

    self.enc_rnn_1 = _bi_gru()
    self.enc_linear_1 = torch.nn.Linear(2 * args.enc_num_unit, 1)

    self.enc_rnn_2 = _bi_gru()
    self.enc_linear_2 = torch.nn.Linear(2 * args.enc_num_unit, 1)

    self.interleaver = Interleaver(args, p_array)
def __init__(self, args, input_size=1, is_systematic_bit=False,
             is_interleave=False, p_array=None):
    """CNN encoder: a same-shape conv stack followed by a 1-unit linear head.

    Args:
        args: configuration namespace (no_cuda, enc_num_layer, enc_num_unit,
            enc_kernel_size, ...).
        input_size: number of input channels to the conv stack.
        is_systematic_bit: stored flag; its use is outside this constructor.
        is_interleave: when True, an Interleaver is built from ``p_array``.
        p_array: interleaving permutation; defaults to an empty list.
            (Was a mutable default argument ``p_array=[]`` -- replaced with a
            ``None`` sentinel so the default is not shared across calls.)
    """
    super(CNN_encoder, self).__init__()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    self.this_device = torch.device("cuda" if use_cuda else "cpu")

    self.args = args
    self.is_systematic_bit = is_systematic_bit
    self.is_interleave = is_interleave

    # None sentinel -> fresh empty list per call (avoids shared mutable default).
    if p_array is None:
        p_array = []

    if self.is_interleave:
        self.interleaver = Interleaver(args, p_array)
    self.p_array = p_array

    # Encoder
    self.enc_cnn = SameShapeConv1d(num_layer=args.enc_num_layer,
                                   in_channels=input_size,
                                   out_channels=args.enc_num_unit,
                                   kernel_size=args.enc_kernel_size)
    self.enc_linear = torch.nn.Linear(args.enc_num_unit, 1)
def __init__(self, args, p_array):
    """Shared FTAE decoder: two conv stages with feature heads and a final
    1-unit output layer.

    Args:
        args: configuration namespace (cnn_type, dec_*, num_iter_ft, ...).
        p_array: interleaving permutation (also kept on the instance).
    """
    super(FTAE_Shareddecoder, self).__init__()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    self.this_device = torch.device("cuda" if use_cuda else "cpu")
    self.args = args

    # interleaver
    self.p_array = p_array
    self.interleaver = Interleaver(args, p_array)
    self.deinterleaver = DeInterleaver(args, p_array)

    # 'dense' selects densely-connected conv stacks, otherwise plain ones.
    CNNModel = DenseSameShapeConv1d if args.cnn_type == 'dense' else SameShapeConv1d

    def _stage():
        return CNNModel(num_layer=args.dec_num_layer,
                        in_channels=2 + args.num_iter_ft,
                        out_channels=args.dec_num_unit,
                        kernel_size=args.dec_kernel_size)

    self.dec1_cnns = _stage()
    self.dec1_outputs = torch.nn.Linear(args.dec_num_unit, args.num_iter_ft)

    self.dec2_cnns = _stage()
    self.dec2_outputs = torch.nn.Linear(args.dec_num_unit, args.num_iter_ft)

    self.final_outputs = torch.nn.Linear(args.num_iter_ft, 1)
def __init__(self, args, p_array):
    """1D CNN encoder for TurboAE-style CCE with per-stream batch norm.

    Args:
        args: configuration namespace (cce_num_layer, cce_num_unit,
            code_rate_k, code_rate_n, dec_kernel_size, ...).
        p_array: interleaving permutation.
    """
    super(CCE_Turbo_Encoder1D, self).__init__()
    self.args = args

    cuda = torch.cuda.is_available()
    self.this_device = torch.device("cuda" if cuda else "cpu")

    # Define 1D Network for TurboAE: three conv branches with 1-unit heads.
    # NOTE(review): kernel_size is args.dec_kernel_size for these encoder
    # convs -- confirm that is intentional.
    for branch in (1, 2, 3):
        conv = SameShapeConv1d(num_layer=args.cce_num_layer,
                               in_channels=args.code_rate_k,
                               out_channels=args.cce_num_unit,
                               kernel_size=args.dec_kernel_size)
        setattr(self, 'enc_cnn_%d' % branch, conv)
        setattr(self, 'enc_linear_%d' % branch,
                torch.nn.Linear(args.cce_num_unit, 1))

    self.interleaver = Interleaver(args, p_array)

    # Normalize across the code_rate_n output streams.
    self.norm = torch.nn.BatchNorm1d(self.args.code_rate_n, affine=True)
def __init__(self, args, p_array):
    """FTAE encoder: three conv branches whose input widths grow by 2 channels
    per branch (code_rate_k, +2, +4), each with a 1-unit linear head.

    Args:
        args: configuration namespace (cnn_type, enc_*, code_rate_k, ...).
        p_array: interleaving permutation (also kept on the instance).
    """
    super(FTAE_encoder, self).__init__()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    self.this_device = torch.device("cuda" if use_cuda else "cpu")
    self.args = args

    # interleaver
    self.p_array = p_array
    self.interleaver = Interleaver(args, p_array)

    # 'dense' selects densely-connected conv stacks, otherwise plain ones.
    CNNModel = (DenseSameShapeConv1d if self.args.cnn_type == 'dense'
                else SameShapeConv1d)

    # Encoder: branch b takes code_rate_k + 2*(b-1) input channels.
    for branch, extra_in in enumerate((0, 2, 4), start=1):
        conv = CNNModel(num_layer=args.enc_num_layer,
                        in_channels=args.code_rate_k + extra_in,
                        out_channels=args.enc_num_unit,
                        kernel_size=args.enc_kernel_size)
        setattr(self, 'enc_cnn_%d' % branch, conv)
        setattr(self, 'enc_linear_%d' % branch,
                torch.nn.Linear(args.enc_num_unit, 1))
def __init__(self, args, p_array):
    """Iterative bidirectional-GRU turbo decoder for rate-1/2 codes.

    Args:
        args: configuration namespace (num_iteration, dec_num_unit,
            num_iter_ft, dropout, no_cuda, ...).
        p_array: interleaving permutation shared by both directions.
    """
    super(DEC_LargeRNN_rate2, self).__init__()
    self.args = args

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    self.this_device = torch.device("cuda" if use_cuda else "cpu")

    self.interleaver = Interleaver(args, p_array)
    self.deinterleaver = DeInterleaver(args, p_array)

    self.dec1_rnns = torch.nn.ModuleList()
    self.dec2_rnns = torch.nn.ModuleList()
    self.dec1_outputs = torch.nn.ModuleList()
    self.dec2_outputs = torch.nn.ModuleList()

    def _bi_gru():
        # 2-layer bidirectional GRU; heads therefore see 2 * dec_num_unit.
        return torch.nn.GRU(1 + args.num_iter_ft, args.dec_num_unit,
                            num_layers=2, bias=True, batch_first=True,
                            dropout=args.dropout, bidirectional=True)

    for it in range(args.num_iteration):
        self.dec1_rnns.append(_bi_gru())
        self.dec2_rnns.append(_bi_gru())
        self.dec1_outputs.append(
            torch.nn.Linear(2 * args.dec_num_unit, args.num_iter_ft))
        # Final iteration emits a single estimate; earlier ones pass
        # num_iter_ft features forward.
        last = (it == args.num_iteration - 1)
        self.dec2_outputs.append(torch.nn.Linear(
            2 * args.dec_num_unit, 1 if last else args.num_iter_ft))
def __init__(self, args, p_array):
    """Iterative CNN turbo decoder; conv stack type follows args.encoder.

    Args:
        args: configuration namespace (encoder, num_iteration, dec_*,
            num_iter_ft, no_cuda, ...).
        p_array: interleaving permutation shared by both directions.
    """
    super(DEC_LargeCNN, self).__init__()
    self.args = args

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    self.this_device = torch.device("cuda" if use_cuda else "cpu")

    self.interleaver = Interleaver(args, p_array)
    self.deinterleaver = DeInterleaver(args, p_array)

    self.dec1_cnns = torch.nn.ModuleList()
    self.dec2_cnns = torch.nn.ModuleList()
    self.dec1_outputs = torch.nn.ModuleList()
    self.dec2_outputs = torch.nn.ModuleList()

    # Plain conv stacks for the rate-3 CNN encoder, dense ones otherwise.
    CNNLayer = (SameShapeConv1d
                if self.args.encoder == 'TurboAE_rate3_cnn'
                else DenseSameShapeConv1d)

    def _decoder_cnn():
        return CNNLayer(num_layer=args.dec_num_layer,
                        in_channels=2 + args.num_iter_ft,
                        out_channels=args.dec_num_unit,
                        kernel_size=args.dec_kernel_size)

    for it in range(args.num_iteration):
        self.dec1_cnns.append(_decoder_cnn())
        self.dec2_cnns.append(_decoder_cnn())
        self.dec1_outputs.append(
            torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))
        # Final iteration emits a single estimate; earlier ones pass
        # num_iter_ft features forward.
        last = (it == args.num_iteration - 1)
        self.dec2_outputs.append(torch.nn.Linear(
            args.dec_num_unit, 1 if last else args.num_iter_ft))
def __init__(self, args, p_array):
    """FTAE iterative decoder: CNN stages for the 'turboae_blockdelay_cnn'
    codec, bidirectional GRU stages otherwise.

    Args:
        args: configuration namespace (cnn_type, codec, num_iteration,
            dec_*, num_iter_ft, code_rate_k, no_cuda, ...).
        p_array: interleaving permutation (also kept on the instance).
    """
    super(FTAE_decoder, self).__init__()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    self.this_device = torch.device("cuda" if use_cuda else "cpu")
    self.args = args

    # interleaver
    self.p_array = p_array
    self.interleaver = Interleaver(args, p_array)
    self.deinterleaver = DeInterleaver(args, p_array)

    # Decoder containers (hold GRUs in the RNN branch despite the names).
    self.dec1_cnns = torch.nn.ModuleList()
    self.dec2_cnns = torch.nn.ModuleList()
    self.dec1_outputs = torch.nn.ModuleList()
    self.dec2_outputs = torch.nn.ModuleList()

    # 'dense' selects densely-connected conv stacks, otherwise plain ones.
    CNNModel = DenseSameShapeConv1d if args.cnn_type == 'dense' else SameShapeConv1d
    # Invariant across iterations, hoisted out of the loop.
    use_cnn = self.args.codec == 'turboae_blockdelay_cnn'

    def _stage_cnn():
        return CNNModel(num_layer=args.dec_num_layer,
                        in_channels=2 + args.num_iter_ft,
                        out_channels=args.dec_num_unit,
                        kernel_size=args.dec_kernel_size)

    def _stage_gru():
        return torch.nn.GRU(2 + args.num_iter_ft, args.dec_num_unit,
                            num_layers=args.dec_num_layer, bias=True,
                            batch_first=True, dropout=0, bidirectional=True)

    for it in range(args.num_iteration):
        # Final iteration emits code_rate_k symbols; earlier iterations pass
        # num_iter_ft features forward.
        last = (it == args.num_iteration - 1)
        final_dim = args.code_rate_k if last else args.num_iter_ft

        if use_cnn:
            self.dec1_cnns.append(_stage_cnn())
            self.dec2_cnns.append(_stage_cnn())
            self.dec1_outputs.append(
                torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))
            self.dec2_outputs.append(
                torch.nn.Linear(args.dec_num_unit, final_dim))
        else:
            # RNN based: bidirectional, so heads see 2 * dec_num_unit features.
            self.dec1_cnns.append(_stage_gru())
            self.dec2_cnns.append(_stage_gru())
            self.dec1_outputs.append(
                torch.nn.Linear(2 * args.dec_num_unit, args.num_iter_ft))
            self.dec2_outputs.append(
                torch.nn.Linear(2 * args.dec_num_unit, final_dim))
def __init__(self, args, p_array):
    # turbofy only for code rate 1/3
    """Three-branch interleaved RNN encoder; RNN cell type follows args.enc_rnn.

    Args:
        args: configuration namespace (enc_rnn in {'gru','lstm',other},
            enc_num_unit, enc_num_layer, ...).
        p_array: interleaving permutation.
    """
    super(ENC_interRNN, self).__init__(args)
    self.enc_rnns = torch.nn.ModuleList()
    self.args = args

    # Encoder: pick the recurrent cell; anything unrecognized falls back to
    # a vanilla RNN (same as the original if/elif/else chain).
    RNN_MODEL = {'gru': torch.nn.GRU,
                 'lstm': torch.nn.LSTM}.get(args.enc_rnn, torch.nn.RNN)

    def _bi_rnn():
        # Bidirectional, so the linear heads see 2 * enc_num_unit features.
        return RNN_MODEL(1, args.enc_num_unit,
                         num_layers=args.enc_num_layer,
                         bias=True, batch_first=True,
                         dropout=0, bidirectional=True)

    self.enc_rnn_1 = _bi_rnn()
    self.enc_linear_1 = torch.nn.Linear(2 * args.enc_num_unit, 1)

    self.enc_rnn_2 = _bi_rnn()
    self.enc_linear_2 = torch.nn.Linear(2 * args.enc_num_unit, 1)

    self.enc_rnn_3 = _bi_rnn()
    self.enc_linear_3 = torch.nn.Linear(2 * args.enc_num_unit, 1)

    self.interleaver = Interleaver(args, p_array)
def __init__(self, args, p_array):
    """Single-GRU turbofy decoder with a feature head and a final 1-unit head.

    Args:
        args: configuration namespace (code_rate_n, num_iter_ft, dec_num_unit,
            dropout, no_cuda, ...).
        p_array: interleaving permutation.
    """
    super(NeuralTurbofyDec, self).__init__()
    self.args = args

    self.interleaver = Interleaver(args, p_array)
    self.deinterleaver = DeInterleaver(args, p_array)

    # Per-step input width: (code_rate_n - 1) streams + num_iter_ft features.
    rnn_in = args.code_rate_n + args.num_iter_ft - 1
    self.dec_rnn = torch.nn.GRU(rnn_in, args.dec_num_unit,
                                num_layers=2, bias=True, batch_first=True,
                                dropout=args.dropout, bidirectional=True)
    # Bidirectional, so the head sees 2 * dec_num_unit features.
    self.dec_out = torch.nn.Linear(2 * args.dec_num_unit, args.num_iter_ft)
    self.dec_final = torch.nn.Linear(args.num_iter_ft, 1)

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    self.device = torch.device("cuda" if use_cuda else "cpu")
def __init__(self, args, p_array):
    # turbofy only for code rate 1/3
    """Rate-1/3 interleaved CNN encoder; conv stack type follows args.encoder.

    Args:
        args: configuration namespace (encoder, enc_num_layer, enc_num_unit,
            enc_kernel_size, code_rate_k, ...).
        p_array: interleaving permutation.
    """
    super(ENC_interCNN, self).__init__(args)
    self.args = args

    # Encoder: plain conv stacks for the rate-3 CNN encoder, dense otherwise.
    # Both original branches built the stacks with identical hyperparameters,
    # so only the class differs.
    conv_cls = (SameShapeConv1d
                if self.args.encoder == 'TurboAE_rate3_cnn'
                else DenseSameShapeConv1d)

    def _branch():
        return conv_cls(num_layer=args.enc_num_layer,
                        in_channels=args.code_rate_k,
                        out_channels=args.enc_num_unit,
                        kernel_size=args.enc_kernel_size)

    self.enc_cnn_1 = _branch()
    self.enc_cnn_2 = _branch()
    self.enc_cnn_3 = _branch()

    self.enc_linear_1 = torch.nn.Linear(args.enc_num_unit, 1)
    self.enc_linear_2 = torch.nn.Linear(args.enc_num_unit, 1)
    self.enc_linear_3 = torch.nn.Linear(args.enc_num_unit, 1)

    self.interleaver = Interleaver(args, p_array)